aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/media/video/samsung
diff options
context:
space:
mode:
authorcodeworkx <daniel.hillenbrand@codeworkx.de>2012-06-02 13:09:29 +0200
committercodeworkx <daniel.hillenbrand@codeworkx.de>2012-06-02 13:09:29 +0200
commitc6da2cfeb05178a11c6d062a06f8078150ee492f (patch)
treef3b4021d252c52d6463a9b3c1bb7245e399b009c /drivers/media/video/samsung
parentc6d7c4dbff353eac7919342ae6b3299a378160a6 (diff)
downloadkernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.zip
kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.tar.gz
kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.tar.bz2
samsung update 1
Diffstat (limited to 'drivers/media/video/samsung')
-rw-r--r--drivers/media/video/samsung/Kconfig121
-rw-r--r--drivers/media/video/samsung/Makefile18
-rw-r--r--drivers/media/video/samsung/fimc/Kconfig59
-rw-r--r--drivers/media/video/samsung/fimc/Makefile22
-rw-r--r--drivers/media/video/samsung/fimc/csis.c468
-rw-r--r--drivers/media/video/samsung/fimc/csis.h60
-rw-r--r--drivers/media/video/samsung/fimc/fimc-ipc.h146
-rw-r--r--drivers/media/video/samsung/fimc/fimc.h770
-rw-r--r--drivers/media/video/samsung/fimc/fimc_capture.c3171
-rw-r--r--drivers/media/video/samsung/fimc/fimc_capture_u1.c2317
-rw-r--r--drivers/media/video/samsung/fimc/fimc_dev.c2378
-rw-r--r--drivers/media/video/samsung/fimc/fimc_dev_u1.c2341
-rw-r--r--drivers/media/video/samsung/fimc/fimc_output.c3274
-rw-r--r--drivers/media/video/samsung/fimc/fimc_overlay.c287
-rw-r--r--drivers/media/video/samsung/fimc/fimc_regs.c2119
-rw-r--r--drivers/media/video/samsung/fimc/fimc_v4l2.c327
-rw-r--r--drivers/media/video/samsung/fimc/ipc.c472
-rw-r--r--drivers/media/video/samsung/fimc/ipc_table.h314
-rw-r--r--drivers/media/video/samsung/fimg2d3x-exynos4/Kconfig22
-rw-r--r--drivers/media/video/samsung/fimg2d3x-exynos4/Makefile17
-rw-r--r--drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d.h397
-rw-r--r--drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d3x_regs.c376
-rw-r--r--drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d3x_regs.h278
-rw-r--r--drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_cache.c379
-rw-r--r--drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_core.c318
-rw-r--r--drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_dev.c609
-rw-r--r--drivers/media/video/samsung/fimg2d3x/Kconfig22
-rw-r--r--drivers/media/video/samsung/fimg2d3x/Makefile17
-rw-r--r--drivers/media/video/samsung/fimg2d3x/fimg2d.h397
-rw-r--r--drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.c376
-rw-r--r--drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.h278
-rw-r--r--drivers/media/video/samsung/fimg2d3x/fimg2d_cache.c379
-rw-r--r--drivers/media/video/samsung/fimg2d3x/fimg2d_core.c314
-rw-r--r--drivers/media/video/samsung/fimg2d3x/fimg2d_dev.c609
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/Kconfig23
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/Makefile18
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d.h509
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x.h225
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_blt.c330
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_hw.c839
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_regs.h460
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_cache.c168
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_cache.h96
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_clk.c170
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_clk.h26
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.c368
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.h42
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_drv.c515
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_helper.c182
-rw-r--r--drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_helper.h151
-rw-r--r--drivers/media/video/samsung/fimg2d4x/Kconfig23
-rw-r--r--drivers/media/video/samsung/fimg2d4x/Makefile18
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d.h514
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d4x.h225
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d4x_blt.c334
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d4x_hw.c839
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d4x_regs.h460
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d_cache.c168
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d_cache.h101
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d_clk.c170
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d_clk.h26
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.c551
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.h46
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d_drv.c507
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d_helper.c118
-rw-r--r--drivers/media/video/samsung/fimg2d4x/fimg2d_helper.h117
-rw-r--r--drivers/media/video/samsung/jpeg/Kconfig15
-rw-r--r--drivers/media/video/samsung/jpeg/Makefile10
-rw-r--r--drivers/media/video/samsung/jpeg/jpeg_conf.h363
-rw-r--r--drivers/media/video/samsung/jpeg/jpeg_core.c126
-rw-r--r--drivers/media/video/samsung/jpeg/jpeg_core.h138
-rw-r--r--drivers/media/video/samsung/jpeg/jpeg_dev.c556
-rw-r--r--drivers/media/video/samsung/jpeg/jpeg_dev.h36
-rw-r--r--drivers/media/video/samsung/jpeg/jpeg_mem.c131
-rw-r--r--drivers/media/video/samsung/jpeg/jpeg_mem.h66
-rw-r--r--drivers/media/video/samsung/jpeg/jpeg_regs.c289
-rw-r--r--drivers/media/video/samsung/jpeg/jpeg_regs.h46
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/Kconfig29
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/Makefile8
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_conf.h92
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_core.h278
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_dec.c542
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_dev.c1122
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_dev.h26
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_enc.c572
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_mem.c79
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_mem.h39
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_regs.c629
-rw-r--r--drivers/media/video/samsung/jpeg_v2x/jpeg_regs.h51
-rw-r--r--drivers/media/video/samsung/mali/Kconfig63
-rw-r--r--drivers/media/video/samsung/mali/Makefile282
-rw-r--r--drivers/media/video/samsung/mali/Makefile.common59
l---------drivers/media/video/samsung/mali/arch1
-rw-r--r--drivers/media/video/samsung/mali/arch-orion-m400/config.h154
-rw-r--r--drivers/media/video/samsung/mali/common/mali_block_allocator.c391
-rw-r--r--drivers/media/video/samsung/mali/common/mali_block_allocator.h18
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_GP2.c1493
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_MALI200.c1304
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_common.h182
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_core.c911
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_core.h134
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.c183
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.h99
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_gp.h21
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_l2_cache.c517
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_l2_cache.h25
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_mem.h17
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_mem_buddy.c1427
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_mem_mmu.c3157
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_mem_mmu.h75
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_mem_os.c346
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_mem_os.h37
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.c363
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.h148
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_pp.h21
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_profiling.c364
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_profiling.h127
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_rendercore.c2031
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_rendercore.h565
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_session_manager.h19
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_subsystem.h107
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_utilization.c210
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_utilization.h44
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_vsync.c49
-rw-r--r--drivers/media/video/samsung/mali/common/mali_osk.h1716
-rw-r--r--drivers/media/video/samsung/mali/common/mali_osk_bitops.h166
-rw-r--r--drivers/media/video/samsung/mali/common/mali_osk_list.h184
-rw-r--r--drivers/media/video/samsung/mali/common/mali_osk_mali.h252
-rw-r--r--drivers/media/video/samsung/mali/common/mali_uk_types.h1176
-rw-r--r--drivers/media/video/samsung/mali/common/mali_ukk.h723
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm.c1024
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm.h348
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_pmu.c350
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_pmu.h86
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy.c243
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy.h155
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_alwayson.c80
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_alwayson.h62
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_jobcontrol.c470
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_jobcontrol.h80
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_state.c716
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_state.h290
-rw-r--r--drivers/media/video/samsung/mali/common/pmm/mali_pmm_system.h61
-rw-r--r--drivers/media/video/samsung/mali/linux/license/gpl/mali_kernel_license.h31
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_device_pause_resume.c72
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_device_pause_resume.h19
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_kernel_ioctl.h79
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_kernel_linux.c594
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_kernel_linux.h41
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_kernel_pm.c709
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_kernel_pm.h20
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_kernel_sysfs.c401
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_kernel_sysfs.h30
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_linux_pm.h53
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_linux_pm_testsuite.h37
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_linux_trace.h93
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_atomics.c55
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_indir_mmap.c86
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_indir_mmap.h48
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_irq.c218
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_locks.c249
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_low_level_mem.c599
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_mali.c52
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_math.c22
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_memory.c61
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_misc.c63
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_notification.c199
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_pm.c210
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_profiling.c47
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_specific.h32
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_time.c51
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_osk_timers.c65
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_ukk_core.c142
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_ukk_gp.c128
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_ukk_mem.c336
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_ukk_pp.c103
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_ukk_profiling.c183
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_ukk_vsync.c41
-rw-r--r--drivers/media/video/samsung/mali/linux/mali_ukk_wrappers.h75
-rw-r--r--drivers/media/video/samsung/mali/platform/default/mali_platform.c41
-rw-r--r--drivers/media/video/samsung/mali/platform/mali_platform.h167
-rw-r--r--drivers/media/video/samsung/mali/platform/orion-m400/mali_platform.c658
-rw-r--r--drivers/media/video/samsung/mali/platform/orion-m400/mali_platform_dvfs.c414
-rw-r--r--drivers/media/video/samsung/mali/platform/pegasus-m400/mali_platform.c756
-rw-r--r--drivers/media/video/samsung/mali/platform/pegasus-m400/mali_platform_dvfs.c722
-rw-r--r--drivers/media/video/samsung/mali/regs/mali_200_regs.h170
-rw-r--r--drivers/media/video/samsung/mali/regs/mali_gp_regs.h219
-rw-r--r--drivers/media/video/samsung/mali/timestamp-arm11-cc/mali_timestamp.c13
-rw-r--r--drivers/media/video/samsung/mali/timestamp-arm11-cc/mali_timestamp.h48
-rw-r--r--drivers/media/video/samsung/mali/timestamp-default/mali_timestamp.c13
-rw-r--r--drivers/media/video/samsung/mali/timestamp-default/mali_timestamp.h26
-rw-r--r--drivers/media/video/samsung/mfc5x/Kconfig39
-rw-r--r--drivers/media/video/samsung/mfc5x/Makefile19
-rw-r--r--drivers/media/video/samsung/mfc5x/SsbSipMfcApi.h435
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc.h101
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_buf.c1037
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_buf.h195
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_cmd.c504
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_cmd.h90
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_ctrl.c186
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_ctrl.h22
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_dec.c2416
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_dec.h223
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_dev.c1684
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_dev.h130
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_enc.c1792
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_enc.h115
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_errno.h79
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_inst.c258
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_inst.h182
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_interface.h505
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_log.h59
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_mem.c944
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_mem.h76
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_pm.c198
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_pm.h29
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_reg.c32
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_reg.h21
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_shm.c88
-rw-r--r--drivers/media/video/samsung/mfc5x/mfc_shm.h82
-rw-r--r--drivers/media/video/samsung/tsi/Kconfig19
-rw-r--r--drivers/media/video/samsung/tsi/Makefile4
-rw-r--r--drivers/media/video/samsung/tsi/s3c-tsi.c959
-rw-r--r--drivers/media/video/samsung/tvout/Kconfig174
-rw-r--r--drivers/media/video/samsung/tvout/Makefile27
-rw-r--r--drivers/media/video/samsung/tvout/hw_if/Makefile19
-rw-r--r--drivers/media/video/samsung/tvout/hw_if/cec.c262
-rw-r--r--drivers/media/video/samsung/tvout/hw_if/hdcp.c1123
-rw-r--r--drivers/media/video/samsung/tvout/hw_if/hdmi.c2182
-rw-r--r--drivers/media/video/samsung/tvout/hw_if/hw_if.h1005
-rw-r--r--drivers/media/video/samsung/tvout/hw_if/mixer.c874
-rw-r--r--drivers/media/video/samsung/tvout/hw_if/sdo.c1122
-rw-r--r--drivers/media/video/samsung/tvout/hw_if/vp.c747
-rw-r--r--drivers/media/video/samsung/tvout/s5p_mixer_ctrl.c1146
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvif_ctrl.c2952
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout.c666
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout_cec.c428
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout_common_lib.c183
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout_common_lib.h268
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout_ctrl.h132
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout_fb.c754
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout_fb.h21
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout_hpd.c672
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout_v4l2.c1427
-rw-r--r--drivers/media/video/samsung/tvout/s5p_tvout_v4l2.h19
-rw-r--r--drivers/media/video/samsung/tvout/s5p_vp_ctrl.c742
-rw-r--r--drivers/media/video/samsung/ump/Kconfig51
-rw-r--r--drivers/media/video/samsung/ump/Makefile94
-rw-r--r--drivers/media/video/samsung/ump/Makefile.common17
-rw-r--r--drivers/media/video/samsung/ump/Makefile_backup80
l---------drivers/media/video/samsung/ump/arch1
-rw-r--r--drivers/media/video/samsung/ump/arch-marcopolo-vega1-m400/config.h18
-rwxr-xr-xdrivers/media/video/samsung/ump/arch-marcopolo-vega1-m400/config.h.org87
-rw-r--r--drivers/media/video/samsung/ump/arch-orion-m400/config.h22
-rw-r--r--drivers/media/video/samsung/ump/arch-pb-virtex5/config.h18
-rw-r--r--drivers/media/video/samsung/ump/common/ump_kernel_api.c346
-rw-r--r--drivers/media/video/samsung/ump/common/ump_kernel_common.c415
-rw-r--r--drivers/media/video/samsung/ump/common/ump_kernel_common.h126
-rw-r--r--drivers/media/video/samsung/ump/common/ump_kernel_descriptor_mapping.c166
-rw-r--r--drivers/media/video/samsung/ump/common/ump_kernel_descriptor_mapping.h91
-rw-r--r--drivers/media/video/samsung/ump/common/ump_kernel_memory_backend.h52
-rw-r--r--drivers/media/video/samsung/ump/common/ump_kernel_ref_drv.c258
-rw-r--r--drivers/media/video/samsung/ump/common/ump_kernel_types.h35
-rw-r--r--drivers/media/video/samsung/ump/common/ump_osk.h50
-rw-r--r--drivers/media/video/samsung/ump/common/ump_uk_types.h167
-rw-r--r--drivers/media/video/samsung/ump/common/ump_ukk.h53
-rw-r--r--drivers/media/video/samsung/ump/include/ump_kernel_interface.h236
-rw-r--r--drivers/media/video/samsung/ump/include/ump_kernel_interface_ref_drv.h35
-rw-r--r--drivers/media/video/samsung/ump/include/ump_kernel_interface_vcm.h37
-rw-r--r--drivers/media/video/samsung/ump/include/ump_kernel_platform.h48
-rw-r--r--drivers/media/video/samsung/ump/linux/license/gpl/ump_kernel_license.h31
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_ioctl.h56
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_kernel_linux.c472
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_kernel_linux.h18
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_dedicated.c274
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_dedicated.h23
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_os.c260
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_os.h23
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_vcm.c292
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_vcm.h22
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_memory_backend.c78
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_osk_atomics.c27
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c441
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_osk_misc.c37
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_ukk_ref_wrappers.c315
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_ukk_ref_wrappers.h43
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_ukk_wrappers.c173
-rw-r--r--drivers/media/video/samsung/ump/linux/ump_ukk_wrappers.h41
288 files changed, 103248 insertions, 0 deletions
diff --git a/drivers/media/video/samsung/Kconfig b/drivers/media/video/samsung/Kconfig
new file mode 100644
index 0000000..8421224
--- /dev/null
+++ b/drivers/media/video/samsung/Kconfig
@@ -0,0 +1,121 @@
+config VIDEO_SAMSUNG
+ bool "Samsung Multimedia Devices"
+ depends on VIDEO_CAPTURE_DRIVERS && VIDEO_V4L2
+ select VIDEO_FIXED_MINOR_RANGES
+ default n
+ help
+ This is a representative video4linux configuration for Samsung multimedia devices.
+
+config VIDEO_SAMSUNG_V4L2
+ bool "V4L2 API for digital camera to be contributed by samsung"
+ depends on VIDEO_DEV && VIDEO_SAMSUNG
+ default n
+ help
+	  This feature provides the new V4L2 APIs for digital cameras
+
+if CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
+ source "drivers/media/video/samsung/fimc/Kconfig"
+ source "drivers/media/video/samsung/tvout/Kconfig"
+ source "drivers/media/video/samsung/mfc5x/Kconfig"
+ source "drivers/media/video/samsung/ump/Kconfig"
+ source "drivers/media/video/samsung/tsi/Kconfig"
+endif
+if (CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412) && !SLP
+ source "drivers/media/video/samsung/mali/Kconfig"
+endif
+
+config VIDEO_FIMG2D
+ bool
+ depends on VIDEO_SAMSUNG
+ default n
+
+config VIDEO_FIMG2D_DEBUG
+ bool
+ depends on VIDEO_FIMG2D
+ default n
+
+if ARCH_EXYNOS5
+source "drivers/media/video/samsung/fimg2d3x/Kconfig"
+source "drivers/media/video/samsung/fimg2d4x/Kconfig"
+endif
+
+if ARCH_EXYNOS4
+source "drivers/media/video/samsung/fimg2d3x-exynos4/Kconfig"
+source "drivers/media/video/samsung/fimg2d4x-exynos4/Kconfig"
+endif
+
+
+if CPU_EXYNOS4210
+ source "drivers/media/video/samsung/jpeg/Kconfig"
+endif
+
+if CPU_EXYNOS4212 || CPU_EXYNOS4412 || CPU_EXYNOS5250
+ source "drivers/media/video/samsung/jpeg_v2x/Kconfig"
+endif
+
+if VIDEO_SAMSUNG
+comment "Reserved memory configurations"
+config VIDEO_SAMSUNG_USE_DMA_MEM
+	bool "Use common contiguous dma memory for Multimedia devices"
+ depends on SLP
+
+config VIDEO_SAMSUNG_MEMSIZE_DMA
+ int "Memory size in kbytes for DMA"
+ depends on VIDEO_SAMSUNG_USE_DMA_MEM
+ default "5120"
+
+config VIDEO_SAMSUNG_MEMSIZE_FIMC0
+ int "Memory size in kbytes for FIMC0"
+ depends on VIDEO_FIMC || VIDEO_SAMSUNG_S5P_FIMC
+ default "5120"
+
+config VIDEO_SAMSUNG_MEMSIZE_FIMC1
+ int "Memory size in kbytes for FIMC1"
+ depends on VIDEO_FIMC || VIDEO_SAMSUNG_S5P_FIMC
+ default "5120"
+
+config VIDEO_SAMSUNG_MEMSIZE_FIMC2
+ int "Memory size in kbytes for FIMC2"
+ depends on VIDEO_FIMC || VIDEO_SAMSUNG_S5P_FIMC
+ default "5120"
+
+config VIDEO_SAMSUNG_MEMSIZE_FIMC3
+ int "Memory size in kbytes for FIMC3"
+ depends on VIDEO_FIMC || VIDEO_SAMSUNG_S5P_FIMC
+ default "0"
+
+config VIDEO_SAMSUNG_MEMSIZE_MFC
+ int "Memory size in kbytes for MFC"
+ depends on VIDEO_MFC5X && (VIDEO_MFC_MEM_PORT_COUNT = 1) && (!EXYNOS_CONTENT_PATH_PROTECTION)
+ default "65536"
+
+config VIDEO_SAMSUNG_MEMSIZE_MFC0
+ int "Memory size in kbytes for MFC port0"
+ depends on VIDEO_MFC5X && (VIDEO_MFC_MEM_PORT_COUNT = 2) && (!EXYNOS_CONTENT_PATH_PROTECTION)
+ default "41984"
+
+config VIDEO_SAMSUNG_MEMSIZE_MFC1
+ int "Memory size in kbytes for MFC port1"
+ depends on VIDEO_MFC5X && (VIDEO_MFC_MEM_PORT_COUNT = 2) && (!EXYNOS_CONTENT_PATH_PROTECTION)
+ default "41984"
+
+config VIDEO_SAMSUNG_MEMSIZE_MFC_SECURE
+ int "Memory size in kbytes for MFC Secure"
+ depends on VIDEO_MFC5X && EXYNOS_CONTENT_PATH_PROTECTION
+ default "41984"
+
+config VIDEO_SAMSUNG_MEMSIZE_MFC_NORMAL
+ int "Memory size in kbytes for MFC Normal"
+ depends on VIDEO_MFC5X && EXYNOS_CONTENT_PATH_PROTECTION
+ default "41984"
+
+config VIDEO_SAMSUNG_MEMSIZE_JPEG
+ int "Memory size in kbytes for JPEG"
+ depends on VIDEO_JPEG || (VIDEO_JPEG_V2X && (CPU_EXYNOS4212 || CPU_EXYNOS4412))
+ default "40960"
+
+config VIDEO_SAMSUNG_MEMSIZE_TVOUT
+ int "Memory size in kbytes for TVOUT"
+ depends on VIDEO_TVOUT
+ default "16384"
+endif
diff --git a/drivers/media/video/samsung/Makefile b/drivers/media/video/samsung/Makefile
new file mode 100644
index 0000000..301cd9a
--- /dev/null
+++ b/drivers/media/video/samsung/Makefile
@@ -0,0 +1,18 @@
+obj-$(CONFIG_VIDEO_FIMC) += fimc/
+obj-$(CONFIG_VIDEO_JPEG) += jpeg/
+obj-$(CONFIG_VIDEO_JPEG_V2X) += jpeg_v2x/
+obj-$(CONFIG_VIDEO_TVOUT) += tvout/
+obj-$(CONFIG_VIDEO_MFC5X) += mfc5x/
+
+ifeq ($(CONFIG_ARCH_EXYNOS4), y)
+obj-$(CONFIG_VIDEO_FIMG2D3X) += fimg2d3x-exynos4/
+obj-$(CONFIG_VIDEO_FIMG2D4X) += fimg2d4x-exynos4/
+else
+obj-$(CONFIG_VIDEO_FIMG2D3X) += fimg2d3x/
+obj-$(CONFIG_VIDEO_FIMG2D4X) += fimg2d4x/
+endif
+obj-$(CONFIG_VIDEO_UMP) += ump/
+obj-$(CONFIG_VIDEO_TSI) += tsi/
+obj-$(CONFIG_VIDEO_MALI400MP) += mali/
+
+EXTRA_CFLAGS += -Idrivers/media/video
diff --git a/drivers/media/video/samsung/fimc/Kconfig b/drivers/media/video/samsung/fimc/Kconfig
new file mode 100644
index 0000000..68f0b14
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/Kconfig
@@ -0,0 +1,59 @@
+config VIDEO_FIMC
+ bool "Samsung Camera Interface (FIMC) driver"
+ depends on VIDEO_SAMSUNG && ARCH_EXYNOS4
+ select VIDEO_IPC if ARCH_S5PV210
+ default n
+ help
+ This is a video4linux driver for Samsung FIMC device.
+
+choice
+depends on VIDEO_FIMC
+prompt "Select CSC Range config"
+default VIDEO_FIMC_RANGE_NARROW
+config VIDEO_FIMC_RANGE_NARROW
+ bool "Narrow"
+ depends on VIDEO_FIMC && (ARCH_S5PV210 || ARCH_EXYNOS4)
+ ---help---
+ RGB <-> YUV Color Conversion Narrow Range Equation
+
+config VIDEO_FIMC_RANGE_WIDE
+ bool "Wide"
+ depends on VIDEO_FIMC && (ARCH_S5PV210 || ARCH_EXYNOS4)
+ ---help---
+ RGB <-> YUV Color Conversion Wide Range Equation
+endchoice
+
+config VIDEO_IPC
+ bool
+
+config VIDEO_FIMC_DEBUG
+ bool "FIMC driver debug messages"
+ depends on VIDEO_FIMC
+
+config VIDEO_FIMC_MIPI
+ bool "MIPI-CSI2 Slave Interface support"
+ depends on VIDEO_FIMC && (ARCH_S5PV210 || ARCH_EXYNOS4)
+ default y
+
+config VIDEO_FIMC_MIPI_IRQ_DEBUG
+ bool "FIMC MIPI Error interrupt message"
+ depends on VIDEO_FIMC_MIPI
+ default n
+
+choice
+depends on VIDEO_FIMC
+prompt "Select Output Mode"
+default VIDEO_FIMC_DMA_AUTO
+config VIDEO_FIMC_DMA_AUTO
+ bool "DMA AUTO MODE"
+ depends on VIDEO_FIMC
+ help
+ This enables support for FIMC1 DMA AUTO mode
+
+config VIDEO_FIMC_FIFO
+ bool "FIFO MODE"
+ depends on VIDEO_FIMC
+ help
+ This enables support for FIMC1 FIFO mode
+
+endchoice
diff --git a/drivers/media/video/samsung/fimc/Makefile b/drivers/media/video/samsung/fimc/Makefile
new file mode 100644
index 0000000..75d8750
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/Makefile
@@ -0,0 +1,22 @@
+obj-$(CONFIG_VIDEO_FIMC) += fimc_v4l2.o fimc_output.o fimc_overlay.o fimc_regs.o
+
+obj-$(CONFIG_CPU_EXYNOS4412) += fimc_capture.o fimc_dev.o
+obj-$(CONFIG_CPU_EXYNOS4212) += fimc_capture.o fimc_dev.o
+
+obj-$(CONFIG_CPU_EXYNOS4210) += fimc_capture_u1.o fimc_dev_u1.o
+obj-$(CONFIG_VIDEO_FIMC_MIPI) += csis.o
+obj-$(CONFIG_CPU_S5PV210) += ipc.o
+
+ifeq ($(CONFIG_CPU_S5PV210),y)
+EXTRA_CFLAGS += -DCONFIG_MIPI_CSI_ADV_FEATURE
+endif
+
+ifeq ($(CONFIG_ARCH_EXYNOS4),y)
+EXTRA_CFLAGS += -DCONFIG_MIPI_CSI_ADV_FEATURE
+endif
+
+EXTRA_CFLAGS += -Idrivers/media/video
+
+ifeq ($(CONFIG_VIDEO_FIMC_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/media/video/samsung/fimc/csis.c b/drivers/media/video/samsung/fimc/csis.c
new file mode 100644
index 0000000..1e27503
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/csis.c
@@ -0,0 +1,468 @@
+/* linux/drivers/media/video/samsung/csis.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co,. Ltd.
+ * http://www.samsung.com/
+ *
+ * MIPI-CSI2 Support file for FIMC driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/videodev2_exynos_media.h>
+#include <linux/videodev2_exynos_camera.h>
+
+#include <linux/io.h>
+#include <linux/memory.h>
+#include <linux/slab.h>
+#include <plat/clock.h>
+#include <plat/regs-csis.h>
+#include <plat/csis.h>
+
+#include "csis.h"
+/* Count of error interrupts logged so far; reset in s3c_csis_start() */
+static s32 err_print_cnt;
+
+/* Per-channel driver state, indexed by platform device id */
+static struct s3c_csis_info *s3c_csis[S3C_CSIS_CH_NUM];
+
+/*
+ * Allocate and initialise the per-channel state slot for this platform
+ * device.  Returns 0 on success or -ENOMEM (in which case the slot stays
+ * NULL — callers must check before dereferencing s3c_csis[pdev->id]).
+ */
+static int s3c_csis_set_info(struct platform_device *pdev)
+{
+ s3c_csis[pdev->id] = (struct s3c_csis_info *)
+ kzalloc(sizeof(struct s3c_csis_info), GFP_KERNEL);
+ if (!s3c_csis[pdev->id]) {
+ err("no memory for configuration\n");
+ return -ENOMEM;
+ }
+
+ /* e.g. "s3c-csis0"; used later as the requested IRQ name */
+ sprintf(s3c_csis[pdev->id]->name, "%s%d", S3C_CSIS_NAME, pdev->id);
+ s3c_csis[pdev->id]->nr_lanes = S3C_CSIS_NR_LANES;
+
+ return 0;
+}
+
+/* Assert the software-reset bit in the CONTROL register */
+static void s3c_csis_reset(struct platform_device *pdev)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+ cfg |= S3C_CSIS_CONTROL_RESET;
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+}
+
+/*
+ * Program the number of active MIPI data lanes (1..4) into the CONFIG
+ * register.  An unsupported lane count is rejected WITHOUT touching the
+ * hardware — the original cleared the lane field and wrote the register
+ * back even on invalid input, silently programming a bogus lane setup.
+ */
+static void s3c_csis_set_nr_lanes(struct platform_device *pdev, int lanes)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_CONFIG);
+ cfg &= ~S3C_CSIS_CONFIG_NR_LANE_MASK;
+
+ switch (lanes) {
+ case 1:
+ cfg |= S3C_CSIS_CONFIG_NR_LANE_1;
+ break;
+ case 2:
+ cfg |= S3C_CSIS_CONFIG_NR_LANE_2;
+ break;
+ case 3:
+ cfg |= S3C_CSIS_CONFIG_NR_LANE_3;
+ break;
+ case 4:
+ cfg |= S3C_CSIS_CONFIG_NR_LANE_4;
+ break;
+ default:
+ err("%d is not supported lane\n", lanes);
+ return; /* leave the register unchanged */
+ }
+
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_CONFIG);
+}
+
+/* Unmask every frame-boundary (even/odd, before/after) and error
+ * interrupt source in the INTMSK register. */
+static void s3c_csis_enable_interrupt(struct platform_device *pdev)
+{
+ u32 cfg = 0;
+
+ /* enable all interrupts */
+ cfg |= S3C_CSIS_INTMSK_EVEN_BEFORE_ENABLE | \
+ S3C_CSIS_INTMSK_EVEN_AFTER_ENABLE | \
+ S3C_CSIS_INTMSK_ODD_BEFORE_ENABLE | \
+ S3C_CSIS_INTMSK_ODD_AFTER_ENABLE | \
+ S3C_CSIS_INTMSK_ERR_SOT_HS_ENABLE | \
+ S3C_CSIS_INTMSK_ERR_LOST_FS_ENABLE | \
+ S3C_CSIS_INTMSK_ERR_LOST_FE_ENABLE | \
+ S3C_CSIS_INTMSK_ERR_OVER_ENABLE |\
+ S3C_CSIS_INTMSK_ERR_ECC_ENABLE | \
+ S3C_CSIS_INTMSK_ERR_CRC_ENABLE | \
+ S3C_CSIS_INTMSK_ERR_ID_ENABLE;
+
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_INTMSK);
+}
+
+/* Mask every interrupt source (INTMSK = 0) */
+static void s3c_csis_disable_interrupt(struct platform_device *pdev)
+{
+ /* disable all interrupts */
+ writel(0, s3c_csis[pdev->id]->regs + S3C_CSIS_INTMSK);
+}
+
+/* Set the ENABLE bit: start the CSIS receiver core */
+static void s3c_csis_system_on(struct platform_device *pdev)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+ cfg |= S3C_CSIS_CONTROL_ENABLE;
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+}
+
+/* Clear the ENABLE bit: stop the CSIS receiver core */
+static void s3c_csis_system_off(struct platform_device *pdev)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+ cfg &= ~S3C_CSIS_CONTROL_ENABLE;
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+}
+
+/* Enable the MIPI D-PHY for this channel */
+static void s3c_csis_phy_on(struct platform_device *pdev)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_DPHYCTRL);
+ cfg |= S3C_CSIS_DPHYCTRL_ENABLE;
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_DPHYCTRL);
+}
+
+/* Disable the MIPI D-PHY for this channel */
+static void s3c_csis_phy_off(struct platform_device *pdev)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_DPHYCTRL);
+ cfg &= ~S3C_CSIS_DPHYCTRL_ENABLE;
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_DPHYCTRL);
+}
+
+#ifdef CONFIG_MIPI_CSI_ADV_FEATURE
+/* Latch the shadow (double-buffered) configuration registers into the
+ * active set; call after reprogramming format/resolution. */
+static void s3c_csis_update_shadow(struct platform_device *pdev)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+ cfg |= S3C_CSIS_CONTROL_UPDATE_SHADOW;
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+}
+
+/* Select output data alignment: 24-bit when @align == 24, otherwise
+ * any other value falls back to 32-bit. */
+static void s3c_csis_set_data_align(struct platform_device *pdev, int align)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+ cfg &= ~S3C_CSIS_CONTROL_ALIGN_MASK;
+
+ if (align == 24)
+ cfg |= S3C_CSIS_CONTROL_ALIGN_24BIT;
+ else
+ cfg |= S3C_CSIS_CONTROL_ALIGN_32BIT;
+
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+}
+
+/* Select the wrapper clock source: external clock when @extclk is
+ * non-zero, otherwise PCLK. */
+static void s3c_csis_set_wclk(struct platform_device *pdev, int extclk)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+ cfg &= ~S3C_CSIS_CONTROL_WCLK_MASK;
+
+ if (extclk)
+ cfg |= S3C_CSIS_CONTROL_WCLK_EXTCLK;
+ else
+ cfg |= S3C_CSIS_CONTROL_WCLK_PCLK;
+
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_CONTROL);
+}
+
+/* Program the CSI-2 data-type code (enum mipi_format) into the CONFIG
+ * register's format field. */
+static void s3c_csis_set_format(struct platform_device *pdev, enum mipi_format fmt)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_CONFIG);
+ cfg &= ~S3C_CSIS_CONFIG_FORMAT_MASK;
+ cfg |= (fmt << S3C_CSIS_CONFIG_FORMAT_SHIFT);
+
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_CONFIG);
+}
+
+/* Program the expected frame resolution (pixels) into the RESOL
+ * register; width/height are packed into one word. */
+static void s3c_csis_set_resol(struct platform_device *pdev, int width, int height)
+{
+ u32 cfg = 0;
+
+ cfg |= width << S3C_CSIS_RESOL_HOR_SHIFT;
+ cfg |= height << S3C_CSIS_RESOL_VER_SHIFT;
+
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_RESOL);
+}
+
+/* Program the D-PHY HS-RX settle time (sensor/link dependent; value is
+ * supplied by the caller per attached sensor). */
+static void s3c_csis_set_hs_settle(struct platform_device *pdev, int settle)
+{
+ u32 cfg;
+
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_DPHYCTRL);
+ cfg &= ~S3C_CSIS_DPHYCTRL_HS_SETTLE_MASK;
+ cfg |= (settle << S3C_CSIS_DPHYCTRL_HS_SETTLE_SHIFT);
+
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_DPHYCTRL);
+}
+#endif
+
+/*
+ * Copy the most recently captured non-image (embedded) packet into
+ * @pktdata (CSIS_PKTSIZE bytes).  Returns 0 on success, -EINVAL when the
+ * channel id is out of range or was never probed — the original code
+ * dereferenced a NULL s3c_csis[csis_id] in that case.
+ */
+int s3c_csis_get_pkt(int csis_id, void *pktdata)
+{
+ if (csis_id < 0 || csis_id >= S3C_CSIS_CH_NUM || !s3c_csis[csis_id])
+ return -EINVAL;
+
+ memcpy(pktdata, s3c_csis[csis_id]->bufs.pktdata, CSIS_PKTSIZE);
+ return 0;
+}
+
+/* Enable/disable copying of non-image packet data in the IRQ handler.
+ * NOTE(review): no bounds/NULL check on csis_id — callers must only pass
+ * ids of successfully probed channels; verify against callers. */
+void s3c_csis_enable_pktdata(int csis_id, bool enable)
+{
+ s3c_csis[csis_id]->pktdata_enable = enable;
+}
+
+/*
+ * Bring one MIPI-CSIS channel up: enable clock and global PHY power,
+ * reset the block, program lane count (and, with ADV_FEATURE, settle
+ * time, alignment, wclk source, data format and resolution), then unmask
+ * interrupts and start the receiver and D-PHY.  (The unused local
+ * variable `i` from the original has been removed.)
+ */
+void s3c_csis_start(int csis_id, int lanes, int settle, int align, int width, \
+ int height, int pixel_format)
+{
+ struct platform_device *pdev = NULL;
+ struct s3c_platform_csis *pdata = NULL;
+
+ printk(KERN_INFO "csis width = %d, height = %d\n", width, height);
+
+ /* forget any stale packet data from a previous session */
+ memset(&s3c_csis[csis_id]->bufs, 0, sizeof(s3c_csis[csis_id]->bufs));
+
+ /* clock & power on */
+ pdev = to_platform_device(s3c_csis[csis_id]->dev);
+ pdata = to_csis_plat(&pdev->dev);
+
+ if (pdata->clk_on)
+ pdata->clk_on(to_platform_device(s3c_csis[csis_id]->dev),
+ &s3c_csis[csis_id]->clock);
+ if (pdata->cfg_phy_global)
+ pdata->cfg_phy_global(1);
+
+ s3c_csis_reset(pdev);
+ s3c_csis_set_nr_lanes(pdev, lanes);
+
+#ifdef CONFIG_MIPI_CSI_ADV_FEATURE
+ /* FIXME: how configure the followings with FIMC dynamically? */
+ s3c_csis_set_hs_settle(pdev, settle); /* s5k6aa */
+ s3c_csis_set_data_align(pdev, align);
+ s3c_csis_set_wclk(pdev, 1);
+ /* JPEG/interleaved streams are carried as user-defined packets;
+  * 10-bit Bayer as RAW10; everything else as YCbCr422 8-bit */
+ if (pixel_format == V4L2_PIX_FMT_JPEG ||
+ pixel_format == V4L2_PIX_FMT_INTERLEAVED) {
+ printk(KERN_INFO "%s V4L2_PIX_FMT_JPEG or INTERLEAVED\n", __func__);
+ s3c_csis_set_format(pdev, MIPI_USER_DEF_PACKET_1);
+ } else if (pixel_format == V4L2_PIX_FMT_SGRBG10)
+ s3c_csis_set_format(pdev, MIPI_CSI_RAW10);
+ else
+ s3c_csis_set_format(pdev, MIPI_CSI_YCBCR422_8BIT);
+ s3c_csis_set_resol(pdev, width, height);
+ s3c_csis_update_shadow(pdev);
+#endif
+
+ s3c_csis_enable_interrupt(pdev);
+ s3c_csis_system_on(pdev);
+ s3c_csis_phy_on(pdev);
+
+ err_print_cnt = 0;
+ info("Samsung MIPI-CSIS%d operation started\n", pdev->id);
+}
+
+/*
+ * Shut one CSIS channel down: mask interrupts, stop the receiver core
+ * and D-PHY, disable packet-data capture, then drop global PHY power and
+ * the clock — the reverse order of s3c_csis_start().
+ */
+void s3c_csis_stop(int csis_id)
+{
+ struct platform_device *pdev = NULL;
+ struct s3c_platform_csis *pdata = NULL;
+
+ pdev = to_platform_device(s3c_csis[csis_id]->dev);
+ pdata = to_csis_plat(&pdev->dev);
+
+ s3c_csis_disable_interrupt(pdev);
+ s3c_csis_system_off(pdev);
+ s3c_csis_phy_off(pdev);
+ s3c_csis[csis_id]->pktdata_enable = 0;
+
+ if (pdata->cfg_phy_global)
+ pdata->cfg_phy_global(0);
+
+ if (pdata->clk_off) {
+ /* clock may be NULL if clk_on() was never called or failed */
+ if (s3c_csis[csis_id]->clock != NULL)
+ pdata->clk_off(pdev, &s3c_csis[csis_id]->clock);
+ }
+}
+
+/*
+ * CSIS interrupt handler: acknowledge all pending sources and, when
+ * packet-data capture is enabled, copy the non-image packet that raised
+ * the interrupt into the per-channel buffer.
+ *
+ * Cleanups vs. the original: the no-op `cfg &= 0xFFFFFFFF;`, the unused
+ * `bufnum` local and the commented-out debug printks were removed, and
+ * the four identical even/odd memcpy branches were collapsed pairwise
+ * (EVEN_BEFORE/EVEN_AFTER both copied from PKTDATA_EVEN, and likewise
+ * for ODD) — behavior is unchanged.
+ */
+static irqreturn_t s3c_csis_irq(int irq, void *dev_id)
+{
+ u32 cfg;
+
+ struct platform_device *pdev = (struct platform_device *) dev_id;
+
+ /* just clearing the pends */
+ cfg = readl(s3c_csis[pdev->id]->regs + S3C_CSIS_INTSRC);
+ writel(cfg, s3c_csis[pdev->id]->regs + S3C_CSIS_INTSRC);
+
+#ifdef CONFIG_VIDEO_FIMC_MIPI_IRQ_DEBUG
+ if (unlikely(cfg & S3C_CSIS_INTSRC_ERR)) {
+ /* rate-limited: only the first 30 errors per session are logged */
+ if (err_print_cnt < 30) {
+ err("csis error interrupt[%d]: %#x\n", err_print_cnt, cfg);
+ err_print_cnt++;
+ }
+ }
+#endif
+ if (s3c_csis[pdev->id]->pktdata_enable) {
+ if (unlikely(cfg & S3C_CSIS_INTSRC_NON_IMAGE_DATA)) {
+ if (cfg & (S3C_CSIS_INTSRC_EVEN_BEFORE |
+ S3C_CSIS_INTSRC_EVEN_AFTER))
+ memcpy_fromio(s3c_csis[pdev->id]->bufs.pktdata,
+ (s3c_csis[pdev->id]->regs + S3C_CSIS_PKTDATA_EVEN), CSIS_PKTSIZE);
+ else if (cfg & (S3C_CSIS_INTSRC_ODD_BEFORE |
+ S3C_CSIS_INTSRC_ODD_AFTER))
+ memcpy_fromio(s3c_csis[pdev->id]->bufs.pktdata,
+ (s3c_csis[pdev->id]->regs + S3C_CSIS_PKTDATA_ODD), CSIS_PKTSIZE);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Probe one CSIS channel: allocate per-channel state, configure GPIOs,
+ * claim and map the register window, and install the IRQ handler.
+ */
+static int s3c_csis_probe(struct platform_device *pdev)
+{
+ struct s3c_platform_csis *pdata;
+ struct resource *res;
+ int ret = 0;
+
+ ret = s3c_csis_set_info(pdev);
+ /* bail out on failure: the original ignored this return value and
+  * went on to dereference a NULL s3c_csis[pdev->id] below */
+ if (ret)
+ return ret;
+
+ s3c_csis[pdev->id]->dev = &pdev->dev;
+
+ pdata = to_csis_plat(&pdev->dev);
+ if (pdata->cfg_gpio)
+ pdata->cfg_gpio();
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err("failed to get io memory region\n");
+ ret = -ENOENT;
+ goto err_info;
+ }
+
+ s3c_csis[pdev->id]->regs_res = request_mem_region(res->start,
+ resource_size(res), pdev->name);
+ if (!s3c_csis[pdev->id]->regs_res) {
+ err("failed to request io memory region\n");
+ ret = -ENOENT;
+ goto err_info;
+ }
+
+ /* ioremap for register block */
+ s3c_csis[pdev->id]->regs = ioremap(res->start, resource_size(res));
+ if (!s3c_csis[pdev->id]->regs) {
+ err("failed to remap io region\n");
+ ret = -ENXIO;
+ goto err_req_region;
+ }
+
+ /* irq: dev_id is pdev — s3c_csis_remove() must free with the same */
+ s3c_csis[pdev->id]->irq = platform_get_irq(pdev, 0);
+ ret = request_irq(s3c_csis[pdev->id]->irq, s3c_csis_irq, IRQF_DISABLED,
+ s3c_csis[pdev->id]->name, pdev);
+ if (ret) {
+ err("request_irq failed\n");
+ goto err_regs_unmap;
+ }
+
+ info("Samsung MIPI-CSIS%d driver probed successfully\n", pdev->id);
+
+ return 0;
+
+err_regs_unmap:
+ iounmap(s3c_csis[pdev->id]->regs);
+err_req_region:
+ release_resource(s3c_csis[pdev->id]->regs_res);
+ kfree(s3c_csis[pdev->id]->regs_res);
+err_info:
+ kfree(s3c_csis[pdev->id]);
+ s3c_csis[pdev->id] = NULL; /* don't leave a dangling slot behind */
+
+ return ret;
+}
+
+/*
+ * Tear one CSIS channel down and release all resources taken in probe.
+ * Fixes vs. the original: free_irq() is called with the same dev_id that
+ * request_irq() used (pdev, not s3c_csis[pdev->id]) — otherwise the
+ * handler is never found and released; the resource struct returned by
+ * request_mem_region() is kfree'd (release_resource() alone does not
+ * free it); and the channel slot is NULLed after freeing.
+ */
+static int s3c_csis_remove(struct platform_device *pdev)
+{
+ s3c_csis_stop(pdev->id);
+
+ free_irq(s3c_csis[pdev->id]->irq, pdev);
+ iounmap(s3c_csis[pdev->id]->regs);
+ release_resource(s3c_csis[pdev->id]->regs_res);
+ kfree(s3c_csis[pdev->id]->regs_res);
+
+ kfree(s3c_csis[pdev->id]);
+ s3c_csis[pdev->id] = NULL;
+
+ return 0;
+}
+
+/* sleep */
+/* PM sleep hook — currently a stub: clocks/power are handled through
+ * s3c_csis_stop() by the FIMC layer.  The unused `pdata` local from the
+ * original was removed. */
+int s3c_csis_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ return 0;
+}
+
+/* wakeup */
+/* PM wakeup hook — currently a stub: reinitialisation happens through
+ * s3c_csis_start() by the FIMC layer.  The unused `pdata` local from the
+ * original was removed. */
+int s3c_csis_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+/* Platform driver glue; the matching devices ("s3c-csis.<id>") are
+ * registered by machine/board code. */
+static struct platform_driver s3c_csis_driver = {
+ .probe = s3c_csis_probe,
+ .remove = s3c_csis_remove,
+ .suspend = s3c_csis_suspend,
+ .resume = s3c_csis_resume,
+ .driver = {
+ .name = "s3c-csis",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Module entry point: register the platform driver */
+static int s3c_csis_register(void)
+{
+ return platform_driver_register(&s3c_csis_driver);
+}
+
+/* Module exit point: unregister the platform driver */
+static void s3c_csis_unregister(void)
+{
+ platform_driver_unregister(&s3c_csis_driver);
+}
+
+module_init(s3c_csis_register);
+module_exit(s3c_csis_unregister);
+
+MODULE_AUTHOR("Jinsung, Yang <jsgood.yang@samsung.com>");
+MODULE_AUTHOR("Sewoon, Park <seuni.park@samsung.com>");
+MODULE_AUTHOR("Sungchun, Kang<sungchun.kang@samsung.com>");
+MODULE_DESCRIPTION("MIPI-CSI2 support for FIMC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/fimc/csis.h b/drivers/media/video/samsung/fimc/csis.h
new file mode 100644
index 0000000..fe0d689
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/csis.h
@@ -0,0 +1,60 @@
+/* linux/drivers/media/video/samsung/csis.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co,. Ltd.
+ * http://www.samsung.com/
+ *
+ * Header file for Samsung MIPI-CSI2 driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __CSIS_H
+#define __CSIS_H __FILE__
+
+#define S3C_CSIS_NAME "s3c-csis"
+/* default lane count used until the FIMC layer selects one */
+#define S3C_CSIS_NR_LANES 1
+
+/* size in bytes of one captured non-image packet (see pktdata below) */
+#define CSIS_PKTSIZE 0x1000
+
+/* Exynos4 has two CSIS channels, S5PV210 only one */
+#ifdef CONFIG_ARCH_EXYNOS4
+#define S3C_CSIS_CH_NUM 2
+#else
+#define S3C_CSIS_CH_NUM 1
+#endif
+
+#define info(args...) \
+ do { printk(KERN_INFO S3C_CSIS_NAME ": " args); } while (0)
+#define err(args...) \
+ do { printk(KERN_ERR S3C_CSIS_NAME ": " args); } while (0)
+
+/* MIPI CSI-2 data-type codes as programmed into S3C_CSIS_CONFIG */
+enum mipi_format {
+ MIPI_CSI_YCBCR422_8BIT = 0x1e,
+ MIPI_CSI_RAW8 = 0x2a,
+ MIPI_CSI_RAW10 = 0x2b,
+ MIPI_CSI_RAW12 = 0x2c,
+ MIPI_USER_DEF_PACKET_1 = 0x30, /* User defined Byte-based packet 1 */
+};
+
+/* Buffer for non-image packet data; 1024 u32 == CSIS_PKTSIZE bytes */
+struct csis_pkt_set {
+ int id;
+ u32 pktdata[1024];
+ struct list_head list;
+};
+
+/* Per-channel driver state (one per CSIS platform device) */
+struct s3c_csis_info {
+ char name[16]; /* e.g. "s3c-csis0", also the IRQ name */
+ struct device *dev;
+ struct clk *clock;
+ void __iomem *regs; /* mapped register base */
+ struct resource *regs_res;
+ int irq;
+ int nr_lanes;
+ bool pktdata_enable; /* copy non-image packets in the IRQ handler */
+
+ struct csis_pkt_set bufs;
+ spinlock_t csis_spinlock;
+};
+
+#endif /* __CSIS_H */
diff --git a/drivers/media/video/samsung/fimc/fimc-ipc.h b/drivers/media/video/samsung/fimc/fimc-ipc.h
new file mode 100644
index 0000000..8e85d6e
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc-ipc.h
@@ -0,0 +1,146 @@
+/* linux/drivers/media/video/samsung/fimc/fimc-ipc.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Header file for Samsung IPC driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMC_IPC_H
+#define __FIMC_IPC_H __FILE__
+
+#define IPC_NAME "s3c-ipc"
+#define IPC_CLK_NAME "ipc"
+
+#define OFF 0
+#define ON 1
+
+#define IN_SC_MAX_WIDTH 1024
+#define IN_SC_MAX_HEIGHT 768
+
+#define ipc_err(args...) do { printk(KERN_ERR IPC_NAME ": " args); } while (0)
+
+/* Generic on/off state for IPC sub-features */
+enum ipc_enoff {
+ DISABLED,
+ ENABLED
+};
+
+enum ipc_field_id {
+ IPC_TOP_FIELD,
+ IPC_BOTTOM_FIELD
+};
+
+/* Field-id source: generated internally or taken from the camera signal */
+enum ipc_field_id_sel {
+ INTERNAL,
+ CAM_FIELD_SIG
+};
+
+/* Field-id toggling: driven by software or toggled automatically */
+enum ipc_field_id_togl {
+ BYUSER,
+ AUTO
+};
+
+enum ipc_2d {
+ IPC_HDS, /* Horizontal Double scaling */
+ IPC_2D /* 2D IPC */
+};
+
+enum scan_mode {
+ PROGRESSIVE
+};
+
+/* Edge-enhancement strength applied by the post-processor */
+enum ipc_sharpness {
+ NO_EFFECT,
+ MIN_EDGE,
+ MODERATE_EDGE,
+ MAX_EDGE
+};
+
+enum ipc_pp_lineeq_val {
+ IPC_PP_LINEEQ_0 = 0,
+ IPC_PP_LINEEQ_1,
+ IPC_PP_LINEEQ_2,
+ IPC_PP_LINEEQ_3,
+ IPC_PP_LINEEQ_4,
+ IPC_PP_LINEEQ_5,
+ IPC_PP_LINEEQ_6,
+ IPC_PP_LINEEQ_7,
+ IPC_PP_LINEEQ_ALL
+};
+
+/* Horizontal post-processing scale ratios */
+enum ipc_filter_h_pp {
+ /* Don't change the order and the value */
+ IPC_PP_H_NORMAL = 0,
+ IPC_PP_H_8_9, /* 720 to 640 */
+ IPC_PP_H_1_2,
+ IPC_PP_H_1_3,
+ IPC_PP_H_1_4
+};
+
+/* Vertical post-processing scale ratios */
+enum ipc_filter_v_pp{
+ /* Don't change the order and the value */
+ IPC_PP_V_NORMAL = 0,
+ IPC_PP_V_5_6, /* PAL to NTSC */
+ IPC_PP_V_3_4,
+ IPC_PP_V_1_2,
+ IPC_PP_V_1_3,
+ IPC_PP_V_1_4
+};
+
+/* Source image description: base address, full image size and the
+ * cropped window position/size within it */
+struct ipc_source{
+ u32 srcstaddr;
+ u32 imghsz;
+ u32 imgvsz;
+ u32 srcxpos;
+ u32 srcypos;
+ u32 srchsz;
+ u32 srcvsz;
+ u32 srcnumoffrm;
+ u32 lastfrmbufidx;
+};
+
+/* Destination window description and output scan mode */
+struct ipc_destination {
+ enum scan_mode scanmode;
+ u32 orgdsthsz;
+ u32 orgdstvsz;
+ u32 dstxpos;
+ u32 dstypos;
+ u32 dsthsz;
+ u32 dstvsz;
+};
+
+struct ipc_controlvariable {
+ u32 modeval;
+ u32 lineeqval;
+ u32 scanconversionidx;
+};
+
+/* Picture-enhancement parameters (per-line-equation contrast and
+ * brightness, plus global saturation/sharpness/noise settings) */
+struct ipc_enhancingvariable {
+ u32 contrast[8];
+ u32 brightness[8];
+ u32 saturation;
+ enum ipc_sharpness sharpness;
+ u32 thhnoise;
+ u32 brightoffset;
+};
+
+/* Top-level IPC device context */
+struct ipc_control {
+ char name[16];
+ void __iomem *regs; /* mapped register base */
+ struct clk *clk;
+ struct device *dev;
+ struct ipc_source src;
+ struct ipc_destination dst;
+ struct ipc_controlvariable control_var;
+ struct ipc_enhancingvariable enhance_var;
+};
+
+extern int ipc_init(u32 input_width, u32 input_height, enum ipc_2d ipc2d);
+extern void ipc_start(void);
+extern void ipc_stop(void);
+
+#endif /* __FIMC_IPC_H */
diff --git a/drivers/media/video/samsung/fimc/fimc.h b/drivers/media/video/samsung/fimc/fimc.h
new file mode 100644
index 0000000..9527f52
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc.h
@@ -0,0 +1,770 @@
+/* linux/drivers/media/video/samsung/fimc/fimc.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Header file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+
+#ifndef __FIMC_H
+#define __FIMC_H __FILE__
+
+#ifdef __KERNEL__
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include <linux/fb.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf-core.h>
+#include <media/v4l2-mediabus.h>
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+#include <mach/dev.h>
+#endif
+#include <plat/media.h>
+#include <plat/fimc.h>
+#include <plat/cpu.h>
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+
+#define FIMC_NAME "s3c-fimc"
+#define FIMC_CMA_NAME "fimc"
+
+#define FIMC_CORE_CLK "sclk_fimc"
+#define FIMC_CLK_RATE 166750000
+#define EXYNOS_BUSFREQ_NAME "exynos-busfreq"
+
+#if defined(CONFIG_ARCH_EXYNOS4)
+#define FIMC_DEVICES 4
+#define FIMC_PHYBUFS 32
+#define FIMC_MAXCAMS 7
+#else
+#define FIMC_DEVICES 3
+#define FIMC_PHYBUFS 4
+#define FIMC_MAXCAMS 5
+#endif
+
+#define FIMC_SUBDEVS 3
+#define FIMC_OUTBUFS 3
+#define FIMC_INQUEUES 10
+#define FIMC_MAX_CTXS 4
+#define FIMC_TPID 3
+#define FIMC_CAPBUFS 32
+#define FIMC_ONESHOT_TIMEOUT 200
+#define FIMC_DQUEUE_TIMEOUT 1000
+
+#define FIMC_FIFOOFF_CNT 1000000 /* Sufficiently big value for stop */
+
+#define FORMAT_FLAGS_PACKED 0x1
+#define FORMAT_FLAGS_PLANAR 0x2
+
+#define FIMC_ADDR_Y 0
+#define FIMC_ADDR_CB 1
+#define FIMC_ADDR_CR 2
+
+#define FIMC_HD_WIDTH 1280
+#define FIMC_HD_HEIGHT 720
+
+#define FIMC_FHD_WIDTH 1920
+#define FIMC_FHD_HEIGHT 1080
+
+#define FIMC_MMAP_IDX -1
+#define FIMC_USERPTR_IDX -2
+
+#define FIMC_HCLK 0
+#define FIMC_SCLK 1
+#define CSI_CH_0 0
+#define CSI_CH_1 1
+#if defined(CONFIG_VIDEO_FIMC_FIFO)
+#define FIMC_OVLY_MODE FIMC_OVLY_FIFO
+#elif defined(CONFIG_VIDEO_FIMC_DMA_AUTO)
+#define FIMC_OVLY_MODE FIMC_OVLY_DMA_AUTO
+#endif
+
+#define PINGPONG_2ADDR_MODE
+#if defined(PINGPONG_2ADDR_MODE)
+#define FIMC_PINGPONG 2
+#endif
+
+#define check_bit(data, loc) ((data) & (0x1<<(loc)))
+#define FRAME_SEQ 0xf
+
+#define fimc_cam_use ((pdata->use_cam) ? 1 : 0)
+
+#define L2_FLUSH_ALL SZ_1M
+#define L1_FLUSH_ALL SZ_64K
+
+/*
+ * ENUMERATIONS
+*/
+enum fimc_status {
+ FIMC_READY_OFF = 0x00,
+ FIMC_STREAMOFF = 0x01,
+ FIMC_READY_ON = 0x02,
+ FIMC_STREAMON = 0x03,
+ FIMC_STREAMON_IDLE = 0x04, /* oneshot mode */
+ FIMC_OFF_SLEEP = 0x05,
+ FIMC_ON_SLEEP = 0x06,
+ FIMC_ON_IDLE_SLEEP = 0x07, /* oneshot mode */
+ FIMC_READY_RESUME = 0x08,
+ FIMC_BUFFER_STOP = 0x09,
+ FIMC_BUFFER_START = 0x0A,
+};
+
+enum fimc_fifo_state {
+ FIFO_CLOSE,
+ FIFO_SLEEP,
+};
+
+enum fimc_fimd_state {
+ FIMD_OFF,
+ FIMD_ON,
+};
+
+enum fimc_rot_flip {
+ FIMC_XFLIP = 0x01,
+ FIMC_YFLIP = 0x02,
+ FIMC_ROT = 0x10,
+};
+
+enum fimc_input {
+ FIMC_SRC_CAM,
+ FIMC_SRC_MSDMA,
+};
+
+enum fimc_overlay_mode {
+ FIMC_OVLY_NOT_FIXED = 0x0, /* Overlay mode isn't fixed. */
+ FIMC_OVLY_FIFO = 0x1, /* Non-destructive Overlay with FIFO */
+ FIMC_OVLY_DMA_AUTO = 0x2, /* Non-destructive Overlay with DMA */
+ FIMC_OVLY_DMA_MANUAL = 0x3, /* Non-destructive Overlay with DMA */
+ FIMC_OVLY_NONE_SINGLE_BUF = 0x4, /* Destructive Overlay with DMA single destination buffer */
+ FIMC_OVLY_NONE_MULTI_BUF = 0x5, /* Destructive Overlay with DMA multiple dstination buffer */
+};
+
+enum fimc_autoload {
+ FIMC_AUTO_LOAD,
+ FIMC_ONE_SHOT,
+};
+
+enum fimc_log {
+ FIMC_LOG_DEBUG = 0x1000,
+ FIMC_LOG_INFO_L2 = 0x0200,
+ FIMC_LOG_INFO_L1 = 0x0100,
+ FIMC_LOG_WARN = 0x0010,
+ FIMC_LOG_ERR = 0x0001,
+};
+
+enum fimc_range {
+ FIMC_RANGE_NARROW = 0x0,
+ FIMC_RANGE_WIDE = 0x1,
+};
+
+enum fimc_pixel_format_type{
+ FIMC_RGB,
+ FIMC_YUV420,
+ FIMC_YUV422,
+ FIMC_YUV444,
+};
+
+enum fimc_framecnt_seq {
+ FIMC_FRAMECNT_SEQ_DISABLE,
+ FIMC_FRAMECNT_SEQ_ENABLE,
+};
+
+enum fimc_sysmmu_flag {
+ FIMC_SYSMMU_OFF,
+ FIMC_SYSMMU_ON,
+};
+
+enum fimc_id {
+ FIMC0 = 0x0,
+ FIMC1 = 0x1,
+ FIMC2 = 0x2,
+ FIMC3 = 0x3,
+};
+
+enum fimc_power_status {
+ FIMC_POWER_OFF,
+ FIMC_POWER_ON,
+ FIMC_POWER_SUSPEND,
+};
+
+enum cam_mclk_status {
+ CAM_MCLK_OFF,
+ CAM_MCLK_ON,
+};
+
+/*
+ * STRUCTURES
+*/
+
+/* for reserved memory */
+struct fimc_meminfo {
+ dma_addr_t base; /* buffer base */
+ size_t size; /* total length */
+ dma_addr_t curr; /* current addr */
+ dma_addr_t vaddr_base; /* buffer base */
+ dma_addr_t vaddr_curr; /* current addr */
+};
+
+struct fimc_buf {
+ dma_addr_t base[3];
+ size_t length[3];
+};
+
+struct fimc_overlay_buf {
+ u32 vir_addr[3];
+ size_t size[3];
+ u32 phy_addr[3];
+};
+
+struct fimc_overlay {
+ enum fimc_overlay_mode mode;
+ struct fimc_overlay_buf buf;
+ s32 req_idx;
+};
+
+/* general buffer */
+struct fimc_buf_set {
+ int id;
+ dma_addr_t base[4];
+ dma_addr_t vaddr_base[4];
+ size_t length[4];
+ size_t garbage[4];
+ enum videobuf_state state;
+ u32 flags;
+ atomic_t mapped_cnt;
+ dma_addr_t paddr_pktdata;
+ u32 *vaddr_pktdata;
+ struct list_head list;
+};
+
+/* for capture device */
+struct fimc_capinfo {
+ struct v4l2_cropcap cropcap;
+ struct v4l2_rect crop;
+ struct v4l2_pix_format fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct fimc_buf_set bufs[FIMC_CAPBUFS];
+ /* using c110 */
+ struct list_head inq;
+ int outq[FIMC_PHYBUFS];
+ /* using c210 */
+ struct list_head outgoing_q;
+ int nr_bufs;
+ int irq;
+ int lastirq;
+
+ bool cacheable;
+ bool pktdata_enable;
+ u32 pktdata_size;
+ u32 pktdata_plane;
+
+ u32 cnt;
+ u32 poll_cnt;
+
+ /* flip: V4L2_CID_xFLIP, rotate: 90, 180, 270 */
+ u32 flip;
+ u32 rotate;
+ u32 dtp_mode;
+ u32 movie_mode;
+ u32 vt_mode;
+ u32 sensor_output_width;
+ u32 sensor_output_height;
+};
+
+/* for output overlay device */
+struct fimc_idx {
+ int ctx;
+ int idx;
+};
+
+struct fimc_ctx_idx {
+ struct fimc_idx prev;
+ struct fimc_idx active;
+ struct fimc_idx next;
+};
+
+/* scaler abstraction: local use recommended */
+struct fimc_scaler {
+ u32 bypass;
+ u32 hfactor;
+ u32 vfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ u32 pre_dst_width;
+ u32 pre_dst_height;
+ u32 scaleup_h;
+ u32 scaleup_v;
+ u32 main_hratio;
+ u32 main_vratio;
+ u32 real_width;
+ u32 real_height;
+ u32 shfactor;
+ u32 skipline;
+};
+
+struct fimc_ctx {
+ u32 ctx_num;
+ struct v4l2_cropcap cropcap;
+ struct v4l2_rect crop;
+ struct v4l2_pix_format pix;
+ struct v4l2_window win;
+ struct v4l2_framebuffer fbuf;
+ struct fimc_scaler sc;
+ struct fimc_overlay overlay;
+
+ u32 buf_num;
+ u32 is_requested;
+ struct fimc_buf_set src[FIMC_OUTBUFS];
+ struct fimc_buf_set dst[FIMC_OUTBUFS];
+ s32 inq[FIMC_OUTBUFS];
+ s32 outq[FIMC_OUTBUFS];
+
+ u32 flip;
+ u32 rotate;
+ enum fimc_status status;
+};
+
+struct fimc_outinfo {
+ int last_ctx;
+ spinlock_t lock_in;
+ spinlock_t lock_out;
+ spinlock_t slock;
+ struct fimc_idx inq[FIMC_INQUEUES];
+ struct fimc_ctx ctx[FIMC_MAX_CTXS];
+ bool ctx_used[FIMC_MAX_CTXS];
+ struct fimc_ctx_idx idxs;
+};
+
+struct s3cfb_user_window {
+ int x;
+ int y;
+};
+
+enum s3cfb_data_path_t {
+ DATA_PATH_FIFO = 0,
+ DATA_PATH_DMA = 1,
+ DATA_PATH_IPC = 2,
+};
+
+enum s3cfb_mem_owner_t {
+ DMA_MEM_NONE = 0,
+ DMA_MEM_FIMD = 1,
+ DMA_MEM_OTHER = 2,
+};
+#define S3CFB_WIN_OFF_ALL _IO('F', 202)
+#define S3CFB_WIN_POSITION _IOW('F', 203, struct s3cfb_user_window)
+#define S3CFB_GET_LCD_WIDTH _IOR('F', 302, int)
+#define S3CFB_GET_LCD_HEIGHT _IOR('F', 303, int)
+#define S3CFB_SET_WRITEBACK _IOW('F', 304, u32)
+#define S3CFB_SET_WIN_ON _IOW('F', 305, u32)
+#define S3CFB_SET_WIN_OFF _IOW('F', 306, u32)
+#define S3CFB_SET_WIN_PATH _IOW('F', 307, enum s3cfb_data_path_t)
+#define S3CFB_SET_WIN_ADDR _IOW('F', 308, unsigned long)
+#define S3CFB_SET_WIN_MEM _IOW('F', 309, enum s3cfb_mem_owner_t)
+/* ------------------------------------------------------------------------ */
+
+struct fimc_fbinfo {
+ struct fb_fix_screeninfo *fix;
+ struct fb_var_screeninfo *var;
+ int lcd_hres;
+ int lcd_vres;
+ u32 is_enable;
+ /* lcd fifo control */
+
+ int (*open_fifo)(int id, int ch, int (*do_priv)(void *), void *param);
+ int (*close_fifo)(int id, int (*do_priv)(void *), void *param);
+};
+
+struct fimc_limit {
+ u32 pre_dst_w;
+ u32 bypass_w;
+ u32 trg_h_no_rot;
+ u32 trg_h_rot;
+ u32 real_w_no_rot;
+ u32 real_h_rot;
+};
+
+enum FIMC_EFFECT_FIN {
+ FIMC_EFFECT_FIN_BYPASS = 0,
+ FIMC_EFFECT_FIN_ARBITRARY_CBCR,
+ FIMC_EFFECT_FIN_NEGATIVE,
+ FIMC_EFFECT_FIN_ART_FREEZE,
+ FIMC_EFFECT_FIN_EMBOSSING,
+ FIMC_EFFECT_FIN_SILHOUETTE,
+};
+
+
+struct fimc_effect {
+ int ie_on;
+ int ie_after_sc;
+ enum FIMC_EFFECT_FIN fin;
+ int pat_cb;
+ int pat_cr;
+};
+
+struct fimc_is {
+ struct v4l2_pix_format fmt;
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct v4l2_subdev *sd;
+ u32 frame_count;
+ u32 valid;
+ u32 bad_mark;
+ u32 offset_x;
+ u32 offset_y;
+ u32 zoom_in_width;
+ u32 zoom_in_height;
+};
+
+/* fimc controller abstration */
+struct fimc_control {
+ int id; /* controller id */
+ char name[16];
+ atomic_t in_use;
+ void __iomem *regs; /* register i/o */
+ struct clk *clk; /* interface clock */
+ struct fimc_meminfo mem; /* for reserved mem */
+ atomic_t irq_cnt; /* for interrupt cnt */
+ struct work_struct work_struct; /* for work queue */
+ struct workqueue_struct *fimc_irq_wq; /* for work queue */
+
+ /* kernel helpers */
+ struct mutex lock; /* controller lock */
+ struct mutex v4l2_lock;
+ spinlock_t outq_lock;
+ wait_queue_head_t wq;
+ struct device *dev;
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+ struct device *bus_dev;
+#endif
+ int irq;
+
+ /* v4l2 related */
+ struct video_device *vd;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev *flite_sd;
+ struct fimc_is is;
+ /* fimc specific */
+ struct fimc_limit *limit; /* H/W limitation */
+ struct s3c_platform_camera *cam; /* activated camera */
+ struct fimc_capinfo *cap; /* capture dev info */
+ struct fimc_outinfo *out; /* output dev info */
+ struct fimc_fbinfo fb; /* fimd info */
+ struct fimc_scaler sc; /* scaler info */
+ struct fimc_effect fe; /* fimc effect info */
+
+ enum fimc_status status;
+ enum fimc_log log;
+ enum fimc_range range;
+ /* for suspend mode */
+ int suspend_flag;
+ int suspend_framecnt;
+ enum fimc_sysmmu_flag sysmmu_flag;
+ enum fimc_power_status power_status;
+ struct timeval curr_time;
+ struct timeval before_time;
+ char cma_name[16];
+ bool restart;
+};
+
+/* global */
+struct fimc_global {
+ struct fimc_control ctrl[FIMC_DEVICES];
+ struct s3c_platform_camera *camera[FIMC_MAXCAMS];
+ int camera_isvalid[FIMC_MAXCAMS];
+ int active_camera;
+ int initialized;
+ enum cam_mclk_status mclk_status;
+ void __iomem *backup_regs[4];
+};
+
+struct fimc_prv_data {
+ struct fimc_control *ctrl;
+ int ctx_id;
+};
+
+/* debug macro */
+#define FIMC_LOG_DEFAULT (FIMC_LOG_WARN | FIMC_LOG_ERR)
+
+#define FIMC_DEBUG(fmt, ...) \
+ do { \
+ if (ctrl->log & FIMC_LOG_DEBUG) \
+ printk(KERN_DEBUG FIMC_NAME "%d: " \
+ fmt, ctrl->id, ##__VA_ARGS__); \
+ } while (0)
+
+#define FIMC_INFO_L2(fmt, ...) \
+ do { \
+ if (ctrl->log & FIMC_LOG_INFO_L2) \
+ printk(KERN_INFO FIMC_NAME "%d: " \
+ fmt, ctrl->id, ##__VA_ARGS__); \
+ } while (0)
+
+#define FIMC_INFO_L1(fmt, ...) \
+ do { \
+ if (ctrl->log & FIMC_LOG_INFO_L1) \
+ printk(KERN_INFO FIMC_NAME "%d: " \
+ fmt, ctrl->id, ##__VA_ARGS__); \
+ } while (0)
+
+#define FIMC_WARN(fmt, ...) \
+ do { \
+ if (ctrl->log & FIMC_LOG_WARN) \
+ printk(KERN_WARNING FIMC_NAME "%d: " \
+ fmt, ctrl->id, ##__VA_ARGS__); \
+ } while (0)
+
+
+#define FIMC_ERROR(fmt, ...) \
+ do { \
+ if (ctrl->log & FIMC_LOG_ERR) \
+ printk(KERN_ERR FIMC_NAME "%d: " \
+ fmt, ctrl->id, ##__VA_ARGS__); \
+ } while (0)
+
+
+#define fimc_dbg(fmt, ...) FIMC_DEBUG(fmt, ##__VA_ARGS__)
+#define fimc_info2(fmt, ...) FIMC_INFO_L2(fmt, ##__VA_ARGS__)
+#define fimc_info1(fmt, ...) FIMC_INFO_L1(fmt, ##__VA_ARGS__)
+#define fimc_warn(fmt, ...) FIMC_WARN(fmt, ##__VA_ARGS__)
+#define fimc_err(fmt, ...) FIMC_ERROR(fmt, ##__VA_ARGS__)
+
+/*
+ * EXTERNS
+*/
+extern struct fimc_global *fimc_dev;
+extern struct video_device fimc_video_device[FIMC_DEVICES];
+extern const struct v4l2_ioctl_ops fimc_v4l2_ops;
+extern struct fimc_limit fimc40_limits[FIMC_DEVICES];
+extern struct fimc_limit fimc43_limits[FIMC_DEVICES];
+extern struct fimc_limit fimc50_limits[FIMC_DEVICES];
+extern struct fimc_limit fimc51_limits[FIMC_DEVICES];
+
+/* FIMD */
+#ifdef CONFIG_FB_S5P /* Legacy FIMD */
+extern int s3cfb_direct_ioctl(int id, unsigned int cmd, unsigned long arg);
+extern int s3cfb_open_fifo(int id, int ch, int (*do_priv)(void *), void *param);
+extern int s3cfb_close_fifo(int id, int (*do_priv)(void *), void *param);
+#else /* Mainline FIMD */
+#ifdef CONFIG_DRM_EXYNOS_FIMD_WB
+extern int fimc_send_event(unsigned long val, void *v);
+static inline int s3cfb_direct_ioctl(int id, unsigned int cmd,
+unsigned long arg) { return fimc_send_event(cmd, (void *)arg); }
+#else
+static inline int s3cfb_direct_ioctl(int id, unsigned int cmd, unsigned long arg) { return 0; }
+#endif
+static inline int s3cfb_open_fifo(int id, int ch, int (*do_priv)(void *), void *param) { return 0; }
+static inline int s3cfb_close_fifo(int id, int (*do_priv)(void *), void *param) { return 0; }
+#endif
+
+/* general */
+extern void s3c_csis_start(int csis_id, int lanes, int settle, int align, int width, int height, int pixel_format);
+extern void s3c_csis_stop(int csis_id);
+extern int s3c_csis_get_pkt(int csis_id, void *pktdata);
+extern void s3c_csis_enable_pktdata(int csis_id, bool enable);
+extern int fimc_dma_alloc(struct fimc_control *ctrl, struct fimc_buf_set *bs, int i, int align);
+extern void fimc_dma_free(struct fimc_control *ctrl, struct fimc_buf_set *bs, int i);
+extern u32 fimc_mapping_rot_flip(u32 rot, u32 flip);
+extern int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift);
+extern void fimc_get_nv12t_size(int img_hres, int img_vres,
+ int *y_size, int *cb_size);
+extern int fimc_hwget_number_of_bits(u32 framecnt_seq);
+
+/* camera */
+extern int fimc_select_camera(struct fimc_control *ctrl);
+
+/* capture device */
+extern int fimc_enum_input(struct file *file, void *fh, struct v4l2_input *inp);
+extern int fimc_g_input(struct file *file, void *fh, unsigned int *i);
+extern int fimc_s_input(struct file *file, void *fh, unsigned int i);
+extern int fimc_enum_fmt_vid_capture(struct file *file, void *fh, struct v4l2_fmtdesc *f);
+extern int fimc_g_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f);
+extern int fimc_s_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f);
+extern int fimc_s_fmt_vid_private(struct file *file, void *fh, struct v4l2_format *f);
+extern int fimc_try_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f);
+extern int fimc_reqbufs_capture(void *fh, struct v4l2_requestbuffers *b);
+extern int fimc_querybuf_capture(void *fh, struct v4l2_buffer *b);
+extern int fimc_g_ctrl_capture(void *fh, struct v4l2_control *c);
+extern int fimc_g_ext_ctrls_capture(void *fh, struct v4l2_ext_controls *c);
+extern int fimc_s_ctrl_capture(void *fh, struct v4l2_control *c);
+extern int fimc_s_ext_ctrls_capture(void *fh, struct v4l2_ext_controls *c);
+#if defined(CONFIG_CPU_S5PV210)
+extern int fimc_change_clksrc(struct fimc_control *ctrl, int fimc_clk);
+#endif
+extern int fimc_cropcap_capture(void *fh, struct v4l2_cropcap *a);
+extern int fimc_g_crop_capture(void *fh, struct v4l2_crop *a);
+extern int fimc_s_crop_capture(void *fh, struct v4l2_crop *a);
+extern int fimc_streamon_capture(void *fh);
+extern int fimc_streamoff_capture(void *fh);
+extern int fimc_qbuf_capture(void *fh, struct v4l2_buffer *b);
+extern int fimc_dqbuf_capture(void *fh, struct v4l2_buffer *b);
+extern int fimc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a);
+extern int fimc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a);
+extern int fimc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc);
+extern int fimc_querymenu(struct file *file, void *fh, struct v4l2_querymenu *qm);
+extern int fimc_stop_capture(struct fimc_control *ctrl);
+extern int fimc_enum_framesizes(struct file *filp, void *fh, struct v4l2_frmsizeenum *fsize);
+extern int fimc_enum_frameintervals(struct file *filp, void *fh, struct v4l2_frmivalenum *fival);
+extern int fimc_release_subdev(struct fimc_control *ctrl);
+extern int fimc_is_release_subdev(struct fimc_control *ctrl);
+extern int fimc_is_set_zoom(struct fimc_control *ctrl, struct v4l2_control *c);
+/* output device */
+extern void fimc_outdev_set_src_addr(struct fimc_control *ctrl, dma_addr_t *base);
+extern int fimc_output_set_dst_addr(struct fimc_control *ctrl, struct fimc_ctx *ctx, int idx);
+extern int fimc_outdev_set_ctx_param(struct fimc_control *ctrl, struct fimc_ctx *ctx);
+extern int fimc_start_fifo(struct fimc_control *ctrl, struct fimc_ctx *ctx);
+extern int fimc_fimd_rect(const struct fimc_control *ctrl, const struct fimc_ctx *ctx, struct v4l2_rect *fimd_rect);
+extern int fimc_outdev_stop_streaming(struct fimc_control *ctrl, struct fimc_ctx *ctx);
+extern int fimc_outdev_resume_dma(struct fimc_control *ctrl, struct fimc_ctx *ctx);
+extern int fimc_outdev_start_camif(void *param);
+extern int fimc_reqbufs_output(void *fh, struct v4l2_requestbuffers *b);
+extern int fimc_querybuf_output(void *fh, struct v4l2_buffer *b);
+extern int fimc_g_ctrl_output(void *fh, struct v4l2_control *c);
+extern int fimc_s_ctrl_output(struct file *filp, void *fh, struct v4l2_control *c);
+extern int fimc_cropcap_output(void *fh, struct v4l2_cropcap *a);
+extern int fimc_g_crop_output(void *fh, struct v4l2_crop *a);
+extern int fimc_s_crop_output(void *fh, struct v4l2_crop *a);
+extern int fimc_streamon_output(void *fh);
+extern int fimc_streamoff_output(void *fh);
+extern int fimc_qbuf_output(void *fh, struct v4l2_buffer *b);
+extern int fimc_dqbuf_output(void *fh, struct v4l2_buffer *b);
+extern int fimc_g_fmt_vid_out(struct file *filp, void *fh, struct v4l2_format *f);
+extern int fimc_s_fmt_vid_out(struct file *filp, void *fh, struct v4l2_format *f);
+extern int fimc_try_fmt_vid_out(struct file *filp, void *fh, struct v4l2_format *f);
+
+extern int fimc_init_in_queue(struct fimc_control *ctrl, struct fimc_ctx *ctx);
+extern int fimc_push_inq(struct fimc_control *ctrl, struct fimc_ctx *ctx, int idx);
+extern int fimc_pop_inq(struct fimc_control *ctrl, int *ctx_num, int *idx);
+extern int fimc_push_outq(struct fimc_control *ctrl, struct fimc_ctx *ctx, int idx);
+extern int fimc_pop_outq(struct fimc_control *ctrl, struct fimc_ctx *ctx, int *idx);
+extern int fimc_init_out_queue(struct fimc_control *ctrl, struct fimc_ctx *ctx);
+extern void fimc_outdev_init_idxs(struct fimc_control *ctrl);
+
+extern void fimc_dump_context(struct fimc_control *ctrl, struct fimc_ctx *ctx);
+extern void fimc_print_signal(struct fimc_control *ctrl);
+extern void fimc_sfr_dump(struct fimc_control *ctrl);
+
+/* overlay device */
+extern int fimc_try_fmt_overlay(struct file *filp, void *fh, struct v4l2_format *f);
+extern int fimc_g_fmt_vid_overlay(struct file *filp, void *fh, struct v4l2_format *f);
+extern int fimc_s_fmt_vid_overlay(struct file *filp, void *fh, struct v4l2_format *f);
+extern int fimc_g_fbuf(struct file *filp, void *fh, struct v4l2_framebuffer *fb);
+extern int fimc_s_fbuf(struct file *filp, void *fh, struct v4l2_framebuffer *fb);
+
+/* Register access file */
+extern int fimc_hwset_camera_source(struct fimc_control *ctrl);
+extern int fimc_hwset_camera_change_source(struct fimc_control *ctrl);
+extern int fimc_hwset_enable_irq(struct fimc_control *ctrl, int overflow, int level);
+extern int fimc_hwset_disable_irq(struct fimc_control *ctrl);
+extern int fimc_hwset_clear_irq(struct fimc_control *ctrl);
+extern int fimc_hwset_reset(struct fimc_control *ctrl);
+extern int fimc_hwset_sw_reset(struct fimc_control *ctrl);
+extern int fimc_hwget_frame_end(struct fimc_control *ctrl);
+extern int fimc_hwset_clksrc(struct fimc_control *ctrl, int src_clk);
+extern int fimc_hwget_overflow_state(struct fimc_control *ctrl);
+extern int fimc_hwset_camera_offset(struct fimc_control *ctrl);
+extern int fimc_hwset_camera_polarity(struct fimc_control *ctrl);
+extern int fimc_hwset_camera_type(struct fimc_control *ctrl);
+extern int fimc_hwset_output_size(struct fimc_control *ctrl, int width, int height);
+extern int fimc_hwset_output_colorspace(struct fimc_control *ctrl, u32 pixelformat);
+extern int fimc_hwset_output_rot_flip(struct fimc_control *ctrl, u32 rot, u32 flip);
+extern int fimc_hwset_output_area(struct fimc_control *ctrl, u32 width, u32 height);
+extern int fimc_hwset_output_area_size(struct fimc_control *ctrl, u32 size);
+extern int fimc_hwset_output_scan(struct fimc_control *ctrl, struct v4l2_pix_format *fmt);
+extern int fimc_hwset_enable_lastirq(struct fimc_control *ctrl);
+extern int fimc_hwset_disable_lastirq(struct fimc_control *ctrl);
+extern int fimc_hwset_enable_lastend(struct fimc_control *ctrl);
+extern int fimc_hwset_disable_lastend(struct fimc_control *ctrl);
+extern int fimc_hwset_prescaler(struct fimc_control *ctrl, struct fimc_scaler *sc);
+extern int fimc_hwset_output_yuv(struct fimc_control *ctrl, u32 pixelformat);
+extern int fimc_hwset_output_address(struct fimc_control *ctrl, struct fimc_buf_set *bs, int id);
+extern int fimc_hwset_input_rot(struct fimc_control *ctrl, u32 rot, u32 flip);
+extern int fimc_hwset_scaler(struct fimc_control *ctrl, struct fimc_scaler *sc);
+extern int fimc_hwset_scaler_bypass(struct fimc_control *ctrl);
+extern int fimc_hwset_enable_lcdfifo(struct fimc_control *ctrl);
+extern int fimc_hwset_disable_lcdfifo(struct fimc_control *ctrl);
+extern int fimc_hwset_start_scaler(struct fimc_control *ctrl);
+extern int fimc_hwset_stop_scaler(struct fimc_control *ctrl);
+extern int fimc_hwset_input_rgb(struct fimc_control *ctrl, u32 pixelformat);
+extern int fimc_hwset_intput_field(struct fimc_control *ctrl, enum v4l2_field field);
+extern int fimc_hwset_output_rgb(struct fimc_control *ctrl, u32 pixelformat);
+extern int fimc_hwset_ext_rgb(struct fimc_control *ctrl, int enable);
+extern int fimc_hwset_enable_capture(struct fimc_control *ctrl, u32 bypass);
+extern int fimc_hwset_disable_capture(struct fimc_control *ctrl);
+extern void fimc_wait_disable_capture(struct fimc_control *ctrl);
+extern int fimc_hwset_input_address(struct fimc_control *ctrl, dma_addr_t *base);
+extern int fimc_hwset_enable_autoload(struct fimc_control *ctrl);
+extern int fimc_hwset_disable_autoload(struct fimc_control *ctrl);
+extern int fimc_hwset_real_input_size(struct fimc_control *ctrl, u32 width, u32 height);
+extern int fimc_hwset_addr_change_enable(struct fimc_control *ctrl);
+extern int fimc_hwset_addr_change_disable(struct fimc_control *ctrl);
+extern int fimc_hwset_input_burst_cnt(struct fimc_control *ctrl, u32 cnt);
+extern int fimc_hwset_input_colorspace(struct fimc_control *ctrl, u32 pixelformat);
+extern int fimc_hwset_input_yuv(struct fimc_control *ctrl, u32 pixelformat);
+extern int fimc_hwset_input_flip(struct fimc_control *ctrl, u32 rot, u32 flip);
+extern int fimc_hwset_input_source(struct fimc_control *ctrl, enum fimc_input path);
+extern int fimc_hwset_start_input_dma(struct fimc_control *ctrl);
+extern int fimc_hwset_stop_input_dma(struct fimc_control *ctrl);
+extern int fimc_hwset_output_offset(struct fimc_control *ctrl, u32 pixelformat, struct v4l2_rect *bound, struct v4l2_rect *crop);
+extern int fimc_hwset_input_offset(struct fimc_control *ctrl, u32 pixelformat, struct v4l2_rect *bound, struct v4l2_rect *crop);
+extern int fimc_hwset_org_input_size(struct fimc_control *ctrl, u32 width, u32 height);
+extern int fimc_hwset_org_output_size(struct fimc_control *ctrl, u32 width, u32 height);
+extern int fimc_hwset_ext_output_size(struct fimc_control *ctrl, u32 width, u32 height);
+extern int fimc_hwset_input_addr_style(struct fimc_control *ctrl, u32 pixelformat);
+extern int fimc_hwset_output_addr_style(struct fimc_control *ctrl, u32 pixelformat);
+extern int fimc_hwset_jpeg_mode(struct fimc_control *ctrl, bool enable);
+extern int fimc_hwget_frame_count(struct fimc_control *ctrl);
+extern int fimc_hw_wait_winoff(struct fimc_control *ctrl);
+extern int fimc_hw_wait_stop_input_dma(struct fimc_control *ctrl);
+extern int fimc_hwset_input_lineskip(struct fimc_control *ctrl);
+extern int fimc_hw_reset_camera(struct fimc_control *ctrl);
+/* NOTE(review): duplicate declarations of fimc_hwget_output_buf_sequence()
+ * and fimc_reset_status_reg() removed; `extern` added to
+ * fimc_hwset_stop_processing() for consistency with the other prototypes. */
+extern void fimc_reset_status_reg(struct fimc_control *ctrl);
+extern void fimc_hwset_stop_processing(struct fimc_control *ctrl);
+extern int fimc_hw_reset_output_buf_sequence(struct fimc_control *ctrl);
+extern int fimc_hwset_output_buf_sequence(struct fimc_control *ctrl, u32 shift, u32 enable);
+extern void fimc_hwset_output_buf_sequence_all(struct fimc_control *ctrl, u32 framecnt_seq);
+extern int fimc_hwget_output_buf_sequence(struct fimc_control *ctrl);
+extern int fimc_hwget_before_frame_count(struct fimc_control *ctrl);
+extern int fimc_hwget_present_frame_count(struct fimc_control *ctrl);
+extern int fimc_hwget_check_framecount_sequence(struct fimc_control *ctrl, u32 frame);
+extern int fimc_hwset_image_effect(struct fimc_control *ctrl);
+extern int fimc_hwset_sysreg_camblk_fimd0_wb(struct fimc_control *ctrl);
+extern int fimc_hwset_sysreg_camblk_fimd1_wb(struct fimc_control *ctrl);
+extern int fimc_hwset_sysreg_camblk_isp_wb(struct fimc_control *ctrl);
+extern int fimc_hwget_last_frame_end(struct fimc_control *ctrl);
+extern void fimc_hwset_enable_frame_end_irq(struct fimc_control *ctrl);
+extern void fimc_hwset_disable_frame_end_irq(struct fimc_control *ctrl);
+/* IPC related file */
+extern void ipc_start(void);
+
+/*
+ * DRIVER HELPERS
+ *
+*/
+/* Fetch the FIMC platform data hanging off a struct device that is
+ * embedded in a platform_device. */
+#define to_fimc_plat(d) (to_platform_device(d)->dev.platform_data)
+
+/* Accessor for the driver-global state object. */
+static inline struct fimc_global *get_fimc_dev(void)
+{
+ return fimc_dev;
+}
+
+/* Accessor for one FIMC controller instance.  `id` is not range-checked
+ * here; callers must pass a valid index. */
+static inline struct fimc_control *get_fimc_ctrl(int id)
+{
+ return &fimc_dev->ctrl[id];
+}
+
+#endif /* __FIMC_H */
diff --git a/drivers/media/video/samsung/fimc/fimc_capture.c b/drivers/media/video/samsung/fimc/fimc_capture.c
new file mode 100644
index 0000000..fe0878a
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc_capture.c
@@ -0,0 +1,3171 @@
+/* linux/drivers/media/video/samsung/fimc_capture.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * V4L2 Capture device support file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/videodev2_exynos_media.h>
+#include <linux/videodev2_exynos_camera.h>
+#include <linux/clk.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <plat/media.h>
+#include <plat/clock.h>
+#include <plat/fimc.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <linux/pm_qos_params.h>
+
+#include "fimc.h"
+
+static struct pm_qos_request_list bus_qos_pm_qos_req;
+
+/*
+ * Pixel formats the capture video node reports through VIDIOC_ENUM_FMT.
+ * NOTE(review): each .index mirrors the array position — keep them in
+ * sync when adding or removing entries.
+ */
+static const struct v4l2_fmtdesc capture_fmts[] = {
+ {
+ .index = 0,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PACKED,
+ .description = "RGB-5-6-5",
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ }, {
+ .index = 1,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PACKED,
+ .description = "RGB-8-8-8, unpacked 24 bpp",
+ .pixelformat = V4L2_PIX_FMT_RGB32,
+ }, {
+ .index = 2,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PACKED,
+ .description = "YUV 4:2:2 packed, YCbYCr",
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ }, {
+ .index = 3,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PACKED,
+ .description = "YUV 4:2:2 packed, CbYCrY",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ }, {
+ .index = 4,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PACKED,
+ .description = "YUV 4:2:2 packed, CrYCbY",
+ .pixelformat = V4L2_PIX_FMT_VYUY,
+ }, {
+ .index = 5,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PACKED,
+ .description = "YUV 4:2:2 packed, YCrYCb",
+ .pixelformat = V4L2_PIX_FMT_YVYU,
+ }, {
+ .index = 6,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PLANAR,
+ .description = "YUV 4:2:2 planar, Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV422P,
+ }, {
+ .index = 7,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PLANAR,
+ .description = "YUV 4:2:0 planar, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ }, {
+ .index = 8,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PLANAR,
+ .description = "YUV 4:2:0 planar, Y/CbCr, Tiled",
+ .pixelformat = V4L2_PIX_FMT_NV12T,
+ }, {
+ .index = 9,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PLANAR,
+ .description = "YUV 4:2:0 planar, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ }, {
+ .index = 10,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PLANAR,
+ .description = "YUV 4:2:2 planar, Y/CbCr",
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ }, {
+ .index = 11,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PLANAR,
+ .description = "YUV 4:2:2 planar, Y/CrCb",
+ .pixelformat = V4L2_PIX_FMT_NV61,
+ }, {
+ .index = 12,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PLANAR,
+ .description = "YUV 4:2:0 planar, Y/Cb/Cr",
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ }, {
+ .index = 13,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = FORMAT_FLAGS_PLANAR,
+ .description = "YUV 4:2:0 planar, Y/Cr/Cb",
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ }, {
+ /* Compressed/opaque formats carry no packed/planar flag. */
+ .index = 14,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "JPEG encoded data",
+ .pixelformat = V4L2_PIX_FMT_JPEG,
+ }, {
+ .index = 15,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .description = "Interleaved data",
+ .pixelformat = V4L2_PIX_FMT_INTERLEAVED,
+ },
+};
+
+/*
+ * Controls answered locally by fimc_queryctrl() (anything else is
+ * forwarded to the sensor subdev).
+ */
+static const struct v4l2_queryctrl fimc_controls[] = {
+	{
+		.id = V4L2_CID_ROTATION,
+		/* Fixed: was V4L2_CTRL_TYPE_BOOLEAN, contradicting the
+		 * 0..270 step-90 range below. */
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		/* Fixed: name was misspelled "Roataion". */
+		.name = "Rotation",
+		.minimum = 0,
+		.maximum = 270,
+		.step = 90,
+		.default_value = 0,
+	}, {
+		.id = V4L2_CID_HFLIP,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Horizontal Flip",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	}, {
+		.id = V4L2_CID_VFLIP,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Vertical Flip",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	}, {
+		/* NOTE(review): the PADDR_* controls report physical
+		 * addresses yet are declared boolean; kept as-is to avoid
+		 * changing the userspace-visible ABI — confirm before
+		 * tightening. */
+		.id = V4L2_CID_PADDR_Y,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Physical address Y",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+		.flags = V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.id = V4L2_CID_PADDR_CB,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Physical address Cb",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+		.flags = V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.id = V4L2_CID_PADDR_CR,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Physical address Cr",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+		.flags = V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.id = V4L2_CID_PADDR_CBCR,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Physical address CbCr",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+		.flags = V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.id = V4L2_CID_CACHEABLE,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Cacheable",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+};
+
+#ifndef CONFIG_VIDEO_FIMC_MIPI
+/* MIPI-CSIS support compiled out: empty stubs let the capture path
+ * call these entry points unconditionally. */
+void s3c_csis_start(int csis_id, int lanes, int settle, \
+ int align, int width, int height, int pixel_format) {}
+void s3c_csis_stop(int csis_id) {}
+void s3c_csis_enable_pktdata(int csis_id, bool enable) {}
+#endif
+
+/*
+ * Power up and initialize the currently selected camera (ctrl->cam).
+ * Writeback "cameras" only latch the LCD size; real sensors get MCLK,
+ * power, and a subdev init call, retried up to 3 times on failure.
+ * Returns 0 on success or a negative errno.
+ */
+static int fimc_init_camera(struct fimc_control *ctrl)
+{
+ struct fimc_global *fimc = get_fimc_dev();
+ struct s3c_platform_fimc *pdata;
+ struct s3c_platform_camera *cam;
+ int ret = 0, retry_cnt = 0;
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ struct platform_device *pdev = to_platform_device(ctrl->dev);
+#endif
+ /* NOTE(review): pdata is fetched but never used in this function. */
+ pdata = to_fimc_plat(ctrl->dev);
+
+ cam = ctrl->cam;
+
+ /* do nothing if already initialized */
+ if (ctrl->cam->initialized)
+ return 0;
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ /* Hold the power domain for the duration of the init attempt. */
+ if (ctrl->power_status == FIMC_POWER_OFF) {
+ pm_runtime_get_sync(&pdev->dev);
+ }
+#endif
+ /*
+ * WriteBack mode doesn't need to set clock and power,
+ * but it needs to set source width, height depend on LCD resolution.
+ */
+ if ((cam->id == CAMERA_WB) || (cam->id == CAMERA_WB_B)) {
+ ret = s3cfb_direct_ioctl(0, S3CFB_GET_LCD_WIDTH,
+ (unsigned long)&cam->width);
+ if (ret) {
+ fimc_err("fail to get LCD size\n");
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ pm_runtime_put_sync(&pdev->dev);
+#endif
+ return ret;
+ }
+
+ ret = s3cfb_direct_ioctl(0, S3CFB_GET_LCD_HEIGHT,
+ (unsigned long)&cam->height);
+ if (ret) {
+ fimc_err("fail to get LCD size\n");
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ pm_runtime_put_sync(&pdev->dev);
+#endif
+ return ret;
+ }
+
+ cam->window.width = cam->width;
+ cam->window.height = cam->height;
+ cam->initialized = 1;
+
+ return ret;
+ }
+
+retry:
+ /* set rate for mclk */
+ if ((clk_get_rate(cam->clk)) && (fimc->mclk_status == CAM_MCLK_OFF)) {
+ clk_set_rate(cam->clk, cam->clk_rate);
+ clk_enable(cam->clk);
+ fimc->mclk_status = CAM_MCLK_ON;
+ fimc_info1("clock for camera: %d\n", cam->clk_rate);
+ }
+
+ /* enable camera power if needed */
+ if (cam->cam_power) {
+ ret = cam->cam_power(1);
+ if (unlikely(ret < 0)) {
+ fimc_err("\nfail to power on\n");
+ if (fimc->mclk_status == CAM_MCLK_ON) {
+ clk_disable(ctrl->cam->clk);
+ fimc->mclk_status = CAM_MCLK_OFF;
+ }
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ pm_runtime_put_sync(&pdev->dev);
+#endif
+ return ret;
+ }
+ }
+
+ /* "0" argument means preview init for s5k4ea */
+ ret = v4l2_subdev_call(cam->sd, core, init, 0);
+
+ /* Retry camera power-up if first i2c fails.
+ * Each retry fully drops power and MCLK before going again, so the
+ * sensor sees a clean power cycle. */
+ if (unlikely(ret < 0)) {
+ if (cam->cam_power)
+ cam->cam_power(0);
+
+ if (fimc->mclk_status == CAM_MCLK_ON) {
+ clk_disable(ctrl->cam->clk);
+ fimc->mclk_status = CAM_MCLK_OFF;
+ }
+ if (retry_cnt++ < 3) {
+ msleep(100);
+ fimc_err("Retry power on(%d/3)\n\n", retry_cnt);
+ goto retry;
+ } else {
+ fimc_err("Camera power/init failed!!!!\n\n");
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ if (ctrl->power_status == FIMC_POWER_ON) {
+ pm_runtime_put_sync(&pdev->dev);
+ }
+#endif
+ }
+ } else {
+ /* Apply things to interface register */
+ fimc_hwset_reset(ctrl);
+ cam->initialized = 1;
+ }
+
+ return ret;
+}
+
+/*
+ * Ask the sensor subdev how much memory its JPEG output needs.
+ * Returns the size in bytes, or 0 when the subdev does not support
+ * JPEG encoding (callers treat 0 as "no JPEG").
+ */
+static int fimc_camera_get_jpeg_memsize(struct fimc_control *ctrl)
+{
+	int ret = 0;
+	struct v4l2_control cam_ctrl;
+	cam_ctrl.id = V4L2_CID_CAM_JPEG_MEMSIZE;
+
+	ret = v4l2_subdev_call(ctrl->cam->sd, core, g_ctrl, &cam_ctrl);
+	if (ret < 0) {
+		/* Fixed log typo: "JEPG" -> "JPEG". */
+		fimc_err("%s: Subdev doesn't support JPEG encoding.\n",
+			 __func__);
+		return 0;
+	}
+
+	return cam_ctrl.value;
+}
+
+
+/*
+ * Compute pre-scaler and main-scaler ratios for the capture path and
+ * store them in ctrl->sc.  Source size comes from the camera window
+ * (or the FIMC-IS format when the ISP is in use); target size is the
+ * capture format, swapped when a 90/270-degree rotation is set.
+ * Returns 0 on success, -EINVAL on a non-positive source/target size.
+ */
+static int fimc_capture_scaler_info(struct fimc_control *ctrl)
+{
+ struct fimc_scaler *sc = &ctrl->sc;
+ struct v4l2_rect *window = &ctrl->cam->window;
+ int tx, ty, sx, sy;
+ struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+ int rot = 0;
+
+ if (!ctrl->cam->use_isp) {
+ sx = window->width;
+ sy = window->height;
+ } else {
+ sx = ctrl->is.fmt.width;
+ sy = ctrl->is.fmt.height;
+ }
+
+ sc->real_width = sx;
+ sc->real_height = sy;
+
+ rot = fimc_mapping_rot_flip(ctrl->cap->rotate, ctrl->cap->flip);
+
+ /* 90/270-degree rotation swaps the target width/height. */
+ if (rot & FIMC_ROT) {
+ tx = ctrl->cap->fmt.height;
+ ty = ctrl->cap->fmt.width;
+ } else {
+ tx = ctrl->cap->fmt.width;
+ ty = ctrl->cap->fmt.height;
+ }
+
+ fimc_dbg("%s: CamOut (%d, %d), TargetOut (%d, %d)\n",
+ __func__, sx, sy, tx, ty);
+
+ if (sx <= 0 || sy <= 0) {
+ fimc_err("%s: invalid source size\n", __func__);
+ return -EINVAL;
+ }
+
+ if (tx <= 0 || ty <= 0) {
+ fimc_err("%s: invalid target size\n", __func__);
+ return -EINVAL;
+ }
+
+ fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
+ fimc_get_scaler_factor(sy, ty, &sc->pre_vratio, &sc->vfactor);
+
+ /* NOTE(review): for square sources with a ~1.5x-2x downscale this
+ * forces prescale ratio 2 / factor 1 — appears to be a quality or
+ * hardware-limit workaround; confirm intent before changing. */
+ if (sx == sy) {
+ if (sx*10/tx >= 15 && sx*10/tx < 20) {
+ sc->pre_hratio = 2;
+ sc->hfactor = 1;
+ }
+ if (sy*10/ty >= 15 && sy*10/ty < 20) {
+ sc->pre_vratio = 2;
+ sc->vfactor = 1;
+ }
+ }
+
+
+ sc->pre_dst_width = sx / sc->pre_hratio;
+ sc->pre_dst_height = sy / sc->pre_vratio;
+
+ /* Main-scaler ratio is fixed point: 14 fractional bits on hw 0x50
+ * and later, 8 bits on older revisions. */
+ if (pdata->hw_ver >= 0x50) {
+ sc->main_hratio = (sx << 14) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 14) / (ty << sc->vfactor);
+ } else {
+ sc->main_hratio = (sx << 8) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 8) / (ty << sc->vfactor);
+ }
+
+ sc->scaleup_h = (tx >= sx) ? 1 : 0;
+ sc->scaleup_v = (ty >= sy) ? 1 : 0;
+
+ return 0;
+}
+
+/*
+ * Variant of fimc_capture_scaler_info() used when the FIMC-IS digital
+ * zoom window changes: the source size is the zoomed-in window instead
+ * of the full IS format, and the square-source prescale tweak is not
+ * applied.  Results go into ctrl->sc; returns 0 or -EINVAL.
+ */
+static int fimc_capture_change_scaler_info(struct fimc_control *ctrl)
+{
+ struct fimc_scaler *sc = &ctrl->sc;
+ struct v4l2_rect *window = &ctrl->cam->window;
+ int tx, ty, sx, sy;
+ struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+ int rot = 0;
+
+ if (!ctrl->cam->use_isp) {
+ sx = window->width;
+ sy = window->height;
+ } else {
+ sx = ctrl->is.zoom_in_width;
+ sy = ctrl->is.zoom_in_height;
+ }
+
+ sc->real_width = sx;
+ sc->real_height = sy;
+
+ rot = fimc_mapping_rot_flip(ctrl->cap->rotate, ctrl->cap->flip);
+
+ /* 90/270-degree rotation swaps the target width/height. */
+ if (rot & FIMC_ROT) {
+ tx = ctrl->cap->fmt.height;
+ ty = ctrl->cap->fmt.width;
+ } else {
+ tx = ctrl->cap->fmt.width;
+ ty = ctrl->cap->fmt.height;
+ }
+
+ fimc_dbg("%s: CamOut (%d, %d), TargetOut (%d, %d)\n",
+ __func__, sx, sy, tx, ty);
+
+ if (sx <= 0 || sy <= 0) {
+ fimc_err("%s: invalid source size\n", __func__);
+ return -EINVAL;
+ }
+
+ if (tx <= 0 || ty <= 0) {
+ fimc_err("%s: invalid target size\n", __func__);
+ return -EINVAL;
+ }
+
+ fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
+ fimc_get_scaler_factor(sy, ty, &sc->pre_vratio, &sc->vfactor);
+
+ sc->pre_dst_width = sx / sc->pre_hratio;
+ sc->pre_dst_height = sy / sc->pre_vratio;
+
+ /* Fixed-point ratio: 14 fractional bits on hw 0x50+, else 8. */
+ if (pdata->hw_ver >= 0x50) {
+ sc->main_hratio = (sx << 14) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 14) / (ty << sc->vfactor);
+ } else {
+ sc->main_hratio = (sx << 8) / (tx << sc->hfactor);
+ sc->main_vratio = (sy << 8) / (ty << sc->vfactor);
+ }
+
+ sc->scaleup_h = (tx >= sx) ? 1 : 0;
+ sc->scaleup_v = (ty >= sy) ? 1 : 0;
+
+ return 0;
+}
+
+/*
+ * Resume capture after a zoom reconfiguration: restart the scaler,
+ * re-enable capture DMA (honoring the scaler-bypass setting), and
+ * mask the frame-end interrupt again.  Always returns 0.
+ */
+int fimc_start_zoom_capture(struct fimc_control *ctrl)
+{
+ fimc_dbg("%s\n", __func__);
+
+ fimc_hwset_start_scaler(ctrl);
+
+ fimc_hwset_enable_capture(ctrl, ctrl->sc.bypass);
+ fimc_hwset_disable_frame_end_irq(ctrl);
+
+ return 0;
+}
+
+/*
+ * Quiesce capture before a zoom reconfiguration.  With `lastirq` set,
+ * the last-IRQ mechanism is pulsed around the capture disable;
+ * otherwise the frame-end interrupt is re-enabled so completion can be
+ * observed.  Returns 0, or -ENODEV without a camera/capture context.
+ */
+int fimc_stop_zoom_capture(struct fimc_control *ctrl)
+{
+	fimc_dbg("%s\n", __func__);
+	if (!ctrl->cam) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!ctrl->cap) {
+		/* Fixed log typo: "cappure" -> "capture". */
+		fimc_err("%s: No capture format.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (ctrl->cap->lastirq) {
+		fimc_hwset_enable_lastirq(ctrl);
+		fimc_hwset_disable_capture(ctrl);
+		fimc_hwset_disable_lastirq(ctrl);
+	} else {
+		fimc_hwset_disable_capture(ctrl);
+		fimc_hwset_enable_frame_end_irq(ctrl);
+	}
+
+	fimc_hwset_stop_scaler(ctrl);
+	return 0;
+}
+
+/*
+ * Queue buffer `i` on the capture input queue, unless it is already
+ * there.  PINGPONG_2ADDR_MODE only.  Always returns 0.
+ */
+static int fimc_add_inqueue(struct fimc_control *ctrl, int i)
+{
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct fimc_buf_set *tmp_buf;
+
+	/* Idiomatic entry iterator replaces the manual
+	 * list_for_each() + list_entry() pair. */
+	list_for_each_entry(tmp_buf, &cap->inq, list) {
+		/* skip list_add_tail if the buffer is already in cap->inq */
+		if (tmp_buf->id == i)
+			return 0;
+	}
+	list_add_tail(&cap->bufs[i].list, &cap->inq);
+
+	return 0;
+}
+
+/*
+ * Move the oldest buffer from the input queue into hardware output
+ * slot `i` AND its ping-pong partner slot.  PINGPONG_2ADDR_MODE only.
+ * Returns 0, -ENOENT on a bad index or empty input queue.
+ */
+static int fimc_add_outqueue(struct fimc_control *ctrl, int i)
+{
+ struct fimc_capinfo *cap = ctrl->cap;
+ struct fimc_buf_set *buf;
+ unsigned int mask = 0x2;
+
+ /* PINGPONG_2ADDR_MODE Only */
+ /* pair_buf_index stands for pair index of i. (0<->2) (1<->3) */
+ int pair_buf_index = (i^mask);
+
+ /* FIMC have 4 h/w registers */
+ if (i < 0 || i >= FIMC_PHYBUFS) {
+ fimc_err("%s: invalid queue index : %d\n", __func__, i);
+ return -ENOENT;
+ }
+
+ if (list_empty(&cap->inq))
+ return -ENOENT;
+
+ buf = list_first_entry(&cap->inq, struct fimc_buf_set, list);
+
+ /* pair index buffer should be allocated first */
+ cap->outq[pair_buf_index] = buf->id;
+ fimc_hwset_output_address(ctrl, buf, pair_buf_index);
+
+ cap->outq[i] = buf->id;
+ fimc_hwset_output_address(ctrl, buf, i);
+
+ /* Buffer now owned by the hardware slots; drop it from the queue. */
+ list_del(&buf->list);
+
+ return 0;
+}
+
+/*
+ * VIDIOC_G_PARM handler: forward to the sensor subdev under the v4l2
+ * lock.  Writeback inputs have no subdev and report success directly.
+ */
+int fimc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ret = 0;
+
+	fimc_dbg("%s\n", __func__);
+
+	/* Added: the original dereferenced ctrl->cam unconditionally,
+	 * oopsing when G_PARM arrives before VIDIOC_S_INPUT. */
+	if (!ctrl->cam) {
+		fimc_err("%s: no camera device selected yet, do VIDIOC_S_INPUT first\n",
+			 __func__);
+		return -ENODEV;
+	}
+
+	/* WriteBack doesn't have subdev_call */
+	if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B))
+		return 0;
+
+	mutex_lock(&ctrl->v4l2_lock);
+	ret = v4l2_subdev_call(ctrl->cam->sd, video, g_parm, a);
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return ret;
+}
+
+/*
+ * VIDIOC_S_PARM handler: forward the frame-rate request to the sensor
+ * (or FIMC-IS) subdev when this FIMC instance owns the camera.
+ * Writeback inputs have no subdev and report success directly.
+ */
+int fimc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ret = 0;
+	int new_fps = 0;
+
+	/* Added: guard against S_PARM before VIDIOC_S_INPUT (the
+	 * original dereferenced ctrl->cam unconditionally). */
+	if (!ctrl->cam) {
+		fimc_err("%s: no camera device selected yet, do VIDIOC_S_INPUT first\n",
+			 __func__);
+		return -ENODEV;
+	}
+
+	/* Added: the original divided by the userspace-supplied
+	 * numerator unconditionally — a zero numerator caused a
+	 * division by zero. */
+	if (a->parm.capture.timeperframe.numerator)
+		new_fps = a->parm.capture.timeperframe.denominator /
+				a->parm.capture.timeperframe.numerator;
+
+	fimc_info2("%s fimc%d, %d\n", __func__, ctrl->id, new_fps);
+
+	/* WriteBack doesn't have subdev_call */
+	if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B))
+		return 0;
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	if (ctrl->cam->sd && fimc_cam_use)
+		ret = v4l2_subdev_call(ctrl->cam->sd, video, s_parm, a);
+	else if (ctrl->is.sd && fimc_cam_use)
+		ret = v4l2_subdev_call(ctrl->is.sd, video, s_parm, a);
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return ret;
+}
+
+/* Enumerate controls */
+/*
+ * VIDIOC_QUERYCTRL handler: serve the request from the local
+ * fimc_controls table when the id matches, otherwise forward it to
+ * the sensor subdev under the v4l2 lock.
+ */
+int fimc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	size_t idx;
+	int ret;
+
+	fimc_dbg("%s\n", __func__);
+
+	for (idx = 0; idx < ARRAY_SIZE(fimc_controls); idx++) {
+		if (fimc_controls[idx].id == qc->id) {
+			/* Struct assignment instead of memcpy(). */
+			*qc = fimc_controls[idx];
+			return 0;
+		}
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+	ret = v4l2_subdev_call(ctrl->cam->sd, core, queryctrl, qc);
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return ret;
+}
+
+/* Menu control items */
+/*
+ * VIDIOC_QUERYMENU handler: menu queries are answered entirely by the
+ * sensor subdev, serialized by the v4l2 lock.
+ */
+int fimc_querymenu(struct file *file, void *fh, struct v4l2_querymenu *qm)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int err;
+
+	fimc_dbg("%s\n", __func__);
+
+	mutex_lock(&ctrl->v4l2_lock);
+	err = v4l2_subdev_call(ctrl->cam->sd, core, querymenu, qm);
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return err;
+}
+
+/*
+ * VIDIOC_ENUM_INPUT handler: report a registered camera as a V4L2
+ * input.  Returns -EINVAL for indexes out of range or without a
+ * registered camera.
+ */
+int fimc_enum_input(struct file *file, void *fh, struct v4l2_input *inp)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	fimc_dbg("%s: index %d\n", __func__, inp->index);
+
+	if (inp->index >= FIMC_MAXCAMS) {
+		fimc_err("%s: invalid input index, received = %d\n",
+				__func__, inp->index);
+		return -EINVAL;
+	}
+
+	if (!fimc->camera_isvalid[inp->index])
+		return -EINVAL;
+	mutex_lock(&ctrl->v4l2_lock);
+
+	/* Bounded copies: inp->name is a fixed-size array and the
+	 * board-supplied type string is not guaranteed to fit — the
+	 * original strcpy() could overflow it. */
+	if (fimc->camera[inp->index]->use_isp && !(fimc->camera[inp->index]->info))
+		strlcpy(inp->name, "ISP Camera", sizeof(inp->name));
+	else
+		strlcpy(inp->name, fimc->camera[inp->index]->info->type,
+			sizeof(inp->name));
+
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * VIDIOC_G_INPUT handler: report the globally active camera index.
+ * Fails with -ENODEV when no camera has been selected yet.
+ */
+int fimc_g_input(struct file *file, void *fh, unsigned int *i)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	/* Guard against G_INPUT being issued before S_INPUT. */
+	if (!ctrl->cam) {
+		fimc_err("no camera device selected yet. do VIDIOC_S_INPUT first\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+	*i = (unsigned int) fimc->active_camera;
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	fimc_dbg("%s: index %d\n", __func__, *i);
+
+	return 0;
+}
+
+/*
+ * Detach and power down the camera subdev bound to this FIMC.  When
+ * this instance owns the camera (fimc_cam_use), the i2c client is
+ * unregistered and power/MCLK are dropped in that order; otherwise
+ * only the bookkeeping is cleared.  The FIMC-LITE subdev, if attached,
+ * is powered down as well.  Always returns 0.
+ */
+int fimc_release_subdev(struct fimc_control *ctrl)
+{
+ struct fimc_global *fimc = get_fimc_dev();
+ struct i2c_client *client;
+ /* NOTE(review): pdata is fetched but never used in this function. */
+ struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+ int ret;
+
+ if (ctrl->cam->sd && fimc_cam_use) {
+ fimc_dbg("%s called\n", __func__);
+
+ /* WriteBack doesn't need clock setting */
+ if ((ctrl->cam->id == CAMERA_WB) ||
+ (ctrl->cam->id == CAMERA_WB_B)) {
+ ctrl->cam->initialized = 0;
+ ctrl->cam = NULL;
+ fimc->active_camera = -1;
+ return 0;
+ }
+
+ /* Teardown order: i2c client, sensor power, then MCLK.
+ * ctrl->cam must only be cleared after the clk_disable below. */
+ client = v4l2_get_subdevdata(ctrl->cam->sd);
+ i2c_unregister_device(client);
+ ctrl->cam->sd = NULL;
+ if (ctrl->cam->cam_power)
+ ctrl->cam->cam_power(0);
+
+ /* shutdown the MCLK */
+ if (fimc->mclk_status == CAM_MCLK_ON) {
+ clk_disable(ctrl->cam->clk);
+ fimc->mclk_status = CAM_MCLK_OFF;
+ }
+
+ ctrl->cam->initialized = 0;
+ ctrl->cam = NULL;
+ fimc->active_camera = -1;
+ } else if (ctrl->cam->sd) {
+ /* Not the owning instance: clear bookkeeping only. */
+ ctrl->cam->initialized = 0;
+ ctrl->cam = NULL;
+ fimc->active_camera = -1;
+ }
+
+ if (ctrl->flite_sd && fimc_cam_use) {
+ ret = v4l2_subdev_call(ctrl->flite_sd, core, s_power, 0);
+ if (ret)
+ fimc_err("s_power failed: %d", ret);
+
+ ctrl->flite_sd = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * Register the camera sensor as a v4l2 i2c subdev and run the initial
+ * power-up/init sequence.  Returns 0 on success or a negative errno.
+ */
+static int fimc_configure_subdev(struct fimc_control *ctrl)
+{
+	struct i2c_adapter *i2c_adap;
+	struct i2c_board_info *i2c_info;
+	struct i2c_client *client;
+	struct v4l2_subdev *sd;
+	unsigned short addr;
+	char *name;
+	int ret = 0;
+
+	i2c_adap = i2c_get_adapter(ctrl->cam->get_i2c_busnum());
+	if (!i2c_adap) {
+		fimc_err("subdev i2c_adapter missing-skip registration\n");
+		return -ENODEV;
+	}
+
+	i2c_info = ctrl->cam->info;
+	if (!i2c_info) {
+		fimc_err("%s: subdev i2c board info missing\n", __func__);
+		return -ENODEV;
+	}
+
+	name = i2c_info->type;
+	if (!name) {
+		fimc_err("subdev i2c driver name missing-skip registration\n");
+		return -ENODEV;
+	}
+
+	addr = i2c_info->addr;
+	if (!addr) {
+		fimc_err("subdev i2c address missing-skip registration\n");
+		return -ENODEV;
+	}
+	/*
+	 * NOTE: first time subdev being registered,
+	 * s_config is called and try to initialize subdev device
+	 * but in this point, we are not giving MCLK and power to subdev
+	 * so nothing happens but pass platform data through
+	 */
+	sd = v4l2_i2c_new_subdev_board(&ctrl->v4l2_dev, i2c_adap,
+			i2c_info, &addr);
+	if (!sd) {
+		fimc_err("%s: v4l2 subdev board registering failed\n",
+				__func__);
+		/* Added: the original only logged the failure and then
+		 * proceeded with a NULL subdev, triggering futile
+		 * power-on retries in fimc_init_camera(). */
+		return -ENODEV;
+	}
+	/* Assign subdev to proper camera device pointer */
+	ctrl->cam->sd = sd;
+
+	if (!ctrl->cam->initialized) {
+		ret = fimc_init_camera(ctrl);
+		if (ret < 0) {
+			fimc_err("%s: fail to initialize subdev\n", __func__);
+			client = v4l2_get_subdevdata(sd);
+			i2c_unregister_device(client);
+			ctrl->cam->sd = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int flite_register_callback(struct device *dev, void *p)
+{
+	/* Collect every probed FIMC-LITE subdev into the caller's array,
+	 * indexed by the owning platform device id. */
+	struct v4l2_subdev **sd_list = p;
+	struct v4l2_subdev *sd = dev_get_drvdata(dev);
+
+	if (sd) {
+		struct platform_device *pdev = v4l2_get_subdev_hostdata(sd);
+
+		sd_list[pdev->id] = sd;
+	}
+
+	/* Always keep iterating (a non-zero value would stop iteration). */
+	return 0;
+}
+
+static struct v4l2_subdev *exynos_flite_get_subdev(int id)
+{
+	static const char module_name[] = "exynos-fimc-lite";
+	struct v4l2_subdev *subdevs[FLITE_MAX_NUM] = {NULL,};
+	struct device_driver *drv;
+	int ret;
+
+	/* Load the FIMC-LITE driver module on demand if it is not bound. */
+	drv = driver_find(module_name, &platform_bus_type);
+	if (drv == NULL) {
+		request_module(module_name);
+		drv = driver_find(module_name, &platform_bus_type);
+		if (drv == NULL)
+			return ERR_PTR(-ENODEV);
+	}
+
+	/* Walk all devices bound to the driver and gather their subdevs. */
+	ret = driver_for_each_device(drv, NULL, &subdevs[0],
+			flite_register_callback);
+	put_driver(drv);
+
+	return ret ? NULL : subdevs[id];
+}
+
+/*
+ * fimc_subdev_attatch - attach the FIMC-LITE subdev for the current camera
+ * @ctrl: FIMC control context; ctrl->cam->flite_id selects the instance
+ *
+ * Returns 0 on success or a negative errno when the FIMC-LITE subdev
+ * cannot be found.
+ */
+int fimc_subdev_attatch(struct fimc_control *ctrl)
+{
+	int ret = 0;
+
+	ctrl->flite_sd = exynos_flite_get_subdev(ctrl->cam->flite_id);
+	if (IS_ERR_OR_NULL(ctrl->flite_sd)) {
+		/* BUG FIX: the original NULLed the pointer first and then
+		 * returned PTR_ERR(NULL) == 0, reporting success on failure.
+		 * Capture the error code before clearing the pointer. */
+		ret = ctrl->flite_sd ? PTR_ERR(ctrl->flite_sd) : -ENODEV;
+		ctrl->flite_sd = NULL;
+		return ret;
+	}
+
+	if (fimc_cam_use) {
+		/* Power the FIMC-LITE block only when this fd really
+		 * drives the camera. */
+		ret = v4l2_subdev_call(ctrl->flite_sd, core, s_power, 1);
+		if (ret)
+			fimc_err("s_power failed: %d", ret);
+	}
+
+	return 0;
+}
+
+static int fimc_is_register_callback(struct device *dev, void *p)
+{
+	struct v4l2_subdev **sd_ptr = p;
+
+	/* Hand the device's subdev back through @p; a missing drvdata
+	 * aborts the iteration with -EINVAL. */
+	*sd_ptr = dev_get_drvdata(dev);
+
+	return (*sd_ptr == NULL) ? -EINVAL : 0;
+}
+
+/*
+ * fimc_is_release_subdev - power down and detach the FIMC-IS subdev
+ * @ctrl: FIMC control context
+ *
+ * When this fd really drives the camera (fimc_cam_use), the sensor power
+ * and MCLK are shut off first, then the FIMC-IS core is powered down and
+ * unregistered. Otherwise only the driver bookkeeping is cleared.
+ *
+ * Returns 0 on success, -ENODEV if the FIMC-IS power-off call fails
+ * (in which case the subdev stays registered).
+ */
+int fimc_is_release_subdev(struct fimc_control *ctrl)
+{
+	int ret;
+	struct fimc_global *fimc = get_fimc_dev();
+	/* NOTE(review): pdata is unused in this function */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+	if (ctrl->is.sd && ctrl->cam && fimc_cam_use) {
+		/* Sensor power off before clock removal, then IS core off. */
+		if (ctrl->cam->cam_power)
+			ctrl->cam->cam_power(0);
+		/* shutdown the MCLK */
+		if (fimc->mclk_status == CAM_MCLK_ON) {
+			clk_disable(ctrl->cam->clk);
+			fimc->mclk_status = CAM_MCLK_OFF;
+		}
+
+		ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, 0);
+		if (ret < 0) {
+			fimc_dbg("FIMC-IS init failed");
+			return -ENODEV;
+		}
+
+		v4l2_device_unregister_subdev(ctrl->is.sd);
+		ctrl->is.sd = NULL;
+		ctrl->cam->initialized = 0;
+		ctrl->cam = NULL;
+		fimc->active_camera = -1;
+	} else if (ctrl->is.sd && ctrl->cam) {
+		/* Not the owning fd: just drop references, no hw teardown. */
+		ctrl->is.sd = NULL;
+		ctrl->cam->initialized = 0;
+		ctrl->cam = NULL;
+		fimc->active_camera = -1;
+	}
+
+	return 0;
+}
+
+static struct v4l2_subdev *fimc_is_get_subdev(int id)
+{
+	static const char module_name[] = "exynos4-fimc-is";
+	struct v4l2_subdev *sd = NULL;
+	struct device_driver *drv;
+	int ret;
+
+	/* Load the FIMC-IS driver module on demand if it is not bound. */
+	drv = driver_find(module_name, &platform_bus_type);
+	if (drv == NULL) {
+		request_module(module_name);
+		drv = driver_find(module_name, &platform_bus_type);
+		if (drv == NULL)
+			return ERR_PTR(-ENODEV);
+	}
+
+	ret = driver_for_each_device(drv, NULL, &sd,
+			fimc_is_register_callback);
+	put_driver(drv);
+
+	return ret ? NULL : sd;
+}
+
+/*
+ * fimc_is_init_cam - power up the sensor driven through FIMC-IS
+ * @ctrl: FIMC control context; ctrl->cam describes the sensor
+ *
+ * Brings the power domain up (runtime PM), enables MCLK at the
+ * platform-specified rate, then turns sensor power on. On failure the
+ * power/clock state is rolled back; on success the FIMC block is reset
+ * and cam->initialized is set.
+ *
+ * Returns 0 on success or the cam_power() error code.
+ */
+static int fimc_is_init_cam(struct fimc_control *ctrl)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct s3c_platform_camera *cam;
+	int ret = 0;
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+#endif
+
+	cam = ctrl->cam;
+	/* Do noting if already initialized */
+	if (ctrl->cam->initialized)
+		return 0;
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	/* Bring the FIMC power domain up before touching clocks. */
+	if (ctrl->power_status == FIMC_POWER_OFF)
+		pm_runtime_get_sync(&pdev->dev);
+#endif
+
+	/* set rate for mclk */
+	if ((clk_get_rate(cam->clk)) && (fimc->mclk_status == CAM_MCLK_OFF)) {
+		clk_set_rate(cam->clk, cam->clk_rate);
+		clk_enable(cam->clk);
+		fimc->mclk_status = CAM_MCLK_ON;
+		fimc_info1("clock for camera (FIMC-IS): %d\n", cam->clk_rate);
+	}
+
+	/* enable camera power if needed */
+	if (cam->cam_power) {
+		ret = cam->cam_power(1);
+		if (unlikely(ret < 0))
+			fimc_err("\nfail to power on\n");
+	}
+
+
+	/* Retry camera power-up if first i2c fails. */
+	/* NOTE(review): despite the comment above, this branch only rolls
+	 * back power and MCLK - no retry is actually performed here. */
+	if (unlikely(ret < 0)) {
+		if (cam->cam_power)
+			cam->cam_power(0);
+
+		if (fimc->mclk_status == CAM_MCLK_ON) {
+			clk_disable(ctrl->cam->clk);
+			fimc->mclk_status = CAM_MCLK_OFF;
+		}
+
+		fimc_err("Camera power/init failed!!!!\n\n");
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		/* Drop the runtime PM reference taken above on failure. */
+		if (ctrl->power_status == FIMC_POWER_ON)
+			pm_runtime_put_sync(&pdev->dev);
+#endif
+	} else {
+		/* Apply things to interface register */
+		fimc_hwset_reset(ctrl);
+		cam->initialized = 1;
+	}
+
+	return ret;
+}
+
+/*
+ * fimc_s_input - VIDIOC_S_INPUT handler: select and activate camera @i
+ * @file: v4l2 file handle
+ * @fh:   private data carrying the fimc_control pointer
+ * @i:    camera index (< FIMC_MAXCAMS)
+ *
+ * Switches the active sensor: releases any previously attached subdev,
+ * registers/initializes the new one (regular sensor, writeback, or
+ * FIMC-IS path), and allocates the per-fd capture info on first use.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int fimc_s_input(struct file *file, void *fh, unsigned int i)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	int ret = 0;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+#endif
+
+	fimc_dbg("%s: index %d\n", __func__, i);
+
+	if (i >= FIMC_MAXCAMS) {
+		fimc_err("%s: invalid input index\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!fimc->camera_isvalid[i])
+		return -EINVAL;
+
+	if (fimc->camera[i]->sd && fimc_cam_use) {
+		fimc_err("%s: Camera already in use.\n", __func__);
+		return -EBUSY;
+	}
+	mutex_lock(&ctrl->v4l2_lock);
+
+	/* If ctrl->cam is not NULL, there is one subdev already registered.
+	 * We need to unregister that subdev first. */
+	if (i != fimc->active_camera) {
+		fimc_info1("\n\nfimc_s_input activating subdev\n");
+		if (ctrl->cam && (ctrl->cam->sd || ctrl->flite_sd))
+			fimc_release_subdev(ctrl);
+		else if (ctrl->is.sd)
+			fimc_is_release_subdev(ctrl);
+		ctrl->cam = fimc->camera[i];
+
+		/* Register the sensor subdev unless it is a writeback
+		 * input or driven through FIMC-IS. */
+		if ((ctrl->cam->id != CAMERA_WB) && (ctrl->cam->id !=
+			CAMERA_WB_B) && (!ctrl->cam->use_isp) && fimc_cam_use) {
+			ret = fimc_configure_subdev(ctrl);
+			if (ret < 0) {
+#ifdef CONFIG_MACH_GC1
+				if (ret == -ENOSYS) {
+					/* return no error If firmware is bad.
+					Because F/W update app should access the sensor through HAL instance */
+					fimc_err("%s: please update the F/W\n", __func__);
+				} else {
+					ctrl->cam = NULL;
+					mutex_unlock(&ctrl->v4l2_lock);
+					fimc_err("%s: Could not register camera" \
+						" sensor with V4L2.\n", __func__);
+					return -ENODEV;
+				}
+#else
+				ctrl->cam = NULL;
+#ifdef CONFIG_MACH_P4NOTE
+				fimc_release_subdev(ctrl);
+#endif /* CONFIG_MACH_P4NOTE */
+				mutex_unlock(&ctrl->v4l2_lock);
+				fimc_err("%s: Could not register camera" \
+					" sensor with V4L2.\n", __func__);
+				return -ENODEV;
+#endif
+			}
+		}
+		fimc->active_camera = i;
+		fimc_info2("fimc_s_input activated subdev = %d\n", i);
+	}
+
+	if (!fimc_cam_use) {
+		/* Non-owning fd may only attach to the already-active camera. */
+		if (i == fimc->active_camera) {
+			ctrl->cam = fimc->camera[i];
+			fimc_info2("fimc_s_input activating subdev FIMC%d\n",
+							ctrl->id);
+		} else {
+			mutex_unlock(&ctrl->v4l2_lock);
+			return -EINVAL;
+		}
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+#ifdef CONFIG_DRM_EXYNOS_FIMD_WB
+		if ((ctrl->cam->id != CAMERA_WB) &&
+			(ctrl->cam->id != CAMERA_WB_B) &&
+			(ctrl->power_status == FIMC_POWER_OFF)) {
+#else
+		if (ctrl->power_status == FIMC_POWER_OFF) {
+#endif
+			pm_runtime_get_sync(&pdev->dev);
+		}
+		fimc_hwset_reset(ctrl);
+#endif
+	}
+
+	if (ctrl->cam->use_isp) {
+		/* fimc-lite attatch */
+		ret = fimc_subdev_attatch(ctrl);
+		if (ret) {
+			fimc_err("subdev_attatch failed\n");
+			mutex_unlock(&ctrl->v4l2_lock);
+			return -ENODEV;
+		}
+		/* fimc-is attatch */
+		ctrl->is.sd = fimc_is_get_subdev(i);
+		if (IS_ERR_OR_NULL(ctrl->is.sd)) {
+			fimc_err("fimc-is subdev_attatch failed\n");
+			mutex_unlock(&ctrl->v4l2_lock);
+			return -ENODEV;
+		}
+
+		ctrl->is.fmt.width = ctrl->cam->width;
+		ctrl->is.fmt.height = ctrl->cam->height;
+		ctrl->is.frame_count = 0;
+		if (fimc_cam_use) {
+			/* Full FIMC-IS bring-up: clocks/power, core power,
+			 * firmware load, then sensor init. */
+			ret = fimc_is_init_cam(ctrl);
+			if (ret < 0) {
+				fimc_dbg("FIMC-IS init clock failed");
+				mutex_unlock(&ctrl->v4l2_lock);
+				return -ENODEV;
+			}
+			ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, 1);
+			if (ret < 0) {
+				fimc_dbg("FIMC-IS init failed");
+				mutex_unlock(&ctrl->v4l2_lock);
+				return -ENODEV;
+			}
+			ret = v4l2_subdev_call(ctrl->is.sd, core, load_fw);
+			if (ret < 0) {
+				fimc_dbg("FIMC-IS init failed");
+				mutex_unlock(&ctrl->v4l2_lock);
+				return -ENODEV;
+			}
+			ret = v4l2_subdev_call(ctrl->is.sd, core, init, ctrl->cam->sensor_index);
+			if (ret < 0) {
+				fimc_dbg("FIMC-IS init failed");
+				mutex_unlock(&ctrl->v4l2_lock);
+				return -ENODEV;
+			}
+		}
+	}
+
+
+	/*
+	 * The first time alloc for struct cap_info, and will be
+	 * released at the file close.
+	 * Anyone has better idea to do this?
+	 */
+	if (!cap) {
+		cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+		if (!cap) {
+			fimc_err("%s: no memory for "
+				"capture device info\n", __func__);
+			/* BUG FIX: the original returned here while still
+			 * holding v4l2_lock, deadlocking later callers. */
+			mutex_unlock(&ctrl->v4l2_lock);
+			return -ENOMEM;
+		}
+
+		/* assign to ctrl */
+		ctrl->cap = cap;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		if (ctrl->power_status == FIMC_POWER_OFF)
+			pm_runtime_get_sync(&pdev->dev);
+#endif
+	} else {
+		memset(cap, 0, sizeof(*cap));
+	}
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+int fimc_enum_fmt_vid_capture(struct file *file, void *fh,
+					struct v4l2_fmtdesc *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int index = f->index;
+
+	fimc_dbg("%s\n", __func__);
+
+	/* Reject indices past the supported-format table. */
+	if (index >= ARRAY_SIZE(capture_fmts)) {
+		fimc_err("%s: There is no support format index %d\n", __func__, index);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	/* Hand back a full copy of the table entry for this index. */
+	memset(f, 0, sizeof(*f));
+	memcpy(f, &capture_fmts[index], sizeof(*f));
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+int fimc_g_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	fimc_dbg("%s\n", __func__);
+
+	if (ctrl->cap == NULL) {
+		fimc_err("%s: no capture device info\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	/* Report the currently configured capture pixel format. */
+	memset(&f->fmt.pix, 0, sizeof(f->fmt.pix));
+	memcpy(&f->fmt.pix, &ctrl->cap->fmt, sizeof(f->fmt.pix));
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * Check for whether the requested format
+ * can be streamed out from FIMC
+ * depends on FIMC node
+ */
+static int fimc_fmt_avail(struct fimc_control *ctrl,
+		struct v4l2_pix_format *f)
+{
+	int idx;
+
+	/*
+	 * TODO: check for which FIMC is used.
+	 * Available fmt should be varied for each FIMC
+	 */
+
+	/* Scan the supported-format table for a matching fourcc. */
+	for (idx = 0; idx < ARRAY_SIZE(capture_fmts); idx++)
+		if (f->pixelformat == capture_fmts[idx].pixelformat)
+			return 0;
+
+	fimc_info1("Not supported pixelformat requested\n");
+
+	return -1;
+}
+
+/*
+ * figures out the depth of requested format
+ */
+static int fimc_fmt_depth(struct fimc_control *ctrl, struct v4l2_pix_format *f)
+{
+	int bits = 0;
+
+	/* First check for available format or not */
+	if (fimc_fmt_avail(ctrl, f) < 0)
+		return -1;
+
+	/* handles only supported pixelformats:
+	 * 32bpp RGB, 16bpp RGB/YUV422, 12bpp YUV420, or compressed (-1). */
+	switch (f->pixelformat) {
+	case V4L2_PIX_FMT_RGB32:
+		bits = 32;
+		fimc_dbg("32bpp\n");
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV12T:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+		bits = 12;
+		fimc_dbg("12bpp\n");
+		break;
+	case V4L2_PIX_FMT_RGB565:
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_YUV422P:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		bits = 16;
+		fimc_dbg("16bpp\n");
+		break;
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_INTERLEAVED:
+		bits = -1;
+		fimc_dbg("Compressed format.\n");
+		break;
+	default:
+		fimc_dbg("why am I here?\n");
+		break;
+	}
+
+	return bits;
+}
+
+/*
+ * fimc_s_fmt_vid_private - private S_FMT: push the format to the sensor
+ * @file: v4l2 file handle
+ * @fh:   private data carrying the fimc_control pointer
+ * @f:    requested pixel format
+ *
+ * Two paths: if a regular sensor subdev is registered, fill the cached
+ * mbus format from @f and forward it via s_mbus_fmt; otherwise (FIMC-IS
+ * path) build a temporary mbus format and forward it to the IS subdev.
+ *
+ * Returns 0/subdev result on success or a negative errno.
+ */
+int fimc_s_fmt_vid_private(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	/* NOTE(review): pdata is unused in this function */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	struct v4l2_mbus_framefmt *mbus_fmt;
+	int ret = 0;
+
+	fimc_dbg("%s\n", __func__);
+	/* NOTE(review): ctrl->cam is dereferenced without a NULL check;
+	 * callers are expected to have run fimc_s_input first - confirm. */
+	if (ctrl->cam->sd) {
+		struct v4l2_pix_format *pix = &f->fmt.pix;
+		int depth;
+
+		fimc_info1("%s %d:\n", __func__, __LINE__);
+
+		mbus_fmt = &ctrl->cap->mbus_fmt;
+		mbus_fmt->width = pix->width;
+		mbus_fmt->height = pix->height;
+#ifdef CONFIG_MACH_P4NOTE
+/* Unfortuntely, we have to use pix->field (not pix->priv) since
+ * pix.field is already used in the below else condtion statement
+ * (in case that sub-devices are not registered)
+ */
+		mbus_fmt->field = pix->field;
+#endif
+#if (defined(CONFIG_MACH_S2PLUS) || defined(CONFIG_MACH_GC1))
+		mbus_fmt->field = pix->priv;
+#endif
+		printk(KERN_INFO "%s mbus_fmt->width = %d, height = %d,\n",
+				__func__,mbus_fmt->width ,mbus_fmt->height);
+
+		/* Map the fourcc onto an mbus code: compressed formats use
+		 * the JPEG code, everything else the packed YUV code. */
+		depth = fimc_fmt_depth(ctrl, pix);
+		if (depth == 0) {
+			fimc_err("%s: Invalid pixel format\n", __func__);
+			return -EINVAL;
+		} else if (depth < 0) {	/* JPEG  */
+			mbus_fmt->code = V4L2_MBUS_FMT_JPEG_1X8;
+			mbus_fmt->colorspace = V4L2_COLORSPACE_JPEG;
+		} else {
+			mbus_fmt->code = V4L2_MBUS_FMT_VYUY8_2X8;
+		}
+
+		if (fimc_cam_use) {
+			ret = v4l2_subdev_call(ctrl->cam->sd, video,
+					s_mbus_fmt, mbus_fmt);
+			if (ret) {
+				fimc_err("%s: fail to s_mbus_fmt\n", __func__);
+				return ret;
+			}
+		}
+
+		return 0;
+	} else {
+		/* FIMC-IS path: no sensor subdev, talk to the IS core. */
+		mbus_fmt = kzalloc(sizeof(*mbus_fmt), GFP_KERNEL);
+		if (!mbus_fmt) {
+			fimc_err("%s: no memory for "
+				"mbus_fmt\n", __func__);
+			return -ENOMEM;
+		}
+		ctrl->is.fmt.width = f->fmt.pix.width;
+		ctrl->is.fmt.height = f->fmt.pix.height;
+		ctrl->is.fmt.pixelformat = f->fmt.pix.pixelformat;
+
+		mbus_fmt->width = f->fmt.pix.width;
+		mbus_fmt->height = f->fmt.pix.height;
+		mbus_fmt->code = V4L2_MBUS_FMT_YUYV8_2X8; /*dummy*/
+		mbus_fmt->field = f->fmt.pix.field;
+		mbus_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+
+		printk(KERN_INFO "%s mbus_fmt->width = %d, height = %d, \n",
+			__func__,mbus_fmt->width ,mbus_fmt->height);
+		if (fimc_cam_use)
+			ret = v4l2_subdev_call(ctrl->is.sd, video,
+					s_mbus_fmt, mbus_fmt);
+		kfree(mbus_fmt);
+		return ret;
+	}
+
+	/* Unreachable: both branches above return. */
+	return -EINVAL;
+}
+
+/*
+ * fimc_s_fmt_vid_capture - VIDIOC_S_FMT handler for the capture queue
+ * @file: v4l2 file handle
+ * @fh:   private data carrying the fimc_control pointer
+ * @f:    requested format (fmt.pix)
+ *
+ * Stores the requested format, derives bytesperline/sizeimage from the
+ * format depth, decides whether the scaler is bypassed (JPEG/interleaved,
+ * or always on GC1), and for the FIMC-IS path queries the sensor
+ * dimensions back from the IS core.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int fimc_s_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	struct fimc_capinfo *cap = ctrl->cap;
+	/* NOTE(review): pdata is unused in this function */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+	int ret = 0;
+	int depth;
+	struct v4l2_control is_ctrl;
+
+	is_ctrl.id = 0;
+	is_ctrl.value = 0;
+
+	printk(KERN_INFO "%s\n", __func__);
+
+	if (!ctrl->cap) {
+		fimc_err("%s: No capture structure." \
+				"you have to call s_input first.\n", __func__);
+		return -ENODEV;
+	}
+
+	/* rotaton, flip, dtp_mode, movie_mode and vt_mode,
+	 * sensor_output_width,height should be maintained.(by TN) */
+	/* NOTE(review): this relies on those 7 u32 fields being laid out
+	 * at the very end of struct fimc_capinfo - fragile, verify if the
+	 * struct ever changes. */
+	memset(cap, 0, sizeof(*cap) - sizeof(u32) * 7);
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	memset(&cap->fmt, 0, sizeof(cap->fmt));
+	memcpy(&cap->fmt, &f->fmt.pix, sizeof(cap->fmt));
+
+	/*
+	 * Note that expecting format only can be with
+	 * available output format from FIMC
+	 * Following items should be handled in driver
+	 * bytesperline = width * depth / 8
+	 * sizeimage = bytesperline * height
+	 */
+	/* This function may return 0 or -1 in case of error,
+	 * hence need to check here.
+	 */
+
+	depth = fimc_fmt_depth(ctrl, &cap->fmt);
+	if (depth == 0) {
+		mutex_unlock(&ctrl->v4l2_lock);
+		fimc_err("%s: Invalid pixel format\n", __func__);
+		return -EINVAL;
+	} else if (depth < 0) {
+		/*
+		 * When the pixelformat is JPEG,
+		 * the application is requesting for data
+		 * in JPEG compressed format
+		 */
+		cap->fmt.colorspace = V4L2_COLORSPACE_JPEG;
+	} else {
+		cap->fmt.bytesperline = (cap->fmt.width * depth) >> 3;
+		cap->fmt.sizeimage = (cap->fmt.bytesperline * cap->fmt.height);
+	}
+
+
+	if (cap->fmt.pixelformat == V4L2_PIX_FMT_JPEG ||
+		cap->fmt.pixelformat == V4L2_PIX_FMT_INTERLEAVED) {
+		ctrl->sc.bypass = 1;
+		cap->lastirq = 0;
+		fimc_info1("fimc_s_fmt_vid_capture V4L2_COLORSPACE_JPEG or INTERLEAVED\n");
+	} else {
+#ifdef CONFIG_MACH_GC1
+		/*
+		 Fimc scaler input Hsize is restricted to 4224 pixels.
+		 So, GC1 has to bypass fimc scaler to use more than 12M YUV.
+		 */
+		ctrl->sc.bypass = 1;
+#else
+		ctrl->sc.bypass = 0;
+#endif
+		cap->lastirq = 0;
+	}
+
+	printk(KERN_INFO "fimc%d s_fmt width = %d, height = %d\n", ctrl->id, \
+				cap->fmt.width, cap->fmt.height);
+
+	/* WriteBack doesn't have subdev_call */
+	/* NOTE(review): ctrl->cam is dereferenced without a NULL check here;
+	 * s_input is assumed to have set it - confirm. */
+	if (ctrl->cam->id == CAMERA_WB || ctrl->cam->id == CAMERA_WB_B) {
+		mutex_unlock(&ctrl->v4l2_lock);
+		return 0;
+	}
+
+	if (ctrl->is.sd && fimc_cam_use) {
+		/* Ask the FIMC-IS core for the real sensor dimensions and
+		 * cache them together with the Bayer mbus code. */
+		ctrl->is.mbus_fmt.code = V4L2_MBUS_FMT_SGRBG10_1X10;
+		is_ctrl.id = V4L2_CID_IS_GET_SENSOR_WIDTH;
+		is_ctrl.value = 0;
+		v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl);
+		ctrl->is.fmt.width = ctrl->is.mbus_fmt.width = is_ctrl.value;
+
+		is_ctrl.id = V4L2_CID_IS_GET_SENSOR_HEIGHT;
+		is_ctrl.value = 0;
+		v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl);
+		ctrl->is.fmt.height = ctrl->is.mbus_fmt.height = is_ctrl.value;
+		/* default offset values */
+		ctrl->is.offset_x = 16;
+		ctrl->is.offset_y = 12;
+	}
+
+	fimc_hwset_reset(ctrl);
+
+	mutex_unlock(&ctrl->v4l2_lock);
+	printk(KERN_INFO "%s -- FIMC%d\n", __func__, ctrl->id);
+
+	return ret;
+}
+
+/* TRY_FMT is not supported on the capture node; -ENOTTY tells callers
+ * to negotiate via S_FMT instead. */
+int fimc_try_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f)
+{
+	/* Not implement */
+	return -ENOTTY;
+}
+
+/*
+ * fimc_alloc_buffers - allocate DMA capture buffers for all planes
+ * @ctrl:  FIMC control context (cap->nr_bufs buffers are allocated)
+ * @plane: number of color planes (1, 2, or 3)
+ * @size:  pixel count (width * height), or byte size for compressed
+ * @align: alignment; non-zero also page-aligns the plane sizes
+ * @bpp:   total bits per pixel across all planes
+ * @use_paddingbuf: append one extra plane of @pad_size bytes for
+ *                  packet data (e.g. interleaved JPEG metadata)
+ * @pad_size: size of that padding plane
+ *
+ * Computes per-plane byte sizes from the bpp split (plane 0 always gets
+ * 8 bits/pixel; the rest is divided among the chroma planes), then
+ * allocates each buffer via fimc_dma_alloc(). On any failure, everything
+ * allocated so far is freed.
+ *
+ * Returns 0 on success or -ENOMEM.
+ */
+static int fimc_alloc_buffers(struct fimc_control *ctrl,
+		int plane, int size, int align, int bpp, int use_paddingbuf, int pad_size)
+{
+	struct fimc_capinfo *cap = ctrl->cap;
+	int i, j;
+	int plane_length[4] = {0, };
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+	int alloc_size, err;
+	struct cma_info mem_info;
+#endif
+
+	switch (plane) {
+	case 1:
+		if (align) {
+			plane_length[0] = PAGE_ALIGN((size*bpp) >> 3);
+			plane_length[1] = 0;
+			plane_length[2] = 0;
+		} else {
+			plane_length[0] = (size*bpp) >> 3;
+			plane_length[1] = 0;
+			plane_length[2] = 0;
+		}
+		break;
+	/* In case of 2, only NV12 and NV12T is supported. */
+	case 2:
+		/* Luma plane gets 8 bits/pixel, chroma plane the rest. */
+		if (align) {
+			plane_length[0] = PAGE_ALIGN((size*8) >> 3);
+			plane_length[1] = PAGE_ALIGN((size*(bpp-8)) >> 3);
+			plane_length[2] = 0;
+			fimc_info2("plane_length[0] = %d, plane_length[1] = %d\n" \
+					, plane_length[0], plane_length[1]);
+		} else {
+			plane_length[0] = ((size*8) >> 3);
+			plane_length[1] = ((size*(bpp-8)) >> 3);
+			plane_length[2] = 0;
+			fimc_info2("plane_length[0] = %d, plane_length[1] = %d\n" \
+					, plane_length[0], plane_length[1]);
+		}
+
+		break;
+	/* In case of 3
+	 * YUV422 : 8 / 4 / 4 (bits)
+	 * YUV420 : 8 / 2 / 2 (bits)
+	 * 3rd plane have to consider page align for mmap */
+	case 3:
+		if (align) {
+			plane_length[0] = (size*8) >> 3;
+			plane_length[1] = (size*((bpp-8)/2)) >> 3;
+			plane_length[2] = PAGE_ALIGN((size*bpp)>>3) - plane_length[0]
+				- plane_length[1];
+		} else {
+			plane_length[0] = (size*8) >> 3;
+			plane_length[1] = (size*((bpp-8)/2)) >> 3;
+			plane_length[2] = ((size*bpp)>>3) - plane_length[0]
+				- plane_length[1];
+		}
+		break;
+	default:
+		fimc_err("impossible!\n");
+		return -ENOMEM;
+	}
+
+	/* Optional extra plane for side-band packet data. */
+	if (use_paddingbuf) {
+		plane_length[plane] = pad_size;
+		cap->pktdata_plane = plane;
+	} else
+		plane_length[plane] = 0;
+
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+	/* Reserve one contiguous CMA region large enough for all buffers;
+	 * fimc_dma_alloc() then carves buffers out of it. */
+	if (align) {
+		alloc_size = (ALIGN(plane_length[0], align) +
+				ALIGN(plane_length[1], align)
+				+ ALIGN(plane_length[2], align))
+				* cap->nr_bufs;
+	} else {
+		alloc_size = (plane_length[0] + plane_length[1] +
+				plane_length[2]) * cap->nr_bufs;
+	}
+
+	err = cma_info(&mem_info, ctrl->dev, 0);
+	printk(KERN_DEBUG "%s : [cma_info] start_addr : 0x%x, end_addr : 0x%x, "
+			"total_size : 0x%x, free_size : 0x%x req_size : 0x%x\n",
+			__func__, mem_info.lower_bound, mem_info.upper_bound,
+			mem_info.total_size, mem_info.free_size, alloc_size);
+
+	if (err || (mem_info.free_size < alloc_size)) {
+		fimc_err("%s: get cma info failed\n", __func__);
+		ctrl->mem.size = 0;
+		ctrl->mem.base = 0;
+		return -ENOMEM;
+	} else {
+		ctrl->mem.size = alloc_size;
+		ctrl->mem.base = (dma_addr_t)cma_alloc
+			(ctrl->dev, ctrl->cma_name, (size_t) alloc_size, align);
+	}
+
+	ctrl->mem.curr = ctrl->mem.base;
+#endif
+	for (i = 0; i < cap->nr_bufs; i++) {
+		for (j = 0; j < plane; j++) {
+			cap->bufs[i].length[j] = plane_length[j];
+			fimc_dma_alloc(ctrl, &cap->bufs[i], j, align);
+
+			if (!cap->bufs[i].base[j])
+				goto err_alloc;
+		}
+		if (use_paddingbuf) {
+			cap->bufs[i].length[plane] = plane_length[plane];
+			fimc_dma_alloc(ctrl, &cap->bufs[i], plane, align);
+
+			/* CPU-visible mapping of the packet-data plane. */
+			cap->bufs[i].vaddr_pktdata = phys_to_virt(cap->bufs[i].base[plane]);
+			/* printk(KERN_INFO "pktdata address = 0x%x, 0x%x\n"
+				,cap->bufs[i].base[1], cap->bufs[i].vaddr_pktdata ); */
+
+			if (!cap->bufs[i].base[plane])
+				goto err_alloc;
+		}
+		cap->bufs[i].state = VIDEOBUF_PREPARED;
+	}
+
+	return 0;
+
+err_alloc:
+	/* Unwind: free every plane allocated so far and clear the slots. */
+	for (i = 0; i < cap->nr_bufs; i++) {
+		for (j = 0; j < plane; j++) {
+			if (cap->bufs[i].base[j])
+				fimc_dma_free(ctrl, &cap->bufs[i], j);
+		}
+		if (use_paddingbuf) {
+			if (cap->bufs[i].base[plane])
+				fimc_dma_free(ctrl, &cap->bufs[i], plane);
+		}
+		memset(&cap->bufs[i], 0, sizeof(cap->bufs[i]));
+	}
+
+	return -ENOMEM;
+}
+
+static void fimc_free_buffers(struct fimc_control *ctrl)
+{
+	struct fimc_capinfo *cap;
+	int idx;
+
+	/* Nothing to do without a capture context. */
+	if (!ctrl || !ctrl->cap)
+		return;
+	cap = ctrl->cap;
+
+	/* Reset every physical buffer slot to the uninitialized state. */
+	for (idx = 0; idx < FIMC_PHYBUFS; idx++) {
+		memset(&cap->bufs[idx], 0, sizeof(cap->bufs[idx]));
+		cap->bufs[idx].state = VIDEOBUF_NEEDS_INIT;
+	}
+
+	/* Rewind the bump pointer over the reserved memory region. */
+	ctrl->mem.curr = ctrl->mem.base;
+}
+
+/*
+ * fimc_reqbufs_capture_mmap - VIDIOC_REQBUFS handler for MMAP buffers
+ * @fh: fimc_control pointer
+ * @b:  request; b->count == 0 (or >= FIMC_CAPBUFS) frees everything
+ *
+ * Frees any existing buffers, then allocates b->count new DMA buffers
+ * sized and aligned per the current capture pixel format. On hw_ver >=
+ * 0x51 the hardware output-buffer sequence registers are programmed too.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int fimc_reqbufs_capture_mmap(void *fh, struct v4l2_requestbuffers *b)
+{
+	struct fimc_control *ctrl = fh;
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+#endif
+	int ret = 0, i;
+	int bpp = 0;
+	int size = 0;
+
+	if (!cap) {
+		fimc_err("%s: no capture device info\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	/* A count value of zero frees all buffers */
+	if ((b->count == 0) || (b->count >= FIMC_CAPBUFS)) {
+		/* aborting or finishing any DMA in progress */
+		if (ctrl->status == FIMC_STREAMON)
+			fimc_streamoff_capture(fh);
+		for (i = 0; i < FIMC_CAPBUFS; i++) {
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 0);
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 1);
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 2);
+		}
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+		/* Return the whole CMA region reserved by fimc_alloc_buffers. */
+		if (ctrl->mem.base) {
+			cma_free(ctrl->mem.base);
+			ctrl->mem.base = 0;
+			ctrl->mem.size = 0;
+		}
+#endif
+
+		mutex_unlock(&ctrl->v4l2_lock);
+		return 0;
+	}
+	/* free previous buffers */
+	if ((cap->nr_bufs >= 0) && (cap->nr_bufs < FIMC_CAPBUFS)) {
+		fimc_info1("%s : remained previous buffer count is %d\n", __func__,
+				cap->nr_bufs);
+		for (i = 0; i < cap->nr_bufs; i++) {
+			fimc_dma_free(ctrl, &cap->bufs[i], 0);
+			fimc_dma_free(ctrl, &cap->bufs[i], 1);
+			fimc_dma_free(ctrl, &cap->bufs[i], 2);
+			fimc_dma_free(ctrl, &cap->bufs[i], 3);
+		}
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+		if (ctrl->mem.base) {
+			cma_free(ctrl->mem.base);
+			ctrl->mem.base = 0;
+			ctrl->mem.size = 0;
+		}
+#endif
+	}
+	fimc_free_buffers(ctrl);
+
+	cap->nr_bufs = b->count;
+	/* Newer FIMC (>= 0x51) tracks buffers in hardware sequence regs. */
+	if (pdata->hw_ver >= 0x51) {
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		if (ctrl->power_status == FIMC_POWER_OFF) {
+			pm_runtime_get_sync(&pdev->dev);
+		}
+#endif
+		fimc_hw_reset_output_buf_sequence(ctrl);
+		for (i = 0; i < cap->nr_bufs; i++) {
+			fimc_hwset_output_buf_sequence(ctrl, i, 1);
+			cap->bufs[i].id = i;
+			cap->bufs[i].state = VIDEOBUF_NEEDS_INIT;
+
+			/* initialize list */
+			INIT_LIST_HEAD(&cap->bufs[i].list);
+		}
+		fimc_info1("%s: requested %d buffers\n", __func__, b->count);
+		fimc_info1("%s: sequence[%d]\n", __func__,
+				fimc_hwget_output_buf_sequence(ctrl));
+		INIT_LIST_HEAD(&cap->outgoing_q);
+	}
+	/* Older FIMC uses a software incoming queue instead. */
+	if (pdata->hw_ver < 0x51) {
+		INIT_LIST_HEAD(&cap->inq);
+		for (i = 0; i < cap->nr_bufs; i++) {
+			cap->bufs[i].id = i;
+			cap->bufs[i].state = VIDEOBUF_NEEDS_INIT;
+
+			/* initialize list */
+			INIT_LIST_HEAD(&cap->bufs[i].list);
+		}
+	}
+
+	/* Fixed 4 KiB side-band plane for packet data when enabled. */
+	if (cap->pktdata_enable)
+		cap->pktdata_size = 0x1000;
+
+	bpp = fimc_fmt_depth(ctrl, &cap->fmt);
+
+	/* Plane count, size and alignment depend on the pixel format. */
+	switch (cap->fmt.pixelformat) {
+	case V4L2_PIX_FMT_RGB32:	/* fall through */
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_VYUY:		/* fall through */
+	case V4L2_PIX_FMT_YVYU:		/* fall through */
+	case V4L2_PIX_FMT_NV16:		/* fall through */
+	case V4L2_PIX_FMT_NV61:		/* fall through */
+		fimc_info1("%s : 1plane\n", __func__);
+		ret = fimc_alloc_buffers(ctrl, 1,
+			cap->fmt.width * cap->fmt.height, SZ_4K, bpp, cap->pktdata_enable, cap->pktdata_size);
+		break;
+
+	case V4L2_PIX_FMT_NV21:
+		fimc_info1("%s : 2plane for NV21 w %d h %d\n", __func__,
+				cap->fmt.width, cap->fmt.height);
+		ret = fimc_alloc_buffers(ctrl, 2,
+			cap->fmt.width * cap->fmt.height, 0, bpp, cap->pktdata_enable, cap->pktdata_size);
+		break;
+
+	case V4L2_PIX_FMT_NV12:
+		fimc_info1("%s : 2plane for NV12\n", __func__);
+		ret = fimc_alloc_buffers(ctrl, 2,
+			cap->fmt.width * cap->fmt.height, SZ_64K, bpp, cap->pktdata_enable, cap->pktdata_size);
+		break;
+
+	case V4L2_PIX_FMT_NV12T:
+		fimc_info1("%s : 2plane for NV12T\n", __func__);
+		ret = fimc_alloc_buffers(ctrl, 2,
+			ALIGN(cap->fmt.width, 128) * ALIGN(cap->fmt.height, 32),
+			SZ_64K, bpp, cap->pktdata_enable, cap->pktdata_size);
+		break;
+
+	case V4L2_PIX_FMT_YUV422P:	/* fall through */
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+		fimc_info1("%s : 3plane\n", __func__);
+		ret = fimc_alloc_buffers(ctrl, 3,
+			cap->fmt.width * cap->fmt.height, 0, bpp, cap->pktdata_enable, cap->pktdata_size);
+		break;
+
+	case V4L2_PIX_FMT_JPEG:
+		fimc_info1("%s : JPEG 1plane\n", __func__);
+		size = fimc_camera_get_jpeg_memsize(ctrl);
+		fimc_info2("%s : JPEG 1plane size = %x\n", __func__, size);
+		ret = fimc_alloc_buffers(ctrl, 1, size, 0, 8, cap->pktdata_enable, cap->pktdata_size);
+		break;
+	case V4L2_PIX_FMT_INTERLEAVED:
+		fimc_info1("%s : Interleaved Format\n", __func__);
+		size = fimc_camera_get_jpeg_memsize(ctrl); /*0xA00000*/
+		fimc_info2("%s : Interleaved size = %x\n", __func__, size);
+		ret = fimc_alloc_buffers(ctrl, 1, size, 0, 8, cap->pktdata_enable, cap->pktdata_size);
+		break;
+	default:
+		break;
+	}
+
+	if (ret) {
+		fimc_err("%s: no memory for capture buffer\n", __func__);
+		mutex_unlock(&ctrl->v4l2_lock);
+		return -ENOMEM;
+	}
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * fimc_reqbufs_capture_userptr - VIDIOC_REQBUFS handler for USERPTR
+ * @fh: fimc_control pointer
+ * @b:  request; b->count == 0 (or >= FIMC_CAPBUFS) frees everything
+ *
+ * No DMA memory is allocated here - userspace supplies the buffers at
+ * QBUF time. This only resets the bookkeeping and programs the hardware
+ * output-buffer sequence.
+ *
+ * Returns 0 on success or -ENODEV without capture info.
+ */
+int fimc_reqbufs_capture_userptr(void *fh, struct v4l2_requestbuffers *b)
+{
+	struct fimc_control *ctrl = fh;
+	struct fimc_capinfo *cap = ctrl->cap;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+#endif
+	int i;
+
+	if (!cap) {
+		fimc_err("%s: no capture device info\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	/* A count value of zero frees all buffers */
+	if ((b->count == 0) || (b->count >= FIMC_CAPBUFS)) {
+		/* aborting or finishing any DMA in progress */
+		if (ctrl->status == FIMC_STREAMON)
+			fimc_streamoff_capture(fh);
+
+		fimc_free_buffers(ctrl);
+
+		mutex_unlock(&ctrl->v4l2_lock);
+		return 0;
+	}
+
+	/* free previous buffers */
+	if ((cap->nr_bufs >= 0) && (cap->nr_bufs < FIMC_CAPBUFS)) {
+		fimc_info1("%s: prev buf cnt(%d)\n", __func__, cap->nr_bufs);
+		fimc_free_buffers(ctrl);
+	}
+
+	cap->nr_bufs = b->count;
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	/* Power the block before touching sequence registers. */
+	if (ctrl->power_status == FIMC_POWER_OFF) {
+		pm_runtime_get_sync(&pdev->dev);
+	}
+#endif
+	fimc_hw_reset_output_buf_sequence(ctrl);
+	for (i = 0; i < cap->nr_bufs; i++) {
+		fimc_hwset_output_buf_sequence(ctrl, i, 1);
+		cap->bufs[i].id = i;
+		cap->bufs[i].state = VIDEOBUF_IDLE;
+
+		/* initialize list */
+		INIT_LIST_HEAD(&cap->bufs[i].list);
+	}
+	fimc_info1("%s: requested %d buffers\n", __func__, b->count);
+	fimc_info1("%s: sequence[%d]\n", __func__,
+			fimc_hwget_output_buf_sequence(ctrl));
+	INIT_LIST_HEAD(&cap->outgoing_q);
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+int fimc_reqbufs_capture(void *fh, struct v4l2_requestbuffers *b)
+{
+	/* Dispatch on memory type: driver-allocated MMAP buffers versus
+	 * user-supplied pointers. */
+	if (b->memory == V4L2_MEMORY_MMAP)
+		return fimc_reqbufs_capture_mmap(fh, b);
+
+	return fimc_reqbufs_capture_userptr(fh, b);
+}
+
+/*
+ * fimc_querybuf_capture - VIDIOC_QUERYBUF handler for the capture queue
+ * @fh: fimc_control pointer
+ * @b:  buffer descriptor to fill (index selects the buffer)
+ *
+ * Reports the total mmap length of buffer b->index by summing its plane
+ * lengths per pixel format (NV12/NV12T planes are rounded up to 64 KiB),
+ * plus the packet-data plane when enabled. The mmap offset encodes the
+ * buffer index in PAGE_SIZE units.
+ *
+ * Returns 0 on success, -EBUSY while streaming.
+ */
+int fimc_querybuf_capture(void *fh, struct v4l2_buffer *b)
+{
+	struct fimc_control *ctrl = fh;
+	struct fimc_capinfo *cap = ctrl->cap;
+
+	if (ctrl->status != FIMC_STREAMOFF) {
+		fimc_err("fimc is running\n");
+		return -EBUSY;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	switch (cap->fmt.pixelformat) {
+	/* Single-plane formats: length is just plane 0. */
+	case V4L2_PIX_FMT_JPEG:		/* fall through */
+	case V4L2_PIX_FMT_RGB32:	/* fall through */
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_VYUY:		/* fall through */
+	case V4L2_PIX_FMT_YVYU:		/* fall through */
+	case V4L2_PIX_FMT_NV16:		/* fall through */
+	case V4L2_PIX_FMT_NV61:
+	case V4L2_PIX_FMT_INTERLEAVED:
+		b->length = cap->bufs[b->index].length[0];
+		break;
+
+	case V4L2_PIX_FMT_NV21:
+		b->length = ctrl->cap->bufs[b->index].length[0]
+			+ ctrl->cap->bufs[b->index].length[1];
+		break;
+	/* NV12(T) planes are allocated on 64 KiB boundaries. */
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:
+		b->length = ALIGN(ctrl->cap->bufs[b->index].length[0], SZ_64K)
+			+ ALIGN(ctrl->cap->bufs[b->index].length[1], SZ_64K);
+		break;
+	case V4L2_PIX_FMT_YUV422P:	/* fall through */
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_YVU420:
+		b->length = ctrl->cap->bufs[b->index].length[0]
+			+ ctrl->cap->bufs[b->index].length[1]
+			+ ctrl->cap->bufs[b->index].length[2];
+		break;
+
+	default:
+		b->length = cap->bufs[b->index].length[0];
+		break;
+	}
+
+	/* Account for the side-band packet-data plane if present. */
+	if (cap->pktdata_enable)
+		b->length += ctrl->cap->bufs[b->index].length[cap->pktdata_plane];
+
+	b->m.offset = b->index * PAGE_SIZE;
+	/* memory field should filled V4L2_MEMORY_MMAP */
+	b->memory = V4L2_MEMORY_MMAP;
+
+	ctrl->cap->bufs[b->index].state = VIDEOBUF_IDLE;
+
+	fimc_dbg("%s: %d bytes with offset: %d\n",
+			__func__, b->length, b->m.offset);
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * fimc_g_ctrl_capture - VIDIOC_G_CTRL handler for the capture path
+ * @fh: fimc_control pointer
+ * @c:  control to query; c->value is filled on return
+ *
+ * Rotation/flip/cacheable are answered from driver state; any other
+ * control id is forwarded to the sensor subdev and/or the FIMC-IS
+ * subdev (writeback inputs have no subdev and return 0 unchanged).
+ *
+ * Returns 0, a subdev error code, or -ENODEV without a camera.
+ */
+int fimc_g_ctrl_capture(void *fh, struct v4l2_control *c)
+{
+	struct fimc_control *ctrl = fh;
+	int ret = 0;
+
+	fimc_dbg("%s\n", __func__);
+
+	switch (c->id) {
+	case V4L2_CID_ROTATION:
+		c->value = ctrl->cap->rotate;
+		break;
+
+	case V4L2_CID_HFLIP:
+		c->value = (ctrl->cap->flip & FIMC_XFLIP) ? 1 : 0;
+		break;
+
+	case V4L2_CID_VFLIP:
+		c->value = (ctrl->cap->flip & FIMC_YFLIP) ? 1 : 0;
+		break;
+
+	case V4L2_CID_CACHEABLE:
+		c->value = ctrl->cap->cacheable;
+		break;
+
+	default:
+		/* get ctrl supported by subdev */
+		/* BUG FIX: guard against a NULL ctrl->cam - the matching
+		 * setter fimc_s_ctrl_capture already checks this, the
+		 * getter dereferenced it unconditionally. */
+		if (!ctrl->cam) {
+			fimc_err("%s: No capture device.\n", __func__);
+			return -ENODEV;
+		}
+		/* WriteBack doesn't have subdev_call */
+		if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B))
+			break;
+		if (ctrl->cam->sd)
+			ret = v4l2_subdev_call(ctrl->cam->sd, core, g_ctrl, c);
+		if (ctrl->is.sd)
+			ret = v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, c);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * fimc_s_ctrl_capture - VIDIOC_S_CTRL handler for the capture path.
+ * Applies rotation/flip/effect/cache settings locally, performs
+ * ESD recovery and bus-frequency locking, and forwards any
+ * unrecognized control to the sensor and/or FIMC-IS subdev.
+ * Returns 0 or a negative errno.
+ */
+int fimc_s_ctrl_capture(void *fh, struct v4l2_control *c)
+{
+	struct fimc_control *ctrl = fh;
+	struct fimc_global *fimc = get_fimc_dev();
+	/* NOTE(review): pdata is never referenced in this function --
+	 * candidate for removal. */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	int ret = 0;
+
+	fimc_dbg("%s\n", __func__);
+
+	/* A camera context plus either a subdev (sensor or FIMC-IS) or
+	 * the FIMD write-back source must be attached. */
+	if (!ctrl->cam || !ctrl->cap ||
+		((ctrl->cam->id != CAMERA_WB && ctrl->cam->id != CAMERA_WB_B) &&
+		(!ctrl->cam->sd) && (!ctrl->is.sd))) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	switch (c->id) {
+#ifdef CONFIG_MACH_GC1
+	case V4L2_CID_CAM_UPDATE_FW:
+		/* Power-cycle sensor and MCLK before a firmware operation. */
+		if (fimc->mclk_status == CAM_MCLK_ON) {
+			if (ctrl->cam->cam_power)
+				ctrl->cam->cam_power(0);
+
+			/* shutdown the MCLK */
+			clk_disable(ctrl->cam->clk);
+			fimc->mclk_status = CAM_MCLK_OFF;
+
+			mdelay(5);
+		}
+
+		if ((clk_get_rate(ctrl->cam->clk)) && (fimc->mclk_status == CAM_MCLK_OFF)) {
+			clk_set_rate(ctrl->cam->clk, ctrl->cam->clk_rate);
+			clk_enable(ctrl->cam->clk);
+			fimc->mclk_status = CAM_MCLK_ON;
+			fimc_info1("clock for camera: %d\n", ctrl->cam->clk_rate);
+
+			if (ctrl->cam->cam_power)
+				ctrl->cam->cam_power(1);
+		}
+
+		if (c->value == FW_MODE_UPDATE)
+			ret = v4l2_subdev_call(ctrl->cam->sd, core, load_fw);
+
+		else
+			ret = v4l2_subdev_call(ctrl->cam->sd, core, s_ctrl, c);
+		break;
+#endif
+	case V4L2_CID_CAMERA_RESET:
+		/* ESD recovery: power-cycle the sensor, then re-init the
+		 * external sensor and/or the FIMC-IS firmware path. */
+		fimc_warn("ESD: reset the camera sensor\n");
+		if (ctrl->cam->initialized) {
+			if (ctrl->cam->cam_power)
+				ctrl->cam->cam_power(0);
+
+			/* shutdown the MCLK */
+			clk_disable(ctrl->cam->clk);
+			fimc->mclk_status = CAM_MCLK_OFF;
+			ctrl->cam->initialized = 0;
+#ifdef CONFIG_MACH_P4NOTE
+			/* 100ms: increase delay.
+			 * There are cases that sensor doesn't get revived
+			 * inspite of doing power reset.*/
+			msleep(100);
+#else
+			msleep(5);
+#endif
+		}
+		if (ctrl->cam->sd) {
+			fimc_warn("ESD: init external sensor\n");
+			ret = fimc_init_camera(ctrl);
+		}
+		if (ctrl->is.sd && ctrl->cam->use_isp) {
+			/* full FIMC-IS bring-up: power off -> clock init ->
+			 * power on -> firmware load -> sensor init */
+			fimc_warn("ESD: init FIMC-IS\n");
+			ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, 0);
+			if (ret < 0) {
+				fimc_err("ESD : FIMC-IS power off failed");
+				return -EINVAL;
+			}
+			ret = fimc_is_init_cam(ctrl);
+			if (ret < 0) {
+				fimc_err("ESD : FIMC-IS init clock failed");
+				return -EINVAL;
+			}
+			ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, 1);
+			if (ret < 0) {
+				fimc_err("ESD : FIMC-IS power on failed");
+				return -EINVAL;
+			}
+			ret = v4l2_subdev_call(ctrl->is.sd, core, load_fw);
+			if (ret < 0) {
+				fimc_dbg("ESD : FIMC-IS load FW failed");
+				return -EINVAL;
+			}
+			ret = v4l2_subdev_call(ctrl->is.sd, core,
+				init, ctrl->cam->sensor_index);
+			if (ret < 0) {
+				fimc_err("ESD : FIMC-IS init failed");
+				return -EINVAL;
+			}
+		}
+		break;
+	case V4L2_CID_ROTATION:
+		ctrl->cap->rotate = c->value;
+		break;
+
+	case V4L2_CID_HFLIP:
+		/* NOTE(review): HFLIP sets FIMC_YFLIP here, but
+		 * fimc_g_ctrl_capture reports FIMC_XFLIP for HFLIP (and the
+		 * converse for VFLIP). One of the two mappings looks
+		 * inverted -- verify against the flip hardware setup. */
+		if (c->value)
+			ctrl->cap->flip |= FIMC_YFLIP;
+		else
+			ctrl->cap->flip &= ~FIMC_YFLIP;
+		break;
+
+	case V4L2_CID_VFLIP:
+		if (c->value)
+			ctrl->cap->flip |= FIMC_XFLIP;
+		else
+			ctrl->cap->flip &= ~FIMC_XFLIP;
+		break;
+
+	/* PADDR_*: return the physical base address of plane Y/CB/CR of
+	 * buffer c->value through c->value itself (in-band reply). */
+	case V4L2_CID_PADDR_Y:
+		if (ctrl->cap->bufs[c->value].length[FIMC_ADDR_Y])
+			c->value = ctrl->cap->bufs[c->value].base[FIMC_ADDR_Y];
+		break;
+
+	case V4L2_CID_PADDR_CB: /* fall through */
+	case V4L2_CID_PADDR_CBCR:
+		if (ctrl->cap->bufs[c->value].length[FIMC_ADDR_CB])
+			c->value = ctrl->cap->bufs[c->value].base[FIMC_ADDR_CB];
+		break;
+
+	case V4L2_CID_PADDR_CR:
+		if (ctrl->cap->bufs[c->value].length[FIMC_ADDR_CR])
+			c->value = ctrl->cap->bufs[c->value].base[FIMC_ADDR_CR];
+		break;
+	/* Implementation as per C100 FIMC driver */
+	case V4L2_CID_STREAM_PAUSE:
+		fimc_hwset_stop_processing(ctrl);
+		break;
+
+	case V4L2_CID_IMAGE_EFFECT_APPLY:
+		ctrl->fe.ie_on = c->value ? 1 : 0;
+		ctrl->fe.ie_after_sc = 0;
+		ret = fimc_hwset_image_effect(ctrl);
+		break;
+
+	case V4L2_CID_IMAGE_EFFECT_FN:
+		if (c->value < 0 || c->value > FIMC_EFFECT_FIN_SILHOUETTE)
+			return -EINVAL;
+		ctrl->fe.fin = c->value;
+		ret = 0;
+		break;
+
+	case V4L2_CID_IMAGE_EFFECT_CB:
+		ctrl->fe.pat_cb = c->value & 0xFF;
+		ret = 0;
+		break;
+
+	case V4L2_CID_IMAGE_EFFECT_CR:
+		ctrl->fe.pat_cr = c->value & 0xFF;
+		ret = 0;
+		break;
+
+	/* The IS_* controls are forwarded only when the FIMC-IS subdev is
+	 * attached and this FIMC instance owns the camera. */
+	case V4L2_CID_IS_LOAD_FW:
+		if (ctrl->is.sd && fimc_cam_use)
+			ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, c->value);
+		break;
+	case V4L2_CID_IS_RESET:
+		if (ctrl->is.sd && fimc_cam_use)
+			ret = v4l2_subdev_call(ctrl->is.sd, core, reset, c->value);
+		break;
+	case V4L2_CID_IS_S_POWER:
+		if (ctrl->is.sd && fimc_cam_use)
+			ret = v4l2_subdev_call(ctrl->is.sd, core, s_power, c->value);
+		break;
+	case V4L2_CID_IS_S_STREAM:
+		if (ctrl->is.sd && fimc_cam_use)
+			ret = v4l2_subdev_call(ctrl->is.sd, video, s_stream, c->value);
+		break;
+	case V4L2_CID_CACHEABLE:
+		ctrl->cap->cacheable = c->value;
+		ret = 0;
+		break;
+
+	case V4L2_CID_EMBEDDEDDATA_ENABLE:
+		ctrl->cap->pktdata_enable = c->value;
+		ret = 0;
+		break;
+
+	case V4L2_CID_IS_ZOOM:
+		/* NOTE(review): fimc_is_set_zoom()'s return value is
+		 * discarded; errors are not reported to the caller. */
+		fimc_is_set_zoom(ctrl, c);
+		break;
+
+	case V4L2_CID_CAMERA_SENSOR_MODE:
+		ctrl->cap->movie_mode = c->value;
+		if (ctrl->cam->sd && fimc_cam_use)
+			ret = v4l2_subdev_call(ctrl->cam->sd, core, s_ctrl, c);
+		break;
+
+	case V4L2_CID_CAMERA_VT_MODE:
+		ctrl->cap->vt_mode = c->value;
+		if (fimc_cam_use) {
+			if (ctrl->cam->sd)
+				ret = v4l2_subdev_call(ctrl->cam->sd,
+					core, s_ctrl, c);
+			if (ctrl->is.sd && ctrl->cam->use_isp)
+				ret = v4l2_subdev_call(ctrl->is.sd,
+					core, s_ctrl, c);
+		}
+		break;
+
+	case V4L2_CID_CAMERA_SENSOR_OUTPUT_SIZE:
+		/* packed value: width in the high 16 bits, height low 16 */
+		ctrl->cap->sensor_output_width = (u32)c->value >> 16;
+		ctrl->cap->sensor_output_height = (u32)c->value & 0x0FFFF;
+		printk(KERN_DEBUG "sensor output size: %dx%d\n",
+			ctrl->cap->sensor_output_width,
+			ctrl->cap->sensor_output_height);
+		break;
+
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	case V4L2_CID_CAMERA_BUSFREQ_LOCK:
+		/* lock bus frequency */
+		dev_lock(ctrl->bus_dev, ctrl->dev, (unsigned long)c->value);
+		break;
+	case V4L2_CID_CAMERA_BUSFREQ_UNLOCK:
+		/* unlock bus frequency */
+		dev_unlock(ctrl->bus_dev, ctrl->dev);
+		break;
+#endif
+
+	case V4L2_CID_IS_CAMERA_FLASH_MODE:
+	case V4L2_CID_CAMERA_SCENE_MODE:
+	default:
+		/* try on subdev */
+		/* WriteBack doesn't have subdev_call */
+
+		if ((ctrl->cam->id == CAMERA_WB) || \
+			(ctrl->cam->id == CAMERA_WB_B))
+			break;
+		if (fimc_cam_use) {
+			if (ctrl->cam->sd)
+				ret = v4l2_subdev_call(ctrl->cam->sd,
+					core, s_ctrl, c);
+			if (ctrl->is.sd && ctrl->cam->use_isp)
+				ret = v4l2_subdev_call(ctrl->is.sd,
+					core, s_ctrl, c);
+		} else
+			ret = 0;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * fimc_g_ext_ctrls_capture - VIDIOC_G_EXT_CTRLS handler.
+ * Forwards the extended-control read to the sensor subdev and/or the
+ * FIMC-IS subdev under the v4l2 lock.
+ * NOTE(review): when both subdevs exist the FIMC-IS result overwrites
+ * the sensor's, and ctrl->cam is dereferenced without a NULL check
+ * (unlike fimc_s_ctrl_capture) -- verify callers guarantee cam != NULL.
+ */
+int fimc_g_ext_ctrls_capture(void *fh, struct v4l2_ext_controls *c)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ret = 0;
+	mutex_lock(&ctrl->v4l2_lock);
+
+	if (ctrl->cam->sd)
+		/* try on subdev */
+		ret = v4l2_subdev_call(ctrl->cam->sd, core, g_ext_ctrls, c);
+	if (ctrl->is.sd)
+		/* try on subdev */
+		ret = v4l2_subdev_call(ctrl->is.sd, core, g_ext_ctrls, c);
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return ret;
+}
+
+/*
+ * fimc_s_ext_ctrls_capture - VIDIOC_S_EXT_CTRLS handler.
+ * Forwards the extended-control write to exactly one subdev: the
+ * sensor if attached, otherwise FIMC-IS.
+ * NOTE(review): this uses else-if (sensor takes precedence) while
+ * fimc_g_ext_ctrls_capture calls both subdevs -- confirm the
+ * asymmetry between the get and set paths is intentional.
+ */
+int fimc_s_ext_ctrls_capture(void *fh, struct v4l2_ext_controls *c)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ret = 0;
+	mutex_lock(&ctrl->v4l2_lock);
+
+	if (ctrl->cam->sd)
+		/* try on subdev */
+		ret = v4l2_subdev_call(ctrl->cam->sd, core, s_ext_ctrls, c);
+	else if (ctrl->is.sd)
+		ret = v4l2_subdev_call(ctrl->is.sd, core, s_ext_ctrls, c);
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return ret;
+}
+
+/*
+ * fimc_cropcap_capture - VIDIOC_CROPCAP handler.
+ * Reports crop bounds and defaults equal to the full sensor frame
+ * (ctrl->cam->width x ctrl->cam->height).
+ * Returns 0, -ENODEV when no capture device is attached, or -ENOMEM.
+ *
+ * Fix: the kzalloc failure path returned -ENOMEM while still holding
+ * ctrl->v4l2_lock, deadlocking every later ioctl on this control.
+ */
+int fimc_cropcap_capture(void *fh, struct v4l2_cropcap *a)
+{
+	struct fimc_control *ctrl = fh;
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct fimc_global *fimc = get_fimc_dev();
+	struct s3c_platform_fimc *pdata;
+
+	fimc_dbg("%s\n", __func__);
+
+	if (!ctrl->cam || !ctrl->cam->sd || !ctrl->cap) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+	mutex_lock(&ctrl->v4l2_lock);
+
+	/* NOTE(review): the guard above already rejected a NULL cam/cap,
+	 * so the two fallback branches below are currently unreachable;
+	 * kept for safety against future guard changes. */
+	pdata = to_fimc_plat(ctrl->dev);
+	if (!ctrl->cam)
+		ctrl->cam = fimc->camera[pdata->default_cam];
+
+	if (!cap) {
+		cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+		if (!cap) {
+			fimc_err("%s: no memory for "
+				"capture device info\n", __func__);
+			/* fix: release the lock before bailing out */
+			mutex_unlock(&ctrl->v4l2_lock);
+			return -ENOMEM;
+		}
+
+		/* assign to ctrl */
+		ctrl->cap = cap;
+	}
+
+	/* crop limitations */
+	cap->cropcap.bounds.left = 0;
+	cap->cropcap.bounds.top = 0;
+	cap->cropcap.bounds.width = ctrl->cam->width;
+	cap->cropcap.bounds.height = ctrl->cam->height;
+
+	/* crop default values */
+	cap->cropcap.defrect.left = 0;
+	cap->cropcap.defrect.top = 0;
+	cap->cropcap.defrect.width = ctrl->cam->width;
+	cap->cropcap.defrect.height = ctrl->cam->height;
+
+	a->bounds = cap->cropcap.bounds;
+	a->defrect = cap->cropcap.defrect;
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * fimc_g_crop_capture - VIDIOC_G_CROP handler.
+ * Copies the current capture crop rectangle into a->c under the
+ * v4l2 lock. Returns 0 or -ENODEV when no capture context exists.
+ */
+int fimc_g_crop_capture(void *fh, struct v4l2_crop *a)
+{
+	struct fimc_control *ctrl = fh;
+	struct fimc_capinfo *cap;
+
+	fimc_dbg("%s\n", __func__);
+
+	cap = ctrl->cap;
+	if (!cap) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+	a->c = cap->crop;
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * fimc_s_crop_capture - VIDIOC_S_CROP handler.
+ * Stores the requested crop rectangle in the capture context.
+ * Returns 0 or -ENODEV.
+ *
+ * Fix: validate ctrl->cap before dereferencing it, matching
+ * fimc_g_crop_capture; previously a NULL cap caused a NULL-pointer
+ * dereference.
+ */
+int fimc_s_crop_capture(void *fh, struct v4l2_crop *a)
+{
+	struct fimc_control *ctrl = fh;
+
+	fimc_dbg("%s\n", __func__);
+
+	if (!ctrl->cap) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+	ctrl->cap->crop = a->c;
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * fimc_start_capture - kick the capture pipeline.
+ * Clears the status register, starts the scaler unless it is
+ * bypassed, enables capture DMA and masks the frame-end interrupt.
+ * The hw call order is significant. Always returns 0.
+ */
+int fimc_start_capture(struct fimc_control *ctrl)
+{
+	fimc_dbg("%s\n", __func__);
+
+	fimc_reset_status_reg(ctrl);
+
+	/* scaler is bypassed for JPEG/interleaved capture formats */
+	if (!ctrl->sc.bypass)
+		fimc_hwset_start_scaler(ctrl);
+
+	fimc_hwset_enable_capture(ctrl, ctrl->sc.bypass);
+	fimc_hwset_disable_frame_end_irq(ctrl);
+
+	return 0;
+}
+
+/*
+ * fimc_stop_capture - halt the capture pipeline.
+ * When lastirq is requested the "last frame" interrupt is raised
+ * around the disable so the frame in flight completes; otherwise
+ * capture is disabled and the frame-end interrupt is re-enabled.
+ * Returns 0, or -ENODEV when no camera/capture context exists.
+ *
+ * Fix: corrected the typo "No cappure format." in the error message.
+ */
+int fimc_stop_capture(struct fimc_control *ctrl)
+{
+	fimc_dbg("%s\n", __func__);
+	if (!ctrl->cam) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!ctrl->cap) {
+		fimc_err("%s: No capture format.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (ctrl->cap->lastirq) {
+		fimc_hwset_enable_lastirq(ctrl);
+		fimc_hwset_disable_capture(ctrl);
+		fimc_hwset_disable_lastirq(ctrl);
+	} else {
+		fimc_hwset_disable_capture(ctrl);
+		fimc_hwset_enable_frame_end_irq(ctrl);
+	}
+
+	fimc_hwset_stop_scaler(ctrl);
+
+	return 0;
+}
+
+/*
+ * fimc_check_capture_source - verify a usable capture source exists.
+ * A source is acceptable when a sensor or FIMC-IS subdev is attached,
+ * when no FIMC-Lite subdev is present, or when the input is the FIMD
+ * write-back path. Returns 0 on success, -ENODEV otherwise.
+ */
+static int fimc_check_capture_source(struct fimc_control *ctrl)
+{
+	struct s3c_platform_camera *cam = ctrl->cam;
+
+	if (!cam)
+		return -ENODEV;
+
+	if (cam->sd || ctrl->is.sd || !ctrl->flite_sd ||
+	    cam->id == CAMERA_WB || cam->id == CAMERA_WB_B)
+		return 0;
+
+	return -ENODEV;
+}
+
+/*
+ * is_scale_up - reject target sizes larger than the sensor frame.
+ * Scale-up is not supported on the capture path; a 90/270 degree
+ * rotation swaps the axes of the source frame before the comparison.
+ * Returns 0 when the target fits, -EINVAL otherwise.
+ */
+static int is_scale_up(struct fimc_control *ctrl)
+{
+	struct v4l2_mbus_framefmt *src = &ctrl->cap->mbus_fmt;
+	struct v4l2_pix_format *dst = &ctrl->cap->fmt;
+	u32 limit_w, limit_h;
+
+	if (!src->width) {
+		fimc_err("%s: sensor resolution isn't selected.\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ctrl->cap->rotate == 90 || ctrl->cap->rotate == 270) {
+		limit_w = src->height;
+		limit_h = src->width;
+	} else {
+		limit_w = src->width;
+		limit_h = src->height;
+	}
+
+	if (dst->width > limit_w || dst->height > limit_h) {
+		fimc_err("%s: ScaleUp isn't supported.\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_streamon_capture - VIDIOC_STREAMON handler for the capture path.
+ * Queries the sensor frame size, starts the MIPI-CSIS link and the
+ * sensor/FIMC-IS/FIMC-Lite subdevs, configures write-back when the
+ * source is FIMD, programs the FIMC scaler/output registers and
+ * finally enables capture. Returns 0 or a negative errno.
+ */
+int fimc_streamon_capture(void *fh)
+{
+	struct fimc_control *ctrl = fh;
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct v4l2_frmsizeenum cam_frmsize;
+	struct v4l2_control is_ctrl;
+	void __iomem *qos_regs;
+
+	int rot = 0, i;
+	int ret = 0;
+	struct s3c_platform_camera *cam = NULL;
+
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+	printk(KERN_INFO "%s++ fimc%d\n", __func__, ctrl->id);
+	cam_frmsize.discrete.width = 0;
+	cam_frmsize.discrete.height = 0;
+	is_ctrl.id = 0;
+	is_ctrl.value = 0;
+
+	if (!ctrl->cam) {
+		fimc_err("%s: ctrl->cam is null\n", __func__);
+		return -EINVAL;
+	} else {
+		cam = ctrl->cam;
+	}
+
+	if (fimc_check_capture_source(ctrl)) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	/* scale-up is unsupported; reject before touching hardware */
+	if (cam->sd) {
+		if (is_scale_up(ctrl))
+			return -EINVAL;
+	}
+
+	if (pdata->hw_ver < 0x51)
+		fimc_hw_reset_camera(ctrl);
+#if (!defined(CONFIG_EXYNOS_DEV_PD) && !defined(CONFIG_PM_RUNTIME))
+	ctrl->status = FIMC_READY_ON;
+#endif
+	cap->irq = 0;
+
+	fimc_hwset_enable_irq(ctrl, 0, 1);
+
+	/* External (non write-back) camera: negotiate the frame size and
+	 * start CSIS + sensor streaming. */
+	if ((cam->id != CAMERA_WB) && (cam->id != CAMERA_WB_B)) {
+		if (fimc_cam_use && cam->sd) {
+			ret = v4l2_subdev_call(cam->sd, video, enum_framesizes,
+					&cam_frmsize);
+			if (ret < 0) {
+				dev_err(ctrl->dev, "%s: enum_framesizes failed\n",
+						__func__);
+				/* -ENOIOCTLCMD means "not implemented":
+				 * keep the previously configured size */
+				if (ret != -ENOIOCTLCMD)
+					return ret;
+			} else {
+				if (cam_frmsize.discrete.width > 0
+					&& cam_frmsize.discrete.height > 0) {
+					cam->window.left = 0;
+					cam->window.top = 0;
+					cam->width = cam->window.width
+						= cam_frmsize.discrete.width;
+					cam->height
+						= cam->window.height
+						= cam_frmsize.discrete.height;
+					printk(KERN_INFO "%s cam real size width = %d,"
+						"height = %d\n",__func__, ctrl->cam->width,
+						ctrl->cam->height);
+				}
+			}
+
+#ifdef CONFIG_MACH_P4NOTE
+#ifdef CONFIG_VIDEO_IMPROVE_STREAMOFF
+			/* wait for a deferred stream-off to finish first */
+			v4l2_subdev_call(cam->sd, video, s_stream,
+				STREAM_MODE_WAIT_OFF);
+#endif /* CONFIG_VIDEO_IMPROVE_STREAMOFF */
+#else /* !CONFIG_MACH_P4NOTE */
+			/* still-capture mode: start the sensor before CSIS */
+			if (cap->fmt.priv == V4L2_PIX_FMT_MODE_CAPTURE) {
+				ret = v4l2_subdev_call(cam->sd, video, s_stream, 1);
+				if (ret < 0) {
+					dev_err(ctrl->dev, "%s: s_stream failed\n",
+							__func__);
+					return ret;
+				}
+			}
+#endif
+			if (cam->type == CAM_TYPE_MIPI) {
+				/* channel 0 for primary, channel 1 otherwise */
+				if (cam->id == CAMERA_CSI_C) {
+					s3c_csis_enable_pktdata(CSI_CH_0, cap->pktdata_enable);
+					s3c_csis_start(CSI_CH_0, cam->mipi_lanes,
+					cam->mipi_settle, cam->mipi_align,
+					cam->width, cam->height,
+					cap->fmt.pixelformat);
+				} else {
+					s3c_csis_enable_pktdata(CSI_CH_1, cap->pktdata_enable);
+					s3c_csis_start(CSI_CH_1, cam->mipi_lanes,
+					cam->mipi_settle, cam->mipi_align,
+					cam->width, cam->height,
+					cap->fmt.pixelformat);
+				}
+			}
+#ifdef CONFIG_MACH_P4NOTE
+			/* P4NOTE always starts the sensor here */
+			if (1) {
+#else
+			if (cap->fmt.priv != V4L2_PIX_FMT_MODE_CAPTURE) {
+#endif
+				ret = v4l2_subdev_call(cam->sd, video, s_stream, 1);
+				if (ret < 0) {
+					dev_err(ctrl->dev, "%s: s_stream failed\n",
+							__func__);
+					/* unwind the CSIS link started above */
+					if (cam->id == CAMERA_CSI_C)
+						s3c_csis_stop(CSI_CH_0);
+					else
+						s3c_csis_stop(CSI_CH_1);
+
+					return ret;
+				}
+			}
+		}
+	}
+	/* Set FIMD to write back */
+	if ((cam->id == CAMERA_WB) || (cam->id == CAMERA_WB_B)) {
+		if (cam->id == CAMERA_WB)
+			fimc_hwset_sysreg_camblk_fimd0_wb(ctrl);
+		else
+			fimc_hwset_sysreg_camblk_fimd1_wb(ctrl);
+
+		ret = s3cfb_direct_ioctl(0, S3CFB_SET_WRITEBACK, 1);
+		if (ret) {
+			fimc_err("failed set writeback\n");
+			return ret;
+		}
+
+	}
+
+	/* FIMC-IS path: fetch the sensor margins, reprogram FIMC-Lite and
+	 * CSIS with margin-padded sizes, and route the ISP write-back. */
+	if (ctrl->is.sd && fimc_cam_use) {
+		struct platform_device *pdev = to_platform_device(ctrl->dev);
+		struct clk *pxl_async = NULL;
+		is_ctrl.id = V4L2_CID_IS_GET_SENSOR_OFFSET_X;
+		is_ctrl.value = 0;
+		v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl);
+		ctrl->is.offset_x = is_ctrl.value;
+		is_ctrl.id = V4L2_CID_IS_GET_SENSOR_OFFSET_Y;
+		is_ctrl.value = 0;
+		v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl);
+		ctrl->is.offset_y = is_ctrl.value;
+		fimc_dbg("CSI setting width = %d, height = %d\n",
+				ctrl->is.fmt.width + ctrl->is.offset_x,
+				ctrl->is.fmt.height + ctrl->is.offset_y);
+
+		if (ctrl->flite_sd && fimc_cam_use) {
+			ctrl->is.mbus_fmt.width += ctrl->is.offset_x;
+			ctrl->is.mbus_fmt.height += ctrl->is.offset_y;
+			ret = v4l2_subdev_call(ctrl->flite_sd, video,
+				s_mbus_fmt, &ctrl->is.mbus_fmt);
+		}
+
+		if (cam->id == CAMERA_CSI_C)
+			s3c_csis_start(CSI_CH_0, cam->mipi_lanes,
+			cam->mipi_settle, cam->mipi_align,
+			ctrl->is.fmt.width + ctrl->is.offset_x,
+			ctrl->is.fmt.height + ctrl->is.offset_y,
+			V4L2_PIX_FMT_SGRBG10);
+		else if (cam->id == CAMERA_CSI_D)
+			s3c_csis_start(CSI_CH_1, cam->mipi_lanes,
+			cam->mipi_settle, cam->mipi_align,
+			ctrl->is.fmt.width + ctrl->is.offset_x,
+			ctrl->is.fmt.height + ctrl->is.offset_y,
+			V4L2_PIX_FMT_SGRBG10);
+
+		pxl_async = clk_get(&pdev->dev, "pxl_async1");
+		if (IS_ERR(pxl_async)) {
+			dev_err(&pdev->dev, "failed to get pxl_async\n");
+			return -ENODEV;
+		}
+
+		clk_enable(pxl_async);
+		clk_put(pxl_async);
+		fimc_hwset_sysreg_camblk_isp_wb(ctrl);
+	}
+
+	if (ctrl->flite_sd && fimc_cam_use)
+		v4l2_subdev_call(ctrl->flite_sd, video, s_stream, 1);
+
+	/* Movie mode without FIMC-IS: pin the bus frequency and poke the
+	 * QoS registers. 880x720 recording is exempt. */
+	if (!ctrl->is.sd && cap->movie_mode &&
+			!((cam->width == 880 && cam->height == 720))) {
+		printk(KERN_INFO "\n\n\n%s pm_qos_req is called..\n", __func__ );
+		dev_lock(ctrl->bus_dev, ctrl->dev, (unsigned long)400200);
+		pm_qos_add_request(&bus_qos_pm_qos_req, PM_QOS_BUS_QOS, 1);
+
+		/* ioremap for register block */
+		/* NOTE(review): 0x11a00400 is presumably the SoC bus-QoS
+		 * register block -- confirm against the Exynos4 manual. */
+		qos_regs = ioremap(0x11a00400, 0x10);
+		if (!qos_regs) {
+			fimc_err("%s: failed to remap io region\n", __func__);
+			return -1;
+		}
+		writel(0x3, qos_regs + 0x0);
+		writel(0x1, qos_regs + 0x4);
+		fimc_err("0x11a00400 = 0x%x , 0x11a00404 = 0x%x \n", readl(qos_regs + 0), readl(qos_regs + 4));
+
+		iounmap(qos_regs);
+	}
+
+	fimc_hwset_camera_type(ctrl);
+	fimc_hwset_camera_polarity(ctrl);
+	fimc_hwset_enable_lastend(ctrl);
+
+	/* Uncompressed formats go through the scaler; JPEG/interleaved
+	 * bypass it and only need output sizing. */
+	if (cap->fmt.pixelformat != V4L2_PIX_FMT_JPEG &&
+		cap->fmt.pixelformat != V4L2_PIX_FMT_INTERLEAVED) {
+		fimc_hwset_camera_source(ctrl);
+		fimc_hwset_camera_offset(ctrl);
+
+		fimc_capture_scaler_info(ctrl);
+		fimc_hwset_prescaler(ctrl, &ctrl->sc);
+		fimc_hwset_scaler(ctrl, &ctrl->sc);
+		fimc_hwset_output_colorspace(ctrl, cap->fmt.pixelformat);
+		fimc_hwset_output_addr_style(ctrl, cap->fmt.pixelformat);
+
+		if (cap->fmt.pixelformat == V4L2_PIX_FMT_RGB32 ||
+			cap->fmt.pixelformat == V4L2_PIX_FMT_RGB565)
+			fimc_hwset_output_rgb(ctrl, cap->fmt.pixelformat);
+		else
+			fimc_hwset_output_yuv(ctrl, cap->fmt.pixelformat);
+
+		fimc_hwset_output_area(ctrl, cap->fmt.width, cap->fmt.height);
+		fimc_hwset_output_scan(ctrl, &cap->fmt);
+
+		fimc_hwset_output_rot_flip(ctrl, cap->rotate, cap->flip);
+		rot = fimc_mapping_rot_flip(cap->rotate, cap->flip);
+
+		/* 90/270 degree rotation swaps the DMA output size */
+		if (rot & FIMC_ROT) {
+			fimc_hwset_org_output_size(ctrl, cap->fmt.width,
+					cap->fmt.height);
+			fimc_hwset_output_size(ctrl, cap->fmt.height,
+					cap->fmt.width);
+		} else {
+			fimc_hwset_org_output_size(ctrl, cap->fmt.width,
+					cap->fmt.height);
+			fimc_hwset_output_size(ctrl, cap->fmt.width,
+					cap->fmt.height);
+		}
+
+		fimc_hwset_jpeg_mode(ctrl, false);
+	} else {
+		fimc_hwset_output_size(ctrl,
+				cap->fmt.width, cap->fmt.height);
+		/* NOTE(review): rot is never updated on this path
+		 * (fimc_mapping_rot_flip is only called in the branch
+		 * above), so this FIMC_ROT test is always false here --
+		 * verify intent. */
+		if (rot & FIMC_ROT)
+			fimc_hwset_org_output_size(ctrl,
+				cap->fmt.height, cap->fmt.width);
+		else
+			fimc_hwset_org_output_size(ctrl,
+				cap->fmt.width, cap->fmt.height);
+		if (cap->fmt.pixelformat == V4L2_PIX_FMT_JPEG)
+			fimc_hwset_output_area_size(ctrl,
+					fimc_camera_get_jpeg_memsize(ctrl));
+		else if (cap->fmt.pixelformat == V4L2_PIX_FMT_INTERLEAVED)
+			fimc_hwset_output_area_size(ctrl,
+					0xc00000);
+		fimc_hwset_jpeg_mode(ctrl, true);
+	}
+
+	/* hw >= 0x51 programs all buffer addresses directly; older IPs
+	 * use the ping-pong output queue */
+	if (pdata->hw_ver >= 0x51) {
+		for (i = 0; i < cap->nr_bufs; i++)
+			fimc_hwset_output_address(ctrl, &cap->bufs[i], i);
+	} else {
+		for (i = 0; i < FIMC_PINGPONG; i++)
+			fimc_add_outqueue(ctrl, i);
+	}
+
+	if (cap->fmt.pixelformat == V4L2_PIX_FMT_JPEG ||
+			cap->fmt.pixelformat == V4L2_PIX_FMT_INTERLEAVED) {
+		fimc_hwset_scaler_bypass(ctrl);
+	}
+
+	ctrl->cap->cnt = 0;
+	fimc_start_capture(ctrl);
+	ctrl->status = FIMC_STREAMON;
+
+	if (ctrl->is.sd && fimc_cam_use)
+		ret = v4l2_subdev_call(ctrl->is.sd, video, s_stream, 1);
+	printk(KERN_INFO "%s-- fimc%d\n", __func__, ctrl->id);
+
+	/* if available buffer did not remained */
+	return 0;
+}
+
+/*
+ * fimc_streamoff_capture - VIDIOC_STREAMOFF handler for the capture
+ * path. Stops the FIMC capture engine, the FIMC-IS/FIMC-Lite/sensor
+ * subdevs and the CSIS link, releases the movie-mode bus-frequency
+ * lock and disables FIMD write-back. Returns 0 or a negative errno
+ * (-ENODEV when already stopped or no source is attached).
+ */
+int fimc_streamoff_capture(void *fh)
+{
+	struct fimc_control *ctrl = fh;
+	struct fimc_capinfo *cap = ctrl->cap;
+
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	int ret = 0;
+	void __iomem *qos_regs;
+
+	printk(KERN_INFO "%s++ fimc%d\n", __func__, ctrl->id);
+
+	if (ctrl->status == FIMC_STREAMOFF) {
+		fimc_err("%s: fimc%d already stopped.\n", __func__, ctrl->id);
+		return -ENODEV;
+	}
+
+	if (fimc_check_capture_source(ctrl)) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	ctrl->status = FIMC_READY_OFF;
+
+	fimc_stop_capture(ctrl);
+#ifdef CONFIG_VIDEO_IMPROVE_STREAMOFF
+	/* stop the primary sensor early so stream-off overlaps the
+	 * hardware drain below */
+	if ((get_fimc_dev()->active_camera == 0) &&
+	    fimc_cam_use && ctrl->cam->sd)
+		v4l2_subdev_call(ctrl->cam->sd, video, s_stream, 0);
+#endif
+
+	/* wait for stop hardware */
+	fimc_wait_disable_capture(ctrl);
+
+	fimc_hwset_disable_irq(ctrl);
+	if (pdata->hw_ver < 0x51)
+		INIT_LIST_HEAD(&cap->inq);
+
+	ctrl->status = FIMC_STREAMOFF;
+	if (fimc_cam_use) {
+		if (ctrl->is.sd)
+			v4l2_subdev_call(ctrl->is.sd, video, s_stream, 0);
+
+		if (ctrl->flite_sd)
+			v4l2_subdev_call(ctrl->flite_sd, video, s_stream, 0);
+
+		if (ctrl->cam->type == CAM_TYPE_MIPI) {
+			if (ctrl->cam->id == CAMERA_CSI_C)
+				s3c_csis_stop(CSI_CH_0);
+			else
+				s3c_csis_stop(CSI_CH_1);
+		}
+		fimc_hwset_reset(ctrl);
+
+#ifdef CONFIG_VIDEO_IMPROVE_STREAMOFF
+		/* camera 0 was already stopped above */
+		if (ctrl->cam->sd && (get_fimc_dev()->active_camera != 0))
+#else
+		if (ctrl->cam->sd)
+#endif
+			v4l2_subdev_call(ctrl->cam->sd, video, s_stream, 0);
+	} else {
+		fimc_hwset_reset(ctrl);
+	}
+
+	/* undo the movie-mode bus/QoS lock taken in streamon */
+	if (!ctrl->is.sd && cap->movie_mode &&
+		!(ctrl->cam->width == 880 && ctrl->cam->height == 720)) {
+		printk(KERN_INFO "\n\n\n%s pm_qos_req is removed..\n", __func__ );
+		pm_qos_remove_request(&bus_qos_pm_qos_req);
+		dev_unlock(ctrl->bus_dev, ctrl->dev);
+
+		/* ioremap for register block */
+		qos_regs = ioremap(0x11a00400, 0x10);
+		if (!qos_regs) {
+			fimc_err("%s: failed to remap io region\n", __func__);
+			return -1;
+		}
+		writel(0x0, qos_regs + 0x0);
+		writel(0x0, qos_regs + 0x4);
+		fimc_err("0x11a00400 = 0x%x , 0x11a00404 = 0x%x \n", readl(qos_regs + 0), readl(qos_regs + 4));
+
+		iounmap(qos_regs);
+	}
+
+	/* Set FIMD to write back */
+	if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B)) {
+		ret = s3cfb_direct_ioctl(0, S3CFB_SET_WRITEBACK, 0);
+		if (ret) {
+			fimc_err("failed set writeback\n");
+			return ret;
+		}
+	}
+	/* disable camera power */
+	/* cam power off should call in the subdev release function */
+	if (fimc_cam_use) {
+		if (ctrl->cam->reset_camera) {
+			if (ctrl->cam->cam_power)
+				ctrl->cam->cam_power(0);
+			if (ctrl->power_status != FIMC_POWER_SUSPEND)
+				ctrl->cam->initialized = 0;
+		}
+	}
+	printk(KERN_INFO "%s-- fimc%d\n", __func__, ctrl->id);
+	return 0;
+}
+
+/*
+ * fimc_is_set_zoom - apply a digital zoom step via FIMC-IS.
+ * Computes the bayer-crop window for zoom level c->value, stops the
+ * FIMC pipeline, asks FIMC-IS to apply the zoom, reprograms the
+ * scaler for the new source size and restarts capture.
+ * NOTE(review): always returns 0 -- subdev errors held in ret are
+ * discarded; verify callers do not rely on failure reporting.
+ */
+int fimc_is_set_zoom(struct fimc_control *ctrl, struct v4l2_control *c)
+{
+	struct v4l2_control is_ctrl;
+	/* NOTE(review): pdata is unused in this function. */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	struct s3c_platform_camera *cam = NULL;
+	int ret = 0;
+
+	is_ctrl.id = 0;
+	is_ctrl.value = 0;
+
+	if (ctrl->cam)
+		cam = ctrl->cam;
+	else
+		return -ENODEV;
+
+	/* 0. Check zoom width and height: each zoom step narrows the
+	 * crop by 16 pixels, height follows the source aspect ratio. */
+	if (!c->value) {
+		ctrl->is.zoom_in_width = ctrl->is.fmt.width;
+		ctrl->is.zoom_in_height = ctrl->is.fmt.height;
+	} else {
+		ctrl->is.zoom_in_width = ctrl->is.fmt.width - (16 * c->value);
+		ctrl->is.zoom_in_height =
+			(ctrl->is.zoom_in_width * ctrl->is.fmt.height)
+			/ ctrl->is.fmt.width;
+		/* bayer crop contraint: height must be a multiple of 4 */
+		switch (ctrl->is.zoom_in_height%4) {
+		case 1:
+			ctrl->is.zoom_in_height--;
+			break;
+		case 2:
+			ctrl->is.zoom_in_height += 2;
+			break;
+		case 3:
+			ctrl->is.zoom_in_height++;
+			break;
+		}
+		/* clamp to a maximum of 4x zoom (quarter-size crop) */
+		if ((ctrl->is.zoom_in_width < (ctrl->is.fmt.width/4))
+			|| (ctrl->is.zoom_in_height < (ctrl->is.fmt.height/4))) {
+			ctrl->is.zoom_in_width = ctrl->is.fmt.width/4;
+			ctrl->is.zoom_in_height = ctrl->is.fmt.height/4;
+		}
+	}
+	/* 1. fimc stop */
+	fimc_stop_zoom_capture(ctrl);
+	/* 2. Set zoom on FIMC-IS and wait for it to take effect */
+	if (ctrl->is.sd && fimc_cam_use) {
+		ret = v4l2_subdev_call(ctrl->is.sd, core, s_ctrl, c);
+		/* NOTE(review): unbounded busy-wait -- if FIMC-IS never
+		 * reports a zoom state this loop spins forever. */
+		is_ctrl.id = V4L2_CID_IS_ZOOM_STATE;
+		is_ctrl.value = 0;
+		while (!is_ctrl.value) {
+			v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl);
+			fimc_dbg("V4L2_CID_IS_ZOOM_STATE - %d", is_ctrl.value);
+		}
+	}
+	/* 3. Change source size of FIMC */
+	fimc_hwset_camera_change_source(ctrl);
+	fimc_capture_change_scaler_info(ctrl);
+	fimc_hwset_prescaler(ctrl, &ctrl->sc);
+	fimc_hwset_scaler(ctrl, &ctrl->sc);
+	/* 4. Start FIMC */
+	fimc_start_zoom_capture(ctrl);
+	/* 5. FIMC-IS stream on */
+	if (ctrl->is.sd && fimc_cam_use)
+		ret = v4l2_subdev_call(ctrl->is.sd, video, s_stream, 1);
+
+	return 0;
+}
+
+/*
+ * fimc_buf2bs - copy the per-plane base address and length of a
+ * user-supplied fimc_buf into the driver's buffer-set descriptor.
+ */
+static void fimc_buf2bs(struct fimc_buf_set *bs, struct fimc_buf *buf)
+{
+	static const int planes[] = { FIMC_ADDR_Y, FIMC_ADDR_CB, FIMC_ADDR_CR };
+	int i, p;
+
+	for (i = 0; i < 3; i++) {
+		p = planes[i];
+		bs->base[p] = buf->base[p];
+		bs->length[p] = buf->length[p];
+	}
+}
+
+/*
+ * fimc_qbuf_capture - VIDIOC_QBUF handler for the capture path.
+ * Queues buffer b->index back to the hardware frame sequence
+ * (hw >= 0x51) or the legacy input queue, restarts capture if it was
+ * buffer-starved, then performs CPU/L2 cache maintenance for
+ * cacheable buffers. Returns 0 or a negative errno.
+ */
+int fimc_qbuf_capture(void *fh, struct v4l2_buffer *b)
+{
+	struct fimc_control *ctrl = fh;
+	/* for USERPTR queueing, m.userptr carries a struct fimc_buf with
+	 * the per-plane physical addresses */
+	struct fimc_buf *buf = (struct fimc_buf *)b->m.userptr;
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	struct fimc_capinfo *cap = ctrl->cap;
+	int idx = b->index;
+	int framecnt_seq;
+	int available_bufnum;
+	size_t length = 0;
+	int i;
+
+	if (!cap || !ctrl->cam) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+	if (pdata->hw_ver >= 0x51) {
+		/* only an IDLE (dequeued) buffer may be requeued */
+		if (cap->bufs[idx].state != VIDEOBUF_IDLE) {
+			fimc_err("%s: invalid state idx : %d\n", __func__, idx);
+			mutex_unlock(&ctrl->v4l2_lock);
+			return -EINVAL;
+		} else {
+			if (b->memory == V4L2_MEMORY_USERPTR) {
+				fimc_buf2bs(&cap->bufs[idx], buf);
+				fimc_hwset_output_address(ctrl, &cap->bufs[idx], idx);
+			}
+
+			fimc_hwset_output_buf_sequence(ctrl, idx, FIMC_FRAMECNT_SEQ_ENABLE);
+			cap->bufs[idx].state = VIDEOBUF_QUEUED;
+			/* capture stalled for lack of buffers: restart once
+			 * at least two buffers are available again */
+			if (ctrl->status == FIMC_BUFFER_STOP) {
+				framecnt_seq = fimc_hwget_output_buf_sequence(ctrl);
+				available_bufnum =
+					fimc_hwget_number_of_bits(framecnt_seq);
+				if (available_bufnum >= 2) {
+					printk(KERN_INFO "fimc_qbuf_capture start again\n");
+					cap->cnt = 0;
+					fimc_start_capture(ctrl);
+					ctrl->status = FIMC_STREAMON;
+					ctrl->restart = true;
+				}
+			}
+		}
+	} else {
+		fimc_add_inqueue(ctrl, b->index);
+	}
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	/* cache maintenance below runs outside the lock */
+	if (!cap->cacheable)
+		return 0;
+
+	/* total length of the populated planes decides the flush policy */
+	for (i = 0; i < 3; i++) {
+		if (cap->bufs[b->index].base[i])
+			length += cap->bufs[b->index].length[i];
+		else
+			break;
+	}
+
+	/* large buffers: full-cache flush is cheaper than per-range */
+	if (length > (unsigned long) L2_FLUSH_ALL) {
+		flush_cache_all();	/* L1 */
+		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
+		outer_flush_all();	/* L2 */
+	} else if (length > (unsigned long) L1_FLUSH_ALL) {
+		flush_cache_all();	/* L1 */
+		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
+
+		for (i = 0; i < 3; i++) {
+			phys_addr_t start = cap->bufs[b->index].base[i];
+			phys_addr_t end = cap->bufs[b->index].base[i] +
+				cap->bufs[b->index].length[i] - 1;
+
+			if (!start)
+				break;
+
+			outer_flush_range(start, end);	/* L2 */
+		}
+	} else {
+		/* small buffers: per-range flush of both cache levels;
+		 * assumes base[] lies in the kernel linear mapping so
+		 * phys_to_virt() is valid -- TODO confirm */
+		for (i = 0; i < 3; i++) {
+			phys_addr_t start = cap->bufs[b->index].base[i];
+			phys_addr_t end = cap->bufs[b->index].base[i] +
+				cap->bufs[b->index].length[i] - 1;
+
+			if (!start)
+				break;
+
+			dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
+			outer_flush_range(start, end);	/* L2 */
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_bs2buf - copy the per-plane base address and length of a
+ * driver buffer-set descriptor back into a user-facing fimc_buf.
+ */
+static void fimc_bs2buf(struct fimc_buf *buf, struct fimc_buf_set *bs)
+{
+	static const int planes[] = { FIMC_ADDR_Y, FIMC_ADDR_CB, FIMC_ADDR_CR };
+	int i, p;
+
+	for (i = 0; i < 3; i++) {
+		p = planes[i];
+		buf->base[p] = bs->base[p];
+		buf->length[p] = bs->length[p];
+	}
+}
+
+/*
+ * fimc_dqbuf_capture - VIDIOC_DQBUF handler for the capture path.
+ * Pops the oldest completed buffer from the outgoing queue
+ * (hw >= 0x51) or derives it from the hardware frame counter on older
+ * IPs, then performs cache maintenance for packet-data and cacheable
+ * buffers. Returns 0, -EAGAIN when no buffer is ready, or a negative
+ * errno.
+ */
+int fimc_dqbuf_capture(void *fh, struct v4l2_buffer *b)
+{
+	unsigned long spin_flags;
+	struct fimc_control *ctrl = fh;
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct fimc_buf_set *bs;
+	struct fimc_buf *buf = (struct fimc_buf *)b->m.userptr;
+	size_t length = 0;
+	int i, pp, ret = 0;
+	phys_addr_t start, end;
+
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+	if (!cap || !ctrl->cam) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdata->hw_ver >= 0x51) {
+		/* outgoing_q is filled from the frame-done interrupt, so
+		 * it is protected by the outq spinlock, not the mutex */
+		spin_lock_irqsave(&ctrl->outq_lock, spin_flags);
+
+		if (list_empty(&cap->outgoing_q)) {
+			fimc_info2("%s: outgoing_q is empty\n", __func__);
+			spin_unlock_irqrestore(&ctrl->outq_lock, spin_flags);
+			return -EAGAIN;
+		} else {
+			bs = list_first_entry(&cap->outgoing_q, struct fimc_buf_set,
+					list);
+			fimc_info2("%s[%d]: bs->id : %d\n", __func__, ctrl->id, bs->id);
+			b->index = bs->id;
+			bs->state = VIDEOBUF_IDLE;
+
+			if (b->memory == V4L2_MEMORY_USERPTR)
+				fimc_bs2buf(buf, bs);
+
+			list_del(&bs->list);
+		}
+
+		spin_unlock_irqrestore(&ctrl->outq_lock, spin_flags);
+	} else {
+		/* legacy ping-pong: frame counter + 2 (mod 4) points to
+		 * the most recently completed buffer */
+		pp = ((fimc_hwget_frame_count(ctrl) + 2) % 4);
+		if (cap->fmt.field == V4L2_FIELD_INTERLACED_TB)
+			pp &= ~0x1;
+		b->index = cap->outq[pp];
+		fimc_info2("%s: buffer(%d) outq[%d]\n", __func__, b->index, pp);
+		ret = fimc_add_outqueue(ctrl, pp);
+		if (ret) {
+			/* NOTE(review): b->index is set to -1 here but is
+			 * still used below to index cap->bufs[] on the
+			 * pktdata/cacheable paths -- looks like a potential
+			 * out-of-bounds access; verify. */
+			b->index = -1;
+			fimc_err("%s: no inqueue buffer\n", __func__);
+		}
+	}
+
+	/* flush the embedded-data plane so userspace sees fresh bytes */
+	if (cap->pktdata_enable) {
+		flush_cache_all();	/* L1 */
+		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
+
+		start = cap->bufs[b->index].base[cap->pktdata_plane];
+		end = cap->bufs[b->index].base[cap->pktdata_plane] +
+			cap->bufs[b->index].length[cap->pktdata_plane] - 1;
+		fimc_info2("fimc_dqbuf interleaved mode cache flush... start 0x%x, size 0x%x\n",
+			start, cap->bufs[b->index].length[cap->pktdata_plane] );
+
+		outer_flush_range(start, end);	/* L2 */
+	}
+
+	if (!cap->cacheable)
+		return ret;
+
+	/* total length of the populated planes decides the flush policy */
+	for (i = 0; i < 3; i++) {
+		if (cap->bufs[b->index].base[i])
+			length += cap->bufs[b->index].length[i];
+		else
+			break;
+	}
+
+	if (length > (unsigned long) L2_FLUSH_ALL) {
+		flush_cache_all();	/* L1 */
+		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
+		outer_flush_all();	/* L2 */
+	} else if (length > (unsigned long) L1_FLUSH_ALL) {
+		flush_cache_all();	/* L1 */
+		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
+
+		for (i = 0; i < 3; i++) {
+			phys_addr_t start = cap->bufs[b->index].base[i];
+			phys_addr_t end = cap->bufs[b->index].base[i] +
+				cap->bufs[b->index].length[i] - 1;
+
+			if (!start)
+				break;
+
+			outer_flush_range(start, end);	/* L2 */
+		}
+	} else {
+		for (i = 0; i < 3; i++) {
+			phys_addr_t start = cap->bufs[b->index].base[i];
+			phys_addr_t end = cap->bufs[b->index].base[i] +
+				cap->bufs[b->index].length[i] - 1;
+
+			if (!start)
+				break;
+
+			dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
+			outer_flush_range(start, end);	/* L2 */
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * fimc_enum_framesizes - VIDIOC_ENUM_FRAMESIZES handler.
+ * For a supported pixel format, index 0 reports a single discrete
+ * size: the currently configured sensor resolution.
+ * Returns 0 on a match, -EINVAL otherwise.
+ */
+int fimc_enum_framesizes(struct file *filp, void *fh, struct v4l2_frmsizeenum *fsize)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	u32 matched = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(capture_fmts); i++) {
+		if (capture_fmts[i].pixelformat != fsize->pixel_format)
+			continue;
+		if (matched++ == fsize->index) {
+			fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+			/* this is camera sensor's width, height.
+			 * originally this should be filled each file format
+			 */
+			fsize->discrete.width = ctrl->cam->width;
+			fsize->discrete.height = ctrl->cam->height;
+
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+/*
+ * fimc_enum_frameintervals - VIDIOC_ENUM_FRAMEINTERVALS handler.
+ * Reports a single discrete interval of 1000/30000 (30 fps) for
+ * index 0; any other index is rejected with -EINVAL.
+ */
+int fimc_enum_frameintervals(struct file *filp, void *fh,
+		struct v4l2_frmivalenum *fival)
+{
+	if (fival->index != 0)
+		return -EINVAL;
+
+	/* temporary only support 30fps */
+	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+	fival->discrete.numerator = 1000;
+	fival->discrete.denominator = 30000;
+
+	return 0;
+}
+
+/*
+ * fimc_get_active_device - device of the currently active camera.
+ * Only used by the MIPI power function. Returns NULL when the FIMC
+ * core is not initialized or no camera is active.
+ */
+struct device *fimc_get_active_device(void)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct fimc_control *active;
+
+	if (!fimc)
+		return NULL;
+	if (fimc->active_camera < 0)
+		return NULL;
+
+	active = get_fimc_ctrl(fimc->active_camera);
+	return active->dev;
+}
diff --git a/drivers/media/video/samsung/fimc/fimc_capture_u1.c b/drivers/media/video/samsung/fimc/fimc_capture_u1.c
new file mode 100644
index 0000000..d21d877
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc_capture_u1.c
@@ -0,0 +1,2317 @@
+/* linux/drivers/media/video/samsung/fimc_capture.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * V4L2 Capture device support file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/videodev2_exynos_media.h>
+#include <linux/videodev2_exynos_camera.h>
+#include <linux/clk.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <plat/media.h>
+#include <plat/clock.h>
+#include <plat/fimc.h>
+#include <linux/delay.h>
+#include <mach/cpufreq.h>
+
+#include <asm/cacheflush.h>
+
+#include "fimc.h"
+
+/* Width:height ratio scaled by 10, e.g. 640x480 -> 13 (VGA). */
+#define FRM_RATIO(w, h) ((w)*10/(h))
+
+/* FRM_RATIO() values for the sensor aspect ratios this driver knows. */
+typedef enum {
+	FRM_RATIO_QCIF = 12,
+	FRM_RATIO_VGA = 13,
+	FRM_RATIO_D1 = 15,
+	FRM_RATIO_WVGA = 16,
+	FRM_RATIO_HD = 17,
+} frm_ratio_t;
+
+/* subdev handling macro: invoke op 'f' on the attached sensor subdev. */
+#define subdev_call(ctrl, o, f, args...) \
+	v4l2_subdev_call(ctrl->cam->sd, o, f, ##args)
+
+/*
+ * Pixel formats selectable via VIDIOC_ENUM_FMT on the capture node.
+ * NOTE(review): the JPEG entry is the only one without a FORMAT_FLAGS_*
+ * value -- confirm whether that is intentional.
+ */
+static const struct v4l2_fmtdesc capture_fmts[] = {
+	{
+		.index		= 0,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PACKED,
+		.description	= "RGB-5-6-5",
+		.pixelformat	= V4L2_PIX_FMT_RGB565,
+	}, {
+		.index		= 1,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PACKED,
+		.description	= "RGB-8-8-8, unpacked 24 bpp",
+		.pixelformat	= V4L2_PIX_FMT_RGB32,
+	}, {
+		.index		= 2,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PACKED,
+		.description	= "YUV 4:2:2 packed, YCbYCr",
+		.pixelformat	= V4L2_PIX_FMT_YUYV,
+	}, {
+		.index		= 3,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PACKED,
+		.description	= "YUV 4:2:2 packed, CbYCrY",
+		.pixelformat	= V4L2_PIX_FMT_UYVY,
+	}, {
+		.index		= 4,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PACKED,
+		.description	= "YUV 4:2:2 packed, CrYCbY",
+		.pixelformat	= V4L2_PIX_FMT_VYUY,
+	}, {
+		.index		= 5,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PACKED,
+		.description	= "YUV 4:2:2 packed, YCrYCb",
+		.pixelformat	= V4L2_PIX_FMT_YVYU,
+	}, {
+		.index		= 6,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PLANAR,
+		.description	= "YUV 4:2:2 planar, Y/Cb/Cr",
+		.pixelformat	= V4L2_PIX_FMT_YUV422P,
+	}, {
+		.index		= 7,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PLANAR,
+		.description	= "YUV 4:2:0 planar, Y/CbCr",
+		.pixelformat	= V4L2_PIX_FMT_NV12,
+	}, {
+		.index		= 8,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PLANAR,
+		.description	= "YUV 4:2:0 planar, Y/CbCr, Tiled",
+		.pixelformat	= V4L2_PIX_FMT_NV12T,
+	}, {
+		.index		= 9,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PLANAR,
+		.description	= "YUV 4:2:0 planar, Y/CrCb",
+		.pixelformat	= V4L2_PIX_FMT_NV21,
+	}, {
+		.index		= 10,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PLANAR,
+		.description	= "YUV 4:2:2 planar, Y/CbCr",
+		.pixelformat	= V4L2_PIX_FMT_NV16,
+	}, {
+		.index		= 11,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PLANAR,
+		.description	= "YUV 4:2:2 planar, Y/CrCb",
+		.pixelformat	= V4L2_PIX_FMT_NV61,
+	}, {
+		.index		= 12,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PLANAR,
+		.description	= "YUV 4:2:0 planar, Y/Cb/Cr",
+		.pixelformat	= V4L2_PIX_FMT_YUV420,
+	}, {
+		.index		= 13,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.description	= "JPEG encoded data",
+		.pixelformat	= V4L2_PIX_FMT_JPEG,
+	}, {
+		.index		= 14,
+		.type		= V4L2_BUF_TYPE_VIDEO_CAPTURE,
+		.flags		= FORMAT_FLAGS_PLANAR,
+		.description	= "YVU 4:2:0 planar, Y/Cr/Cb",
+		.pixelformat	= V4L2_PIX_FMT_YVU420,
+	},
+};
+
+/*
+ * Controls implemented by the FIMC bridge itself; fimc_queryctrl()
+ * answers from this table and forwards anything else to the sensor.
+ * Fixes vs. original: the rotation entry was typed BOOLEAN and its name
+ * was misspelled "Roataion"; with min 0 / max 270 / step 90 it is an
+ * integer control.
+ */
+static const struct v4l2_queryctrl fimc_controls[] = {
+	{
+		.id = V4L2_CID_ROTATION,
+		.type = V4L2_CTRL_TYPE_INTEGER,
+		.name = "Rotation",
+		.minimum = 0,
+		.maximum = 270,
+		.step = 90,
+		.default_value = 0,
+	}, {
+		.id = V4L2_CID_HFLIP,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Horizontal Flip",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	}, {
+		.id = V4L2_CID_VFLIP,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Vertical Flip",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	}, {
+		.id = V4L2_CID_PADDR_Y,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Physical address Y",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+		.flags = V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.id = V4L2_CID_PADDR_CB,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Physical address Cb",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+		.flags = V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.id = V4L2_CID_PADDR_CR,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Physical address Cr",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+		.flags = V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.id = V4L2_CID_PADDR_CBCR,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Physical address CbCr",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+		.flags = V4L2_CTRL_FLAG_READ_ONLY,
+	}, {
+		.id = V4L2_CID_CACHEABLE,
+		.type = V4L2_CTRL_TYPE_BOOLEAN,
+		.name = "Cacheable",
+		.minimum = 0,
+		.maximum = 1,
+		.step = 1,
+		.default_value = 0,
+	},
+};
+
+/* Stubs so the driver links when MIPI-CSI support is compiled out;
+ * the real implementations live in the s3c-csis driver. */
+#ifndef CONFIG_VIDEO_FIMC_MIPI
+void s3c_csis_start(int csis_id, int lanes, int settle, \
+	int align, int width, int height, int pixel_format) {}
+void s3c_csis_stop(int csis_id) {}
+#endif
+
+/*
+ * Power up and initialise the currently selected camera.
+ * - WriteBack "cameras" only need their source window sized to the LCD.
+ * - Real sensors get MCLK, power, then a core->init subdev call; the
+ *   whole MCLK/power/init sequence is retried up to 3 times when the
+ *   first I2C access fails.
+ * Returns 0 on success (or if already initialised), negative errno
+ * from the subdev init otherwise.
+ */
+static int fimc_init_camera(struct fimc_control *ctrl)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct s3c_platform_fimc *pdata;
+	struct s3c_platform_camera *cam;
+	int ret = 0, retry_cnt = 0;
+	u32 pixelformat;	/* NOTE(review): written below, never read */
+
+	pdata = to_fimc_plat(ctrl->dev);
+
+	cam = ctrl->cam;
+
+	/* do nothing if already initialized */
+	if (ctrl->cam->initialized)
+		return 0;
+
+	/*
+	 * WriteBack mode doesn't need to set clock and power,
+	 * but it needs to set source width, height depend on LCD resolution.
+	 */
+	if ((cam->id == CAMERA_WB) || (cam->id == CAMERA_WB_B)) {
+		s3cfb_direct_ioctl(0, S3CFB_GET_LCD_WIDTH, \
+					(unsigned long)&cam->width);
+		s3cfb_direct_ioctl(0, S3CFB_GET_LCD_HEIGHT, \
+					(unsigned long)&cam->height);
+		cam->window.width = cam->width;
+		cam->window.height = cam->height;
+		cam->initialized = 1;
+		return 0;
+	}
+
+retry:
+	/* set rate for mclk */
+	if ((clk_get_rate(cam->clk)) && (fimc->mclk_status == CAM_MCLK_OFF)) {
+		clk_set_rate(cam->clk, cam->clk_rate);
+		clk_enable(cam->clk);
+		fimc->mclk_status = CAM_MCLK_ON;
+		fimc_info1("clock for camera: %d\n", cam->clk_rate);
+	}
+
+	/* enable camera power if needed */
+	if (cam->cam_power) {
+		ret = cam->cam_power(1);
+		if (unlikely(ret < 0))
+			fimc_err("fail to power on\n\n");
+	}
+
+	/* subdev call for init */
+#if !defined(CONFIG_MACH_PX)
+	do_gettimeofday(&ctrl->before_time);
+#endif
+	/* init arg 1 = capture (JPEG) mode, 0 = preview mode */
+	if (ctrl->cap->fmt.priv == V4L2_PIX_FMT_MODE_CAPTURE) {
+		ret = v4l2_subdev_call(cam->sd, core, init, 1);
+		pixelformat = V4L2_PIX_FMT_JPEG;
+	} else {
+		ret = v4l2_subdev_call(cam->sd, core, init, 0);
+		pixelformat = cam->pixelformat;
+	}
+
+	/* Retry camera power-up if first i2c fails. */
+	if (unlikely(ret < 0)) {
+		/* roll back power and MCLK before retrying */
+		if (cam->cam_power)
+			cam->cam_power(0);
+
+		if (fimc->mclk_status == CAM_MCLK_ON) {
+			clk_disable(ctrl->cam->clk);
+			fimc->mclk_status = CAM_MCLK_OFF;
+		}
+
+		if (retry_cnt++ < 3) {
+			msleep(100);
+			fimc_err("Retry power on(%d/3)\n\n", retry_cnt);
+			goto retry;
+		}
+	} else {
+		cam->initialized = 1;
+	}
+
+	return ret;
+}
+
+/*
+ * Ask the sensor subdev how much memory its JPEG output needs
+ * (V4L2_CID_CAM_JPEG_MEMSIZE). Returns the size in bytes, or 0 when
+ * the subdev does not implement the control.
+ * Fix vs. original: error message misspelled "JEPG".
+ */
+static int fimc_camera_get_jpeg_memsize(struct fimc_control *ctrl)
+{
+	int ret = 0;
+	struct v4l2_control cam_ctrl;
+	cam_ctrl.id = V4L2_CID_CAM_JPEG_MEMSIZE;
+
+	ret = subdev_call(ctrl, core, g_ctrl, &cam_ctrl);
+	if (ret < 0) {
+		fimc_err("%s: Subdev doesn't support JPEG encoding.\n", \
+				 __func__);
+		return 0;
+	}
+
+	return cam_ctrl.value;
+}
+
+
+/*
+ * Compute the pre/main scaler ratios for the path from the camera window
+ * (source) to the requested capture format (target). A 90/270-degree
+ * rotation swaps the target axes before the ratios are derived.
+ * Returns 0 on success, -EINVAL for a non-positive source/target size.
+ */
+static int fimc_capture_scaler_info(struct fimc_control *ctrl)
+{
+	struct fimc_scaler *sc = &ctrl->sc;
+	struct v4l2_rect *window = &ctrl->cam->window;
+	int tx, ty, sx, sy;
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	int rot = 0;
+
+	sx = window->width;
+	sy = window->height;
+
+	sc->real_width = sx;
+	sc->real_height = sy;
+
+	rot = fimc_mapping_rot_flip(ctrl->cap->rotate, ctrl->cap->flip);
+
+	/* 90/270-degree rotation: output width/height are swapped */
+	if (rot & FIMC_ROT) {
+		tx = ctrl->cap->fmt.height;
+		ty = ctrl->cap->fmt.width;
+	} else {
+		tx = ctrl->cap->fmt.width;
+		ty = ctrl->cap->fmt.height;
+	}
+
+	fimc_warn("%s: CamOut (%d, %d), TargetOut (%d, %d)\n", __func__, sx, sy, tx, ty);
+
+	if (sx <= 0 || sy <= 0) {
+		fimc_err("%s: invalid source size\n", __func__);
+		return -EINVAL;
+	}
+
+	if (tx <= 0 || ty <= 0) {
+		fimc_err("%s: invalid target size\n", __func__);
+		return -EINVAL;
+	}
+
+	fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
+	fimc_get_scaler_factor(sy, ty, &sc->pre_vratio, &sc->vfactor);
+
+	sc->pre_dst_width = sx / sc->pre_hratio;
+	sc->pre_dst_height = sy / sc->pre_vratio;
+
+	/* main-scaler ratio precision differs by FIMC hardware revision */
+	if (pdata->hw_ver >= 0x50) {
+		sc->main_hratio = (sx << 14) / (tx << sc->hfactor);
+		sc->main_vratio = (sy << 14) / (ty << sc->vfactor);
+	} else {
+		sc->main_hratio = (sx << 8) / (tx << sc->hfactor);
+		sc->main_vratio = (sy << 8) / (ty << sc->vfactor);
+	}
+
+	sc->scaleup_h = (tx >= sx) ? 1 : 0;
+	sc->scaleup_v = (ty >= sy) ? 1 : 0;
+
+	return 0;
+}
+
+/*
+ * Queue buffer i into the incoming list (PINGPONG_2ADDR mode only).
+ * A buffer already present in cap->inq is silently skipped so the list
+ * never holds duplicates. Always returns 0.
+ */
+static int fimc_add_inqueue(struct fimc_control *ctrl, int i)
+{
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct fimc_buf_set *tmp_buf;
+	struct list_head *count;
+
+	/* PINGPONG_2ADDR_MODE Only */
+	list_for_each(count, &cap->inq) {
+		tmp_buf = list_entry(count, struct fimc_buf_set, list);
+		/* skip list_add_tail if already buffer is in cap->inq list*/
+		if (tmp_buf->id == i)
+			return 0;
+	}
+	list_add_tail(&cap->bufs[i].list, &cap->inq);
+
+	return 0;
+}
+
+/*
+ * Move the first buffer from cap->inq into hardware output slot i and
+ * its pair slot (i XOR 2); PINGPONG_2ADDR mode only.
+ * Returns -ENOENT for an out-of-range slot index or an empty queue.
+ * NOTE(review): the same buffer is programmed into both paired slots --
+ * presumably intentional for ping-pong operation; confirm.
+ */
+static int fimc_add_outqueue(struct fimc_control *ctrl, int i)
+{
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct fimc_buf_set *buf;
+	unsigned int mask = 0x2;
+
+	/* PINGPONG_2ADDR_MODE Only */
+	/* pair_buf_index stands for pair index of i. (0<->2) (1<->3) */
+	int pair_buf_index = (i^mask);
+
+	/* FIMC have 4 h/w registers */
+	if (i < 0 || i >= FIMC_PHYBUFS) {
+		fimc_err("%s: invalid queue index : %d\n", __func__, i);
+		return -ENOENT;
+	}
+
+	if (list_empty(&cap->inq))
+		return -ENOENT;
+
+	buf = list_first_entry(&cap->inq, struct fimc_buf_set, list);
+
+	/* pair index buffer should be allocated first */
+	cap->outq[pair_buf_index] = buf->id;
+	fimc_hwset_output_address(ctrl, buf, pair_buf_index);
+
+	cap->outq[i] = buf->id;
+	fimc_hwset_output_address(ctrl, buf, i);
+
+	list_del(&buf->list);
+
+	return 0;
+}
+
+/*
+ * VIDIOC_G_PARM handler: forwarded to the sensor subdev. WriteBack
+ * inputs have no subdev and simply report success.
+ * NOTE(review): ctrl->cam is dereferenced without a NULL check; assumes
+ * VIDIOC_S_INPUT ran first -- confirm against callers.
+ */
+int fimc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ret = 0;
+
+	fimc_dbg("%s\n", __func__);
+
+	/* WriteBack doesn't have subdev_call */
+
+	if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B))
+		return 0;
+
+	mutex_lock(&ctrl->v4l2_lock);
+	ret = subdev_call(ctrl, video, g_parm, a);
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return ret;
+}
+
+/*
+ * VIDIOC_S_PARM handler: forwards the requested frame rate to the sensor
+ * subdev (skipped for the LCD WriteBack inputs, which have no subdev,
+ * and for the non-sensor FIMC instance).
+ * Fix vs. original: the fps computation divided by the user-supplied
+ * timeperframe.numerator before validating it, so a zero numerator from
+ * userspace caused a kernel divide-by-zero.
+ */
+int fimc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ret = 0;
+	int new_fps = 0;
+
+	/* guard against a zero numerator before dividing */
+	if (a->parm.capture.timeperframe.numerator)
+		new_fps = a->parm.capture.timeperframe.denominator /
+				a->parm.capture.timeperframe.numerator;
+
+	fimc_info2("%s fimc%d, %d\n", __func__, ctrl->id, new_fps);
+
+	/* WriteBack doesn't have subdev_call */
+	if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B))
+		return 0;
+
+	mutex_lock(&ctrl->v4l2_lock);
+#ifdef CONFIG_MACH_S2PLUS
+	if (ctrl->id == FIMC0)
+#else
+	if (ctrl->id != FIMC2)
+#endif
+		ret = subdev_call(ctrl, video, s_parm, a);
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return ret;
+}
+
+/* Enumerate controls - VIDIOC_QUERYCTRL.
+ * Controls owned by the FIMC bridge are answered from fimc_controls[];
+ * every other id is forwarded to the sensor subdev. */
+int fimc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *qc)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int i, ret;
+
+	fimc_dbg("%s\n", __func__);
+
+	for (i = 0; i < ARRAY_SIZE(fimc_controls); i++) {
+		if (fimc_controls[i].id == qc->id) {
+			memcpy(qc, &fimc_controls[i], \
+				sizeof(struct v4l2_queryctrl));
+			return 0;
+		}
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+	ret = subdev_call(ctrl, core, queryctrl, qc);
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return ret;
+}
+
+/* Menu control items - VIDIOC_QUERYMENU, forwarded to the sensor subdev. */
+int fimc_querymenu(struct file *file, void *fh, struct v4l2_querymenu *qm)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ret = 0;
+
+	fimc_dbg("%s\n", __func__);
+
+	mutex_lock(&ctrl->v4l2_lock);
+	ret = subdev_call(ctrl, core, querymenu, qm);
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return ret;
+}
+
+/*
+ * VIDIOC_ENUMINPUT handler: reports each registered camera as a
+ * V4L2_INPUT_TYPE_CAMERA input named after its I2C board info type.
+ * NOTE(review): inp->index is unsigned, so the "< 0" test can never
+ * fire; harmless dead code.
+ */
+int fimc_enum_input(struct file *file, void *fh, struct v4l2_input *inp)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	fimc_dbg("%s: index %d\n", __func__, inp->index);
+
+	if (inp->index < 0 || inp->index >= FIMC_MAXCAMS) {
+		fimc_err("%s: invalid input index, received = %d\n" \
+				, __func__, inp->index);
+		return -EINVAL;
+	}
+
+	if (!fimc->camera_isvalid[inp->index])
+		return -EINVAL;
+	mutex_lock(&ctrl->v4l2_lock);
+
+	strcpy(inp->name, fimc->camera[inp->index]->info->type);
+	inp->type = V4L2_INPUT_TYPE_CAMERA;
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * VIDIOC_G_INPUT handler: returns the globally active camera index.
+ * Fails with -ENODEV when called before any VIDIOC_S_INPUT.
+ */
+int fimc_g_input(struct file *file, void *fh, unsigned int *i)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	struct fimc_global *fimc = get_fimc_dev();
+
+	/* In case of isueing g_input before s_input */
+	if (!ctrl->cam) {
+		fimc_err("no camera device selected yet!" \
+				"do VIDIOC_S_INPUT first\n");
+		return -ENODEV;
+	}
+	mutex_lock(&ctrl->v4l2_lock);
+
+	*i = (unsigned int) fimc->active_camera;
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	fimc_dbg("%s: index %d\n", __func__, *i);
+
+	return 0;
+}
+
+/*
+ * Tear down the currently attached sensor subdev: unregister its I2C
+ * client, cut sensor power, gate MCLK, and clear the active-camera
+ * bookkeeping. WriteBack inputs only need the bookkeeping reset.
+ * Safe to call with no subdev attached; always returns 0.
+ */
+int fimc_release_subdev(struct fimc_control *ctrl)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct i2c_client *client;
+
+	if (ctrl && ctrl->cam && ctrl->cam->sd) {
+		fimc_dbg("%s called\n", __func__);
+
+		/* WriteBack doesn't need clock setting */
+		if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B)) {
+			ctrl->cam->initialized = 0;
+			ctrl->cam = NULL;
+			fimc->active_camera = -1;
+			return 0;
+		}
+
+		client = v4l2_get_subdevdata(ctrl->cam->sd);
+		i2c_unregister_device(client);
+		ctrl->cam->sd = NULL;
+		if (ctrl->cam->cam_power)
+			ctrl->cam->cam_power(0);
+
+		/* shutdown the MCLK */
+		if (fimc->mclk_status == CAM_MCLK_ON) {
+			clk_disable(ctrl->cam->clk);
+			fimc->mclk_status = CAM_MCLK_OFF;
+		}
+
+		ctrl->cam->initialized = 0;
+		ctrl->cam = NULL;
+		fimc->active_camera = -1;
+	}
+	return 0;
+}
+
+/*
+ * Register the sensor described by ctrl->cam->info as a v4l2 subdev on
+ * its I2C bus and attach it to ctrl->cam->sd.
+ * Returns 0 on success, -ENODEV when any prerequisite is missing or the
+ * board registration fails.
+ * NOTE(review): the adapter reference taken by i2c_get_adapter() is
+ * never released with i2c_put_adapter(), including on the error paths
+ * -- looks like a reference leak; confirm against the i2c core rules.
+ */
+static int fimc_configure_subdev(struct fimc_control *ctrl)
+{
+	struct i2c_adapter *i2c_adap;
+	struct i2c_board_info *i2c_info;
+	struct v4l2_subdev *sd;
+	unsigned short addr;
+	char *name;
+
+	i2c_adap = i2c_get_adapter(ctrl->cam->get_i2c_busnum());
+	if (!i2c_adap) {
+		fimc_err("subdev i2c_adapter missing-skip registration\n");
+		return -ENODEV;
+	}
+
+	i2c_info = ctrl->cam->info;
+	if (!i2c_info) {
+		fimc_err("%s: subdev i2c board info missing\n", __func__);
+		return -ENODEV;
+	}
+
+	name = i2c_info->type;
+	if (!name) {
+		fimc_err("subdev i2c driver name missing-skip registration\n");
+		return -ENODEV;
+	}
+
+	addr = i2c_info->addr;
+	if (!addr) {
+		fimc_err("subdev i2c address missing-skip registration\n");
+		return -ENODEV;
+	}
+	/*
+	 * NOTE: first time subdev being registered,
+	 * s_config is called and try to initialize subdev device
+	 * but in this point, we are not giving MCLK and power to subdev
+	 * so nothing happens but pass platform data through
+	 */
+	sd = v4l2_i2c_new_subdev_board(&ctrl->v4l2_dev, i2c_adap,
+			i2c_info, &addr);
+	if (!sd) {
+		fimc_err("%s: v4l2 subdev board registering failed\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	/* Assign subdev to proper camera device pointer */
+	ctrl->cam->sd = sd;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_S_INPUT handler.
+ * Switching to a different camera releases the previous subdev and
+ * registers the new one; the non-primary FIMC instance may only attach
+ * to the camera that is already active. Also lazily allocates
+ * ctrl->cap (freed at file close) and, on non-PX boards, powers up and
+ * initialises the rear camera (index 0) immediately.
+ * NOTE(review): 'i' is unsigned, so the "< 0" test can never fire.
+ * NOTE(review): 'pdev' is only referenced under
+ * CONFIG_EXYNOS_DEV_PD && CONFIG_PM_RUNTIME -- unused-variable warning
+ * otherwise.
+ */
+int fimc_s_input(struct file *file, void *fh, unsigned int i)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+	int ret = 0;
+
+	printk(KERN_INFO "%s: index %d FIMC%d\n", __func__, i, ctrl->id);
+
+	if (i < 0 || i >= FIMC_MAXCAMS) {
+		fimc_err("%s: invalid input index\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!fimc->camera_isvalid[i])
+		return -EINVAL;
+
+#ifdef CONFIG_MACH_S2PLUS
+	if (fimc->camera[i]->sd && ctrl->id == FIMC0) {
+#else
+	if (fimc->camera[i]->sd && ctrl->id != FIMC2) {
+#endif
+		fimc_err("%s: Camera already in use.\n", __func__);
+		return -EBUSY;
+	}
+	mutex_lock(&ctrl->v4l2_lock);
+
+	/* If ctrl->cam is not NULL, there is one subdev already registered.
+	 * We need to unregister that subdev first. */
+	if (i != fimc->active_camera) {
+		printk(KERN_INFO "\n\nfimc_s_input activating subdev\n");
+		fimc_release_subdev(ctrl);
+		ctrl->cam = fimc->camera[i];
+
+		/* WriteBack inputs have no I2C subdev to register */
+		if ((ctrl->cam->id != CAMERA_WB) && (ctrl->cam->id != CAMERA_WB_B)) {
+			ret = fimc_configure_subdev(ctrl);
+			if (ret < 0) {
+				mutex_unlock(&ctrl->v4l2_lock);
+				fimc_err("%s: Could not register camera" \
+					" sensor with V4L2.\n", __func__);
+				return -ENODEV;
+			}
+		}
+
+		fimc->active_camera = i;
+		printk(KERN_INFO "fimc_s_input activated subdev = %d\n", i);
+	}
+
+#ifdef CONFIG_MACH_S2PLUS
+	if (ctrl->id == FIMC1) {
+		if (i == fimc->active_camera) {
+			ctrl->cam = fimc->camera[i];
+			fimc_info2("fimc_s_input activating subdev FIMC1 %d\n",
+					ctrl->cam->initialized);
+		} else {
+			mutex_unlock(&ctrl->v4l2_lock);
+			return -EINVAL;
+		}
+	}
+#else
+	if (ctrl->id == FIMC2) {
+		if (i == fimc->active_camera) {
+			ctrl->cam = fimc->camera[i];
+			fimc_info2("fimc_s_input activating subdev FIMC2 %d\n",
+					ctrl->cam->initialized);
+		} else {
+			mutex_unlock(&ctrl->v4l2_lock);
+			return -EINVAL;
+		}
+	}
+#endif
+
+	/*
+	 * The first time alloc for struct cap_info, and will be
+	 * released at the file close.
+	 * Anyone has better idea to do this?
+	 */
+	if (!cap) {
+		cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+		if (!cap) {
+			fimc_err("%s: no memory for "
+				"capture device info\n", __func__);
+			mutex_unlock(&ctrl->v4l2_lock);
+			return -ENOMEM;
+		}
+
+		/* assign to ctrl */
+		ctrl->cap = cap;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		if (ctrl->power_status == FIMC_POWER_OFF) {
+			pm_runtime_get_sync(&pdev->dev);
+		}
+#endif
+	}
+
+#if !defined(CONFIG_MACH_PX)
+	if (fimc->active_camera == 0) {
+		if (!ctrl->cam->initialized)
+			ret = fimc_init_camera(ctrl);
+
+		if (unlikely(ret < 0)) {
+			if (ret == -ENOSYS) {
+				/* return no error If firmware is bad.
+				Because F/W update app should access the sensor through HAL instance */
+				fimc_warn("%s: please update the F/W\n", __func__);
+			} else {
+				mutex_unlock(&ctrl->v4l2_lock);
+				fimc_err("%s: fail to initialize subdev\n", __func__);
+				return ret;
+			}
+		}
+	}
+#endif
+
+	mutex_unlock(&ctrl->v4l2_lock);
+	printk(KERN_INFO "%s--: index %d FIMC%d\n", __func__, i, ctrl->id);
+
+	return 0;
+}
+
+/*
+ * VIDIOC_ENUM_FMT handler: copies the requested entry from the static
+ * capture_fmts[] table.
+ * NOTE(review): the memset is redundant -- the following memcpy
+ * overwrites the entire structure (including f->index, which matches
+ * the table entry's .index by construction).
+ */
+int fimc_enum_fmt_vid_capture(struct file *file, void *fh,
+				struct v4l2_fmtdesc *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int i = f->index;
+
+	/* printk(KERN_INFO "%s++\n", __func__); */
+
+	if (i >= ARRAY_SIZE(capture_fmts)) {
+		fimc_err("%s: There is no support format index %d\n", __func__, i);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	memset(f, 0, sizeof(*f));
+	memcpy(f, &capture_fmts[i], sizeof(*f));
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	/* printk(KERN_INFO "%s--\n", __func__); */
+	return 0;
+}
+
+/*
+ * VIDIOC_G_FMT handler: returns the pixel format last stored in
+ * ctrl->cap->fmt by fimc_s_fmt_vid_capture().
+ * Fails with -EINVAL when no capture context exists yet.
+ */
+int fimc_g_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	printk(KERN_INFO "%s++\n", __func__);
+
+	if (!ctrl->cap) {
+		fimc_err("%s: no capture device info\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	memset(&f->fmt.pix, 0, sizeof(f->fmt.pix));
+	memcpy(&f->fmt.pix, &ctrl->cap->fmt, sizeof(f->fmt.pix));
+
+	mutex_unlock(&ctrl->v4l2_lock);
+	printk(KERN_INFO "%s--\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Check for whether the requested format
+ * can be streamed out from FIMC
+ * depends on FIMC node.
+ * Returns 0 when the pixelformat appears in capture_fmts[], -1 otherwise.
+ */
+static int fimc_fmt_avail(struct fimc_control *ctrl,
+		struct v4l2_pix_format *f)
+{
+	int i;
+
+	/*
+	 * TODO: check for which FIMC is used.
+	 * Available fmt should be varied for each FIMC
+	 */
+
+	for (i = 0; i < ARRAY_SIZE(capture_fmts); i++) {
+		if (capture_fmts[i].pixelformat == f->pixelformat)
+			return 0;
+	}
+
+	fimc_info1("Not supported pixelformat requested\n");
+
+	return -1;
+}
+
+/*
+ * figures out the depth of requested format
+ * Returns bits-per-pixel for supported formats, -1 for the compressed
+ * JPEG format or an unsupported pixelformat, and 0 when the format is
+ * in the table but has no depth case below.
+ */
+static int fimc_fmt_depth(struct fimc_control *ctrl, struct v4l2_pix_format *f)
+{
+	int err, depth = 0;
+
+	/* First check for available format or not */
+	err = fimc_fmt_avail(ctrl, f);
+	if (err < 0)
+		return -1;
+
+	/* handles only supported pixelformats */
+	switch (f->pixelformat) {
+	case V4L2_PIX_FMT_RGB32:
+		depth = 32;
+		fimc_dbg("32bpp\n");
+		break;
+	case V4L2_PIX_FMT_RGB565:
+	case V4L2_PIX_FMT_YUYV:
+	case V4L2_PIX_FMT_UYVY:
+	case V4L2_PIX_FMT_VYUY:
+	case V4L2_PIX_FMT_YVYU:
+	case V4L2_PIX_FMT_YUV422P:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		depth = 16;
+		fimc_dbg("16bpp\n");
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV12T:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+		depth = 12;
+		fimc_dbg("12bpp\n");
+		break;
+	case V4L2_PIX_FMT_JPEG:
+		/* compressed: signal "no fixed depth" to the caller */
+		depth = -1;
+		fimc_dbg("Compressed format.\n");
+		break;
+	default:
+		fimc_dbg("why am I here?\n");
+		break;
+	}
+
+	return depth;
+}
+
+/*
+ * Pick the sensor (media-bus) output size for preview mode: an explicit
+ * size pushed in via sensor_output_width/height wins (and is consumed),
+ * otherwise video-telephony mode forces 640x480.
+ * No-op for non-preview modes. Always returns 0.
+ */
+static int fimc_calc_frame_ratio(struct fimc_control *ctrl,
+				struct fimc_capinfo *cap)
+{
+	if (cap->fmt.priv != V4L2_PIX_FMT_MODE_PREVIEW)
+		return 0;
+
+	if ((cap->sensor_output_width != 0) &&
+		(cap->sensor_output_height != 0)) {
+		cap->mbus_fmt.width = cap->sensor_output_width;
+		cap->mbus_fmt.height = cap->sensor_output_height;
+		/* one-shot override: clear after use */
+		cap->sensor_output_width = cap->sensor_output_height = 0;
+		pr_info("fimc: forced sensor output size: (%d, %d) to (%d, %d)\n",
+			cap->mbus_fmt.width, cap->mbus_fmt.height,
+			cap->fmt.width, cap->fmt.height);
+	} else if (cap->vt_mode) {
+		cap->mbus_fmt.width = 640;
+		cap->mbus_fmt.height = 480;
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_MACH_PX) && defined(CONFIG_VIDEO_HD_SUPPORT)
+/*
+ * Detect a switch between HD (1280-wide) and non-HD recording on the
+ * rear camera and, when the mode changed, tell the sensor the new mode
+ * and power it fully down so the next init brings it up in that mode.
+ * Returns 0 normally, or the sensor's cam_power(0) error.
+ */
+static int fimc_check_hd_mode(struct fimc_control *ctrl, struct v4l2_format *f)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	struct fimc_capinfo *cap = ctrl->cap;
+	u32 hd_mode = 0;
+	int ret = -EINVAL;
+
+	if (!cap->movie_mode || (fimc->active_camera != 0))
+		return 0;
+
+	if (f->fmt.pix.width == 1280 || cap->sensor_output_width == 1280)
+		hd_mode = 1;
+
+	printk(KERN_DEBUG "%s:movie_mode=%d, hd_mode=%d\n",
+		__func__, cap->movie_mode, hd_mode);
+
+	/* movie_mode 2 = HD recording, 1 = non-HD recording */
+	if (((cap->movie_mode == 2) && !hd_mode) ||
+	    ((cap->movie_mode == 1) && hd_mode)) {
+		fimc_warn("%s: mode change, power(%d) down\n",
+			__func__, ctrl->cam->initialized);
+		cap->movie_mode = hd_mode ? 2 : 1;
+
+		if (ctrl->cam->initialized) {
+			struct v4l2_control c;
+
+			subdev_call(ctrl, core, reset, 0);
+			c.id = V4L2_CID_CAMERA_SENSOR_MODE;
+			c.value = cap->movie_mode;
+			subdev_call(ctrl, core, s_ctrl, &c);
+
+			if (ctrl->cam->cam_power) {
+				ret = ctrl->cam->cam_power(0);
+				if (unlikely(ret))
+					return ret;
+			}
+
+			/* shutdown the MCLK */
+			clk_disable(ctrl->cam->clk);
+			fimc->mclk_status = CAM_MCLK_OFF;
+
+			ctrl->cam->initialized = 0;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/* Private-format S_FMT is not supported on the capture node. */
+int fimc_s_fmt_vid_private(struct file *file, void *fh, struct v4l2_format *f)
+{
+	return -EINVAL;
+}
+
+/*
+ * VIDIOC_S_FMT handler for the capture node.
+ * Stores the requested pixel format in ctrl->cap, derives bytesperline /
+ * sizeimage and the matching media-bus format for the sensor, then
+ * forwards it via s_mbus_fmt (skipped for WriteBack and the non-sensor
+ * FIMC instance). JPEG requests put the scaler into bypass.
+ * Fix vs. original: the CONFIG_VIDEO_HD_SUPPORT error path returned
+ * with ctrl->v4l2_lock still held, deadlocking every later ioctl.
+ */
+int fimc_s_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct v4l2_mbus_framefmt *mbus_fmt;
+
+	int ret = 0;
+	int depth;
+	printk(KERN_INFO "%s FIMC%d\n", __func__, ctrl->id);
+
+	/* rotation, flip, dtp_mode, movie_mode and vt_mode,
+	 * sensor_output_width,height should be maintained.(by TN)
+	 * NOTE(review): relies on those seven u32 fields sitting at the
+	 * very end of struct fimc_capinfo -- fragile layout coupling. */
+	memset(cap, 0, sizeof(*cap) - sizeof(u32) * 7);
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	memset(&cap->fmt, 0, sizeof(cap->fmt));
+	memcpy(&cap->fmt, &f->fmt.pix, sizeof(cap->fmt));
+
+	mbus_fmt = &cap->mbus_fmt;
+#ifdef CONFIG_MACH_S2PLUS
+	if (ctrl->id == FIMC0) {
+#else
+	if (ctrl->id != FIMC2) {
+#endif
+		if (cap->movie_mode || cap->vt_mode ||
+		    cap->fmt.priv == V4L2_PIX_FMT_MODE_HDR) {
+#if defined(CONFIG_MACH_PX) && defined(CONFIG_VIDEO_HD_SUPPORT)
+			ret = fimc_check_hd_mode(ctrl, f);
+			if (unlikely(ret)) {
+				/* fix: drop the lock before bailing out */
+				mutex_unlock(&ctrl->v4l2_lock);
+				fimc_err("%s: error, check_hd_mode\n",
+					__func__);
+				return ret;
+			}
+#endif
+			fimc_calc_frame_ratio(ctrl, cap);
+		}
+#if defined(CONFIG_MACH_U1_BD) || defined(CONFIG_MACH_Q1_BD)
+		else {
+			fimc_calc_frame_ratio(ctrl, cap);
+		}
+#endif
+
+		if (!(mbus_fmt->width && mbus_fmt->height)) {
+			mbus_fmt->width = cap->fmt.width;
+			mbus_fmt->height = cap->fmt.height;
+		}
+		mbus_fmt->field = cap->fmt.priv;
+	}
+
+	/*
+	 * Note that expecting format only can be with
+	 * available output format from FIMC
+	 * Following items should be handled in driver
+	 * bytesperline = width * depth / 8
+	 * sizeimage = bytesperline * height
+	 */
+	/* This function may return 0 or -1 in case of error,
+	 * hence need to check here.
+	 */
+
+	depth = fimc_fmt_depth(ctrl, &cap->fmt);
+	if (depth == 0) {
+		mutex_unlock(&ctrl->v4l2_lock);
+		fimc_err("%s: Invalid pixel format\n", __func__);
+		return -EINVAL;
+	} else if (depth < 0) {
+		/*
+		 * When the pixelformat is JPEG,
+		 * the application is requesting for data
+		 * in JPEG compressed format
+		 */
+		cap->fmt.colorspace = V4L2_COLORSPACE_JPEG;
+		mbus_fmt->code = V4L2_MBUS_FMT_JPEG_1X8;
+	} else {
+		cap->fmt.bytesperline = (cap->fmt.width * depth) >> 3;
+		cap->fmt.sizeimage = (cap->fmt.bytesperline * cap->fmt.height);
+		mbus_fmt->code = V4L2_MBUS_FMT_VYUY8_2X8;
+	}
+	mbus_fmt->colorspace = cap->fmt.colorspace;
+
+
+	/* JPEG goes through the scaler untouched */
+	if (cap->fmt.colorspace == V4L2_COLORSPACE_JPEG) {
+		ctrl->sc.bypass = 1;
+		cap->lastirq = 0;
+		fimc_info1("fimc_s_fmt_vid_capture V4L2_COLORSPACE_JPEG\n");
+	} else {
+		ctrl->sc.bypass = 0;
+		cap->lastirq = 0;
+	}
+
+	fimc_info1("s_fmt width = %d, height = %d\n", \
+			cap->fmt.width, cap->fmt.height);
+
+	/* WriteBack doesn't have subdev_call */
+	if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B)) {
+		mutex_unlock(&ctrl->v4l2_lock);
+		return 0;
+	}
+
+#ifdef CONFIG_MACH_S2PLUS
+	if (ctrl->id == FIMC0)
+#else
+	if (ctrl->id != FIMC2)
+#endif
+		ret = subdev_call(ctrl, video, s_mbus_fmt, mbus_fmt);
+
+	mutex_unlock(&ctrl->v4l2_lock);
+	printk(KERN_INFO "%s -- FIMC%d\n", __func__, ctrl->id);
+
+	return ret;
+}
+
+/* TRY_FMT is not implemented for the capture node. */
+int fimc_try_fmt_vid_capture(struct file *file, void *fh, struct v4l2_format *f)
+{
+	/* Not implement */
+	return -ENOTTY;
+}
+
+/*
+ * Allocate DMA memory for all cap->nr_bufs capture buffers.
+ * @plane: number of colour planes (1-3); per-plane byte sizes are
+ *         derived from @size (pixel count) and @bpp (bits per pixel).
+ * @align: when non-zero, plane sizes are page-aligned (and passed to
+ *         fimc_dma_alloc as the alignment).
+ * @use_paddingbuf: when set, a 4th 16-byte padding plane is added.
+ * On any allocation failure every buffer allocated so far is freed and
+ * -ENOMEM is returned.
+ */
+static int fimc_alloc_buffers(struct fimc_control *ctrl,
+			int plane, int size, int align, int bpp, int use_paddingbuf)
+{
+	struct fimc_capinfo *cap = ctrl->cap;
+	int i, j;
+	int plane_length[4] = {0, };
+	if (plane < 1 || plane > 3)
+		return -ENOMEM;
+
+	switch (plane) {
+	case 1:
+		if (align) {
+			plane_length[0] = PAGE_ALIGN((size*bpp) >> 3);
+			plane_length[1] = 0;
+			plane_length[2] = 0;
+		} else {
+			plane_length[0] = (size*bpp) >> 3;
+			plane_length[1] = 0;
+			plane_length[2] = 0;
+		}
+		break;
+	/* In case of 2, only NV12 and NV12T is supported. */
+	case 2:
+		if (align) {
+			plane_length[0] = PAGE_ALIGN((size*8) >> 3);
+			plane_length[1] = PAGE_ALIGN((size*(bpp-8)) >> 3);
+			plane_length[2] = 0;
+			fimc_info2("plane_length[0] = %d, plane_length[1] = %d\n" \
+					, plane_length[0], plane_length[1]);
+		} else {
+			plane_length[0] = ((size*8) >> 3);
+			plane_length[1] = ((size*(bpp-8)) >> 3);
+			plane_length[2] = 0;
+			fimc_info2("plane_length[0] = %d, plane_length[1] = %d\n" \
+					, plane_length[0], plane_length[1]);
+		}
+
+		break;
+	/* In case of 3
+	 * YUV422 : 8 / 4 / 4 (bits)
+	 * YUV420 : 8 / 2 / 2 (bits)
+	 * 3rd plane have to consider page align for mmap */
+	case 3:
+		if (align) {
+			plane_length[0] = (size*8) >> 3;
+			plane_length[1] = (size*((bpp-8)/2)) >> 3;
+			plane_length[2] = PAGE_ALIGN((size*bpp)>>3) - plane_length[0] - plane_length[1];
+		} else {
+			plane_length[0] = (size*8) >> 3;
+			plane_length[1] = (size*((bpp-8)/2)) >> 3;
+			plane_length[2] = ((size*bpp)>>3) - plane_length[0] - plane_length[1];
+		}
+		break;
+	default:
+		fimc_err("impossible!\n");
+		return -ENOMEM;
+	}
+
+	if (use_paddingbuf)
+		plane_length[3] = 16;
+	else
+		plane_length[3] = 0;
+
+	for (i = 0; i < cap->nr_bufs; i++) {
+		for (j = 0; j < plane; j++) {
+			cap->bufs[i].length[j] = plane_length[j];
+			fimc_dma_alloc(ctrl, &cap->bufs[i], j, align);
+
+			if (!cap->bufs[i].base[j])
+				goto err_alloc;
+		}
+		if (use_paddingbuf) {
+			cap->bufs[i].length[3] = plane_length[3];
+			fimc_dma_alloc(ctrl, &cap->bufs[i], 3, align);
+
+			if (!cap->bufs[i].base[3])
+				goto err_alloc;
+		}
+		cap->bufs[i].state = VIDEOBUF_PREPARED;
+	}
+
+	return 0;
+
+err_alloc:
+	/* unwind: free everything allocated before the failure */
+	for (i = 0; i < cap->nr_bufs; i++) {
+		for (j = 0; j < plane; j++) {
+			if (cap->bufs[i].base[j])
+				fimc_dma_free(ctrl, &cap->bufs[i], j);
+		}
+		if (use_paddingbuf) {
+			if (cap->bufs[i].base[3])
+				fimc_dma_free(ctrl, &cap->bufs[i], 3);
+		}
+		memset(&cap->bufs[i], 0, sizeof(cap->bufs[i]));
+	}
+
+	return -ENOMEM;
+}
+
+/*
+ * Reset all capture buffer bookkeeping and rewind the driver's DMA
+ * memory cursor to the region base.
+ * NOTE(review): this only clears the buf_set records; the actual DMA
+ * memory release is done by callers via fimc_dma_free() -- confirm.
+ */
+static void fimc_free_buffers(struct fimc_control *ctrl)
+{
+	struct fimc_capinfo *cap;
+	int i;
+
+	if (ctrl && ctrl->cap)
+		cap = ctrl->cap;
+	else
+		return;
+
+	for (i = 0; i < FIMC_PHYBUFS; i++) {
+		memset(&cap->bufs[i], 0, sizeof(cap->bufs[i]));
+		cap->bufs[i].state = VIDEOBUF_NEEDS_INIT;
+	}
+
+	ctrl->mem.curr = ctrl->mem.base;
+}
+
+/*
+ * VIDIOC_REQBUFS handler for the capture node.
+ * count == 0 (or >= FIMC_CAPBUFS) stops any running DMA and frees every
+ * buffer; otherwise previous buffers are released and count new ones
+ * are allocated with a per-pixelformat plane layout and alignment.
+ * FIMC hw >= 0x51 additionally programs the output-buffer sequence
+ * registers (and powers the block up via runtime PM when needed);
+ * older hardware uses the software inq list instead.
+ * NOTE(review): cap->nr_bufs >= 0 is always true for the values stored
+ * here, so the "free previous buffers" branch runs on every call.
+ */
+int fimc_reqbufs_capture(void *fh, struct v4l2_requestbuffers *b)
+{
+	struct fimc_control *ctrl = fh;
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+#endif
+	int ret = 0, i;
+	int bpp = 0;
+	int size = 0;
+
+	if (!cap) {
+		fimc_err("%s: no capture device info\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&ctrl->v4l2_lock);
+
+	/* A count value of zero frees all buffers */
+	if ((b->count == 0) || (b->count >= FIMC_CAPBUFS)) {
+		/* aborting or finishing any DMA in progress */
+		if (ctrl->status == FIMC_STREAMON)
+			fimc_streamoff_capture(fh);
+		for (i = 0; i < FIMC_CAPBUFS; i++) {
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 0);
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 1);
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 2);
+		}
+
+		mutex_unlock(&ctrl->v4l2_lock);
+		return 0;
+	}
+	/* free previous buffers */
+	if ((cap->nr_bufs >= 0) && (cap->nr_bufs < FIMC_CAPBUFS)) {
+		fimc_info1("%s : remained previous buffer count is %d\n", __func__,
+				cap->nr_bufs);
+		for (i = 0; i < cap->nr_bufs; i++) {
+			fimc_dma_free(ctrl, &cap->bufs[i], 0);
+			fimc_dma_free(ctrl, &cap->bufs[i], 1);
+			fimc_dma_free(ctrl, &cap->bufs[i], 2);
+		}
+	}
+	fimc_free_buffers(ctrl);
+
+	cap->nr_bufs = b->count;
+	if (pdata->hw_ver >= 0x51) {
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		if (ctrl->power_status == FIMC_POWER_OFF) {
+			pm_runtime_get_sync(&pdev->dev);
+			/*vcm_set_pgtable_base(ctrl->vcm_id);*/
+		}
+#endif
+		fimc_hw_reset_output_buf_sequence(ctrl);
+		for (i = 0; i < cap->nr_bufs; i++) {
+			fimc_hwset_output_buf_sequence(ctrl, i, 1);
+			cap->bufs[i].id = i;
+			cap->bufs[i].state = VIDEOBUF_NEEDS_INIT;
+
+			/* initialize list */
+			INIT_LIST_HEAD(&cap->bufs[i].list);
+		}
+		fimc_info1("%s: requested %d buffers\n", __func__, b->count);
+		fimc_info1("%s: sequence[%d]\n", __func__, fimc_hwget_output_buf_sequence(ctrl));
+		INIT_LIST_HEAD(&cap->outgoing_q);
+	}
+	if (pdata->hw_ver < 0x51) {
+		INIT_LIST_HEAD(&cap->inq);
+		for (i = 0; i < cap->nr_bufs; i++) {
+			cap->bufs[i].id = i;
+			cap->bufs[i].state = VIDEOBUF_NEEDS_INIT;
+
+			/* initialize list */
+			INIT_LIST_HEAD(&cap->bufs[i].list);
+		}
+	}
+
+	bpp = fimc_fmt_depth(ctrl, &cap->fmt);
+
+	/* plane count and alignment depend on the pixel format */
+	switch (cap->fmt.pixelformat) {
+	case V4L2_PIX_FMT_RGB32:	/* fall through */
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_VYUY:		/* fall through */
+	case V4L2_PIX_FMT_YVYU:		/* fall through */
+	case V4L2_PIX_FMT_NV16:		/* fall through */
+	case V4L2_PIX_FMT_NV61:		/* fall through */
+		fimc_info1("%s : 1plane\n", __func__);
+		ret = fimc_alloc_buffers(ctrl, 1,
+			cap->fmt.width * cap->fmt.height, SZ_4K, bpp, 0);
+		break;
+
+	case V4L2_PIX_FMT_NV21:
+		fimc_info1("%s : 2plane for NV21 w %d h %d\n", __func__, cap->fmt.width, cap->fmt.height);
+		ret = fimc_alloc_buffers(ctrl, 2,
+			cap->fmt.width * cap->fmt.height, 0, bpp, 0);
+		break;
+
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+		fimc_info1("%s : 2plane for NV12\n", __func__);
+		ret = fimc_alloc_buffers(ctrl, 2,
+			cap->fmt.width * cap->fmt.height, SZ_64K, bpp, 0);
+		break;
+
+	case V4L2_PIX_FMT_NV12T:	/* fall through */
+		fimc_info1("%s : 2plane for NV12T\n", __func__);
+		ret = fimc_alloc_buffers(ctrl, 2,
+			ALIGN(cap->fmt.width, 128) * ALIGN(cap->fmt.height, 32), SZ_64K, bpp, 0);
+		break;
+
+	case V4L2_PIX_FMT_YUV422P:	/* fall through */
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+		fimc_info1("%s : 3plane\n", __func__);
+		ret = fimc_alloc_buffers(ctrl, 3,
+			cap->fmt.width * cap->fmt.height, 0, bpp, 0);
+		break;
+
+	case V4L2_PIX_FMT_JPEG:
+		fimc_info1("%s : JPEG 1plane\n", __func__);
+		size = fimc_camera_get_jpeg_memsize(ctrl);
+		fimc_info2("%s : JPEG 1plane size = %x\n",
+				__func__, size);
+		ret = fimc_alloc_buffers(ctrl, 1,
+				size, 0, 8, 0);
+		break;
+	default:
+		break;
+	}
+
+	if (ret) {
+		fimc_err("%s: no memory for capture buffer\n", __func__);
+		mutex_unlock(&ctrl->v4l2_lock);
+		return -ENOMEM;
+	}
+
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * VIDIOC_QUERYBUF handler for the capture path.
+ * Fills in the total length of all planes of buffer b->index and a
+ * fake mmap offset (index * PAGE_SIZE) that the driver's mmap handler
+ * decodes back into a buffer index.
+ * Returns 0 on success, -ENODEV before REQBUFS, -EBUSY while
+ * streaming, -EINVAL for an out-of-range index.
+ */
+int fimc_querybuf_capture(void *fh, struct v4l2_buffer *b)
+{
+ struct fimc_control *ctrl = fh;
+ struct fimc_capinfo *cap = ctrl->cap;
+
+ /* Fix: cap is dereferenced throughout; other capture ioctls
+ * already guard against a missing capture context. */
+ if (!cap) {
+ fimc_err("%s: no capture device info\n", __func__);
+ return -ENODEV;
+ }
+
+ if (ctrl->status != FIMC_STREAMOFF) {
+ fimc_err("fimc is running\n");
+ return -EBUSY;
+ }
+
+ /* Fix: b->index comes from user space; bound it before indexing
+ * cap->bufs[]. */
+ if (b->index >= (u32)cap->nr_bufs) {
+ fimc_err("%s: invalid index %d\n", __func__, b->index);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctrl->v4l2_lock);
+
+ /* The number of bytes a single mmap() must cover depends on the
+ * plane layout of the current pixel format. */
+ switch (cap->fmt.pixelformat) {
+ case V4L2_PIX_FMT_JPEG: /* fall through */
+ case V4L2_PIX_FMT_RGB32: /* fall through */
+ case V4L2_PIX_FMT_RGB565: /* fall through */
+ case V4L2_PIX_FMT_YUYV: /* fall through */
+ case V4L2_PIX_FMT_UYVY: /* fall through */
+ case V4L2_PIX_FMT_VYUY: /* fall through */
+ case V4L2_PIX_FMT_YVYU: /* fall through */
+ case V4L2_PIX_FMT_NV16: /* fall through */
+ case V4L2_PIX_FMT_NV61: /* fall through */
+ b->length = cap->bufs[b->index].length[0];
+ break;
+
+ case V4L2_PIX_FMT_NV21:
+ b->length = cap->bufs[b->index].length[0]
+ + cap->bufs[b->index].length[1];
+ break;
+ case V4L2_PIX_FMT_NV12: /* fall through */
+ case V4L2_PIX_FMT_NV12T:
+ /* NV12(T) planes are mapped on 64 KiB boundaries */
+ b->length = ALIGN(cap->bufs[b->index].length[0], SZ_64K)
+ + ALIGN(cap->bufs[b->index].length[1], SZ_64K);
+ break;
+ case V4L2_PIX_FMT_YUV422P: /* fall through */
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ b->length = cap->bufs[b->index].length[0]
+ + cap->bufs[b->index].length[1]
+ + cap->bufs[b->index].length[2];
+ break;
+
+ default:
+ b->length = cap->bufs[b->index].length[0];
+ break;
+ }
+ b->m.offset = b->index * PAGE_SIZE;
+ /* memory field should filled V4L2_MEMORY_MMAP */
+ b->memory = V4L2_MEMORY_MMAP;
+
+ cap->bufs[b->index].state = VIDEOBUF_IDLE;
+
+ fimc_dbg("%s: %d bytes with offset: %d\n",
+ __func__, b->length, b->m.offset);
+
+ mutex_unlock(&ctrl->v4l2_lock);
+
+ return 0;
+}
+
+/*
+ * VIDIOC_G_CTRL handler for the capture path.
+ * Rotation, flip and cacheability are answered from the driver's own
+ * capture state; any other control ID is forwarded to the sensor
+ * subdevice (except for the FIMD write-back pseudo cameras, which
+ * have no subdevice).
+ */
+int fimc_g_ctrl_capture(void *fh, struct v4l2_control *c)
+{
+ struct fimc_control *ctrl = fh;
+ int ret = 0;
+
+ fimc_dbg("%s\n", __func__);
+
+ /* Fix: every case below dereferences ctrl->cap and the default
+ * case dereferences ctrl->cam; add the same guard that
+ * fimc_s_ctrl_capture performs. */
+ if (!ctrl->cam || !ctrl->cap) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ switch (c->id) {
+ case V4L2_CID_ROTATION:
+ c->value = ctrl->cap->rotate;
+ break;
+
+ case V4L2_CID_HFLIP:
+ c->value = (ctrl->cap->flip & FIMC_XFLIP) ? 1 : 0;
+ break;
+
+ case V4L2_CID_VFLIP:
+ c->value = (ctrl->cap->flip & FIMC_YFLIP) ? 1 : 0;
+ break;
+
+ case V4L2_CID_CACHEABLE:
+ c->value = ctrl->cap->cacheable;
+ break;
+
+ default:
+ /* get ctrl supported by subdev */
+ /* WriteBack doesn't have subdev_call */
+ if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B))
+ break;
+ ret = subdev_call(ctrl, core, g_ctrl, c);
+ break;
+ }
+
+ return ret;
+}
+
+/* VIDIOC_G_EXT_CTRLS: extended controls are handled entirely by the
+ * sensor subdevice, so this is a traced pass-through. */
+int fimc_g_ext_ctrls_capture(void *fh, struct v4l2_ext_controls *c)
+{
+ struct fimc_prv_data *prv = fh;
+ struct fimc_control *ctrl = prv->ctrl;
+ int err;
+
+ printk(KERN_INFO "%s++\n", __func__);
+
+ /* delegate to the sensor driver */
+ err = subdev_call(ctrl, core, g_ext_ctrls, c);
+
+ printk(KERN_INFO "%s--\n", __func__);
+
+ return err;
+}
+
+/*
+ * VIDIOC_S_CTRL handler for the capture path.
+ * Handles the controls FIMC implements itself (rotation, flip, DMA
+ * address queries, image effects, cacheability, sensor output size)
+ * and forwards everything else to the sensor subdevice. Some IDs
+ * additionally power-cycle the sensor and its MCLK (firmware update,
+ * sensor reset, leaving data-line test mode).
+ * Returns 0 or a negative errno from the subdev call.
+ */
+int fimc_s_ctrl_capture(void *fh, struct v4l2_control *c)
+{
+ struct fimc_control *ctrl = fh;
+ struct fimc_global *fimc = get_fimc_dev();
+ int ret = 0;
+
+ fimc_dbg("%s\n", __func__);
+
+ if (!ctrl->cam || !ctrl->cap ){
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ /* the FIMD write-back pseudo cameras have no subdevice */
+ if ((ctrl->cam->id != CAMERA_WB) && (ctrl->cam->id != CAMERA_WB_B)) {
+ if (!ctrl->cam->sd) {
+ fimc_err("%s: No subdevice.\n", __func__);
+ return -ENODEV;
+ }
+ }
+
+ switch (c->id) {
+ case V4L2_CID_CAM_UPDATE_FW:
+ /* power-cycle the sensor and MCLK so firmware loading
+ * starts from a known state */
+ if (fimc->mclk_status == CAM_MCLK_ON) {
+ if (ctrl->cam->cam_power)
+ ctrl->cam->cam_power(0);
+
+ /* shutdown the MCLK */
+ clk_disable(ctrl->cam->clk);
+ fimc->mclk_status = CAM_MCLK_OFF;
+
+ mdelay(5);
+ }
+
+ if ((clk_get_rate(ctrl->cam->clk)) && (fimc->mclk_status == CAM_MCLK_OFF)) {
+ clk_set_rate(ctrl->cam->clk, ctrl->cam->clk_rate);
+ clk_enable(ctrl->cam->clk);
+ fimc->mclk_status = CAM_MCLK_ON;
+ fimc_info1("clock for camera: %d\n", ctrl->cam->clk_rate);
+
+ if (ctrl->cam->cam_power)
+ ctrl->cam->cam_power(1);
+ }
+
+ if (c->value == FW_MODE_UPDATE)
+ ret = subdev_call(ctrl, core, load_fw);
+ else
+ ret = subdev_call(ctrl, core, s_ctrl, c);
+ break;
+
+ case V4L2_CID_CAMERA_RESET:
+ fimc_warn("reset the camera sensor\n");
+ if (ctrl->cam->initialized) {
+ if (ctrl->cam->cam_power)
+ ctrl->cam->cam_power(0);
+
+ /* shutdown the MCLK */
+ clk_disable(ctrl->cam->clk);
+ fimc->mclk_status = CAM_MCLK_OFF;
+ ctrl->cam->initialized = 0;
+#if defined(CONFIG_MACH_PX)
+ /* 5ms -> 100ms: increase delay.
+ * There are cases that sensor doesn't get revived
+ * inspite of doing power reset.*/
+ msleep(100);
+#else
+ msleep(5);
+#endif
+ }
+ ret = fimc_init_camera(ctrl);
+ break;
+
+ case V4L2_CID_ROTATION:
+ ctrl->cap->rotate = c->value;
+ break;
+
+ case V4L2_CID_HFLIP:
+ if (c->value)
+ ctrl->cap->flip |= FIMC_XFLIP;
+ else
+ ctrl->cap->flip &= ~FIMC_XFLIP;
+ break;
+
+ case V4L2_CID_VFLIP:
+ if (c->value)
+ ctrl->cap->flip |= FIMC_YFLIP;
+ else
+ ctrl->cap->flip &= ~FIMC_YFLIP;
+ break;
+
+ /* The PADDR controls report physical DMA addresses through
+ * c->value; on input c->value is the buffer index. */
+ case V4L2_CID_PADDR_Y:
+ if (ctrl->cap->bufs)
+ c->value = ctrl->cap->bufs[c->value].base[FIMC_ADDR_Y];
+ break;
+
+ case V4L2_CID_PADDR_CB: /* fall through */
+ case V4L2_CID_PADDR_CBCR:
+ if (ctrl->cap->bufs)
+ c->value = ctrl->cap->bufs[c->value].base[FIMC_ADDR_CB];
+ break;
+
+ case V4L2_CID_PADDR_CR:
+ if (ctrl->cap->bufs)
+ c->value = ctrl->cap->bufs[c->value].base[FIMC_ADDR_CR];
+ break;
+ /* Implementation as per C100 FIMC driver */
+ case V4L2_CID_STREAM_PAUSE:
+ fimc_hwset_stop_processing(ctrl);
+ break;
+
+ case V4L2_CID_IMAGE_EFFECT_APPLY:
+ ctrl->fe.ie_on = c->value ? 1 : 0;
+ ctrl->fe.ie_after_sc = 0;
+ ret = fimc_hwset_image_effect(ctrl);
+ break;
+
+ case V4L2_CID_IMAGE_EFFECT_FN:
+ if (c->value < 0 || c->value > FIMC_EFFECT_FIN_SILHOUETTE)
+ return -EINVAL;
+ ctrl->fe.fin = c->value;
+ ret = 0;
+ break;
+
+ case V4L2_CID_IMAGE_EFFECT_CB:
+ ctrl->fe.pat_cb = c->value & 0xFF;
+ ret = 0;
+ break;
+
+ case V4L2_CID_IMAGE_EFFECT_CR:
+ ctrl->fe.pat_cr = c->value & 0xFF;
+ ret = 0;
+ break;
+
+ case V4L2_CID_CAMERA_SENSOR_MODE:
+ ctrl->cap->movie_mode = c->value;
+ ret = subdev_call(ctrl, core, s_ctrl, c);
+#if defined(CONFIG_VIDEO_HD_SUPPORT)
+ printk(KERN_INFO "%s: CAMERA_SENSOR_MODE=%d\n",
+ __func__, c->value);
+ if (!ctrl->cam->initialized)
+ ret = fimc_init_camera(ctrl);
+#endif /* CONFIG_VIDEO_HD_SUPPORT */
+ break;
+
+ case V4L2_CID_CAMERA_VT_MODE:
+ ctrl->cap->vt_mode = c->value;
+ ret = subdev_call(ctrl, core, s_ctrl, c);
+ break;
+
+ case V4L2_CID_CAMERA_CHECK_DATALINE:
+#ifdef CONFIG_MACH_PX
+ /* if camera type is MIPI,
+ * we does not do any subdev_calll */
+ if ((ctrl->cam->type == CAM_TYPE_MIPI) ||
+ (ctrl->cap->dtp_mode == c->value)) {
+#else
+ if (ctrl->cap->dtp_mode == c->value) {
+#endif
+ ret = 0;
+ break;
+ } else {
+ if (c->value == 0 && ctrl->cam->initialized) {
+ /* need to reset after dtp test is finished */
+ fimc_warn("DTP: reset the camera sensor\n");
+ if (ctrl->cam->cam_power)
+ ctrl->cam->cam_power(0);
+
+ /* shutdown the MCLK */
+ clk_disable(ctrl->cam->clk);
+ fimc->mclk_status = CAM_MCLK_OFF;
+ ctrl->cam->initialized = 0;
+
+ msleep(100);
+ ret = fimc_init_camera(ctrl);
+ }
+ ctrl->cap->dtp_mode = c->value;
+ }
+ ret = subdev_call(ctrl, core, s_ctrl, c);
+ break;
+
+ case V4L2_CID_CACHEABLE:
+ ctrl->cap->cacheable = c->value;
+ ret = 0;
+ break;
+
+ case V4L2_CID_CAMERA_SENSOR_OUTPUT_SIZE:
+ /* packed value: width in the high 16 bits, height in
+ * the low 16 bits */
+ ctrl->cap->sensor_output_width = (u32)c->value >> 16;
+ ctrl->cap->sensor_output_height = (u32)c->value & 0x0FFFF;
+ break;
+
+ default:
+ /* try on subdev */
+ /* WriteBack doesn't have subdev_call */
+
+ if ((ctrl->cam->id == CAMERA_WB) || \
+ (ctrl->cam->id == CAMERA_WB_B))
+ break;
+#ifdef CONFIG_MACH_S2PLUS
+ if (FIMC0 == ctrl->id)
+#else
+ if (FIMC2 != ctrl->id)
+#endif
+ ret = subdev_call(ctrl, core, s_ctrl, c);
+ else
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+/* VIDIOC_S_EXT_CTRLS: forward extended controls to the sensor subdev
+ * while holding the v4l2 lock to serialise against other ioctls. */
+int fimc_s_ext_ctrls_capture(void *fh, struct v4l2_ext_controls *c)
+{
+ struct fimc_prv_data *prv = fh;
+ struct fimc_control *ctrl = prv->ctrl;
+ int err;
+
+ mutex_lock(&ctrl->v4l2_lock);
+ err = subdev_call(ctrl, core, s_ext_ctrls, c);
+ mutex_unlock(&ctrl->v4l2_lock);
+
+ return err;
+}
+
+/*
+ * VIDIOC_CROPCAP handler: reports the crop bounds and the default
+ * crop rectangle, both sized to the current sensor resolution.
+ * Returns 0 on success, -ENODEV without an attached camera/capture
+ * context, -ENOMEM if the capture info cannot be allocated.
+ */
+int fimc_cropcap_capture(void *fh, struct v4l2_cropcap *a)
+{
+ struct fimc_control *ctrl = fh;
+ struct fimc_capinfo *cap = ctrl->cap;
+ struct fimc_global *fimc = get_fimc_dev();
+ struct s3c_platform_fimc *pdata;
+
+ fimc_dbg("%s\n", __func__);
+
+ if (!ctrl->cam || !ctrl->cam->sd || !ctrl->cap) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+ mutex_lock(&ctrl->v4l2_lock);
+
+ pdata = to_fimc_plat(ctrl->dev);
+ /* NOTE(review): ctrl->cam and ctrl->cap were already verified
+ * non-NULL above, so this fallback and the kzalloc branch below
+ * look unreachable; kept in case the early check is relaxed. */
+ if (!ctrl->cam)
+ ctrl->cam = fimc->camera[pdata->default_cam];
+
+ if (!cap) {
+ cap = kzalloc(sizeof(*cap), GFP_KERNEL);
+ if (!cap) {
+ fimc_err("%s: no memory for "
+ "capture device info\n", __func__);
+ /* Fix: do not leak the v4l2 lock on this error
+ * path. */
+ mutex_unlock(&ctrl->v4l2_lock);
+ return -ENOMEM;
+ }
+
+ /* assign to ctrl */
+ ctrl->cap = cap;
+ }
+
+ /* crop limitations: the full sensor frame */
+ cap->cropcap.bounds.left = 0;
+ cap->cropcap.bounds.top = 0;
+ cap->cropcap.bounds.width = ctrl->cam->width;
+ cap->cropcap.bounds.height = ctrl->cam->height;
+
+ /* crop default values: no cropping */
+ cap->cropcap.defrect.left = 0;
+ cap->cropcap.defrect.top = 0;
+ cap->cropcap.defrect.width = ctrl->cam->width;
+ cap->cropcap.defrect.height = ctrl->cam->height;
+
+ a->bounds = cap->cropcap.bounds;
+ a->defrect = cap->cropcap.defrect;
+
+ mutex_unlock(&ctrl->v4l2_lock);
+
+ return 0;
+}
+
+/* VIDIOC_G_CROP: return the crop rectangle cached in the capture
+ * info under the v4l2 lock. */
+int fimc_g_crop_capture(void *fh, struct v4l2_crop *a)
+{
+ struct fimc_control *ctrl = fh;
+ struct fimc_capinfo *cap;
+
+ fimc_dbg("%s\n", __func__);
+
+ cap = ctrl->cap;
+ if (cap == NULL) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ctrl->v4l2_lock);
+ a->c = cap->crop;
+ mutex_unlock(&ctrl->v4l2_lock);
+
+ return 0;
+}
+
+/*
+ * VIDIOC_S_CROP: cache the requested crop rectangle; the hardware is
+ * programmed from it on the next stream start.
+ */
+int fimc_s_crop_capture(void *fh, struct v4l2_crop *a)
+{
+ struct fimc_control *ctrl = fh;
+
+ fimc_dbg("%s\n", __func__);
+
+ /* Fix: match fimc_g_crop_capture - ctrl->cap may not exist yet
+ * and was dereferenced unconditionally below. */
+ if (!ctrl->cap) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&ctrl->v4l2_lock);
+ ctrl->cap->crop = a->c;
+ mutex_unlock(&ctrl->v4l2_lock);
+
+ return 0;
+}
+
+/* Kick the capture pipeline: start the scaler unless it is bypassed,
+ * then enable the capture engine. Always returns 0. */
+int fimc_start_capture(struct fimc_control *ctrl)
+{
+ int bypass = ctrl->sc.bypass;
+
+ fimc_dbg("%s\n", __func__);
+
+ if (bypass == 0)
+ fimc_hwset_start_scaler(ctrl);
+
+ fimc_hwset_enable_capture(ctrl, bypass);
+
+ return 0;
+}
+
+/*
+ * Stop the capture pipeline: disable the capture engine (optionally
+ * bracketed by the "last IRQ" enable/disable) and stop the scaler.
+ * Returns 0 on success or -ENODEV when camera/subdev/capture state
+ * is missing.
+ */
+int fimc_stop_capture(struct fimc_control *ctrl)
+{
+ fimc_dbg("%s\n", __func__);
+ if (!ctrl->cam) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ /* write-back pseudo cameras have no subdevice to check */
+ if ((ctrl->cam->id != CAMERA_WB) && (ctrl->cam->id != CAMERA_WB_B)) {
+ if (!ctrl->cam->sd) {
+ fimc_err("%s: No subdevice.\n", __func__);
+ return -ENODEV;
+ }
+ }
+
+ if (!ctrl->cap) {
+ /* Fix: typo in the log message ("cappure"). */
+ fimc_err("%s: No capture format.\n", __func__);
+ return -ENODEV;
+ }
+
+ if (ctrl->cap->lastirq) {
+ /* lastirq mode brackets the disable with the last-IRQ
+ * enable/disable - presumably so the frame in flight
+ * completes and raises one final interrupt; confirm
+ * against the FIMC manual. */
+ fimc_hwset_enable_lastirq(ctrl);
+ fimc_hwset_disable_capture(ctrl);
+ fimc_hwset_disable_lastirq(ctrl);
+ } else {
+ fimc_hwset_disable_capture(ctrl);
+ }
+
+ fimc_hwset_stop_scaler(ctrl);
+
+ return 0;
+}
+
+
+/*
+ * VIDIOC_STREAMON handler for the capture path.
+ * Initialises the sensor if needed, re-queries its current frame
+ * size, starts the MIPI-CSIS receiver and the sensor stream, programs
+ * the FIMC pipeline (source, window, scaler, rotation/flip, output
+ * DMA addresses) and finally enables capture.
+ * Returns 0 on success or a negative errno from sensor init /
+ * enum_framesizes.
+ */
+int fimc_streamon_capture(void *fh)
+{
+ struct fimc_control *ctrl = fh;
+ struct fimc_capinfo *cap = ctrl->cap;
+ struct fimc_global *fimc = get_fimc_dev();
+ struct v4l2_frmsizeenum cam_frmsize;
+
+ int rot = 0, i;
+ int ret = 0;
+ struct s3c_platform_camera *cam = NULL;
+
+ struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+ unsigned int inner_elapsed_usec = 0;
+
+ printk(KERN_INFO "%s fimc%d\n", __func__, ctrl->id);
+ cam_frmsize.discrete.width = 0;
+ cam_frmsize.discrete.height = 0;
+ if (!ctrl->cam) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ /* write-back pseudo cameras have no subdevice */
+ if ((ctrl->cam->id != CAMERA_WB) && (ctrl->cam->id != CAMERA_WB_B)) {
+ if (!ctrl->cam->sd) {
+ fimc_err("%s: No subdevice.\n", __func__);
+ return -ENODEV;
+ }
+ }
+
+ if (pdata->hw_ver < 0x51)
+ fimc_hw_reset_camera(ctrl);
+#if (!defined(CONFIG_EXYNOS_DEV_PD) && !defined(CONFIG_PM_RUNTIME))
+ ctrl->status = FIMC_READY_ON;
+#endif
+ cap->irq = 0;
+
+ fimc_hwset_enable_irq(ctrl, 0, 1);
+
+ /* bring up the camera if it has not been initialised yet */
+ if (!ctrl->cam->initialized) {
+ ret = fimc_init_camera(ctrl);
+ if (unlikely(ret < 0)) {
+ fimc_err("%s: fail to initialize subdev\n", __func__);
+ return ret;
+ }
+ }
+
+ /* csi control position change because runtime pm */
+ if (ctrl->cam)
+ cam = ctrl->cam;
+
+ if ((ctrl->cam->id != CAMERA_WB) && (ctrl->cam->id != CAMERA_WB_B)) {
+#ifdef CONFIG_MACH_S2PLUS
+ if (ctrl->id == FIMC0) {
+#else
+ if (ctrl->id != FIMC2) {
+#endif
+ ret = subdev_call(ctrl, video, enum_framesizes, &cam_frmsize);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "%s: enum_framesizes failed\n", __func__);
+ if (ret != -ENOIOCTLCMD)
+ return ret;
+ } else {
+#ifdef CONFIG_TARGET_LOCALE_KOR
+ if ((ctrl->cap->vt_mode != 0) &&
+#else
+ if ((ctrl->cap->vt_mode == 1) &&
+#endif
+ (cap->rotate == 90 || cap->rotate == 270)) {
+ /* VT call with 90/270 deg rotation: use a
+ * fixed 368x480 window (centred assuming a
+ * 640-wide sensor frame - TODO confirm) */
+ ctrl->cam->window.left = 136;
+ ctrl->cam->window.top = 0;
+ ctrl->cam->window.width = 368;
+ ctrl->cam->window.height = 480;
+ ctrl->cam->width = cam_frmsize.discrete.width;
+ ctrl->cam->height = cam_frmsize.discrete.height;
+ dev_err(ctrl->dev, "vtmode = %d, rotate = %d,"
+ " cam->width = %d,"
+ " cam->height = %d\n", ctrl->cap->vt_mode, cap->rotate,
+ ctrl->cam->width, ctrl->cam->height);
+ } else {
+ /* otherwise track the sensor's reported size */
+ if (cam_frmsize.discrete.width > 0 && cam_frmsize.discrete.height > 0) {
+ ctrl->cam->window.left = 0;
+ ctrl->cam->window.top = 0;
+ ctrl->cam->width = ctrl->cam->window.width = cam_frmsize.discrete.width;
+ ctrl->cam->height = ctrl->cam->window.height = cam_frmsize.discrete.height;
+ fimc_info2("enum_framesizes width = %d, height = %d\n",
+ ctrl->cam->width, ctrl->cam->height);
+ }
+ }
+ }
+
+ if (cam->type == CAM_TYPE_MIPI) {
+ /*
+ * subdev call for sleep/wakeup:
+ * no error although no s_stream api support
+ */
+#if defined(CONFIG_MACH_PX)
+#ifdef CONFIG_VIDEO_IMPROVE_STREAMOFF
+ v4l2_subdev_call(cam->sd, video, s_stream,
+ STREAM_MODE_WAIT_OFF);
+#endif /* CONFIG_VIDEO_IMPROVE_STREAMOFF */
+#else /* CONFIG_MACH_PX */
+ if (fimc->active_camera == 0) {
+ if (cap->fmt.priv != V4L2_PIX_FMT_MODE_PREVIEW) {
+ v4l2_subdev_call(cam->sd, video, s_stream,
+ STREAM_MODE_CAM_ON);
+ }
+ } else {
+ /* front camera: enforce a minimum ~150 ms gap
+ * since stream-off (before_time is stamped in
+ * fimc_streamoff_capture) */
+ do_gettimeofday(&ctrl->curr_time);
+ inner_elapsed_usec = \
+ (ctrl->curr_time.tv_sec - ctrl->before_time.tv_sec) * USEC_PER_SEC \
+ + ctrl->curr_time.tv_usec - ctrl->before_time.tv_usec;
+ inner_elapsed_usec = inner_elapsed_usec / 1000;
+
+ /* printk(KERN_INFO "\n\nfront cam stream off remain time = %dms\n",
+ inner_elapsed_usec);*/
+
+ if (150 > inner_elapsed_usec) {
+ /*printk(KERN_INFO "front cam stream off added msleep = %dms\n",
+ 150 - inner_elapsed_usec);*/
+ msleep(150 - inner_elapsed_usec);
+ }
+ }
+#endif
+ /* start the MIPI-CSIS receiver on the channel this
+ * camera is wired to */
+ if (cam->id == CAMERA_CSI_C) {
+ s3c_csis_start(CSI_CH_0, cam->mipi_lanes, cam->mipi_settle, \
+ cam->mipi_align, cam->width, cam->height, cap->fmt.pixelformat);
+ } else {
+ s3c_csis_start(CSI_CH_1, cam->mipi_lanes, cam->mipi_settle, \
+ cam->mipi_align, cam->width, cam->height, cap->fmt.pixelformat);
+ }
+#if defined(CONFIG_MACH_PX)
+ v4l2_subdev_call(cam->sd, video, s_stream,
+ STREAM_MODE_CAM_ON);
+#else /* CONFIG_MACH_PX */
+ if (fimc->active_camera == 0) {
+ if (cap->fmt.priv == V4L2_PIX_FMT_MODE_PREVIEW) {
+ v4l2_subdev_call(cam->sd, video, s_stream,
+ STREAM_MODE_CAM_ON);
+ }
+ } else {
+ v4l2_subdev_call(cam->sd, video, s_stream,
+ STREAM_MODE_CAM_ON);
+ }
+#endif
+ } else {
+ subdev_call(ctrl, video, s_stream, STREAM_MODE_CAM_ON);
+ }
+ } else {
+ if (cap->fmt.priv != V4L2_PIX_FMT_MODE_HDR)
+ v4l2_subdev_call(cam->sd, video, s_stream, STREAM_MODE_MOVIE_ON);
+ }
+ }
+
+ /* Set FIMD to write back */
+ if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B)) {
+ if (ctrl->cam->id == CAMERA_WB)
+ fimc_hwset_sysreg_camblk_fimd0_wb(ctrl);
+ else
+ fimc_hwset_sysreg_camblk_fimd1_wb(ctrl);
+
+ s3cfb_direct_ioctl(0, S3CFB_SET_WRITEBACK, 1);
+ }
+
+ fimc_hwset_camera_type(ctrl);
+ fimc_hwset_camera_polarity(ctrl);
+ fimc_hwset_enable_lastend(ctrl);
+
+ if (cap->fmt.pixelformat != V4L2_PIX_FMT_JPEG) {
+ fimc_hwset_camera_source(ctrl);
+ fimc_hwset_camera_offset(ctrl);
+
+ fimc_capture_scaler_info(ctrl);
+ fimc_hwset_prescaler(ctrl, &ctrl->sc);
+ fimc_hwset_scaler(ctrl, &ctrl->sc);
+ fimc_hwset_output_colorspace(ctrl, cap->fmt.pixelformat);
+ fimc_hwset_output_addr_style(ctrl, cap->fmt.pixelformat);
+
+ if (cap->fmt.pixelformat == V4L2_PIX_FMT_RGB32 ||
+ cap->fmt.pixelformat == V4L2_PIX_FMT_RGB565)
+ fimc_hwset_output_rgb(ctrl, cap->fmt.pixelformat);
+ else
+ fimc_hwset_output_yuv(ctrl, cap->fmt.pixelformat);
+
+ fimc_hwset_output_area(ctrl, cap->fmt.width, cap->fmt.height);
+ fimc_hwset_output_scan(ctrl, &cap->fmt);
+
+ fimc_hwset_output_rot_flip(ctrl, cap->rotate, cap->flip);
+ rot = fimc_mapping_rot_flip(cap->rotate, cap->flip);
+
+ if (rot & FIMC_ROT) {
+#ifndef CONFIG_VIDEO_CONFERENCE_CALL
+ /* rotated output: the original size must be square
+ * enough to hold the swapped dimensions */
+ if (cap->fmt.width > cap->fmt.height)
+ fimc_hwset_org_output_size(ctrl,
+ cap->fmt.width, cap->fmt.width);
+ else
+ fimc_hwset_org_output_size(ctrl,
+ cap->fmt.height, cap->fmt.height);
+
+ fimc_hwset_output_size(ctrl, cap->fmt.height, cap->fmt.width);
+#else
+ /* Fix codes 110723 */
+ fimc_hwset_org_output_size(ctrl,
+ cap->fmt.width, cap->fmt.height);
+ fimc_hwset_output_size(ctrl,
+ cap->fmt.height, cap->fmt.width);
+#endif
+ } else {
+ fimc_hwset_org_output_size(ctrl,
+ cap->fmt.width, cap->fmt.height);
+ fimc_hwset_output_size(ctrl, cap->fmt.width, cap->fmt.height);
+ }
+
+ fimc_hwset_jpeg_mode(ctrl, false);
+ } else {
+ fimc_hwset_output_size(ctrl,
+ cap->fmt.width, cap->fmt.height);
+ /* NOTE(review): rot is still 0 on this JPEG path (it is
+ * only computed in the non-JPEG branch above), so the
+ * FIMC_ROT case below is dead code - confirm whether
+ * rotation should apply to JPEG capture. */
+ if (rot & FIMC_ROT)
+ fimc_hwset_org_output_size(ctrl,
+ cap->fmt.height, cap->fmt.width);
+ else
+ fimc_hwset_org_output_size(ctrl,
+ cap->fmt.width, cap->fmt.height);
+
+ fimc_hwset_output_area_size(ctrl, fimc_camera_get_jpeg_memsize(ctrl));
+ fimc_hwset_jpeg_mode(ctrl, true);
+ }
+
+ /* v5.1+ programs every buffer address up front; older IPs use
+ * the ping-pong out-queue */
+ if (pdata->hw_ver >= 0x51) {
+ for (i = 0; i < cap->nr_bufs; i++)
+ fimc_hwset_output_address(ctrl, &cap->bufs[i], i);
+ } else {
+ for (i = 0; i < FIMC_PINGPONG; i++)
+ fimc_add_outqueue(ctrl, i);
+ }
+
+ if (ctrl->cap->fmt.colorspace == V4L2_COLORSPACE_JPEG) {
+ fimc_hwset_scaler_bypass(ctrl);
+ }
+
+ ctrl->cap->cnt = 0;
+ fimc_start_capture(ctrl);
+ ctrl->status = FIMC_STREAMON;
+ printk(KERN_INFO "%s-- fimc%d\n", __func__, ctrl->id);
+
+ /* if available buffer did not remained */
+ return 0;
+}
+
+/*
+ * VIDIOC_STREAMOFF handler for the capture path.
+ * Stops capture DMA and the scaler, stops the sensor stream and the
+ * MIPI-CSIS receiver, resets the FIMC block, disables FIMD
+ * write-back, and optionally powers the sensor down.
+ * Returns 0 on success or -ENODEV when camera/subdev state is gone.
+ */
+int fimc_streamoff_capture(void *fh)
+{
+ struct fimc_control *ctrl = fh;
+ struct fimc_capinfo *cap = ctrl->cap;
+
+ struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+ printk(KERN_INFO "%s fimc%d\n", __func__, ctrl->id);
+ if (!ctrl->cam) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ /* write-back pseudo cameras have no subdevice */
+ if ((ctrl->cam->id != CAMERA_WB) && (ctrl->cam->id != CAMERA_WB_B)) {
+ if (!ctrl->cam->sd) {
+ fimc_err("%s: No subdevice.\n", __func__);
+ return -ENODEV;
+ }
+ }
+
+ ctrl->status = FIMC_READY_OFF;
+
+ fimc_stop_capture(ctrl);
+
+ /* with IMPROVE_STREAMOFF the sensor stream is stopped early,
+ * before the capture engine has fully drained */
+#if defined(CONFIG_MACH_PX)
+#ifdef CONFIG_VIDEO_IMPROVE_STREAMOFF
+#ifdef CONFIG_MACH_S2PLUS
+ if ((ctrl->id == FIMC0) && (ctrl->cam->type == CAM_TYPE_MIPI))
+#else
+ if ((ctrl->id != FIMC2) && (ctrl->cam->type == CAM_TYPE_MIPI))
+#endif
+ v4l2_subdev_call(ctrl->cam->sd, video, s_stream,
+ STREAM_MODE_CAM_OFF);
+#endif /* CONFIG_VIDEO_IMPROVE_STREAMOFF */
+#else /* CONFIG_MACH_PX */
+ if (get_fimc_dev()->active_camera == 1)
+ v4l2_subdev_call(ctrl->cam->sd, video, s_stream, STREAM_MODE_CAM_OFF);
+
+ /* timestamp consumed by fimc_streamon_capture to enforce a
+ * minimum off->on gap for the front camera */
+ do_gettimeofday(&ctrl->before_time);
+#endif
+
+ /* wait for stop hardware */
+ fimc_wait_disable_capture(ctrl);
+
+ fimc_hwset_disable_irq(ctrl);
+ if (pdata->hw_ver < 0x51)
+ INIT_LIST_HEAD(&cap->inq);
+
+ ctrl->status = FIMC_STREAMOFF;
+#ifdef CONFIG_MACH_S2PLUS
+ if (ctrl->id == FIMC0) {
+#else
+ if (ctrl->id != FIMC2) {
+#endif
+ if (ctrl->cam->type == CAM_TYPE_MIPI) {
+ if (ctrl->cam->id == CAMERA_CSI_C)
+ s3c_csis_stop(CSI_CH_0);
+ else
+ s3c_csis_stop(CSI_CH_1);
+ }
+
+#if defined(CONFIG_MACH_PX)
+#ifndef CONFIG_VIDEO_IMPROVE_STREAMOFF
+ /* configs without IMPROVE_STREAMOFF stop the sensor only
+ * now, after the CSIS receiver is down */
+ v4l2_subdev_call(ctrl->cam->sd, video, s_stream,
+ STREAM_MODE_CAM_OFF);
+#endif /* CONFIG_VIDEO_IMPROVE_STREAMOFF */
+#else /* CONFIG_MACH_PX */
+ if (get_fimc_dev()->active_camera == 0)
+ v4l2_subdev_call(ctrl->cam->sd, video, s_stream, STREAM_MODE_CAM_OFF);
+#endif
+ fimc_hwset_reset(ctrl);
+ } else {
+ fimc_hwset_reset(ctrl);
+ if (cap->fmt.priv != V4L2_PIX_FMT_MODE_HDR)
+ v4l2_subdev_call(ctrl->cam->sd, video, s_stream, STREAM_MODE_MOVIE_OFF);
+ }
+
+ /* Set FIMD to write back */
+ if ((ctrl->cam->id == CAMERA_WB) || (ctrl->cam->id == CAMERA_WB_B))
+ s3cfb_direct_ioctl(0, S3CFB_SET_WRITEBACK, 0);
+
+ /* disable camera power */
+ /* cam power off should call in the subdev release function */
+ if (ctrl->cam->reset_camera) {
+ if (ctrl->cam->cam_power)
+ ctrl->cam->cam_power(0);
+ if (ctrl->power_status != FIMC_POWER_SUSPEND)
+ ctrl->cam->initialized = 0;
+ }
+
+ printk(KERN_INFO "%s -- fimc%d\n", __func__, ctrl->id);
+ return 0;
+}
+
+/*
+ * VIDIOC_QBUF handler: hand buffer b->index back to the driver.
+ * On FIMC v5.1+ the buffer is re-enabled in the hardware frame-count
+ * sequence; on older IPs it is appended to the software in-queue.
+ * Also restarts capture if it had stopped for lack of buffers.
+ * Returns 0, -ENODEV, or -EINVAL on bad memory type / index / state.
+ */
+int fimc_qbuf_capture(void *fh, struct v4l2_buffer *b)
+{
+ struct fimc_control *ctrl = fh;
+ struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+ struct fimc_capinfo *cap = ctrl->cap;
+
+ if (!cap || !ctrl->cam) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ if (b->memory != V4L2_MEMORY_MMAP) {
+ fimc_err("%s: invalid memory type\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Fix: b->index is user-controlled; bound it before indexing
+ * cap->bufs[]. */
+ if (b->index >= (u32)cap->nr_bufs) {
+ fimc_err("%s: invalid index %d\n", __func__, b->index);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctrl->v4l2_lock);
+ if (pdata->hw_ver >= 0x51) {
+ if (cap->bufs[b->index].state != VIDEOBUF_IDLE) {
+ fimc_err("%s: invalid state b->index : %d\n", __func__,
+ b->index);
+ mutex_unlock(&ctrl->v4l2_lock);
+ return -EINVAL;
+ } else {
+ fimc_info2("%s[%d] : b->index : %d\n", __func__, ctrl->id,
+ b->index);
+ fimc_hwset_output_buf_sequence(ctrl, b->index,
+ FIMC_FRAMECNT_SEQ_ENABLE);
+ cap->bufs[b->index].state = VIDEOBUF_QUEUED;
+ /* capture stalled with no queued buffers - restart */
+ if (ctrl->status == FIMC_BUFFER_STOP) {
+ printk(KERN_INFO "fimc_qbuf_capture start fimc%d again\n",
+ ctrl->id);
+ fimc_start_capture(ctrl);
+ ctrl->status = FIMC_STREAMON;
+ }
+ }
+ } else {
+ fimc_add_inqueue(ctrl, b->index);
+ }
+
+ mutex_unlock(&ctrl->v4l2_lock);
+
+ return 0;
+}
+
+/*
+ * VIDIOC_DQBUF handler for the capture path.
+ * v5.1+ IPs: pop the oldest buffer from outgoing_q (filled by the
+ * ISR), returning -EAGAIN when nothing is ready. Older IPs: derive
+ * the finished ping-pong slot from the hardware frame counter.
+ * For cacheable buffers the CPU caches are flushed afterwards, with
+ * whole-cache or per-range flushes chosen by total buffer size.
+ */
+int fimc_dqbuf_capture(void *fh, struct v4l2_buffer *b)
+{
+ unsigned long spin_flags;
+ struct fimc_control *ctrl = fh;
+ struct fimc_capinfo *cap = ctrl->cap;
+ struct fimc_buf_set *buf;
+ size_t length = 0;
+ int i, pp, ret = 0;
+
+ struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+ if (!cap || !ctrl->cam) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ if (b->memory != V4L2_MEMORY_MMAP) {
+ fimc_err("%s: invalid memory type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (pdata->hw_ver >= 0x51) {
+ spin_lock_irqsave(&ctrl->outq_lock, spin_flags);
+
+ if (list_empty(&cap->outgoing_q)) {
+ fimc_info2("%s: outgoing_q is empty\n", __func__);
+ spin_unlock_irqrestore(&ctrl->outq_lock, spin_flags);
+ return -EAGAIN;
+ } else {
+ buf = list_first_entry(&cap->outgoing_q, struct fimc_buf_set,
+ list);
+ fimc_info2("%s[%d]: buf->id : %d\n", __func__, ctrl->id,
+ buf->id);
+ b->index = buf->id;
+ buf->state = VIDEOBUF_IDLE;
+
+ list_del(&buf->list);
+ }
+
+ spin_unlock_irqrestore(&ctrl->outq_lock, spin_flags);
+
+ } else {
+ pp = ((fimc_hwget_frame_count(ctrl) + 2) % 4);
+ if (cap->fmt.field == V4L2_FIELD_INTERLACED_TB)
+ pp &= ~0x1;
+ b->index = cap->outq[pp];
+ fimc_info2("%s: buffer(%d) outq[%d]\n", __func__, b->index, pp);
+ ret = fimc_add_outqueue(ctrl, pp);
+ if (ret) {
+ b->index = -1;
+ fimc_err("%s: no inqueue buffer\n", __func__);
+ /* Fix: bail out here - falling through with
+ * b->index == -1 made the cache-flush code below
+ * index cap->bufs[] out of bounds. */
+ return ret;
+ }
+ }
+
+ if (!cap->cacheable)
+ return ret;
+
+ /* sum the plane lengths to pick the cheapest flush strategy */
+ for (i = 0; i < 3; i++) {
+ if (cap->bufs[b->index].base[i])
+ length += cap->bufs[b->index].length[i];
+ else
+ break;
+ }
+
+ if (length > (unsigned long) L2_FLUSH_ALL) {
+ flush_cache_all(); /* L1 */
+ smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
+ outer_flush_all(); /* L2 */
+ } else if (length > (unsigned long) L1_FLUSH_ALL) {
+ flush_cache_all(); /* L1 */
+ smp_call_function((smp_call_func_t)__cpuc_flush_kern_all, NULL, 1);
+
+ for (i = 0; i < 3; i++) {
+ phys_addr_t start = cap->bufs[b->index].base[i];
+ phys_addr_t end = cap->bufs[b->index].base[i] +
+ cap->bufs[b->index].length[i] - 1;
+
+ if (!start)
+ break;
+
+ outer_flush_range(start, end); /* L2 */
+ }
+ } else {
+ for (i = 0; i < 3; i++) {
+ phys_addr_t start = cap->bufs[b->index].base[i];
+ phys_addr_t end = cap->bufs[b->index].base[i] +
+ cap->bufs[b->index].length[i] - 1;
+
+ if (!start)
+ break;
+
+ dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
+ outer_flush_range(start, end); /* L2 */
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * VIDIOC_ENUM_FRAMESIZES: for every supported pixel format the single
+ * size reported is the camera's current resolution.
+ * Returns 0, -ENODEV without a camera, -EINVAL past the end of the
+ * enumeration or for an unknown format.
+ */
+int fimc_enum_framesizes(struct file *filp, void *fh, struct v4l2_frmsizeenum *fsize)
+{
+ struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+ int i;
+ u32 index = 0;
+
+ /* Fix: fsize->discrete is filled from ctrl->cam below; guard
+ * against enumeration before a camera is attached, as the other
+ * capture ioctls do. */
+ if (!ctrl->cam) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(capture_fmts); i++) {
+ if (fsize->pixel_format != capture_fmts[i].pixelformat)
+ continue;
+ if (fsize->index == index) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ /* this is camera sensor's width, height.
+ * originally this should be filled each file format
+ */
+ fsize->discrete.width = ctrl->cam->width;
+ fsize->discrete.height = ctrl->cam->height;
+
+ return 0;
+ }
+ index++;
+ }
+
+ return -EINVAL;
+}
+/* VIDIOC_ENUM_FRAMEINTERVALS: a single fixed interval of 30 fps is
+ * advertised for every format/size (1000/30000 == 1/30 s). */
+int fimc_enum_frameintervals(struct file *filp, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ if (fival->index != 0)
+ return -EINVAL;
+
+ /* temporary only support 30fps */
+ fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ fival->discrete.denominator = 30000;
+ fival->discrete.numerator = 1000;
+
+ return 0;
+}
diff --git a/drivers/media/video/samsung/fimc/fimc_dev.c b/drivers/media/video/samsung/fimc/fimc_dev.c
new file mode 100644
index 0000000..a0a91cd
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc_dev.c
@@ -0,0 +1,2378 @@
+/* linux/drivers/media/video/samsung/fimc/fimc_dev.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Core file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/memory.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-device.h>
+#include <linux/videodev2_exynos_media.h>
+#include <linux/videodev2_exynos_camera.h>
+#include <linux/delay.h>
+#include <linux/cma.h>
+#include <plat/fimc.h>
+#include <plat/clock.h>
+#include <mach/regs-pmu.h>
+
+#include "fimc.h"
+
/* Scratch character buffer at file scope.
 * NOTE(review): never referenced in the code visible here and shadowed by
 * 'buf' parameters (e.g. fimc_read) — candidate for removal or 'static'. */
char buf[32];
/* Driver-wide singleton state; presumably assigned during probe (not
 * visible in this chunk) and read by the mmap VMA callbacks — confirm. */
struct fimc_global *fimc_dev;
+
#ifndef CONFIG_VIDEO_FIMC_MIPI
/*
 * Stub used when MIPI-CSIS support is compiled out.  The real
 * implementation copies the embedded packet data for the given CSIS
 * channel into 'pktdata'; without MIPI there is nothing to fetch, so
 * simply report success.
 *
 * Fix: the original body was empty, so this non-void function fell off
 * the end without returning a value — undefined behavior if the caller
 * ever uses the result.
 */
int s3c_csis_get_pkt(int csis_id, void *pktdata) { return 0; }
#endif
+
/*
 * Workqueue handler that balances runtime-PM references taken in the
 * FIMC interrupt path.  Each serviced IRQ bumps ctrl->irq_cnt (see
 * fimc_irq_out); this work item drains the counter, dropping one
 * pm_runtime reference per pending count.
 */
void s3c_fimc_irq_work(struct work_struct *work)
{
	struct fimc_control *ctrl = container_of(work, struct fimc_control,
			work_struct);
	int ret, irq_cnt;

	irq_cnt = atomic_read(&ctrl->irq_cnt);
	if (irq_cnt > 0) {
		do {
			/* atomic_dec_and_test() returns true once the counter
			 * hits zero, which terminates the drain loop. */
			ret = atomic_dec_and_test((atomic_t *)&ctrl->irq_cnt);
			/* Only drop a PM reference while one is actually held;
			 * usage_count may already be 0 if release raced us. */
			if (atomic_read(&ctrl->dev->power.usage_count) > 0)
				pm_runtime_put_sync(ctrl->dev);
		} while (ret != 1);
	}
}
+
/*
 * Carve plane 'i' of buffer set 'bs' out of the controller's reserved
 * contiguous region using a simple bump allocator (ctrl->mem.curr).
 * A non-zero 'align' requests an aligned base; the alignment padding is
 * recorded in bs->garbage[i] so fimc_dma_free() can rewind correctly.
 *
 * Returns 0 on success, -EINVAL for a zero-length request, -ENOMEM when
 * the region is exhausted (the plane bookkeeping is zeroed in that case).
 */
int fimc_dma_alloc(struct fimc_control *ctrl, struct fimc_buf_set *bs,
					int i, int align)
{
	dma_addr_t end, *curr;

	mutex_lock(&ctrl->lock);

	end = ctrl->mem.base + ctrl->mem.size;
	curr = &ctrl->mem.curr;

	if (!bs->length[i]) {
		mutex_unlock(&ctrl->lock);
		return -EINVAL;
	}

	if (!align) {
		if (*curr + bs->length[i] > end) {
			goto overflow;
		} else {
			bs->base[i] = *curr;
			bs->garbage[i] = 0;
			*curr += bs->length[i];
		}
	} else {
		/* Aligned case: remember the skipped bytes as 'garbage' so the
		 * allocation can be unwound as one contiguous chunk. */
		if (ALIGN(*curr, align) + bs->length[i] > end) {
			goto overflow;
		} else {
			bs->base[i] = ALIGN(*curr, align);
			bs->garbage[i] = ALIGN(*curr, align) - *curr;
			*curr += (bs->length[i] + bs->garbage[i]);
		}
	}

	mutex_unlock(&ctrl->lock);

	return 0;

overflow:
	/* Region exhausted: report an empty plane so callers see length 0. */
	bs->base[i] = 0;
	bs->length[i] = 0;
	bs->garbage[i] = 0;

	mutex_unlock(&ctrl->lock);

	return -ENOMEM;
}
+
+void fimc_dma_free(struct fimc_control *ctrl, struct fimc_buf_set *bs, int i)
+{
+ int total = bs->length[i] + bs->garbage[i];
+ mutex_lock(&ctrl->lock);
+
+ if (bs->base[i]) {
+ if (ctrl->mem.curr - total >= ctrl->mem.base)
+ ctrl->mem.curr -= total;
+
+ bs->base[i] = 0;
+ bs->length[i] = 0;
+ bs->garbage[i] = 0;
+ }
+
+ mutex_unlock(&ctrl->lock);
+}
+
/*
 * Frame-done handler for FIMC_OVLY_NONE_SINGLE_BUF overlay mode, where
 * the DMA destination is a single user-supplied framebuffer (ctx->fbuf).
 * Retires the finished buffer to the outgoing queue and, if another
 * source buffer is queued, reprograms source/destination addresses and
 * restarts the capture interface.
 *
 * Returns nonzero when the caller should wake ctrl->wq.
 * NOTE(review): the default: branch returns -EINVAL from a u32-returning
 * function; the caller only compares against 1, so the error is silently
 * treated as "no wakeup" — confirm this is intended.
 */
static inline u32 fimc_irq_out_single_buf(struct fimc_control *ctrl,
					  struct fimc_ctx *ctx)
{
	int ret = -1, ctx_num, next;
	u32 wakeup = 1;

	/* Stream is being torn down: park the active index and stop. */
	if (ctx->status == FIMC_READY_OFF || ctx->status == FIMC_STREAMOFF) {
		ctrl->out->idxs.active.ctx = -1;
		ctrl->out->idxs.active.idx = -1;
		ctx->status = FIMC_STREAMOFF;
		ctrl->status = FIMC_STREAMOFF;

		return wakeup;
	}
	ctx->status = FIMC_STREAMON_IDLE;

	/* Attach done buffer to outgoing queue. */
	ret = fimc_push_outq(ctrl, ctx, ctrl->out->idxs.active.idx);
	if (ret < 0)
		fimc_err("%s:Failed: fimc_push_outq\n", __func__);

	/* Detach buffer from incomming queue. */
	ret = fimc_pop_inq(ctrl, &ctx_num, &next);
	if (ret == 0) { /* There is a buffer in incomming queue. */
		/* Context switch: reprogram per-context parameters and the
		 * per-format destination plane addresses. */
		if (ctx_num != ctrl->out->last_ctx) {
			struct fimc_buf_set buf_set; /* destination addr */
			u32 format, width, height, y_size, c_size, rot;
			int i, cfg;


			ctx = &ctrl->out->ctx[ctx_num];
			ctrl->out->last_ctx = ctx->ctx_num;
			fimc_outdev_set_ctx_param(ctrl, ctx);

			format = ctx->fbuf.fmt.pixelformat;
			width = ctx->fbuf.fmt.width;
			height = ctx->fbuf.fmt.height;
			y_size = width * height;
			c_size = y_size >> 2;
			rot = ctx->rotate;

			memset(&buf_set, 0x00, sizeof(buf_set));

			/* Derive the chroma plane bases from the Y base
			 * according to the destination pixel format. */
			switch (format) {
			case V4L2_PIX_FMT_RGB32:
			case V4L2_PIX_FMT_RGB565:
			case V4L2_PIX_FMT_YUYV:
				buf_set.base[FIMC_ADDR_Y] =
					(dma_addr_t)ctx->fbuf.base;
				break;
			case V4L2_PIX_FMT_YUV420:
				buf_set.base[FIMC_ADDR_Y] =
					(dma_addr_t)ctx->fbuf.base;
				buf_set.base[FIMC_ADDR_CB] =
					buf_set.base[FIMC_ADDR_Y] + y_size;
				buf_set.base[FIMC_ADDR_CR] =
					buf_set.base[FIMC_ADDR_CB] + c_size;
				break;
			case V4L2_PIX_FMT_NV12:
			case V4L2_PIX_FMT_NV21:
				buf_set.base[FIMC_ADDR_Y] =
					(dma_addr_t)ctx->fbuf.base;
				buf_set.base[FIMC_ADDR_CB] =
					buf_set.base[FIMC_ADDR_Y] + y_size;
				break;
			case V4L2_PIX_FMT_NV12M:
				/* NOTE(review): ALIGN() expects a power-of-two
				 * alignment; PAGE_SIZE - 1 looks wrong (should
				 * probably be PAGE_SIZE) — confirm against the
				 * NV12M handling in the capture path. */
				buf_set.base[FIMC_ADDR_Y] = (dma_addr_t)ctx->fbuf.base;
				buf_set.base[FIMC_ADDR_CB] =
					ALIGN(buf_set.base[FIMC_ADDR_Y] + y_size, PAGE_SIZE - 1);
				break;
			case V4L2_PIX_FMT_NV12T:
				/* Tiled format: plane sizes depend on the
				 * post-rotation orientation. */
				if (rot == 0 || rot == 180)
					fimc_get_nv12t_size(width, height, &y_size, &c_size);
				else
					fimc_get_nv12t_size(height, width, &y_size, &c_size);
				buf_set.base[FIMC_ADDR_Y] =
					(dma_addr_t)ctx->fbuf.base;
				buf_set.base[FIMC_ADDR_CB] =
					buf_set.base[FIMC_ADDR_Y] + y_size;
				break;
			default:
				fimc_err("%s: Invalid pixelformt : %d\n", __func__, format);
				return -EINVAL;
			}
			cfg = fimc_hwget_output_buf_sequence(ctrl);

			/* Program the address into every enabled HW buffer. */
			for (i = 0; i < FIMC_PHYBUFS; i++) {
				if (check_bit(cfg, i))
					fimc_hwset_output_address(ctrl, &buf_set, i);
			}
		}

		fimc_outdev_set_src_addr(ctrl, ctx->src[next].base);
		ret = fimc_output_set_dst_addr(ctrl, ctx, next);
		if (ret < 0)
			fimc_err("%s:Fail: fimc_output_set_dst_addr\n", __func__);

		ctrl->out->idxs.active.ctx = ctx_num;
		ctrl->out->idxs.active.idx = next;

		ctx->status = FIMC_STREAMON;
		ctrl->status = FIMC_STREAMON;

		ret = fimc_outdev_start_camif(ctrl);
		if (ret < 0)
			fimc_err("%s:Fail: fimc_start_camif\n", __func__);
	} else { /* There is no buffer in incomming queue. */
		/* Go idle; with runtime PM the context cache is invalidated
		 * because the power domain may cycle before the next frame. */
		ctrl->out->idxs.active.ctx = -1;
		ctrl->out->idxs.active.idx = -1;
		ctx->status = FIMC_STREAMON_IDLE;
		ctrl->status = FIMC_STREAMON_IDLE;
#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
		ctrl->out->last_ctx = -1;
#endif
	}

	return wakeup;
}
+
/*
 * Frame-done handler for FIMC_OVLY_NONE_MULTI_BUF overlay mode: each
 * queued buffer carries its own destination, so only the per-context
 * parameters need reprogramming on a context switch.  Retires the done
 * buffer, then starts the next queued one if any.
 * Returns nonzero when the caller should wake ctrl->wq.
 */
static inline u32 fimc_irq_out_multi_buf(struct fimc_control *ctrl,
					 struct fimc_ctx *ctx)
{
	int ret = -1, ctx_num, next;
	u32 wakeup = 1;

	/* Tear-down requested for this context only. */
	if (ctx->status == FIMC_READY_OFF) {
		if (ctrl->out->idxs.active.ctx == ctx->ctx_num) {
			ctrl->out->idxs.active.ctx = -1;
			ctrl->out->idxs.active.idx = -1;
		}

		ctx->status = FIMC_STREAMOFF;

		return wakeup;
	}
	ctx->status = FIMC_STREAMON_IDLE;

	/* Attach done buffer to outgoing queue. */
	ret = fimc_push_outq(ctrl, ctx, ctrl->out->idxs.active.idx);
	if (ret < 0)
		fimc_err("%s:Failed: fimc_push_outq\n", __func__);

	/* Detach buffer from incomming queue. */
	ret = fimc_pop_inq(ctrl, &ctx_num, &next);
	if (ret == 0) { /* There is a buffer in incomming queue. */
		if (ctx_num != ctrl->out->last_ctx) {
			ctx = &ctrl->out->ctx[ctx_num];
			ctrl->out->last_ctx = ctx->ctx_num;
			fimc_outdev_set_ctx_param(ctrl, ctx);
		}

		fimc_outdev_set_src_addr(ctrl, ctx->src[next].base);
		ret = fimc_output_set_dst_addr(ctrl, ctx, next);
		if (ret < 0)
			fimc_err("%s:Fail: fimc_output_set_dst_addr\n", __func__);

		ctrl->out->idxs.active.ctx = ctx_num;
		ctrl->out->idxs.active.idx = next;
		ctx->status = FIMC_STREAMON;
		ctrl->status = FIMC_STREAMON;

		ret = fimc_outdev_start_camif(ctrl);
		if (ret < 0)
			fimc_err("%s:Fail: fimc_start_camif\n", __func__);

	} else { /* There is no buffer in incomming queue. */
		/* Idle; invalidate the cached context under runtime PM since
		 * the power domain may cycle before the next frame. */
		ctrl->out->idxs.active.ctx = -1;
		ctrl->out->idxs.active.idx = -1;
		ctx->status = FIMC_STREAMON_IDLE;
		ctrl->status = FIMC_STREAMON_IDLE;
#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
		ctrl->out->last_ctx = -1;
#endif
	}

	return wakeup;
}
+
/*
 * Frame-done handler for FIMC_OVLY_DMA_AUTO / FIMC_OVLY_DMA_MANUAL
 * overlay modes.  Retires the done buffer; in DMA_AUTO mode it also
 * flips the framebuffer window to the completed destination and enables
 * the window on first use.  Then starts the next queued buffer if any.
 * Returns nonzero when the caller should wake ctrl->wq.
 * NOTE(review): the error paths return -EINVAL from a u32-returning
 * function; the caller only compares against 1 — confirm intended.
 */
static inline u32 fimc_irq_out_dma(struct fimc_control *ctrl,
				  struct fimc_ctx *ctx)
{
	struct fimc_buf_set buf_set;
	int idx = ctrl->out->idxs.active.idx;
	int ret = -1, i, ctx_num, next;
	int cfg;
	u32 wakeup = 1;

	/* Stream tear-down: park the active index and stop. */
	if (ctx->status == FIMC_READY_OFF
			|| ctx->status == FIMC_STREAMOFF) {
		ctrl->out->idxs.active.ctx = -1;
		ctrl->out->idxs.active.idx = -1;
		ctx->status = FIMC_STREAMOFF;
		ctrl->status = FIMC_STREAMOFF;
		return wakeup;
	}

	/* Attach done buffer to outgoing queue. */
	ret = fimc_push_outq(ctrl, ctx, idx);
	if (ret < 0)
		fimc_err("Failed: fimc_push_outq\n");

	/* DMA_AUTO: show the just-completed frame on the display window. */
	if (ctx->overlay.mode == FIMC_OVLY_DMA_AUTO) {
		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_ADDR,
			(unsigned long)ctx->dst[idx].base[FIMC_ADDR_Y]);

		if (ret < 0) {
			fimc_err("direct_ioctl(S3CFB_SET_WIN_ADDR) fail\n");
			return -EINVAL;
		}

		if (ctrl->fb.is_enable == 0) {
			ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_ON,
							(unsigned long)NULL);
			if (ret < 0) {
				fimc_err("direct_ioctl(S3CFB_SET_WIN_ON)"\
						" fail\n");
				return -EINVAL;
			}

			ctrl->fb.is_enable = 1;
		}
	}

	/* Detach buffer from incomming queue. */
	ret = fimc_pop_inq(ctrl, &ctx_num, &next);
	if (ret == 0) { /* There is a buffer in incomming queue. */
		ctx = &ctrl->out->ctx[ctx_num];
		fimc_outdev_set_src_addr(ctrl, ctx->src[next].base);

		memset(&buf_set, 0x00, sizeof(buf_set));
		buf_set.base[FIMC_ADDR_Y] = ctx->dst[next].base[FIMC_ADDR_Y];

		cfg = fimc_hwget_output_buf_sequence(ctrl);

		/* Program the next destination into every enabled HW buffer. */
		for (i = 0; i < FIMC_PHYBUFS; i++) {
			if (check_bit(cfg, i))
				fimc_hwset_output_address(ctrl, &buf_set, i);
		}

		ctrl->out->idxs.active.ctx = ctx_num;
		ctrl->out->idxs.active.idx = next;

		ctx->status = FIMC_STREAMON;
		ctrl->status = FIMC_STREAMON;

		ret = fimc_outdev_start_camif(ctrl);
		if (ret < 0)
			fimc_err("Fail: fimc_start_camif\n");

	} else { /* There is no buffer in incomming queue. */
		ctrl->out->idxs.active.ctx = -1;
		ctrl->out->idxs.active.idx = -1;

		ctx->status = FIMC_STREAMON_IDLE;
		ctrl->status = FIMC_STREAMON_IDLE;
#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
		ctrl->out->last_ctx = -1;
#endif
	}

	return wakeup;
}
+
/*
 * Frame-done handler for FIMC_OVLY_FIFO mode, where frames go directly
 * to the display controller (FIMD) through a FIFO.  Buffers advance
 * through a prev/active/next pipeline: 'prev' is retired to the outgoing
 * queue, 'next' is promoted to 'active', and a newly popped buffer
 * becomes 'next' with its source address pre-programmed.
 * Returns 1 only when a buffer was retired (to wake fimc_v4l2_dqbuf).
 */
static inline u32 fimc_irq_out_fimd(struct fimc_control *ctrl,
				   struct fimc_ctx *ctx)
{
	struct fimc_idx prev;
	int ret = -1, ctx_num, next;
	u32 wakeup = 0;

	/* Attach done buffer to outgoing queue. */
	if (ctrl->out->idxs.prev.idx != -1) {
		ret = fimc_push_outq(ctrl, ctx, ctrl->out->idxs.prev.idx);
		if (ret < 0) {
			fimc_err("Failed: fimc_push_outq\n");
		} else {
			ctrl->out->idxs.prev.ctx = -1;
			ctrl->out->idxs.prev.idx = -1;
			wakeup = 1; /* To wake up fimc_v4l2_dqbuf */
		}
	}

	/* Update index structure. */
	if (ctrl->out->idxs.next.idx != -1) {
		ctrl->out->idxs.active.ctx = ctrl->out->idxs.next.ctx;
		ctrl->out->idxs.active.idx = ctrl->out->idxs.next.idx;
		ctrl->out->idxs.next.idx = -1;
		ctrl->out->idxs.next.ctx = -1;
	}

	/* Detach buffer from incomming queue. */
	ret = fimc_pop_inq(ctrl, &ctx_num, &next);
	if (ret == 0) { /* There is a buffer in incomming queue. */
		/* Current active becomes prev; the popped buffer is staged
		 * as next and its source address programmed ahead of time. */
		prev.ctx = ctrl->out->idxs.active.ctx;
		prev.idx = ctrl->out->idxs.active.idx;

		ctrl->out->idxs.prev.ctx = prev.ctx;
		ctrl->out->idxs.prev.idx = prev.idx;

		ctrl->out->idxs.next.ctx = ctx_num;
		ctrl->out->idxs.next.idx = next;

		/* set source address */
		fimc_outdev_set_src_addr(ctrl, ctx->src[next].base);
	}

	return wakeup;
}
+
/*
 * Output-path interrupt dispatcher: clears the pending IRQ, validates
 * the active context, and calls the per-overlay-mode frame-done handler.
 * Wakes ctrl->wq when the handler reports a retired buffer.  Under
 * runtime PM it also defers a pm_runtime_put to the workqueue (the IRQ
 * context cannot sleep in pm_runtime_put_sync).
 */
static inline void fimc_irq_out(struct fimc_control *ctrl)
{
	struct fimc_ctx *ctx;
	u32 wakeup = 1;
	int ctx_num = ctrl->out->idxs.active.ctx;

	/* Interrupt pendding clear */
	fimc_hwset_clear_irq(ctrl);

	/* check context num */
	if (ctx_num < 0 || ctx_num >= FIMC_MAX_CTXS) {
		fimc_err("fimc_irq_out: invalid ctx (ctx=%d)\n", ctx_num);
		wake_up(&ctrl->wq);
		return;
	}

	ctx = &ctrl->out->ctx[ctx_num];

	switch (ctx->overlay.mode) {
	case FIMC_OVLY_NONE_SINGLE_BUF:
		wakeup = fimc_irq_out_single_buf(ctrl, ctx);
		break;
	case FIMC_OVLY_NONE_MULTI_BUF:
		wakeup = fimc_irq_out_multi_buf(ctrl, ctx);
		break;
	case FIMC_OVLY_DMA_AUTO: /* fall through */
	case FIMC_OVLY_DMA_MANUAL:
		wakeup = fimc_irq_out_dma(ctrl, ctx);
		break;
	case FIMC_OVLY_FIFO:
		if (ctx->status != FIMC_READY_OFF)
			wakeup = fimc_irq_out_fimd(ctrl, ctx);
		break;
	default:
		fimc_err("[ctx=%d] fimc_irq_out: wrong overlay.mode (%d)\n",
				ctx_num, ctx->overlay.mode);
		break;
	}

#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
	/* PM put must happen in process context; hand it to the work item. */
	atomic_inc((atomic_t *)&ctrl->irq_cnt);
	queue_work(ctrl->fimc_irq_wq, &ctrl->work_struct);
#endif

	if (wakeup == 1)
		wake_up(&ctrl->wq);
}
+
/*
 * Count the set bits in a frame-count sequence mask, i.e. how many
 * hardware output buffers are currently enabled.
 */
int fimc_hwget_number_of_bits(u32 framecnt_seq)
{
	int nbits = 0;

	for (; framecnt_seq != 0; framecnt_seq >>= 1)
		nbits += (int)(framecnt_seq & 1u);

	return nbits;
}
+
/*
 * Append capture buffer 'i' to the outgoing (done) queue unless it is
 * already there.  Serialized by ctrl->outq_lock against the dequeue
 * path.  Always returns 0 (duplicate insertion is treated as success).
 */
static int fimc_add_outgoing_queue(struct fimc_control *ctrl, int i)
{
	struct fimc_capinfo *cap = ctrl->cap;
	struct fimc_buf_set *tmp_buf;
	struct list_head *count;

	spin_lock(&ctrl->outq_lock);

	/* Duplicate check: the same index must not be queued twice. */
	list_for_each(count, &cap->outgoing_q) {
		tmp_buf = list_entry(count, struct fimc_buf_set, list);
		if (tmp_buf->id == i) {
			fimc_info1("%s: Exist id in outqueue\n", __func__);

			spin_unlock(&ctrl->outq_lock);
			return 0;
		}
	}
	list_add_tail(&cap->bufs[i].list, &cap->outgoing_q);
	spin_unlock(&ctrl->outq_lock);

	return 0;
}
+
/*
 * Capture-path frame-done interrupt handler.
 * Acks the IRQ, handles FIFO overflow by flagging a restart, then (on
 * HW >= 0x51) determines which buffer just completed from the frame
 * counters, validates/re-syncs the FIMC-IS (ISP) frame number, retires
 * the buffer to the outgoing queue, and auto-stops capture when only one
 * free hardware buffer remains.  Finally wakes any waiter, pairing
 * top/bottom fields for interlaced capture.
 */
static inline void fimc_irq_cap(struct fimc_control *ctrl)
{
	struct fimc_capinfo *cap = ctrl->cap;
	int pp;
	int buf_index;
	int framecnt_seq;
	int available_bufnum;
	/* NOTE(review): function-local static — shared by every FIMC
	 * instance entering this handler; appears to assume only one
	 * instance captures at a time. Confirm. */
	static int is_frame_end_irq;
	struct v4l2_control is_ctrl;
	u32 is_fn;

	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
	is_ctrl.id = 0;
	is_ctrl.value = 0;
#ifdef DEBUG
	static struct timeval curr_time, before_time;
	if (!fimc_cam_use) {
		do_gettimeofday(&curr_time);
		printk(KERN_INFO "%s : time : %ld\n", __func__,
				curr_time.tv_usec - before_time.tv_usec);
		before_time.tv_usec = curr_time.tv_usec;
	}
#endif
	fimc_hwset_clear_irq(ctrl);
	/* On FIFO overflow just request a restart and bail out. */
	if (fimc_hwget_overflow_state(ctrl)) {
		ctrl->restart = true;
		return;
	}

	if (pdata->hw_ver >= 0x51) {
		/* After a buffer-stop the 'present' counter identifies the
		 * completed frame; otherwise use the 'before' counter. */
		if (is_frame_end_irq || ctrl->status == FIMC_BUFFER_STOP) {
			pp = fimc_hwget_present_frame_count(ctrl);
			is_frame_end_irq = 0;
		} else {
			pp = fimc_hwget_before_frame_count(ctrl);
		}

		/* Trace the first 20 frames for bring-up debugging. */
		if (cap->cnt < 20) {
			printk(KERN_INFO "%s[%d], fimc%d, cnt[%d]\n", __func__,
					pp, ctrl->id, cap->cnt);
			cap->cnt++;
		}

		fimc_info2("%s[%d]\n", __func__, pp);
		/* Frame count 0 means no completed frame is available. */
		if (pp == 0 || ctrl->restart) {
			printk(KERN_INFO "%s[%d] SKIPPED\n", __func__, pp);
			if (ctrl->cap->nr_bufs == 1) {
				fimc_stop_capture(ctrl);
				is_frame_end_irq = 1;
				ctrl->status = FIMC_BUFFER_STOP;
			}
			ctrl->restart = false;
			return;
		}

		/* Frame counters are 1-based; buffer indices 0-based. */
		buf_index = pp - 1;
		/* FIMC-IS (ISP) bookkeeping: mark the frame valid/invalid and
		 * resynchronize the local frame count when frames were lost. */
		if (ctrl->cam->use_isp && fimc_cam_use) {
			is_ctrl.id = V4L2_CID_IS_GET_FRAME_NUMBER;
			is_ctrl.value = 0;
			v4l2_subdev_call(ctrl->is.sd, core, g_ctrl, &is_ctrl);
			is_fn = is_ctrl.value;
			if (ctrl->is.frame_count == is_fn) {
				is_ctrl.id = V4L2_CID_IS_GET_FRAME_VALID;
				is_ctrl.value = 0;
				v4l2_subdev_call(ctrl->is.sd, core, g_ctrl,
					&is_ctrl);
				if (is_ctrl.value) {
					is_ctrl.id =
						V4L2_CID_IS_SET_FRAME_VALID;
					is_ctrl.value = 0;
					v4l2_subdev_call(ctrl->is.sd,
						core, s_ctrl, &is_ctrl);
				} else {
					fimc_info2(
					"Invalid frame - fn %d\n", is_fn);
					is_ctrl.id =
						V4L2_CID_IS_SET_FRAME_VALID;
					is_ctrl.value = 0;
					v4l2_subdev_call(ctrl->is.sd,
						core, s_ctrl, &is_ctrl);
				}
				ctrl->is.frame_count++;
			} else {
				/* Frame lost case */
				is_ctrl.id =
					V4L2_CID_IS_GET_LOSTED_FRAME_NUMBER;
				is_ctrl.value = 0;
				v4l2_subdev_call(ctrl->is.sd,
					core, g_ctrl, &is_ctrl);
				fimc_info2("%d Frame lost - %d,%d",
					(is_ctrl.value-ctrl->is.frame_count),
					ctrl->is.frame_count, is_ctrl.value);
				ctrl->is.frame_count = is_ctrl.value;
				is_ctrl.id = V4L2_CID_IS_CLEAR_FRAME_NUMBER;
				is_ctrl.value = ctrl->is.frame_count;
				v4l2_subdev_call(ctrl->is.sd,
					core, s_ctrl, &is_ctrl);
			}
		}

		/* Optionally capture the MIPI embedded packet data alongside
		 * the frame, per CSIS channel. */
		if (cap->pktdata_enable) {
			if (ctrl->cam->id == CAMERA_CSI_C)
				s3c_csis_get_pkt(CSI_CH_0 , cap->bufs[buf_index].vaddr_pktdata);
			else if (ctrl->cam->id == CAMERA_CSI_D)
				s3c_csis_get_pkt(CSI_CH_1 , cap->bufs[buf_index].vaddr_pktdata);
		}

		/* Retire the buffer and take it out of the HW rotation. */
		fimc_add_outgoing_queue(ctrl, buf_index);
		fimc_hwset_output_buf_sequence(ctrl, buf_index,
				FIMC_FRAMECNT_SEQ_DISABLE);

		framecnt_seq = fimc_hwget_output_buf_sequence(ctrl);
		available_bufnum = fimc_hwget_number_of_bits(framecnt_seq);
		fimc_info2("%s[%d] : framecnt_seq: %d, available_bufnum: %d\n",
				__func__, ctrl->id, framecnt_seq, available_bufnum);
		/* With only one free HW buffer left, stop capture before the
		 * hardware can overwrite an un-dequeued frame. */
		if (ctrl->status != FIMC_BUFFER_STOP) {
			if (available_bufnum == 1) {
				ctrl->cap->lastirq = 0;
				fimc_stop_capture(ctrl);
				is_frame_end_irq = 1;

				printk(KERN_INFO "fimc_irq_cap available_bufnum = %d\n", available_bufnum);
				ctrl->status = FIMC_BUFFER_STOP;
			}
		} else {
			fimc_info1("%s : Aleady fimc stop\n", __func__);
		}
	} else
		/* Older IPs: derive the completed ping-pong slot directly. */
		pp = ((fimc_hwget_frame_count(ctrl) + 2) % 4);

	if (cap->fmt.field == V4L2_FIELD_INTERLACED_TB) {
		/* odd value of pp means one frame is made with top/bottom */
		if (pp & 0x1) {
			cap->irq = 1;
			wake_up(&ctrl->wq);
		}
	} else {
		cap->irq = 1;
		wake_up(&ctrl->wq);
	}
}
+
+static irqreturn_t fimc_irq(int irq, void *dev_id)
+{
+ struct fimc_control *ctrl = (struct fimc_control *) dev_id;
+ struct s3c_platform_fimc *pdata;
+
+ if (ctrl->cap)
+ fimc_irq_cap(ctrl);
+ else if (ctrl->out)
+ fimc_irq_out(ctrl);
+ else {
+ printk(KERN_ERR "%s this message must not be shown!!!"
+ " fimc%d\n", __func__, (int)dev_id);
+ pdata = to_fimc_plat(ctrl->dev);
+ pdata->clk_on(to_platform_device(ctrl->dev),
+ &ctrl->clk);
+ fimc_hwset_clear_irq(ctrl);
+ pdata->clk_off(to_platform_device(ctrl->dev),
+ &ctrl->clk);
+ }
+
+ return IRQ_HANDLED;
+}
+
/*
 * Probe-time setup for one FIMC instance: initializes the per-device
 * fimc_control, reserves CMA memory for DMA buffers (except where ION
 * owns FIMC2's reservation), selects the per-HW-version limit table,
 * maps the register block and configures the core clock.
 * Returns the initialized control structure, or NULL on failure.
 *
 * NOTE(review): on the later failure paths the requested mem region
 * (request_mem_region) and any CMA allocation are not released — only
 * ioremap/clk_get are unwound. Confirm whether probe failure here is
 * considered fatal enough to ignore that.
 */
static struct fimc_control *fimc_register_controller(struct platform_device *pdev)
{
	struct s3c_platform_fimc *pdata;
	struct fimc_control *ctrl;
	struct resource *res;
	int id, err;
	struct cma_info mem_info;
	struct clk *sclk_fimc_lclk = NULL;
	struct clk *fimc_src_clk = NULL;

	id = pdev->id;
	pdata = to_fimc_plat(&pdev->dev);

	ctrl = get_fimc_ctrl(id);
	ctrl->id = id;
	ctrl->dev = &pdev->dev;
	ctrl->vd = &fimc_video_device[id];
	ctrl->vd->minor = id;
	ctrl->log = FIMC_LOG_DEFAULT;
	ctrl->power_status = FIMC_POWER_OFF;

#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
	/* DMA-mem configs allocate on demand; no static reservation. */
	sprintf(ctrl->cma_name, "%s", FIMC_CMA_NAME);
	ctrl->mem.size = 0;
	ctrl->mem.base = 0;
#else
	/* CMA */
#ifdef CONFIG_ION_EXYNOS
	/* In Midas project, FIMC2 reserve memory is used by ION driver. */
	if (id != 2) {
#endif
		sprintf(ctrl->cma_name, "%s%d", FIMC_CMA_NAME, ctrl->id);
		err = cma_info(&mem_info, ctrl->dev, 0);
		fimc_info1("%s : [cma_info] start_addr : 0x%x, end_addr : 0x%x, "
				"total_size : 0x%x, free_size : 0x%x\n",
				__func__, mem_info.lower_bound, mem_info.upper_bound,
				mem_info.total_size, mem_info.free_size);
		if (err) {
			/* No CMA region: run without a private reservation. */
			fimc_err("%s: get cma info failed\n", __func__);
			ctrl->mem.size = 0;
			ctrl->mem.base = 0;
		} else {
			ctrl->mem.size = mem_info.total_size;
			ctrl->mem.base = (dma_addr_t)cma_alloc
				(ctrl->dev, ctrl->cma_name, (size_t)ctrl->mem.size, 0);
		}
#ifdef CONFIG_ION_EXYNOS
	}
#endif
	printk(KERN_DEBUG "ctrl->mem.size = 0x%x\n", ctrl->mem.size);
	printk(KERN_DEBUG "ctrl->mem.base = 0x%x\n", ctrl->mem.base);
	/* Bump-allocator cursor starts at the region base. */
	ctrl->mem.curr = ctrl->mem.base;
#endif
	ctrl->status = FIMC_STREAMOFF;

	/* Scaler/DMA limits differ per IP revision. */
	switch (pdata->hw_ver) {
	case 0x40:
		ctrl->limit = &fimc40_limits[id];
		break;
	case 0x43:
	case 0x45:
		ctrl->limit = &fimc43_limits[id];
		break;
	case 0x50:
		ctrl->limit = &fimc50_limits[id];
		break;
	case 0x51:
		ctrl->limit = &fimc51_limits[id];
		break;
	default:
		ctrl->limit = &fimc51_limits[id];
		fimc_err("%s: failed to get HW version\n", __func__);
		break;
	}

	sprintf(ctrl->name, "%s%d", FIMC_NAME, id);
	strcpy(ctrl->vd->name, ctrl->name);

	atomic_set(&ctrl->in_use, 0);
	mutex_init(&ctrl->lock);
	mutex_init(&ctrl->v4l2_lock);
	spin_lock_init(&ctrl->outq_lock);
	init_waitqueue_head(&ctrl->wq);

	/* get resource for io memory */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		fimc_err("%s: failed to get io memory region\n", __func__);
		return NULL;
	}

	/* request mem region */
	res = request_mem_region(res->start, res->end - res->start + 1,
			pdev->name);
	if (!res) {
		fimc_err("%s: failed to request io memory region\n", __func__);
		return NULL;
	}

	/* ioremap for register block */
	ctrl->regs = ioremap(res->start, res->end - res->start + 1);
	if (!ctrl->regs) {
		fimc_err("%s: failed to remap io region\n", __func__);
		return NULL;
	}

	/* Parent the FIMC local clock to MPLL (directly on 4210, via the
	 * user mux elsewhere) and set the core rate. */
	if (soc_is_exynos4210())
		fimc_src_clk = clk_get(&pdev->dev, "mout_mpll");
	else
		fimc_src_clk = clk_get(&pdev->dev, "mout_mpll_user");

	if (IS_ERR(fimc_src_clk)) {
		dev_err(&pdev->dev, "failed to get parent clock\n");
		iounmap(ctrl->regs);
		return NULL;
	}

	sclk_fimc_lclk = clk_get(&pdev->dev, FIMC_CORE_CLK);
	if (IS_ERR(sclk_fimc_lclk)) {
		dev_err(&pdev->dev, "failed to get sclk_fimc_lclk\n");
		iounmap(ctrl->regs);
		clk_put(fimc_src_clk);
		return NULL;
	}

	if (clk_set_parent(sclk_fimc_lclk, fimc_src_clk)) {
		dev_err(&pdev->dev, "unable to set parent %s of clock %s.\n",
				fimc_src_clk->name, sclk_fimc_lclk->name);
		iounmap(ctrl->regs);
		clk_put(sclk_fimc_lclk);
		clk_put(fimc_src_clk);
		return NULL;
	}
	clk_set_rate(sclk_fimc_lclk, FIMC_CLK_RATE);
	clk_put(sclk_fimc_lclk);
	clk_put(fimc_src_clk);

#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
	/* Without runtime PM the block is powered; reset it now. */
	fimc_hwset_reset(ctrl);
#endif

	return ctrl;
}
+
/*
 * Remove-time teardown for one FIMC instance: releases the IRQ, mutexes,
 * clocks and register mapping, then clears the whole control structure
 * (which also resets all cached state for a later re-probe).
 */
static int fimc_unregister_controller(struct platform_device *pdev)
{
	struct s3c_platform_fimc *pdata;
	struct fimc_control *ctrl;
	int id = pdev->id;

	pdata = to_fimc_plat(&pdev->dev);
	ctrl = get_fimc_ctrl(id);

	if (ctrl->irq)
		free_irq(ctrl->irq, ctrl);
	mutex_destroy(&ctrl->lock);
	mutex_destroy(&ctrl->v4l2_lock);

	if (pdata->clk_off)
		pdata->clk_off(pdev, &ctrl->clk);

	iounmap(ctrl->regs);
	/* Wipe everything, including pointers set in register_controller. */
	memset(ctrl, 0, sizeof(*ctrl));

	return 0;
}
+
+static void fimc_mmap_open(struct vm_area_struct *vma)
+{
+ struct fimc_global *dev = fimc_dev;
+ int pri_data = (int)vma->vm_private_data;
+ u32 id = pri_data / 0x100;
+ u32 ctx = (pri_data - (id * 0x100)) / 0x10;
+ u32 idx = pri_data % 0x10;
+
+ BUG_ON(id >= FIMC_DEVICES);
+ BUG_ON(ctx >= FIMC_MAX_CTXS);
+ BUG_ON(idx >= FIMC_OUTBUFS);
+
+ atomic_inc(&dev->ctrl[id].out->ctx[ctx].src[idx].mapped_cnt);
+}
+
+static void fimc_mmap_close(struct vm_area_struct *vma)
+{
+ struct fimc_global *dev = fimc_dev;
+ int pri_data = (int)vma->vm_private_data;
+ u32 id = pri_data / 0x100;
+ u32 ctx = (pri_data - (id * 0x100)) / 0x10;
+ u32 idx = pri_data % 0x10;
+
+ BUG_ON(id >= FIMC_DEVICES);
+ BUG_ON(ctx >= FIMC_MAX_CTXS);
+ BUG_ON(idx >= FIMC_OUTBUFS);
+
+ atomic_dec(&dev->ctrl[id].out->ctx[ctx].src[idx].mapped_cnt);
+}
+
/* VMA callbacks used to track how many live user mappings reference each
 * output source buffer (mapped_cnt), so the driver knows when a buffer
 * can be safely recycled. */
static struct vm_operations_struct fimc_mmap_ops = {
	.open = fimc_mmap_open,
	.close = fimc_mmap_close,
};
+
/*
 * mmap an output-path *source* buffer into user space.  The requested
 * buffer index is passed via vm_pgoff; the mapping must be shared if
 * writable, is write-combined, and its (id, ctx, idx) identity is packed
 * into vm_private_data for the fimc_mmap_ops refcounting callbacks.
 */
static inline
int fimc_mmap_out_src(struct file *filp, struct vm_area_struct *vma)
{
	struct fimc_prv_data *prv_data =
				(struct fimc_prv_data *)filp->private_data;
	struct fimc_control *ctrl = prv_data->ctrl;
	int ctx_id = prv_data->ctx_id;
	struct fimc_ctx *ctx = &ctrl->out->ctx[ctx_id];
	u32 start_phy_addr = 0;
	u32 size = vma->vm_end - vma->vm_start;
	u32 pfn, idx = vma->vm_pgoff;
	u32 buf_length = 0;
	int pri_data = 0;

	/* Sum of the three plane lengths bounds the mappable size.
	 * (buf_length is already page-aligned; the second PAGE_ALIGN in the
	 * comparison below is redundant but harmless.) */
	buf_length = PAGE_ALIGN(ctx->src[idx].length[FIMC_ADDR_Y] +
			ctx->src[idx].length[FIMC_ADDR_CB] +
			ctx->src[idx].length[FIMC_ADDR_CR]);
	if (size > PAGE_ALIGN(buf_length)) {
		fimc_err("Requested mmap size is too big\n");
		return -EINVAL;
	}

	/* Pack (device, context, buffer) for the VMA open/close callbacks. */
	pri_data = (ctrl->id * 0x100) + (ctx_id * 0x10) + idx;
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &fimc_mmap_ops;
	vma->vm_private_data = (void *)pri_data;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		fimc_err("writable mapping must be shared\n");
		return -EINVAL;
	}

	start_phy_addr = ctx->src[idx].base[FIMC_ADDR_Y];
	pfn = __phys_to_pfn(start_phy_addr);

	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
		fimc_err("mmap fail\n");
		return -EINVAL;
	}

	/* remap_pfn_range does not call ->open; take the first reference. */
	vma->vm_ops->open(vma);

	ctx->src[idx].flags |= V4L2_BUF_FLAG_MAPPED;

	return 0;
}
+
/*
 * mmap an output-path *destination* buffer (index 'idx') into user
 * space as a write-combined mapping.  Falls back to the current bump
 * allocator position when the buffer has no base yet (i.e. the buffer
 * will be carved out at that address).
 * Returns 0 on success or the remap_pfn_range error.
 */
static inline
int fimc_mmap_out_dst(struct file *filp, struct vm_area_struct *vma, u32 idx)
{
	struct fimc_prv_data *prv_data =
				(struct fimc_prv_data *)filp->private_data;
	struct fimc_control *ctrl = prv_data->ctrl;
	int ctx_id = prv_data->ctx_id;
	unsigned long pfn = 0, size;
	int ret = 0;

	size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_flags |= VM_RESERVED;

	if (ctrl->out->ctx[ctx_id].dst[idx].base[0])
		pfn = __phys_to_pfn(ctrl->out->ctx[ctx_id].dst[idx].base[0]);
	else
		pfn = __phys_to_pfn(ctrl->mem.curr);

	ret = remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
	if (ret != 0)
		fimc_err("remap_pfn_range fail.\n");

	return ret;
}
+
+static inline int fimc_mmap_out(struct file *filp, struct vm_area_struct *vma)
+{
+ struct fimc_prv_data *prv_data =
+ (struct fimc_prv_data *)filp->private_data;
+ struct fimc_control *ctrl = prv_data->ctrl;
+ int ctx_id = prv_data->ctx_id;
+ int idx = ctrl->out->ctx[ctx_id].overlay.req_idx;
+ int ret = -1;
+
+ if (idx >= 0)
+ ret = fimc_mmap_out_dst(filp, vma, idx);
+ else if (idx == FIMC_MMAP_IDX)
+ ret = fimc_mmap_out_src(filp, vma);
+
+ return ret;
+}
+
/*
 * mmap a capture buffer into user space.  The buffer index comes from
 * vm_pgoff; the mapping is write-combined unless the capture path was
 * configured cacheable, and a writable mapping must be MAP_SHARED.
 * NOTE(review): idx is not range-checked against cap->nr_bufs before
 * indexing cap->bufs[] — confirm user space cannot pass an out-of-range
 * pgoff here.
 */
static inline int fimc_mmap_cap(struct file *filp, struct vm_area_struct *vma)
{
	struct fimc_prv_data *prv_data =
				(struct fimc_prv_data *)filp->private_data;
	struct fimc_control *ctrl = prv_data->ctrl;
	u32 size = vma->vm_end - vma->vm_start;
	u32 pfn, idx = vma->vm_pgoff;

	if (!ctrl->cap->cacheable)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	vma->vm_flags |= VM_RESERVED;

	/*
	 * page frame number of the address for a source frame
	 * to be stored at.
	 */
	pfn = __phys_to_pfn(ctrl->cap->bufs[idx].base[0]);

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		fimc_err("%s: writable mapping must be shared\n", __func__);
		return -EINVAL;
	}

	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
		fimc_err("%s: mmap fail\n", __func__);
		return -EINVAL;
	}

	return 0;
}
+
+static int fimc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct fimc_prv_data *prv_data =
+ (struct fimc_prv_data *)filp->private_data;
+ struct fimc_control *ctrl = prv_data->ctrl;
+ int ret;
+
+ if (ctrl->cap)
+ ret = fimc_mmap_cap(filp, vma);
+ else
+ ret = fimc_mmap_out(filp, vma);
+
+ return ret;
+}
+
/*
 * poll() handler.  Only the capture path supports polling: readable when
 * a finished frame sits in the outgoing queue, otherwise the caller is
 * parked on ctrl->wq.  Output-mode files always report 0 (not pollable).
 */
static u32 fimc_poll(struct file *filp, poll_table *wait)
{
	struct fimc_prv_data *prv_data =
				(struct fimc_prv_data *)filp->private_data;
	struct fimc_control *ctrl = prv_data->ctrl;
	struct fimc_capinfo *cap = ctrl->cap;
	u32 mask = 0;

	if (!cap)
		return 0;

	if (!list_empty(&cap->outgoing_q))
		mask = POLLIN | POLLRDNORM;
	else
		poll_wait(filp, &ctrl->wq, wait);

	return mask;
}
+
/* read() stub: all data transfer goes through mmap'd buffers and V4L2
 * ioctls, so reads always report 0 bytes.
 * NOTE(review): the 'buf' parameter shadows the file-scope 'buf[32]'. */
static
ssize_t fimc_read(struct file *filp, char *buf, size_t count, loff_t *pos)
{
	return 0;
}
+
/* write() stub: all data transfer goes through mmap'd buffers and V4L2
 * ioctls, so writes always report 0 bytes accepted. */
static
ssize_t fimc_write(struct file *filp, const char *b, size_t c, loff_t *offset)
{
	return 0;
}
+
/*
 * Fold a rotation angle (0/90/180/270) and a flip request into the
 * single hardware rot/flip bit-field.  The hardware only rotates by 90
 * degrees, so 180 and 270 are expressed as X+Y mirroring (plus FIMC_ROT
 * for 270); a user-requested flip then *toggles* the corresponding
 * mirror bit rather than setting it.
 * Unsupported angles yield 0 (no rotation, no flip).
 */
u32 fimc_mapping_rot_flip(u32 rot, u32 flip)
{
	u32 ret = 0;

	switch (rot) {
	case 0:
		if (flip & FIMC_XFLIP)
			ret |= FIMC_XFLIP;

		if (flip & FIMC_YFLIP)
			ret |= FIMC_YFLIP;
		break;

	case 90:
		ret = FIMC_ROT;
		if (flip & FIMC_XFLIP)
			ret |= FIMC_XFLIP;

		if (flip & FIMC_YFLIP)
			ret |= FIMC_YFLIP;
		break;

	case 180:
		/* 180 = X+Y mirror; an additional user flip cancels out. */
		ret = (FIMC_XFLIP | FIMC_YFLIP);
		if (flip & FIMC_XFLIP)
			ret &= ~FIMC_XFLIP;

		if (flip & FIMC_YFLIP)
			ret &= ~FIMC_YFLIP;
		break;

	case 270:
		/* 270 = 90 rotation + X+Y mirror; user flips cancel out. */
		ret = (FIMC_XFLIP | FIMC_YFLIP | FIMC_ROT);
		if (flip & FIMC_XFLIP)
			ret &= ~FIMC_XFLIP;

		if (flip & FIMC_YFLIP)
			ret &= ~FIMC_YFLIP;
		break;
	}

	return ret;
}
+
/*
 * Derive the FIMC pre-scaler ratio for a downscale from 'src' to 'tar'
 * pixels.  The hardware pre-scaler supports power-of-two ratios up to
 * 1/32; *ratio receives the chosen ratio and *shift its log2.
 * Returns -EINVAL when the request would need more than a 1/64 shrink.
 */
int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
{
	u32 sh;

	if (src >= tar * 64)
		return -EINVAL;

	/* Find the largest power-of-two pre-scale that still fits. */
	for (sh = 5; sh > 0; sh--) {
		if (src >= tar * (1u << sh))
			break;
	}

	*ratio = 1u << sh;
	*shift = sh;

	return 0;
}
+
/*
 * Compute the Y and CbCr plane sizes, in bytes, for an NV12-tiled
 * (V4L2_PIX_FMT_NV12T) image of the given resolution.  The resolution is
 * first rounded up to a multiple of 16 in both directions, then the
 * number of tiles is derived from the tiled layout's row rounding rules;
 * the final '<< 13' converts tile counts to bytes (8 KiB per tile —
 * presumably the 64x32 macrotile size; confirm against the tiler spec).
 */
void fimc_get_nv12t_size(int img_hres, int img_vres,
			int *y_size, int *cb_size)
{
	int remain;
	int y_hres_byte, y_vres_byte;
	int cb_hres_byte, cb_vres_byte;
	int y_hres_roundup, y_vres_roundup;
	int cb_hres_roundup, cb_vres_roundup;

	/* to make 'img_hres and img_vres' be 16 multiple */
	remain = img_hres % 16;
	if (remain != 0) {
		remain = 16 - remain;
		img_hres = img_hres + remain;
	}
	remain = img_vres % 16;
	if (remain != 0) {
		remain = 16 - remain;
		img_vres = img_vres + remain;
	}

	cb_hres_byte = img_hres;
	cb_vres_byte = img_vres;

	/* Tiles per row/column for the full-resolution Y plane. */
	y_hres_byte = img_hres - 1;
	y_vres_byte = img_vres - 1;
	y_hres_roundup = ((y_hres_byte >> 4) >> 3) + 1;
	y_vres_roundup = ((y_vres_byte >> 4) >> 2) + 1;
	/* Height not a multiple of two tile rows needs the partial-row
	 * correction below; otherwise tiles multiply directly. */
	if ((y_vres_byte & 0x20) == 0) {
		y_hres_byte = y_hres_byte & 0x7f00;
		y_hres_byte = y_hres_byte >> 8;
		y_hres_byte = y_hres_byte & 0x7f;

		y_vres_byte = y_vres_byte & 0x7fc0;
		y_vres_byte = y_vres_byte >> 6;
		y_vres_byte = y_vres_byte & 0x1ff;

		*y_size = y_hres_byte +\
			(y_vres_byte * y_hres_roundup) + 1;
	} else {
		*y_size = y_hres_roundup * y_vres_roundup;
	}

	/* tiles -> bytes (8 KiB per tile) */
	*y_size = *(y_size) << 13;

	/* Same computation for the half-height interleaved CbCr plane. */
	cb_hres_byte = img_hres - 1;
	cb_vres_byte = (img_vres >> 1) - 1;
	cb_hres_roundup = ((cb_hres_byte >> 4) >> 3) + 1;
	cb_vres_roundup = ((cb_vres_byte >> 4) >> 2) + 1;
	if ((cb_vres_byte & 0x20) == 0) {
		cb_hres_byte = cb_hres_byte & 0x7f00;
		cb_hres_byte = cb_hres_byte >> 8;
		cb_hres_byte = cb_hres_byte & 0x7f;

		cb_vres_byte = cb_vres_byte & 0x7fc0;
		cb_vres_byte = cb_vres_byte >> 6;
		cb_vres_byte = cb_vres_byte & 0x1ff;

		*cb_size = cb_hres_byte + (cb_vres_byte * cb_hres_roundup) + 1;
	} else {
		*cb_size = cb_hres_roundup * cb_vres_roundup;
	}
	*cb_size = (*cb_size) << 13;

}
+
+/*
+ * fimc_open - open() handler for the FIMC video node.
+ *
+ * Enforces the per-device open limit (a camera-attached FIMC is exclusive,
+ * an m2m-only FIMC allows FIMC_MAX_CTXS + 1 opens), allocates per-open
+ * private data and, on the first open, powers the block and queries the
+ * LCD geometry for FIFO output.
+ *
+ * Fix vs. original: the two s3cfb_direct_ioctl() error paths jumped to
+ * resource_busy, leaking prv_data and leaving in_use incremented. They
+ * now unwind through fb_err.
+ */
+static int fimc_open(struct file *filp)
+{
+	struct fimc_control *ctrl;
+	struct s3c_platform_fimc *pdata;
+	struct fimc_prv_data *prv_data;
+	int in_use, max_use;
+	int ret;
+	int i;
+
+	ctrl = video_get_drvdata(video_devdata(filp));
+	pdata = to_fimc_plat(ctrl->dev);
+
+	mutex_lock(&ctrl->lock);
+
+	in_use = atomic_read(&ctrl->in_use);
+	/* A FIMC wired to a camera must be opened exclusively. */
+	if (pdata->camera[0])
+		max_use = 1;
+	else
+		max_use = FIMC_MAX_CTXS + 1;
+
+	if (in_use >= max_use) {
+		ret = -EBUSY;
+		goto resource_busy;
+	} else {
+		atomic_inc(&ctrl->in_use);
+		fimc_warn("FIMC%d %d opened.\n",
+				ctrl->id, atomic_read(&ctrl->in_use));
+	}
+	in_use = atomic_read(&ctrl->in_use);
+
+	prv_data = kzalloc(sizeof(struct fimc_prv_data), GFP_KERNEL);
+	if (!prv_data) {
+		fimc_err("%s: not enough memory\n", __func__);
+		ret = -ENOMEM;
+		goto kzalloc_err;
+	}
+
+	if (in_use == 1) {
+		/* First opener powers/resets the block (unless runtime PM
+		 * owns the power state). */
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+		if (pdata->clk_on)
+			pdata->clk_on(to_platform_device(ctrl->dev),
+					&ctrl->clk);
+
+		if (pdata->hw_ver == 0x40)
+			fimc_hw_reset_camera(ctrl);
+
+		/* Apply things to interface register */
+		fimc_hwset_reset(ctrl);
+#endif
+		ctrl->fb.open_fifo = s3cfb_open_fifo;
+		ctrl->fb.close_fifo = s3cfb_close_fifo;
+
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_GET_LCD_WIDTH,
+					(unsigned long)&ctrl->fb.lcd_hres);
+		if (ret < 0) {
+			fimc_err("Fail: S3CFB_GET_LCD_WIDTH\n");
+			goto fb_err;
+		}
+
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_GET_LCD_HEIGHT,
+					(unsigned long)&ctrl->fb.lcd_vres);
+		if (ret < 0) {
+			fimc_err("Fail: S3CFB_GET_LCD_HEIGHT\n");
+			goto fb_err;
+		}
+
+		ctrl->mem.curr = ctrl->mem.base;
+		ctrl->status = FIMC_STREAMOFF;
+	}
+	prv_data->ctrl = ctrl;
+	/* Output (m2m) device: claim a free context slot.
+	 * Otherwise derive the context id from the open count. */
+	if (prv_data->ctrl->out != NULL) {
+		for (i = 0; i < FIMC_MAX_CTXS; i++)
+			if (prv_data->ctrl->out->ctx_used[i] == false) {
+				prv_data->ctx_id = i;
+				prv_data->ctrl->out->ctx_used[i] = true;
+				break;
+			}
+	} else {
+		prv_data->ctx_id = in_use - 1;
+	}
+
+	filp->private_data = prv_data;
+
+	mutex_unlock(&ctrl->lock);
+
+	return 0;
+
+fb_err:
+	kfree(prv_data);
+
+kzalloc_err:
+	atomic_dec(&ctrl->in_use);
+
+resource_busy:
+	mutex_unlock(&ctrl->lock);
+	return ret;
+}
+
+/*
+ * fimc_release - release() handler for the FIMC video node.
+ *
+ * Stops any active capture/output streaming, releases the camera subdev,
+ * drains per-context output queues, frees capture/output state on the
+ * last close, and disables the FIMC window on the framebuffer.
+ *
+ * Fixes vs. original:
+ *  - the "in_use == 0 -> clk_off" block was duplicated; calling
+ *    pdata->clk_off twice on last close unbalances the clock enable
+ *    count. Only the later occurrence (which also resets the HW) is
+ *    kept.
+ *  - ctx was computed from ctrl->out before checking it for NULL;
+ *    it is now only derived when an output context actually exists.
+ */
+static int fimc_release(struct file *filp)
+{
+	struct fimc_prv_data *prv_data =
+				(struct fimc_prv_data *)filp->private_data;
+	struct fimc_control *ctrl = prv_data->ctrl;
+	struct fimc_capinfo *cap;
+	int ctx_id = prv_data->ctx_id;
+	struct s3c_platform_fimc *pdata;
+	struct fimc_overlay_buf *buf;
+	struct mm_struct *mm = current->mm;
+	struct fimc_ctx *ctx;
+	int ret = 0, i;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+#endif
+
+	/* Only meaningful for output (m2m) opens; NULL otherwise. */
+	ctx = ctrl->out ? &ctrl->out->ctx[ctx_id] : NULL;
+
+	pdata = to_fimc_plat(ctrl->dev);
+
+	atomic_dec(&ctrl->in_use);
+
+	if (ctrl->cap && (ctrl->status != FIMC_STREAMOFF))
+		fimc_streamoff_capture((void *)ctrl);
+
+	/* FIXME: turning off actual working camera */
+	if (ctrl->cap && ctrl->cam) {
+		/* Unload the subdev (camera sensor) module,
+		 * reset related status flags */
+		fimc_release_subdev(ctrl);
+		fimc_is_release_subdev(ctrl);
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		if (ctrl->power_status == FIMC_POWER_ON)
+			pm_runtime_put_sync(ctrl->dev);
+#endif
+	} else if (ctrl->is.sd) {
+		fimc_is_release_subdev(ctrl);
+	}
+
+	if (ctrl->out) {
+		/* Stop streaming and return every buffer to DQUEUED state
+		 * before the context goes away. */
+		if (ctx->status != FIMC_STREAMOFF) {
+			ret = fimc_outdev_stop_streaming(ctrl, ctx);
+			if (ret < 0) {
+				fimc_err("Fail: fimc_stop_streaming\n");
+				return -EINVAL;
+			}
+
+			ret = fimc_init_in_queue(ctrl, ctx);
+			if (ret < 0) {
+				fimc_err("Fail: fimc_init_in_queue\n");
+				return -EINVAL;
+			}
+
+			ret = fimc_init_out_queue(ctrl, ctx);
+			if (ret < 0) {
+				fimc_err("Fail: fimc_init_out_queue\n");
+				return -EINVAL;
+			}
+
+			/* Make all buffers DQUEUED state. */
+			for (i = 0; i < FIMC_OUTBUFS; i++) {
+				ctx->src[i].state = VIDEOBUF_IDLE;
+				ctx->src[i].flags = V4L2_BUF_FLAG_MAPPED;
+			}
+
+			if (ctx->overlay.mode == FIMC_OVLY_DMA_AUTO) {
+				ctrl->mem.curr = ctx->dst[0].base[FIMC_ADDR_Y];
+
+				for (i = 0; i < FIMC_OUTBUFS; i++) {
+					ctx->dst[i].base[FIMC_ADDR_Y] = 0;
+					ctx->dst[i].length[FIMC_ADDR_Y] = 0;
+
+					ctx->dst[i].base[FIMC_ADDR_CB] = 0;
+					ctx->dst[i].length[FIMC_ADDR_CB] = 0;
+
+					ctx->dst[i].base[FIMC_ADDR_CR] = 0;
+					ctx->dst[i].length[FIMC_ADDR_CR] = 0;
+				}
+			}
+
+			ctx->status = FIMC_STREAMOFF;
+		}
+	}
+
+	/* Last close: gate the clock and reset the interface registers
+	 * (runtime PM handles power when enabled). */
+	if (atomic_read(&ctrl->in_use) == 0) {
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+		if (pdata->clk_off) {
+			pdata->clk_off(to_platform_device(ctrl->dev),
+					&ctrl->clk);
+			ctrl->power_status = FIMC_POWER_OFF;
+		}
+
+		/* Apply things to interface register */
+		fimc_hwset_reset(ctrl);
+#endif
+	}
+
+	if (ctrl->out) {
+		ctx->is_requested = 0;
+		buf = &ctx->overlay.buf;
+		for (i = 0; i < FIMC_OUTBUFS; i++) {
+			if (buf->vir_addr[i]) {
+				ret = do_munmap(mm, buf->vir_addr[i],
+						buf->size[i]);
+				if (ret < 0)
+					fimc_err("%s: do_munmap fail\n",
+							__func__);
+			}
+		}
+
+		/* reset inq & outq of context */
+		for (i = 0; i < FIMC_OUTBUFS; i++) {
+			ctx->inq[i] = -1;
+			ctx->outq[i] = -1;
+		}
+
+		if (atomic_read(&ctrl->in_use) == 0) {
+			ctrl->status = FIMC_STREAMOFF;
+			fimc_outdev_init_idxs(ctrl);
+
+			ctrl->mem.curr = ctrl->mem.base;
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+			if (ctrl->mem.base)
+				cma_free(ctrl->mem.base);
+#endif
+			kfree(ctrl->out);
+			ctrl->out = NULL;
+
+			kfree(filp->private_data);
+			filp->private_data = NULL;
+		} else {
+			/* Other opens remain: just release this context. */
+			ctrl->out->ctx_used[ctx_id] = false;
+		}
+	}
+
+	if (ctrl->cap) {
+		cap = ctrl->cap;
+		ctrl->mem.curr = ctrl->mem.base;
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+		if (ctrl->mem.base)
+			cma_free(ctrl->mem.base);
+#endif
+		kfree(filp->private_data);
+		filp->private_data = NULL;
+		if (pdata->hw_ver >= 0x51)
+			INIT_LIST_HEAD(&cap->outgoing_q);
+		for (i = 0; i < FIMC_CAPBUFS; i++) {
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 0);
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 1);
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 2);
+		}
+		kfree(ctrl->cap);
+		ctrl->cap = NULL;
+	}
+
+	/*
+	 * Close window for FIMC if window is enabled.
+	 */
+	if (ctrl->fb.is_enable == 1) {
+		fimc_warn("WIN_OFF for FIMC%d\n", ctrl->id);
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_OFF,
+						(unsigned long)NULL);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_SET_WIN_OFF) fail\n");
+			return -EINVAL;
+		}
+
+		ctrl->fb.is_enable = 0;
+	}
+
+	fimc_warn("FIMC%d %d released.\n",
+			ctrl->id, atomic_read(&ctrl->in_use));
+
+	return 0;
+}
+
+/* V4L2 file operations for the FIMC video node; ioctls are dispatched
+ * through video_ioctl2 to fimc_v4l2_ops. */
+static const struct v4l2_file_operations fimc_fops = {
+	.owner = THIS_MODULE,
+	.open = fimc_open,
+	.release = fimc_release,
+	.ioctl = video_ioctl2,
+	.read = fimc_read,
+	.write = fimc_write,
+	.mmap = fimc_mmap,
+	.poll = fimc_poll,
+};
+
+/* video_device .release callback: frees the device when its refcount
+ * drops to zero. */
+static void fimc_vdev_release(struct video_device *vdev)
+{
+	kfree(vdev);
+}
+
+/* Template video_device entries, one per FIMC instance (a fourth on
+ * Exynos4). All share the same fops/ioctl_ops. */
+struct video_device fimc_video_device[FIMC_DEVICES] = {
+	[0] = {
+		.fops = &fimc_fops,
+		.ioctl_ops = &fimc_v4l2_ops,
+		.release  = fimc_vdev_release,
+	},
+	[1] = {
+		.fops = &fimc_fops,
+		.ioctl_ops = &fimc_v4l2_ops,
+		.release  = fimc_vdev_release,
+	},
+	[2] = {
+		.fops = &fimc_fops,
+		.ioctl_ops = &fimc_v4l2_ops,
+		.release  = fimc_vdev_release,
+	},
+#ifdef CONFIG_ARCH_EXYNOS4
+	[3] = {
+		.fops = &fimc_fops,
+		.ioctl_ops = &fimc_v4l2_ops,
+		.release  = fimc_vdev_release,
+	},
+#endif
+};
+
+/*
+ * fimc_init_global - one-time registration of external camera modules.
+ *
+ * Walks the platform camera table, wires each sensor's MCLK to its
+ * source clock and records the camera in the global fimc_dev state.
+ * WriteBack "cameras" need no clock setup.
+ *
+ * Fix vs. original: the srclk reference obtained with clk_get() was
+ * never released - neither on the cam->clk error path nor after a
+ * successful clk_set_parent() - leaking a clock reference per camera.
+ */
+static int fimc_init_global(struct platform_device *pdev)
+{
+	struct fimc_control *ctrl;
+	struct s3c_platform_fimc *pdata;
+	struct s3c_platform_camera *cam;
+	struct clk *srclk;
+	int id, i;
+
+	pdata = to_fimc_plat(&pdev->dev);
+	id = pdev->id;
+	ctrl = get_fimc_ctrl(id);
+
+	/* Registering external camera modules. re-arrange order to be sure */
+	for (i = 0; i < FIMC_MAXCAMS; i++) {
+		cam = pdata->camera[i];
+		if (!cam)
+			break;
+		/* WriteBack doesn't need clock setting */
+		if ((cam->id == CAMERA_WB) || (cam->id == CAMERA_WB_B)) {
+			fimc_dev->camera[i] = cam;
+			fimc_dev->camera_isvalid[i] = 1;
+			fimc_dev->camera[i]->initialized = 0;
+			continue;
+		}
+
+		/* source clk for MCLK*/
+		srclk = clk_get(&pdev->dev, cam->srclk_name);
+		if (IS_ERR(srclk)) {
+			fimc_err("%s: failed to get srclk source\n", __func__);
+			return -EINVAL;
+		}
+
+		/* mclk */
+#if defined(CONFIG_MACH_MIDAS) || defined(CONFIG_SLP)
+		cam->clk = clk_get(&pdev->dev, cam->get_clk_name());
+#else
+		cam->clk = clk_get(&pdev->dev, cam->clk_name);
+#endif
+		if (IS_ERR(cam->clk)) {
+			fimc_err("%s: failed to get mclk source\n", __func__);
+			clk_put(srclk);
+			return -EINVAL;
+		}
+
+		if (clk_set_parent(cam->clk, srclk)) {
+			dev_err(&pdev->dev, "unable to set parent %s of clock %s.\n",
+					srclk->name, cam->clk->name);
+			clk_put(srclk);
+			clk_put(cam->clk);
+			return -EINVAL;
+		}
+
+		/* Parent relationship is latched in the clock tree; drop
+		 * our temporary srclk reference. */
+		clk_put(srclk);
+
+		/* Assign camera device to fimc */
+		fimc_dev->camera[i] = cam;
+		fimc_dev->camera_isvalid[i] = 1;
+		fimc_dev->camera[i]->initialized = 0;
+	}
+
+	fimc_dev->mclk_status = CAM_MCLK_OFF;
+	fimc_dev->active_camera = -1;
+	fimc_dev->initialized = 1;
+
+	return 0;
+}
+
+#ifdef CONFIG_DRM_EXYNOS_FIMD_WB
+/* Notifier chain so the Exynos DRM FIMD writeback path can subscribe
+ * to FIMC events. */
+static BLOCKING_NOTIFIER_HEAD(fimc_notifier_client_list);
+
+/* Register a writeback client for FIMC event notifications. */
+int fimc_register_client(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(
+			&fimc_notifier_client_list, nb);
+}
+EXPORT_SYMBOL(fimc_register_client);
+
+/* Remove a previously registered writeback client. */
+int fimc_unregister_client(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(
+			&fimc_notifier_client_list, nb);
+}
+EXPORT_SYMBOL(fimc_unregister_client);
+
+/* Broadcast an event (val, v) to all registered clients. */
+int fimc_send_event(unsigned long val, void *v)
+{
+	return blocking_notifier_call_chain(
+			&fimc_notifier_client_list, val, v);
+}
+#endif
+
+/* sysfs 'log_level' show: print the names of all log classes currently
+ * enabled in ctrl->log, separated by " | ". */
+static int fimc_show_log_level(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fimc_control *ctrl;
+	struct platform_device *pdev;
+	int id = -1;
+
+	char temp[150];
+
+	pdev = to_platform_device(dev);
+	id = pdev->id;
+	ctrl = get_fimc_ctrl(id);
+
+	sprintf(temp, "\t");
+	strcat(buf, temp);
+	if (ctrl->log & FIMC_LOG_DEBUG) {
+		sprintf(temp, "FIMC_LOG_DEBUG | ");
+		strcat(buf, temp);
+	}
+
+	if (ctrl->log & FIMC_LOG_INFO_L2) {
+		sprintf(temp, "FIMC_LOG_INFO_L2 | ");
+		strcat(buf, temp);
+	}
+
+	if (ctrl->log & FIMC_LOG_INFO_L1) {
+		sprintf(temp, "FIMC_LOG_INFO_L1 | ");
+		strcat(buf, temp);
+	}
+
+	if (ctrl->log & FIMC_LOG_WARN) {
+		sprintf(temp, "FIMC_LOG_WARN | ");
+		strcat(buf, temp);
+	}
+
+	if (ctrl->log & FIMC_LOG_ERR) {
+		sprintf(temp, "FIMC_LOG_ERR\n");
+		strcat(buf, temp);
+	}
+
+	/* NOTE(review): strcat assumes buf arrives zeroed - sysfs pages
+	 * are zero-filled, so this holds in practice. */
+	return strlen(buf);
+}
+
+/* sysfs 'log_level' store: strip whitespace from the input, then OR
+ * into ctrl->log every log-class name found in the string. With no
+ * match, print a usage summary to the kernel log. */
+static int fimc_store_log_level(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct fimc_control *ctrl;
+	struct platform_device *pdev;
+
+	const char *p = buf;
+	char msg[150] = {0, };
+	int id = -1;
+	u32 match = 0;
+
+	pdev = to_platform_device(dev);
+	id = pdev->id;
+	ctrl = get_fimc_ctrl(id);
+
+	/* Compact the input: copy all non-whitespace chars into msg. */
+	while (*p != '\0') {
+		if (!isspace(*p))
+			strncat(msg, p, 1);
+		p++;
+	}
+
+	ctrl->log = 0;
+	printk(KERN_INFO "FIMC.%d log level is set as below.\n", id);
+
+	if (strstr(msg, "FIMC_LOG_ERR") != NULL) {
+		ctrl->log |= FIMC_LOG_ERR;
+		match = 1;
+		printk(KERN_INFO "\tFIMC_LOG_ERR\n");
+	}
+
+	if (strstr(msg, "FIMC_LOG_WARN") != NULL) {
+		ctrl->log |= FIMC_LOG_WARN;
+		match = 1;
+		printk(KERN_INFO "\tFIMC_LOG_WARN\n");
+	}
+
+	if (strstr(msg, "FIMC_LOG_INFO_L1") != NULL) {
+		ctrl->log |= FIMC_LOG_INFO_L1;
+		match = 1;
+		printk(KERN_INFO "\tFIMC_LOG_INFO_L1\n");
+	}
+
+	if (strstr(msg, "FIMC_LOG_INFO_L2") != NULL) {
+		ctrl->log |= FIMC_LOG_INFO_L2;
+		match = 1;
+		printk(KERN_INFO "\tFIMC_LOG_INFO_L2\n");
+	}
+
+	if (strstr(msg, "FIMC_LOG_DEBUG") != NULL) {
+		ctrl->log |= FIMC_LOG_DEBUG;
+		match = 1;
+		printk(KERN_INFO "\tFIMC_LOG_DEBUG\n");
+	}
+
+	if (!match) {
+		printk(KERN_INFO "FIMC_LOG_ERR \t: Error condition.\n");
+		printk(KERN_INFO "FIMC_LOG_WARN \t: WARNING condition.\n");
+		printk(KERN_INFO "FIMC_LOG_INFO_L1 \t: V4L2 API without QBUF, DQBUF.\n");
+		printk(KERN_INFO "FIMC_LOG_INFO_L2 \t: V4L2 API QBUF, DQBUF.\n");
+		printk(KERN_INFO "FIMC_LOG_DEBUG \t: Queue status report.\n");
+	}
+
+	return len;
+}
+
+/* /sys/devices/.../log_level : runtime control of driver verbosity. */
+static DEVICE_ATTR(log_level, 0644, fimc_show_log_level, fimc_store_log_level);
+
+/* sysfs 'range_mode' show: report whether CSC uses narrow or wide
+ * range. */
+static int fimc_show_range_mode(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fimc_control *ctrl;
+	struct platform_device *pdev;
+	int id = -1;
+
+	char temp[150];
+
+	pdev = to_platform_device(dev);
+	id = pdev->id;
+	ctrl = get_fimc_ctrl(id);
+
+	sprintf(temp, "\t");
+	strcat(buf, temp);
+	if (ctrl->range == FIMC_RANGE_NARROW) {
+		sprintf(temp, "FIMC_RANGE_NARROW\n");
+		strcat(buf, temp);
+	} else {
+		sprintf(temp, "FIMC_RANGE_WIDE\n");
+		strcat(buf, temp);
+	}
+
+	return strlen(buf);
+}
+
+/* sysfs 'range_mode' store: accept "FIMC_RANGE_WIDE" or
+ * "FIMC_RANGE_NARROW" (whitespace ignored). If both appear, NARROW
+ * wins because it is checked last. */
+static int fimc_store_range_mode(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct fimc_control *ctrl;
+	struct platform_device *pdev;
+
+	const char *p = buf;
+	char msg[150] = {0, };
+	int id = -1;
+	u32 match = 0;
+
+	pdev = to_platform_device(dev);
+	id = pdev->id;
+	ctrl = get_fimc_ctrl(id);
+
+	/* Compact the input: copy all non-whitespace chars into msg. */
+	while (*p != '\0') {
+		if (!isspace(*p))
+			strncat(msg, p, 1);
+		p++;
+	}
+
+	ctrl->range = 0;
+	printk(KERN_INFO "FIMC.%d range mode is set as below.\n", id);
+
+	if (strstr(msg, "FIMC_RANGE_WIDE") != NULL) {
+		ctrl->range = FIMC_RANGE_WIDE;
+		match = 1;
+		printk(KERN_INFO "\tFIMC_RANGE_WIDE\n");
+	}
+
+	if (strstr(msg, "FIMC_RANGE_NARROW") != NULL) {
+		ctrl->range = FIMC_RANGE_NARROW;
+		match = 1;
+		printk(KERN_INFO "\tFIMC_RANGE_NARROW\n");
+	}
+
+	return len;
+}
+
+/* /sys/devices/.../range_mode : runtime control of the CSC range. */
+static DEVICE_ATTR(range_mode, 0644, \
+			fimc_show_range_mode,
+			fimc_store_range_mode);
+
+/*
+ * fimc_probe - platform probe for one FIMC instance.
+ *
+ * Allocates the shared fimc_dev state on first probe, registers the
+ * controller, the v4l2_device and the video node, creates the sysfs
+ * attributes, and requests the interrupt.
+ *
+ * Fixes vs. original:
+ *  - duplicated "ctrl->vd->v4l2_dev = &ctrl->v4l2_dev;" removed.
+ *  - failure after device_create_file() now removes the attribute
+ *    file(s) already created (new err_attrs/err_attr labels).
+ *  - err_alloc sets fimc_dev = NULL after kfree() so a later probe
+ *    does not reuse a dangling pointer.  NOTE(review): fimc_dev is
+ *    shared across FIMC instances; freeing it here while a sibling
+ *    instance already probed would still be unsafe - confirm probe
+ *    ordering.
+ */
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+	struct s3c_platform_fimc *pdata;
+	struct fimc_control *ctrl;
+	int ret;
+
+	if (!fimc_dev) {
+		fimc_dev = kzalloc(sizeof(*fimc_dev), GFP_KERNEL);
+		if (!fimc_dev) {
+			dev_err(&pdev->dev, "%s: not enough memory\n", __func__);
+			return -ENOMEM;
+		}
+	}
+
+	ctrl = fimc_register_controller(pdev);
+	if (!ctrl) {
+		printk(KERN_ERR "%s: cannot register fimc\n", __func__);
+		goto err_alloc;
+	}
+
+	pdata = to_fimc_plat(&pdev->dev);
+	if ((ctrl->id == FIMC0) && (pdata->cfg_gpio))
+		pdata->cfg_gpio(pdev);
+
+	/* V4L2 device-subdev registration */
+	ret = v4l2_device_register(&pdev->dev, &ctrl->v4l2_dev);
+	if (ret) {
+		fimc_err("%s: v4l2 device register failed\n", __func__);
+		goto err_fimc;
+	}
+	ctrl->vd->v4l2_dev = &ctrl->v4l2_dev;
+
+	/* things to initialize once */
+	if (!fimc_dev->initialized) {
+		ret = fimc_init_global(pdev);
+		if (ret)
+			goto err_v4l2;
+	}
+
+	/* video device register */
+	ret = video_register_device(ctrl->vd, VFL_TYPE_GRABBER, ctrl->id);
+	if (ret) {
+		fimc_err("%s: cannot register video driver\n", __func__);
+		goto err_v4l2;
+	}
+
+	video_set_drvdata(ctrl->vd, ctrl);
+
+#ifdef CONFIG_VIDEO_FIMC_RANGE_WIDE
+	ctrl->range = FIMC_RANGE_WIDE;
+#else
+	ctrl->range = FIMC_RANGE_NARROW;
+#endif
+
+	ret = device_create_file(&(pdev->dev), &dev_attr_log_level);
+	if (ret < 0) {
+		fimc_err("failed to add sysfs entries for log level\n");
+		goto err_global;
+	}
+	ret = device_create_file(&(pdev->dev), &dev_attr_range_mode);
+	if (ret < 0) {
+		fimc_err("failed to add sysfs entries for range mode\n");
+		goto err_attr;
+	}
+	printk(KERN_INFO "FIMC%d registered successfully\n", ctrl->id);
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	sprintf(buf, "fimc%d_iqr_wq_name", ctrl->id);
+	ctrl->fimc_irq_wq = create_workqueue(buf);
+	if (ctrl->fimc_irq_wq == NULL) {
+		fimc_err("failed to create_workqueue\n");
+		goto err_attrs;
+	}
+
+	INIT_WORK(&ctrl->work_struct, s3c_fimc_irq_work);
+	atomic_set(&ctrl->irq_cnt, 0);
+
+	ctrl->power_status = FIMC_POWER_OFF;
+	pm_runtime_enable(&pdev->dev);
+#endif
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	/* To lock bus frequency in OPP mode */
+	ctrl->bus_dev = dev_get(EXYNOS_BUSFREQ_NAME);
+#endif
+
+	/* irq */
+	ctrl->irq = platform_get_irq(pdev, 0);
+	if (request_irq(ctrl->irq, fimc_irq, IRQF_DISABLED, ctrl->name, ctrl))
+		fimc_err("%s: request_irq failed\n", __func__);
+
+	return 0;
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+err_attrs:
+	device_remove_file(&(pdev->dev), &dev_attr_range_mode);
+#endif
+
+err_attr:
+	device_remove_file(&(pdev->dev), &dev_attr_log_level);
+
+err_global:
+	video_unregister_device(ctrl->vd);
+
+err_v4l2:
+	v4l2_device_unregister(&ctrl->v4l2_dev);
+
+err_fimc:
+	fimc_unregister_controller(pdev);
+
+err_alloc:
+	kfree(fimc_dev);
+	fimc_dev = NULL;
+	return -EINVAL;
+
+}
+
+/*
+ * fimc_remove - platform remove: undo fimc_probe.
+ *
+ * Fix vs. original: the 'range_mode' sysfs attribute created in probe
+ * was never removed; only 'log_level' was.
+ */
+static int fimc_remove(struct platform_device *pdev)
+{
+	fimc_unregister_controller(pdev);
+
+	device_remove_file(&(pdev->dev), &dev_attr_log_level);
+	device_remove_file(&(pdev->dev), &dev_attr_range_mode);
+
+	kfree(fimc_dev);
+	fimc_dev = NULL;
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	pm_runtime_disable(&pdev->dev);
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* Transition one output context into its sleep state for system
+ * suspend. Streaming contexts are stopped first; the resulting status
+ * (FIMC_ON_SLEEP / FIMC_ON_IDLE_SLEEP / FIMC_OFF_SLEEP) records what
+ * to restore on resume. */
+static inline void fimc_suspend_out_ctx(struct fimc_control *ctrl,
+					struct fimc_ctx *ctx)
+{
+	switch (ctx->overlay.mode) {
+	case FIMC_OVLY_DMA_AUTO:		/* fall through */
+	case FIMC_OVLY_DMA_MANUAL:		/* fall through */
+	case FIMC_OVLY_NONE_MULTI_BUF:	/* fall through */
+	case FIMC_OVLY_NONE_SINGLE_BUF:
+		if (ctx->status == FIMC_STREAMON) {
+			/* Suspending with queued input buffers loses work. */
+			if (ctx->inq[0] != -1)
+				fimc_err("%s : %d in queue unstable\n",
+						__func__, __LINE__);
+
+			fimc_outdev_stop_streaming(ctrl, ctx);
+			ctx->status = FIMC_ON_SLEEP;
+		} else if (ctx->status == FIMC_STREAMON_IDLE) {
+			fimc_outdev_stop_streaming(ctrl, ctx);
+			ctx->status = FIMC_ON_IDLE_SLEEP;
+		} else {
+			ctx->status = FIMC_OFF_SLEEP;
+		}
+
+		break;
+	case FIMC_OVLY_FIFO:
+		if (ctx->status == FIMC_STREAMON) {
+			if (ctx->inq[0] != -1)
+				fimc_err("%s: %d in queue unstable\n",
+						__func__, __LINE__);
+
+			/* FIFO path: also sanity-check HW index state. */
+			if ((ctrl->out->idxs.next.idx != -1) ||
+			    (ctrl->out->idxs.prev.idx != -1))
+				fimc_err("%s: %d FIMC unstable\n",
+						__func__, __LINE__);
+
+			fimc_outdev_stop_streaming(ctrl, ctx);
+			ctx->status = FIMC_ON_SLEEP;
+		} else {
+			ctx->status = FIMC_OFF_SLEEP;
+		}
+
+		break;
+	case FIMC_OVLY_NOT_FIXED:
+		ctx->status = FIMC_OFF_SLEEP;
+		break;
+	}
+}
+
+/* Suspend all output contexts, then derive the controller-wide sleep
+ * status: any streaming context wins over idle, idle over off. */
+static inline int fimc_suspend_out(struct fimc_control *ctrl)
+{
+	struct fimc_ctx *ctx;
+	int i, on_sleep = 0, idle_sleep = 0, off_sleep = 0;
+
+	for (i = 0; i < FIMC_MAX_CTXS; i++) {
+		ctx = &ctrl->out->ctx[i];
+		fimc_suspend_out_ctx(ctrl, ctx);
+
+		switch (ctx->status) {
+		case FIMC_ON_SLEEP:
+			on_sleep++;
+			break;
+		case FIMC_ON_IDLE_SLEEP:
+			idle_sleep++;
+			break;
+		case FIMC_OFF_SLEEP:
+			off_sleep++;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (on_sleep)
+		ctrl->status = FIMC_ON_SLEEP;
+	else if (idle_sleep)
+		ctrl->status = FIMC_ON_IDLE_SLEEP;
+	else
+		ctrl->status = FIMC_OFF_SLEEP;
+
+	/* Force context reload after resume. */
+	ctrl->out->last_ctx = -1;
+
+	return 0;
+}
+
+/* Suspend the capture path. WriteBack sources save the HW frame
+ * sequence and stop streaming; FIMC0 with a real sensor powers the
+ * sensor (or ISP) down and gates MCLK.
+ * NOTE(review): ctrl->cam is dereferenced without a NULL check here,
+ * unlike fimc_runtime_suspend_cap() - confirm callers guarantee a
+ * camera is attached. */
+static inline int fimc_suspend_cap(struct fimc_control *ctrl)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+	printk(KERN_INFO "%s\n", __func__);
+	if (ctrl->power_status == FIMC_POWER_ON)
+		pm_runtime_put_sync(&pdev->dev);
+#endif
+
+	if (ctrl->cam->id == CAMERA_WB || ctrl->cam->id == CAMERA_WB_B) {
+		fimc_dbg("%s\n", __func__);
+		/* Remember the HW buffer sequence so resume can restore it. */
+		ctrl->suspend_framecnt = fimc_hwget_output_buf_sequence(ctrl);
+		fimc_streamoff_capture((void *)ctrl);
+		fimc_info1("%s : framecnt_seq : %d\n",
+				__func__, ctrl->suspend_framecnt);
+	} else {
+		if (ctrl->id == FIMC0 && ctrl->cam->initialized) {
+			ctrl->cam->initialized = 0;
+			if (ctrl->cam->use_isp) {
+				printk(KERN_INFO "%s use_isp s_power down\n", __func__);
+				v4l2_subdev_call(ctrl->is.sd, core, s_power, 0);
+			} else
+				v4l2_subdev_call(ctrl->cam->sd, core, s_power, 0);
+
+			if (fimc->mclk_status == CAM_MCLK_ON) {
+				if (ctrl->cam->cam_power)
+					ctrl->cam->cam_power(0);
+				/* shutdown the MCLK */
+				clk_disable(ctrl->cam->clk);
+				fimc->mclk_status = CAM_MCLK_OFF;
+			}
+		}
+	}
+	ctrl->power_status = FIMC_POWER_OFF;
+	printk(KERN_INFO "%s--\n", __func__);
+
+	return 0;
+}
+
+/* System suspend entry: dispatch to the output or capture suspend
+ * path depending on how this FIMC is in use, then gate the clock
+ * (unless runtime PM owns power). */
+int fimc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct fimc_control *ctrl;
+	struct s3c_platform_fimc *pdata;
+	int id;
+
+	id = pdev->id;
+	ctrl = get_fimc_ctrl(id);
+	pdata = to_fimc_plat(ctrl->dev);
+
+	printk(KERN_INFO "%s\n", __func__);
+	if (ctrl->out)
+		fimc_suspend_out(ctrl);
+
+	else if (ctrl->cap)
+		fimc_suspend_cap(ctrl);
+	else
+		ctrl->status = FIMC_OFF_SLEEP;
+
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+	if (atomic_read(&ctrl->in_use) && pdata->clk_off)
+		pdata->clk_off(pdev, &ctrl->clk);
+#endif
+
+	printk(KERN_INFO "%s--\n", __func__);
+	return 0;
+}
+
+/*
+ * fimc_suspend_pd - dev_pm_ops .suspend adapter around fimc_suspend().
+ *
+ * Fix vs. original: the return value of fimc_suspend() was computed
+ * and then discarded; it is now propagated to the PM core.
+ */
+int fimc_suspend_pd(struct device *dev)
+{
+	struct platform_device *pdev;
+	int ret;
+	pm_message_t state;
+
+	state.event = 0;
+	pdev = to_platform_device(dev);
+	ret = fimc_suspend(pdev, state);
+
+	return ret;
+}
+
+/* Restore output contexts after system resume: idle-sleeping contexts
+ * return to STREAMON_IDLE, off-sleeping ones to STREAMOFF.
+ * NOTE(review): a context left in FIMC_ON_SLEEP by suspend falls into
+ * the "Abnormal" branch here and keeps its sleep status - confirm
+ * whether streaming contexts are meant to be restarted elsewhere. */
+static inline int fimc_resume_out(struct fimc_control *ctrl)
+{
+	struct fimc_ctx *ctx;
+	int i;
+	u32 state = 0;
+
+	for (i = 0; i < FIMC_MAX_CTXS; i++) {
+		ctx = &ctrl->out->ctx[i];
+		if (ctx->status == FIMC_ON_IDLE_SLEEP) {
+			ctx->status = FIMC_STREAMON_IDLE;
+			state |= FIMC_STREAMON_IDLE;
+		} else if (ctx->status == FIMC_OFF_SLEEP) {
+			ctx->status = FIMC_STREAMOFF;
+			state |= FIMC_STREAMOFF;
+		} else {
+			fimc_err("%s: Abnormal (%d)\n", __func__, ctx->status);
+		}
+	}
+
+	/* Controller status mirrors the "highest" context state seen. */
+	if ((state & FIMC_STREAMON_IDLE) == FIMC_STREAMON_IDLE)
+		ctrl->status = FIMC_STREAMON_IDLE;
+	else
+		ctrl->status = FIMC_STREAMOFF;
+
+	return 0;
+}
+
+/* Resume the capture path: re-enable the CAM power domain (polling up
+ * to ~1ms), then either restore the WriteBack buffer sequence and
+ * restart streaming, or re-power the sensor and MCLK on FIMC0. */
+static inline int fimc_resume_cap(struct fimc_control *ctrl)
+{
+	struct fimc_global *fimc = get_fimc_dev();
+	int tmp;
+	u32 timeout;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+#endif
+	printk(KERN_INFO "%s\n", __func__);
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	if (ctrl->power_status == FIMC_POWER_OFF)
+		pm_runtime_get_sync(&pdev->dev);
+#endif
+	/* Turn the CAM power domain on directly via the PMU. */
+	__raw_writel(S5P_INT_LOCAL_PWR_EN, S5P_PMU_CAM_CONF);
+	/* Wait max 1ms */
+	timeout = 10;
+	while ((__raw_readl(S5P_PMU_CAM_CONF + 0x4) & S5P_INT_LOCAL_PWR_EN)
+			!= S5P_INT_LOCAL_PWR_EN) {
+		if (timeout == 0) {
+			printk(KERN_ERR "Power domain CAM enable failed.\n");
+			break;
+		}
+		timeout--;
+		udelay(100);
+	}
+
+	if (ctrl->cam->id == CAMERA_WB || ctrl->cam->id == CAMERA_WB_B) {
+		/* Restore the buffer sequence saved at suspend. */
+		fimc_info1("%s : framecnt_seq : %d\n",
+				__func__, ctrl->suspend_framecnt);
+		fimc_hwset_output_buf_sequence_all(ctrl,
+				ctrl->suspend_framecnt);
+		tmp = fimc_hwget_output_buf_sequence(ctrl);
+		fimc_info1("%s : real framecnt_seq : %d\n", __func__, tmp);
+
+		fimc_streamon_capture((void *)ctrl);
+	} else {
+		if (ctrl->id == FIMC0 && ctrl->cam->initialized == 0) {
+			if (!ctrl->cam->use_isp) {
+				clk_set_rate(ctrl->cam->clk, ctrl->cam->clk_rate);
+				clk_enable(ctrl->cam->clk);
+				fimc->mclk_status = CAM_MCLK_ON;
+				fimc_info1("clock for camera: %d\n", ctrl->cam->clk_rate);
+
+				if (ctrl->cam->cam_power)
+					ctrl->cam->cam_power(1);
+
+				v4l2_subdev_call(ctrl->cam->sd, core, s_power, 1);
+				ctrl->cam->initialized = 1;
+			}
+
+		}
+	}
+	/* fimc_streamon_capture((void *)ctrl); */
+	ctrl->power_status = FIMC_POWER_ON;
+	printk(KERN_INFO "%s--\n", __func__);
+
+	return 0;
+}
+
+/* System resume entry: ungate the clock if the device was open, then
+ * dispatch to the output or capture resume path. */
+int fimc_resume(struct platform_device *pdev)
+{
+	struct fimc_control *ctrl;
+	struct s3c_platform_fimc *pdata;
+	int id = pdev->id;
+
+	ctrl = get_fimc_ctrl(id);
+	pdata = to_fimc_plat(ctrl->dev);
+	printk(KERN_INFO "%s", __func__);
+	if (atomic_read(&ctrl->in_use) && pdata->clk_on)
+		pdata->clk_on(pdev, &ctrl->clk);
+
+	if (ctrl->out)
+		fimc_resume_out(ctrl);
+
+	else if (ctrl->cap)
+		fimc_resume_cap(ctrl);
+	else
+		ctrl->status = FIMC_STREAMOFF;
+
+	printk(KERN_INFO "%s--", __func__);
+	return 0;
+}
+
+/*
+ * fimc_resume_pd - dev_pm_ops .resume adapter around fimc_resume().
+ *
+ * Fix vs. original: the return value of fimc_resume() was computed
+ * and then discarded; it is now propagated to the PM core.
+ */
+int fimc_resume_pd(struct device *dev)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	pdev = to_platform_device(dev);
+	ret = fimc_resume(pdev);
+	return ret;
+}
+
+
+#else
+#define fimc_suspend NULL
+#define fimc_resume NULL
+#define fimc_suspend_pd NULL
+#define fimc_resume_pd NULL
+#endif
+
+/* Runtime-suspend the capture path: stop streaming if active and, for
+ * WriteBack sources, disable the matching pxl_async clock. */
+static int fimc_runtime_suspend_cap(struct fimc_control *ctrl)
+{
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+	struct clk *pxl_async = NULL;
+	printk(KERN_INFO "%s FIMC%d\n", __func__, ctrl->id);
+
+	ctrl->power_status = FIMC_POWER_SUSPEND;
+
+	if (ctrl->cap && (ctrl->status != FIMC_STREAMOFF)) {
+		fimc_streamoff_capture((void *)ctrl);
+		ctrl->status = FIMC_STREAMOFF;
+	}
+	fimc_dbg("%s\n", __func__);
+
+	if (!ctrl->cam) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (ctrl->cam->id == CAMERA_WB) {
+		fimc_info1("%s : writeback 0 suspend\n", __func__);
+		pxl_async = clk_get(&pdev->dev, "pxl_async0");
+		if (IS_ERR(pxl_async)) {
+			dev_err(&pdev->dev, "failed to get pxl_async\n");
+			return -ENODEV;
+		}
+
+		clk_disable(pxl_async);
+		clk_put(pxl_async);
+	} else if (ctrl->cam->id == CAMERA_WB_B) {
+		fimc_info1("%s : writeback 1 suspend\n", __func__);
+		pxl_async = clk_get(&pdev->dev, "pxl_async1");
+		if (IS_ERR(pxl_async)) {
+			dev_err(&pdev->dev, "failed to get pxl_async\n");
+			return -ENODEV;
+		}
+
+		clk_disable(pxl_async);
+		clk_put(pxl_async);
+	}
+	printk(KERN_INFO "%s FIMC%d --\n", __func__, ctrl->id);
+
+	return 0;
+}
+/* dev_pm_ops .runtime_suspend: capture devices get extra teardown via
+ * fimc_runtime_suspend_cap(); m2m devices only need the clock gated. */
+static int fimc_runtime_suspend(struct device *dev)
+{
+	struct fimc_control *ctrl;
+	struct platform_device *pdev;
+	struct s3c_platform_fimc *pdata;
+	int id;
+	int ret;
+
+	pdev = to_platform_device(dev);
+	id = pdev->id;
+	ctrl = get_fimc_ctrl(id);
+	pdata = to_fimc_plat(ctrl->dev);
+
+	if (ctrl->out) {
+		fimc_info1("%s: fimc m2m\n", __func__);
+	} else if (ctrl->cap) {
+		fimc_info1("%s: fimc capture\n", __func__);
+		fimc_runtime_suspend_cap(ctrl);
+	} else
+		fimc_err("%s : invalid fimc control\n", __func__);
+
+	if (pdata->clk_off) {
+		ret = pdata->clk_off(pdev, &ctrl->clk);
+		if (ret == 0)
+			ctrl->power_status = FIMC_POWER_OFF;
+	}
+
+	return 0;
+}
+
+/* Runtime-resume the capture path: re-enable the matching pxl_async
+ * clock for WriteBack sources. Mirror of fimc_runtime_suspend_cap(). */
+static int fimc_runtime_resume_cap(struct fimc_control *ctrl)
+{
+	struct platform_device *pdev = to_platform_device(ctrl->dev);
+	struct clk *pxl_async = NULL;
+	printk(KERN_INFO "%s FIMC%d\n", __func__, ctrl->id);
+
+	if (!ctrl->cam) {
+		fimc_err("%s: No capture device.\n", __func__);
+		return -ENODEV;
+	}
+
+	if (ctrl->cam->id == CAMERA_WB) {
+		fimc_info1("%s : writeback 0 resume\n", __func__);
+		pxl_async = clk_get(&pdev->dev, "pxl_async0");
+		if (IS_ERR(pxl_async)) {
+			dev_err(&pdev->dev, "failed to get pxl_async\n");
+			return -ENODEV;
+		}
+
+		clk_enable(pxl_async);
+		clk_put(pxl_async);
+	} else if (ctrl->cam->id == CAMERA_WB_B) {
+		fimc_info1("%s : writeback 1 resume\n", __func__);
+		pxl_async = clk_get(&pdev->dev, "pxl_async1");
+		if (IS_ERR(pxl_async)) {
+			dev_err(&pdev->dev, "failed to get pxl_async\n");
+			return -ENODEV;
+		}
+
+		clk_enable(pxl_async);
+		clk_put(pxl_async);
+	}
+	printk(KERN_INFO "%s FIMC%d --\n", __func__, ctrl->id);
+	return 0;
+}
+/* dev_pm_ops .runtime_resume: ungate the clock, then run the extra
+ * capture-path resume when this FIMC is in capture use. */
+static int fimc_runtime_resume(struct device *dev)
+{
+	struct fimc_control *ctrl;
+	struct s3c_platform_fimc *pdata;
+	struct platform_device *pdev;
+	int id, ret = 0;
+
+	pdev = to_platform_device(dev);
+	id = pdev->id;
+	ctrl = get_fimc_ctrl(id);
+
+	pdata = to_fimc_plat(ctrl->dev);
+	if (pdata->clk_on) {
+		ret = pdata->clk_on(pdev, &ctrl->clk);
+		if (ret == 0)
+			ctrl->power_status = FIMC_POWER_ON;
+	}
+
+	/* if status is FIMC_PROBE, not need to know differlence of out or
+	 * cap */
+
+	if (ctrl->out) {
+		/* do not need to sub function in m2m mode */
+		fimc_info1("%s: fimc m2m\n", __func__);
+	} else if (ctrl->cap) {
+		fimc_info1("%s: fimc cap\n", __func__);
+		fimc_runtime_resume_cap(ctrl);
+	}
+
+	return 0;
+}
+/* PM callbacks used when power-domain + runtime-PM support is built in
+ * (see .pm wiring in fimc_driver below). */
+static const struct dev_pm_ops fimc_pm_ops = {
+	.suspend = fimc_suspend_pd,
+	.resume = fimc_resume_pd,
+	.runtime_suspend = fimc_runtime_suspend,
+	.runtime_resume = fimc_runtime_resume,
+};
+
+/* Platform driver: legacy suspend/resume hooks are used only when
+ * runtime PM is not available; otherwise dev_pm_ops takes over. */
+static struct platform_driver fimc_driver = {
+	.probe = fimc_probe,
+	.remove = fimc_remove,
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+	.suspend = fimc_suspend,
+	.resume = fimc_resume,
+#endif
+	.driver = {
+		.name = FIMC_NAME,
+		.owner = THIS_MODULE,
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		.pm = &fimc_pm_ops,
+#else
+		.pm = NULL,
+#endif
+
+	},
+};
+
+/* Module init (late_initcall): register the platform driver. */
+static int fimc_register(void)
+{
+	return platform_driver_register(&fimc_driver);
+}
+
+/* Module exit: unregister the platform driver. */
+static void fimc_unregister(void)
+{
+	platform_driver_unregister(&fimc_driver);
+}
+
+late_initcall(fimc_register);
+module_exit(fimc_unregister);
+
+MODULE_AUTHOR("Dongsoo, Kim <dongsoo45.kim@samsung.com>");
+MODULE_AUTHOR("Jinsung, Yang <jsgood.yang@samsung.com>");
+MODULE_AUTHOR("Jonghun, Han <jonghun.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung Camera Interface (FIMC) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/fimc/fimc_dev_u1.c b/drivers/media/video/samsung/fimc/fimc_dev_u1.c
new file mode 100644
index 0000000..f36db5d
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc_dev_u1.c
@@ -0,0 +1,2341 @@
+/* linux/drivers/media/video/samsung/fimc/fimc_dev_u1.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Core file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <media/v4l2-device.h>
+#include <linux/io.h>
+#include <linux/memory.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include <plat/clock.h>
+#if defined(CONFIG_CMA)
+#include <linux/cma.h>
+#elif defined(CONFIG_S5P_MEM_BOOTMEM)
+#include <plat/media.h>
+#include <mach/media.h>
+#endif
+#include <plat/fimc.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2_exynos_media.h>
+#include <linux/videodev2_exynos_camera.h>
+
+#include <mach/regs-pmu.h>
+#include <linux/delay.h>
+
+#include "fimc.h"
+
+/* NOTE(review): non-static global named "buf" pollutes the kernel symbol
+ * namespace and risks clashes -- confirm no external users before making
+ * it static. */
+char buf[32];
+struct fimc_global *fimc_dev;	/* driver-wide state; presumably set at probe -- TODO confirm */
+void __iomem *qos_regs0 , *qos_regs1;	/* QoS register windows; mapped elsewhere in this file */
+
+/*
+ * Workqueue handler that drains the pending-IRQ counter: one runtime-PM
+ * reference is dropped per IRQ counted by fimc_irq_out().  Runs in process
+ * context because pm_runtime_put_sync() may sleep.
+ */
+void s3c_fimc_irq_work(struct work_struct *work)
+{
+	struct fimc_control *ctrl = container_of(work, struct fimc_control,
+			work_struct);
+	int ret, irq_cnt;
+
+	irq_cnt = atomic_read(&ctrl->irq_cnt);
+	if (irq_cnt > 0) {
+		do {
+			/* atomic_dec_and_test() returns true once the counter
+			 * reaches zero, terminating the loop.
+			 * NOTE(review): the (atomic_t *) cast suggests irq_cnt
+			 * may not be declared atomic_t -- verify in fimc.h. */
+			ret = atomic_dec_and_test((atomic_t *)&ctrl->irq_cnt);
+			pm_runtime_put_sync(ctrl->dev);
+		} while (ret != 1);
+	}
+}
+
+/*
+ * Carve buffer i of @bs out of the controller's reserved memory region
+ * using a simple bump allocator (ctrl->mem.curr advances, never compacts).
+ * @align: byte alignment for the buffer start, or 0 for none; any padding
+ * inserted for alignment is recorded in bs->garbage[i] so fimc_dma_free()
+ * can return it.  Returns 0, -EINVAL for a zero-length request, or -ENOMEM
+ * when the region is exhausted (buffer entry is zeroed in that case).
+ */
+int fimc_dma_alloc(struct fimc_control *ctrl, struct fimc_buf_set *bs,
+							int i, int align)
+{
+	dma_addr_t end, *curr;
+
+	mutex_lock(&ctrl->lock);
+
+	end = ctrl->mem.base + ctrl->mem.size;
+	curr = &ctrl->mem.curr;
+
+	if (!bs->length[i]) {
+		mutex_unlock(&ctrl->lock);
+		return -EINVAL;
+	}
+
+	if (!align) {
+		if (*curr + bs->length[i] > end) {
+			goto overflow;
+		} else {
+			bs->base[i] = *curr;
+			bs->garbage[i] = 0;
+			*curr += bs->length[i];
+		}
+	} else {
+		if (ALIGN(*curr, align) + bs->length[i] > end) {
+			goto overflow;
+		} else {
+			bs->base[i] = ALIGN(*curr, align);
+			/* bytes skipped to reach the aligned address */
+			bs->garbage[i] = ALIGN(*curr, align) - *curr;
+			*curr += (bs->length[i] + bs->garbage[i]);
+		}
+	}
+
+	mutex_unlock(&ctrl->lock);
+
+	return 0;
+
+overflow:
+	bs->base[i] = 0;
+	bs->length[i] = 0;
+	bs->garbage[i] = 0;
+
+	mutex_unlock(&ctrl->lock);
+
+	return -ENOMEM;
+}
+
+/*
+ * Release buffer i of @bs back to the bump allocator.  Because the
+ * allocator has no free list, this only rewinds ctrl->mem.curr -- which is
+ * fully correct only when buffers are freed in reverse allocation order
+ * (LIFO); freeing out of order leaves the pointer where it was.
+ */
+void fimc_dma_free(struct fimc_control *ctrl, struct fimc_buf_set *bs, int i)
+{
+	int total = bs->length[i] + bs->garbage[i];
+	mutex_lock(&ctrl->lock);
+
+	if (bs->base[i]) {
+		/* guard against rewinding past the region start */
+		if (ctrl->mem.curr - total >= ctrl->mem.base)
+			ctrl->mem.curr -= total;
+
+		bs->base[i] = 0;
+		bs->length[i] = 0;
+		bs->garbage[i] = 0;
+	}
+
+	mutex_unlock(&ctrl->lock);
+}
+
+/*
+ * IRQ-context completion handler for FIMC_OVLY_NONE_SINGLE_BUF output:
+ * moves the finished buffer to the outgoing queue, then either starts the
+ * next queued buffer or parks the device in STREAMON_IDLE.  Returns 1 when
+ * the waiter on ctrl->wq should be woken (always, in this mode).
+ */
+static inline u32 fimc_irq_out_single_buf(struct fimc_control *ctrl,
+					  struct fimc_ctx *ctx)
+{
+	int ret = -1, ctx_num, next;
+	u32 wakeup = 1;
+
+	/* A stop was requested: finalize stream-off and bail out. */
+	if (ctx->status == FIMC_READY_OFF || ctx->status == FIMC_STREAMOFF) {
+		ctrl->out->idxs.active.ctx = -1;
+		ctrl->out->idxs.active.idx = -1;
+		ctx->status = FIMC_STREAMOFF;
+		ctrl->status = FIMC_STREAMOFF;
+
+		return wakeup;
+	}
+	ctx->status = FIMC_STREAMON_IDLE;
+
+	/* Attach done buffer to outgoing queue. */
+	ret = fimc_push_outq(ctrl, ctx, ctrl->out->idxs.active.idx);
+	if (ret < 0)
+		fimc_err("%s:Failed: fimc_push_outq\n", __func__);
+
+	/* Detach buffer from incomming queue. */
+	ret = fimc_pop_inq(ctrl, &ctx_num, &next);
+	if (ret == 0) {	/* There is a buffer in incomming queue. */
+		/* Reprogram hardware only when switching to another context. */
+		if (ctx_num != ctrl->out->last_ctx) {
+			ctx = &ctrl->out->ctx[ctx_num];
+			ctrl->out->last_ctx = ctx->ctx_num;
+			fimc_outdev_set_ctx_param(ctrl, ctx);
+		}
+
+		fimc_outdev_set_src_addr(ctrl, ctx->src[next].base);
+		ret = fimc_output_set_dst_addr(ctrl, ctx, next);
+		if (ret < 0)
+			fimc_err("%s:Fail: fimc_output_set_dst_addr\n", __func__);
+
+		ctrl->out->idxs.active.ctx = ctx_num;
+		ctrl->out->idxs.active.idx = next;
+
+		ctx->status = FIMC_STREAMON;
+		ctrl->status = FIMC_STREAMON;
+
+		ret = fimc_outdev_start_camif(ctrl);
+		if (ret < 0)
+			fimc_err("%s:Fail: fimc_start_camif\n", __func__);
+
+	} else {	/* There is no buffer in incomming queue. */
+		ctrl->out->idxs.active.ctx = -1;
+		ctrl->out->idxs.active.idx = -1;
+		ctx->status = FIMC_STREAMON_IDLE;
+		ctrl->status = FIMC_STREAMON_IDLE;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		/* force full context reprogramming after a runtime suspend */
+		ctrl->out->last_ctx = -1;
+#endif
+	}
+
+	return wakeup;
+}
+
+/*
+ * IRQ-context completion handler for FIMC_OVLY_NONE_MULTI_BUF output.
+ * Near-duplicate of fimc_irq_out_single_buf(); differs in the stop path,
+ * which only clears the active index when it belongs to this context and
+ * does not touch ctrl->status.
+ */
+static inline u32 fimc_irq_out_multi_buf(struct fimc_control *ctrl,
+					 struct fimc_ctx *ctx)
+{
+	int ret = -1, ctx_num, next;
+	u32 wakeup = 1;
+
+	if (ctx->status == FIMC_READY_OFF) {
+		if (ctrl->out->idxs.active.ctx == ctx->ctx_num) {
+			ctrl->out->idxs.active.ctx = -1;
+			ctrl->out->idxs.active.idx = -1;
+		}
+
+		ctx->status = FIMC_STREAMOFF;
+
+		return wakeup;
+	}
+	ctx->status = FIMC_STREAMON_IDLE;
+
+	/* Attach done buffer to outgoing queue. */
+	ret = fimc_push_outq(ctrl, ctx, ctrl->out->idxs.active.idx);
+	if (ret < 0)
+		fimc_err("%s:Failed: fimc_push_outq\n", __func__);
+
+	/* Detach buffer from incomming queue. */
+	ret = fimc_pop_inq(ctrl, &ctx_num, &next);
+	if (ret == 0) {	/* There is a buffer in incomming queue. */
+		/* Reprogram hardware only when switching to another context. */
+		if (ctx_num != ctrl->out->last_ctx) {
+			ctx = &ctrl->out->ctx[ctx_num];
+			ctrl->out->last_ctx = ctx->ctx_num;
+			fimc_outdev_set_ctx_param(ctrl, ctx);
+		}
+
+		fimc_outdev_set_src_addr(ctrl, ctx->src[next].base);
+		ret = fimc_output_set_dst_addr(ctrl, ctx, next);
+		if (ret < 0)
+			fimc_err("%s:Fail: fimc_output_set_dst_addr\n", __func__);
+
+		ctrl->out->idxs.active.ctx = ctx_num;
+		ctrl->out->idxs.active.idx = next;
+
+		ctx->status = FIMC_STREAMON;
+		ctrl->status = FIMC_STREAMON;
+
+		ret = fimc_outdev_start_camif(ctrl);
+		if (ret < 0)
+			fimc_err("%s:Fail: fimc_start_camif\n", __func__);
+
+	} else {	/* There is no buffer in incomming queue. */
+		ctrl->out->idxs.active.ctx = -1;
+		ctrl->out->idxs.active.idx = -1;
+		ctx->status = FIMC_STREAMON_IDLE;
+		ctrl->status = FIMC_STREAMON_IDLE;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		/* force full context reprogramming after a runtime suspend */
+		ctrl->out->last_ctx = -1;
+#endif
+	}
+
+	return wakeup;
+}
+
+/*
+ * IRQ-context completion handler for DMA overlay output (AUTO/MANUAL).
+ * In AUTO mode the finished destination buffer is pushed straight to the
+ * framebuffer window via s3cfb_direct_ioctl(), enabling the window on
+ * first use; then the next queued buffer (if any) is started.
+ *
+ * NOTE(review): the error paths return -EINVAL from a u32-returning
+ * function; the caller only compares against 1, so the effect is merely
+ * "no wakeup", but the signedness mismatch deserves cleanup -- confirm.
+ */
+static inline u32 fimc_irq_out_dma(struct fimc_control *ctrl,
+					  struct fimc_ctx *ctx)
+{
+	struct fimc_buf_set buf_set;
+	int idx = ctrl->out->idxs.active.idx;
+	int ret = -1, i, ctx_num, next;
+	int cfg;
+	u32 wakeup = 1;
+
+	/* A stop was requested: finalize stream-off and bail out. */
+	if (ctx->status == FIMC_READY_OFF
+			|| ctx->status == FIMC_STREAMOFF) {
+		ctrl->out->idxs.active.ctx = -1;
+		ctrl->out->idxs.active.idx = -1;
+		ctx->status = FIMC_STREAMOFF;
+		ctrl->status = FIMC_STREAMOFF;
+		return wakeup;
+	}
+
+	/* Attach done buffer to outgoing queue. */
+	ret = fimc_push_outq(ctrl, ctx, idx);
+	if (ret < 0)
+		fimc_err("Failed: fimc_push_outq\n");
+
+	if (ctx->overlay.mode == FIMC_OVLY_DMA_AUTO) {
+		/* flip the framebuffer window to the just-completed buffer */
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_ADDR,
+			(unsigned long)ctx->dst[idx].base[FIMC_ADDR_Y]);
+
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_SET_WIN_ADDR) fail\n");
+			return -EINVAL;
+		}
+
+		if (ctrl->fb.is_enable == 0) {
+			ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_ON,
+							(unsigned long)NULL);
+			if (ret < 0) {
+				fimc_err("direct_ioctl(S3CFB_SET_WIN_ON)"\
+						" fail\n");
+				return -EINVAL;
+			}
+
+			ctrl->fb.is_enable = 1;
+		}
+	}
+
+	/* Detach buffer from incomming queue. */
+	ret = fimc_pop_inq(ctrl, &ctx_num, &next);
+	if (ret == 0) {	/* There is a buffer in incomming queue. */
+		ctx = &ctrl->out->ctx[ctx_num];
+		fimc_outdev_set_src_addr(ctrl, ctx->src[next].base);
+
+		memset(&buf_set, 0x00, sizeof(buf_set));
+		buf_set.base[FIMC_ADDR_Y] = ctx->dst[next].base[FIMC_ADDR_Y];
+
+		/* program the next destination into every enabled HW slot */
+		cfg = fimc_hwget_output_buf_sequence(ctrl);
+
+		for (i = 0; i < FIMC_PHYBUFS; i++) {
+			if (check_bit(cfg, i))
+				fimc_hwset_output_address(ctrl, &buf_set, i);
+		}
+
+		ctrl->out->idxs.active.ctx = ctx_num;
+		ctrl->out->idxs.active.idx = next;
+
+		ctx->status = FIMC_STREAMON;
+		ctrl->status = FIMC_STREAMON;
+
+		ret = fimc_outdev_start_camif(ctrl);
+		if (ret < 0)
+			fimc_err("Fail: fimc_start_camif\n");
+
+	} else {	/* There is no buffer in incomming queue. */
+		ctrl->out->idxs.active.ctx = -1;
+		ctrl->out->idxs.active.idx = -1;
+
+		ctx->status = FIMC_STREAMON_IDLE;
+		ctrl->status = FIMC_STREAMON_IDLE;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		/* force full context reprogramming after a runtime suspend */
+		ctrl->out->last_ctx = -1;
+#endif
+	}
+
+	return wakeup;
+}
+
+/*
+ * IRQ-context completion handler for FIFO (FIMD) output.  Buffers move
+ * through a three-slot pipeline (prev -> active -> next): the previous
+ * frame is retired to the outgoing queue, next is promoted to active, and
+ * a fresh buffer is popped from the incoming queue as the new next.
+ * Returns 1 only when a buffer was retired (i.e. a DQBUF can complete).
+ */
+static inline u32 fimc_irq_out_fimd(struct fimc_control *ctrl,
+					  struct fimc_ctx *ctx)
+{
+	struct fimc_idx prev;
+	int ret = -1, ctx_num, next;
+	u32 wakeup = 0;
+
+	/* Attach done buffer to outgoing queue. */
+	if (ctrl->out->idxs.prev.idx != -1) {
+		ret = fimc_push_outq(ctrl, ctx, ctrl->out->idxs.prev.idx);
+		if (ret < 0) {
+			fimc_err("Failed: fimc_push_outq\n");
+		} else {
+			ctrl->out->idxs.prev.ctx = -1;
+			ctrl->out->idxs.prev.idx = -1;
+			wakeup = 1; /* To wake up fimc_v4l2_dqbuf */
+		}
+	}
+
+	/* Update index structure. */
+	if (ctrl->out->idxs.next.idx != -1) {
+		ctrl->out->idxs.active.ctx = ctrl->out->idxs.next.ctx;
+		ctrl->out->idxs.active.idx = ctrl->out->idxs.next.idx;
+		ctrl->out->idxs.next.idx = -1;
+		ctrl->out->idxs.next.ctx = -1;
+	}
+
+	/* Detach buffer from incomming queue. */
+	ret = fimc_pop_inq(ctrl, &ctx_num, &next);
+	if (ret == 0) {	/* There is a buffer in incomming queue. */
+		prev.ctx = ctrl->out->idxs.active.ctx;
+		prev.idx = ctrl->out->idxs.active.idx;
+
+		ctrl->out->idxs.prev.ctx = prev.ctx;
+		ctrl->out->idxs.prev.idx = prev.idx;
+
+		ctrl->out->idxs.next.ctx = ctx_num;
+		ctrl->out->idxs.next.idx = next;
+
+		/* set source address */
+		fimc_outdev_set_src_addr(ctrl, ctx->src[next].base);
+	}
+
+	return wakeup;
+}
+
+/*
+ * Top-level output-path IRQ dispatch: clears the interrupt, validates the
+ * active context, and routes to the per-overlay-mode completion handler.
+ * Under runtime PM it also defers a pm_runtime_put to process context via
+ * the irq workqueue (one put per counted interrupt).
+ */
+static inline void fimc_irq_out(struct fimc_control *ctrl)
+{
+	struct fimc_ctx *ctx;
+	u32 wakeup = 1;
+	int ctx_num = ctrl->out->idxs.active.ctx;
+
+	/* Interrupt pendding clear */
+	fimc_hwset_clear_irq(ctrl);
+
+	/* check context num */
+	if (ctx_num < 0 || ctx_num >= FIMC_MAX_CTXS) {
+		fimc_err("fimc_irq_out: invalid ctx (ctx=%d)\n", ctx_num);
+		/* wake waiters anyway so they can re-check state */
+		wake_up(&ctrl->wq);
+		return;
+	}
+
+	ctx = &ctrl->out->ctx[ctx_num];
+
+	switch (ctx->overlay.mode) {
+	case FIMC_OVLY_NONE_SINGLE_BUF:
+		wakeup = fimc_irq_out_single_buf(ctrl, ctx);
+		break;
+	case FIMC_OVLY_NONE_MULTI_BUF:
+		wakeup = fimc_irq_out_multi_buf(ctrl, ctx);
+		break;
+	case FIMC_OVLY_DMA_AUTO:		/* fall through */
+	case FIMC_OVLY_DMA_MANUAL:
+		wakeup = fimc_irq_out_dma(ctrl, ctx);
+		break;
+	case FIMC_OVLY_FIFO:
+		if (ctx->status != FIMC_READY_OFF)
+			wakeup = fimc_irq_out_fimd(ctrl, ctx);
+		break;
+	default:
+		fimc_err("[ctx=%d] fimc_irq_out: wrong overlay.mode (%d)\n",
+				ctx_num, ctx->overlay.mode);
+		break;
+	}
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	/* defer the runtime-PM put to s3c_fimc_irq_work (may sleep) */
+	atomic_inc((atomic_t *)&ctrl->irq_cnt);
+	queue_work(ctrl->fimc_irq_wq, &ctrl->work_struct);
+#endif
+
+	if (wakeup == 1)
+		wake_up(&ctrl->wq);
+}
+
+/* Population count: number of set bits in a frame-count sequence mask. */
+int fimc_hwget_number_of_bits(u32 framecnt_seq)
+{
+	u32 count;
+
+	for (count = 0; framecnt_seq != 0; framecnt_seq >>= 1) {
+		if (framecnt_seq & 0x1)
+			count++;
+	}
+
+	return count;
+}
+
+/*
+ * Append capture buffer @i to the outgoing (done) queue unless it is
+ * already there; the duplicate scan guards against the same frame being
+ * reported twice by the hardware.  Always returns 0.
+ */
+static int fimc_add_outgoing_queue(struct fimc_control *ctrl, int i)
+{
+	struct fimc_capinfo *cap = ctrl->cap;
+	struct fimc_buf_set *tmp_buf;
+	struct list_head *count;
+
+	spin_lock(&ctrl->outq_lock);
+
+	list_for_each(count, &cap->outgoing_q) {
+		tmp_buf = list_entry(count, struct fimc_buf_set, list);
+		if (tmp_buf->id == i) {
+			fimc_info1("%s: Exist id in outqueue\n", __func__);
+
+			spin_unlock(&ctrl->outq_lock);
+			return 0;
+		}
+	}
+	list_add_tail(&cap->bufs[i].list, &cap->outgoing_q);
+	spin_unlock(&ctrl->outq_lock);
+
+	return 0;
+}
+
+/*
+ * Capture-path IRQ handler: clears the interrupt, drops the frame on
+ * overflow, and on hw_ver >= 0x51 retires the just-written buffer (frame
+ * counters are 1-based, hence buf_index = pp - 1) into the outgoing queue.
+ * When only one usable buffer remains the capture engine is stopped to
+ * avoid overwriting undequeued frames.  Finally wakes DQBUF waiters --
+ * for interlaced TB input only after both fields of a frame arrived.
+ */
+static inline void fimc_irq_cap(struct fimc_control *ctrl)
+{
+	struct fimc_capinfo *cap = ctrl->cap;
+	int pp;
+	int buf_index;
+	int framecnt_seq;
+	int available_bufnum;
+
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+#ifdef DEBUG
+	static struct timeval curr_time, before_time;
+	if (ctrl->id == FIMC2) {
+		do_gettimeofday(&curr_time);
+		printk(KERN_INFO "%s : time : %ld\n", __func__,
+				curr_time.tv_usec - before_time.tv_usec);
+		before_time.tv_usec = curr_time.tv_usec;
+	}
+#endif
+	fimc_hwset_clear_irq(ctrl);
+	if (fimc_hwget_overflow_state(ctrl))
+		return;
+
+	if (pdata->hw_ver >= 0x51) {
+		pp = fimc_hwget_before_frame_count(ctrl);
+		/* trace the first few frames after (re)start for debugging */
+		if (cap->cnt < 20) {
+			printk(KERN_INFO "%s[%d], fimc%d, cnt[%d]\n", __func__,
+					pp, ctrl->id, cap->cnt);
+			cap->cnt++;
+		}
+		/* pp == 0: no completed frame recorded yet; with a single
+		 * buffer fall back to the present-frame counter, otherwise
+		 * wait for the next interrupt */
+		if (pp == 0 || cap->cnt == 1) {
+			if (ctrl->cap->nr_bufs == 1)
+				pp = fimc_hwget_present_frame_count(ctrl);
+			else
+				return;
+		}
+		buf_index = pp - 1;
+		fimc_add_outgoing_queue(ctrl, buf_index);
+		fimc_hwset_output_buf_sequence(ctrl, buf_index,
+				FIMC_FRAMECNT_SEQ_DISABLE);
+
+		framecnt_seq = fimc_hwget_output_buf_sequence(ctrl);
+		available_bufnum = fimc_hwget_number_of_bits(framecnt_seq);
+		fimc_info2("%s[%d] : framecnt_seq: %d, available_bufnum: %d\n",
+				__func__, ctrl->id, framecnt_seq, available_bufnum);
+
+		if (ctrl->status != FIMC_BUFFER_STOP) {
+			if (available_bufnum == 1 || ctrl->cap->nr_bufs == 1) {
+				cap->cnt=0;
+				ctrl->cap->lastirq = 0;
+				fimc_stop_capture(ctrl);
+				ctrl->status = FIMC_BUFFER_STOP;
+				printk(KERN_INFO "fimc_irq_cap[%d] available_bufnum = %d\n",
+					ctrl->id, available_bufnum);
+			}
+		} else {
+			fimc_info1("%s : Aleady fimc stop\n", __func__);
+		}
+	} else
+		/* older IP: derive the done buffer from the frame counter */
+		pp = ((fimc_hwget_frame_count(ctrl) + 2) % 4);
+
+	if (cap->fmt.field == V4L2_FIELD_INTERLACED_TB) {
+		/* odd value of pp means one frame is made with top/bottom */
+		if (pp & 0x1) {
+			cap->irq = 1;
+			wake_up(&ctrl->wq);
+		}
+	} else {
+		cap->irq = 1;
+		wake_up(&ctrl->wq);
+	}
+}
+
+/*
+ * Shared interrupt entry point: dispatch to the capture or output handler
+ * based on which mode the controller is in.  A spurious interrupt with
+ * neither mode active is cleared with the gate clock temporarily enabled
+ * (register access requires the clock).
+ */
+static irqreturn_t fimc_irq(int irq, void *dev_id)
+{
+	struct fimc_control *ctrl = (struct fimc_control *) dev_id;
+	struct s3c_platform_fimc *pdata;
+
+	if (ctrl->cap)
+		fimc_irq_cap(ctrl);
+	else if (ctrl->out)
+		fimc_irq_out(ctrl);
+	else {
+		printk(KERN_ERR "%s this message must not be shown!!!"
+				" fimc%d\n", __func__, ctrl->id);
+		pdata = to_fimc_plat(ctrl->dev);
+		pdata->clk_on(to_platform_device(ctrl->dev),
+				&ctrl->clk);
+		fimc_hwset_clear_irq(ctrl);
+		pdata->clk_off(to_platform_device(ctrl->dev),
+				&ctrl->clk);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Per-device setup at probe time: initializes the fimc_control for
+ * pdev->id, reserves CMA memory (except FIMC2 when ION is enabled), maps
+ * registers, installs the IRQ handler, and reparents/clocks the FIMC
+ * source clock.  Returns the initialized control, or NULL on failure.
+ *
+ * NOTE(review): error paths after request_mem_region() never call
+ * release_mem_region(), and the IRQ is not freed on clock failures --
+ * confirm and add unwind handling.
+ */
+static
+struct fimc_control *fimc_register_controller(struct platform_device *pdev)
+{
+	struct s3c_platform_fimc *pdata;
+	struct fimc_control *ctrl;
+	struct resource *res;
+	int id, err;
+	struct cma_info mem_info;
+	struct clk *sclk_fimc_lclk = NULL;
+	struct clk *fimc_src_clk = NULL;
+
+	id = pdev->id;
+	pdata = to_fimc_plat(&pdev->dev);
+
+	ctrl = get_fimc_ctrl(id);
+	ctrl->id = id;
+	ctrl->dev = &pdev->dev;
+	ctrl->vd = &fimc_video_device[id];
+	ctrl->vd->minor = id;
+	ctrl->log = FIMC_LOG_DEFAULT;
+	ctrl->power_status = FIMC_POWER_OFF;
+
+	/* CMA */
+#ifdef CONFIG_ION_EXYNOS
+	/* FIMC2 gets its buffers via ION instead of a CMA carve-out */
+	if (id != 2) {
+#endif
+	sprintf(ctrl->cma_name, "%s%d", FIMC_CMA_NAME, ctrl->id);
+	err = cma_info(&mem_info, ctrl->dev, 0);
+	fimc_info1("%s : [cma_info] start_addr : 0x%x, end_addr	: 0x%x, "
+			"total_size : 0x%x, free_size : 0x%x\n",
+			__func__, mem_info.lower_bound,	mem_info.upper_bound,
+			mem_info.total_size, mem_info.free_size);
+	if (err) {
+		fimc_err("%s: get cma info failed\n", __func__);
+		ctrl->mem.size = 0;
+		ctrl->mem.base = 0;
+	} else {
+		ctrl->mem.size = mem_info.total_size;
+		/* NOTE(review): cma_alloc result is not checked for failure */
+		ctrl->mem.base = (dma_addr_t)cma_alloc
+			(ctrl->dev, ctrl->cma_name, (size_t)ctrl->mem.size, 0);
+	}
+#ifdef CONFIG_ION_EXYNOS
+	}
+#endif
+	printk(KERN_INFO "ctrl->mem.size = 0x%x\n", ctrl->mem.size);
+	printk(KERN_INFO "ctrl->mem.base = 0x%x\n", ctrl->mem.base);
+
+	ctrl->mem.curr = ctrl->mem.base;
+	ctrl->status = FIMC_STREAMOFF;
+
+	/* per-IP-revision scaler/window limits */
+	switch (pdata->hw_ver) {
+	case 0x40:
+		ctrl->limit = &fimc40_limits[id];
+		break;
+	case 0x43:
+	case 0x45:
+		ctrl->limit = &fimc43_limits[id];
+		break;
+	case 0x50:
+		ctrl->limit = &fimc50_limits[id];
+		break;
+	case 0x51:
+		ctrl->limit = &fimc51_limits[id];
+		break;
+	}
+
+	sprintf(ctrl->name, "%s%d", FIMC_NAME, id);
+	strcpy(ctrl->vd->name, ctrl->name);
+
+	atomic_set(&ctrl->in_use, 0);
+	mutex_init(&ctrl->lock);
+	mutex_init(&ctrl->v4l2_lock);
+	spin_lock_init(&ctrl->outq_lock);
+	init_waitqueue_head(&ctrl->wq);
+
+	/* get resource for io memory */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		fimc_err("%s: failed to get io memory region\n", __func__);
+		return NULL;
+	}
+
+	/* request mem region */
+	res = request_mem_region(res->start, res->end - res->start + 1,
+			pdev->name);
+	if (!res) {
+		fimc_err("%s: failed to request io memory region\n", __func__);
+		return NULL;
+	}
+
+	/* ioremap for register block */
+	ctrl->regs = ioremap(res->start, res->end - res->start + 1);
+	if (!ctrl->regs) {
+		fimc_err("%s: failed to remap io region\n", __func__);
+		return NULL;
+	}
+
+	fimc_dev->backup_regs[id] = ctrl->regs;
+	/* irq */
+	ctrl->irq = platform_get_irq(pdev, 0);
+	if (request_irq(ctrl->irq, fimc_irq, IRQF_DISABLED, ctrl->name, ctrl))
+		fimc_err("%s: request_irq failed\n", __func__);
+
+	/* pick the MPLL-derived parent; the mux name differs on 4210 */
+	if (soc_is_exynos4210())
+		fimc_src_clk = clk_get(&pdev->dev, "mout_mpll");
+	else
+		fimc_src_clk = clk_get(&pdev->dev, "mout_mpll_user");
+
+	if (IS_ERR(fimc_src_clk)) {
+		dev_err(&pdev->dev, "failed to get parent clock\n");
+		iounmap(ctrl->regs);
+		return NULL;
+	}
+
+	sclk_fimc_lclk = clk_get(&pdev->dev, FIMC_CORE_CLK);
+	if (IS_ERR(sclk_fimc_lclk)) {
+		dev_err(&pdev->dev, "failed to get sclk_fimc_lclk\n");
+		iounmap(ctrl->regs);
+		clk_put(fimc_src_clk);
+		return NULL;
+	}
+
+	if (clk_set_parent(sclk_fimc_lclk, fimc_src_clk)) {
+		dev_err(&pdev->dev, "unable to set parent %s of clock %s.\n",
+				fimc_src_clk->name, sclk_fimc_lclk->name);
+		iounmap(ctrl->regs);
+		clk_put(sclk_fimc_lclk);
+		clk_put(fimc_src_clk);
+		return NULL;
+	}
+	clk_set_rate(sclk_fimc_lclk, FIMC_CLK_RATE);
+	clk_put(sclk_fimc_lclk);
+	clk_put(fimc_src_clk);
+
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+	/* without runtime PM the block is powered now; reset it here */
+	fimc_hwset_reset(ctrl);
+#endif
+
+	return ctrl;
+}
+
+/*
+ * Tear down a controller at remove time: release the IRQ, mutexes, gate
+ * clock and register mapping, then clear the control struct.
+ * NOTE(review): the mem region from request_mem_region() is never
+ * released and the CMA allocation is not returned -- confirm.
+ */
+static int fimc_unregister_controller(struct platform_device *pdev)
+{
+	struct s3c_platform_fimc *pdata;
+	struct fimc_control *ctrl;
+	int id = pdev->id;
+
+	pdata = to_fimc_plat(&pdev->dev);
+	ctrl = get_fimc_ctrl(id);
+
+	free_irq(ctrl->irq, ctrl);
+	mutex_destroy(&ctrl->lock);
+	mutex_destroy(&ctrl->v4l2_lock);
+
+	if (pdata->clk_off)
+		pdata->clk_off(pdev, &ctrl->clk);
+
+	iounmap(ctrl->regs);
+	memset(ctrl, 0, sizeof(*ctrl));
+
+	return 0;
+}
+
+/*
+ * VMA open callback: bump the mapped-count of the source buffer whose
+ * identity is packed into vm_private_data as id*0x100 + ctx*0x10 + idx.
+ */
+static void fimc_mmap_open(struct vm_area_struct *vma)
+{
+	struct fimc_global *g = fimc_dev;
+	int key = (int)vma->vm_private_data;
+	u32 dev_id = key / 0x100;
+	u32 ctx_id = (key - (dev_id * 0x100)) / 0x10;
+	u32 buf_id = key % 0x10;
+
+	BUG_ON(dev_id >= FIMC_DEVICES);
+	BUG_ON(ctx_id >= FIMC_MAX_CTXS);
+	BUG_ON(buf_id >= FIMC_OUTBUFS);
+
+	atomic_inc(&g->ctrl[dev_id].out->ctx[ctx_id].src[buf_id].mapped_cnt);
+}
+
+/*
+ * VMA close callback: drop the mapped-count taken in fimc_mmap_open(),
+ * decoding the same packed id/ctx/idx key from vm_private_data.
+ */
+static void fimc_mmap_close(struct vm_area_struct *vma)
+{
+	struct fimc_global *g = fimc_dev;
+	int key = (int)vma->vm_private_data;
+	u32 dev_id = key / 0x100;
+	u32 ctx_id = (key - (dev_id * 0x100)) / 0x10;
+	u32 buf_id = key % 0x10;
+
+	BUG_ON(dev_id >= FIMC_DEVICES);
+	BUG_ON(ctx_id >= FIMC_MAX_CTXS);
+	BUG_ON(buf_id >= FIMC_OUTBUFS);
+
+	atomic_dec(&g->ctrl[dev_id].out->ctx[ctx_id].src[buf_id].mapped_cnt);
+}
+
+/* VMA callbacks that keep per-buffer mapping refcounts in sync. */
+static struct vm_operations_struct fimc_mmap_ops = {
+	.open = fimc_mmap_open,
+	.close = fimc_mmap_close,
+};
+
+/*
+ * Map an output-mode SOURCE buffer (selected by vm_pgoff) into userspace
+ * as uncached memory via remap_pfn_range().  The buffer identity is packed
+ * into vm_private_data so the vm_ops refcounting can find it, and the
+ * buffer is flagged V4L2_BUF_FLAG_MAPPED on success.
+ */
+static inline
+int fimc_mmap_out_src(struct file *filp, struct vm_area_struct *vma)
+{
+	struct fimc_prv_data *prv_data =
+				(struct fimc_prv_data *)filp->private_data;
+	struct fimc_control *ctrl = prv_data->ctrl;
+	int ctx_id = prv_data->ctx_id;
+	struct fimc_ctx *ctx = &ctrl->out->ctx[ctx_id];
+	u32 start_phy_addr = 0;
+	u32 size = vma->vm_end - vma->vm_start;
+	u32 pfn, idx = vma->vm_pgoff;
+	u32 buf_length = 0;
+	int pri_data = 0;
+
+	/* the Y/Cb/Cr planes are laid out contiguously in one allocation */
+	buf_length = PAGE_ALIGN(ctx->src[idx].length[FIMC_ADDR_Y] +
+			ctx->src[idx].length[FIMC_ADDR_CB] +
+			ctx->src[idx].length[FIMC_ADDR_CR]);
+	if (size > PAGE_ALIGN(buf_length)) {
+		fimc_err("Requested mmap size is too big\n");
+		return -EINVAL;
+	}
+
+	/* pack id/ctx/idx for fimc_mmap_open()/close() */
+	pri_data = (ctrl->id * 0x100) + (ctx_id * 0x10) + idx;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_flags |= VM_RESERVED;
+	vma->vm_ops = &fimc_mmap_ops;
+	vma->vm_private_data = (void *)pri_data;
+
+	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+		fimc_err("writable mapping must be shared\n");
+		return -EINVAL;
+	}
+
+	start_phy_addr = ctx->src[idx].base[FIMC_ADDR_Y];
+	pfn = __phys_to_pfn(start_phy_addr);
+
+	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
+		fimc_err("mmap fail\n");
+		return -EINVAL;
+	}
+
+	/* take the initial mapped-count reference by hand */
+	vma->vm_ops->open(vma);
+
+	ctx->src[idx].flags |= V4L2_BUF_FLAG_MAPPED;
+
+	return 0;
+}
+
+/*
+ * Map output-mode DESTINATION buffer @idx into userspace as uncached
+ * memory.  Falls back to the current bump-allocator position when the
+ * buffer has not been assigned a base address yet.  Returns the
+ * remap_pfn_range() result.
+ */
+static inline
+int fimc_mmap_out_dst(struct file *filp, struct vm_area_struct *vma, u32 idx)
+{
+	struct fimc_prv_data *prv_data =
+				(struct fimc_prv_data *)filp->private_data;
+	struct fimc_control *ctrl = prv_data->ctrl;
+	int ctx_id = prv_data->ctx_id;
+	unsigned long pfn = 0, size;
+	int ret = 0;
+
+	size = vma->vm_end - vma->vm_start;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_flags |= VM_RESERVED;
+
+	if (ctrl->out->ctx[ctx_id].dst[idx].base[0])
+		pfn = __phys_to_pfn(ctrl->out->ctx[ctx_id].dst[idx].base[0]);
+	else
+		pfn = __phys_to_pfn(ctrl->mem.curr);
+
+	ret = remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
+	if (ret != 0)
+		fimc_err("remap_pfn_range fail.\n");
+
+	return ret;
+}
+
+/*
+ * Route an output-mode mmap: a non-negative overlay request index selects
+ * a destination buffer mapping, FIMC_MMAP_IDX selects the source buffers,
+ * anything else is rejected with -1.
+ */
+static inline int fimc_mmap_out(struct file *filp, struct vm_area_struct *vma)
+{
+	struct fimc_prv_data *prv_data =
+				(struct fimc_prv_data *)filp->private_data;
+	struct fimc_control *ctrl = prv_data->ctrl;
+	int ctx_id = prv_data->ctx_id;
+	int req_idx = ctrl->out->ctx[ctx_id].overlay.req_idx;
+
+	if (req_idx >= 0)
+		return fimc_mmap_out_dst(filp, vma, req_idx);
+
+	if (req_idx == FIMC_MMAP_IDX)
+		return fimc_mmap_out_src(filp, vma);
+
+	return -1;
+}
+
+/*
+ * Map capture buffer vm_pgoff into userspace.  The mapping stays cached
+ * only for HDR frames or when the capture context requested cacheable
+ * buffers; otherwise it is made uncached.
+ */
+static inline int fimc_mmap_cap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct fimc_prv_data *prv_data =
+				(struct fimc_prv_data *)filp->private_data;
+	struct fimc_control *ctrl = prv_data->ctrl;
+	u32 size = vma->vm_end - vma->vm_start;
+	u32 pfn, idx = vma->vm_pgoff;
+
+	if (ctrl->cap->fmt.priv != V4L2_PIX_FMT_MODE_HDR && !ctrl->cap->cacheable)
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_flags |= VM_RESERVED;
+
+	/*
+	 * page frame number of the address for a source frame
+	 * to be stored at.
+	 */
+	pfn = __phys_to_pfn(ctrl->cap->bufs[idx].base[0]);
+
+	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
+		fimc_err("%s: writable mapping must be shared\n", __func__);
+		return -EINVAL;
+	}
+
+	if (remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot)) {
+		fimc_err("%s: mmap fail\n", __func__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* mmap entry point: capture and output modes use different buffer maps. */
+static int fimc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct fimc_prv_data *prv_data =
+				(struct fimc_prv_data *)filp->private_data;
+	struct fimc_control *ctrl = prv_data->ctrl;
+
+	return ctrl->cap ? fimc_mmap_cap(filp, vma)
+			 : fimc_mmap_out(filp, vma);
+}
+
+/*
+ * poll() for the capture path: readable as soon as a finished frame sits
+ * in the outgoing queue.  As a watchdog, fifteen consecutive empty polls
+ * trigger a software reset of the block.  Output mode always reports 0.
+ */
+static u32 fimc_poll(struct file *filp, poll_table *wait)
+{
+	struct fimc_prv_data *prv_data =
+				(struct fimc_prv_data *)filp->private_data;
+	struct fimc_control *ctrl = prv_data->ctrl;
+	struct fimc_capinfo *cap = ctrl->cap;
+	u32 mask = 0;
+
+	if (!cap)
+		return 0;
+
+	if (!list_empty(&cap->outgoing_q)) {
+		mask = POLLIN | POLLRDNORM;
+		cap->poll_cnt = 0;
+	} else {
+		poll_wait(filp, &ctrl->wq, wait);
+		/* watchdog: too many fruitless polls -> reset the hardware */
+		if (++cap->poll_cnt > 15)
+			fimc_hwset_sw_reset(ctrl);
+	}
+
+	return mask;
+}
+
+/* read() stub: FIMC exchanges frame data via mmap, never read(). */
+static
+ssize_t fimc_read(struct file *filp, char *buf, size_t count, loff_t *pos)
+{
+	ssize_t nread = 0;
+
+	return nread;
+}
+
+/* write() stub: FIMC exchanges frame data via mmap, never write(). */
+static
+ssize_t fimc_write(struct file *filp, const char *b, size_t c, loff_t *offset)
+{
+	ssize_t nwritten = 0;
+
+	return nwritten;
+}
+
+/*
+ * Fold a rotation (0/90/180/270 degrees) plus X/Y flip request into the
+ * hardware FIMC_ROT/FIMC_XFLIP/FIMC_YFLIP encoding.  A 180/270 rotation
+ * is encoded with both flip bits set, and a user flip then cancels the
+ * corresponding bit -- i.e. the requested flips XOR into the rotation's
+ * base encoding.  Unsupported rotation values yield 0.
+ */
+u32 fimc_mapping_rot_flip(u32 rot, u32 flip)
+{
+	u32 base;
+
+	switch (rot) {
+	case 0:
+		base = 0;
+		break;
+	case 90:
+		base = FIMC_ROT;
+		break;
+	case 180:
+		base = FIMC_XFLIP | FIMC_YFLIP;
+		break;
+	case 270:
+		base = FIMC_XFLIP | FIMC_YFLIP | FIMC_ROT;
+		break;
+	default:
+		return 0;
+	}
+
+	return base ^ (flip & (FIMC_XFLIP | FIMC_YFLIP));
+}
+
+/*
+ * Choose the largest power-of-two pre-scale ratio (1..32) such that
+ * src >= tar * ratio, storing it in *ratio with *shift = log2(ratio).
+ * Returns -EINVAL when src >= 64 * tar (beyond the hardware's range),
+ * otherwise 0.
+ */
+int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
+{
+	u32 sh;
+
+	if (src >= (tar << 6))
+		return -EINVAL;
+
+	/* scan shifts 5..1; fall through to 0 (ratio 1) when none fit */
+	for (sh = 5; sh > 0; sh--) {
+		if (src >= (tar << sh))
+			break;
+	}
+
+	*ratio = 1 << sh;
+	*shift = sh;
+
+	return 0;
+}
+
+/*
+ * Compute the Y- and CbCr-plane sizes (in bytes) of an NV12-tiled frame
+ * after rounding the resolution up to multiples of 16.  Tiles are 8 KiB
+ * (hence the final << 13); the (vres & 0x20) branch handles layouts whose
+ * tile-row count is odd.  Exact bit-field meanings follow the Samsung
+ * NV12T addressing scheme -- presumably matching the MFC/FIMC tile
+ * hardware; confirm against the SoC manual before modifying.
+ */
+void fimc_get_nv12t_size(int img_hres, int img_vres,
+				int *y_size, int *cb_size)
+{
+	int remain;
+	int y_hres_byte, y_vres_byte;
+	int cb_hres_byte, cb_vres_byte;
+	int y_hres_roundup, y_vres_roundup;
+	int cb_hres_roundup, cb_vres_roundup;
+
+	/* to make 'img_hres and img_vres' be 16 multiple */
+	remain = img_hres % 16;
+	if (remain != 0) {
+		remain = 16 - remain;
+		img_hres = img_hres + remain;
+	}
+	remain = img_vres % 16;
+	if (remain != 0) {
+		remain = 16 - remain;
+		img_vres = img_vres + remain;
+	}
+
+	/* NOTE(review): these two assignments are dead -- both variables are
+	 * overwritten below before use */
+	cb_hres_byte = img_hres;
+	cb_vres_byte = img_vres;
+
+	y_hres_byte = img_hres - 1;
+	y_vres_byte = img_vres - 1;
+	y_hres_roundup = ((y_hres_byte >> 4) >> 3) + 1;
+	y_vres_roundup = ((y_vres_byte >> 4) >> 2) + 1;
+	if ((y_vres_byte & 0x20) == 0) {
+		y_hres_byte = y_hres_byte & 0x7f00;
+		y_hres_byte = y_hres_byte >> 8;
+		y_hres_byte = y_hres_byte & 0x7f;
+
+		y_vres_byte = y_vres_byte & 0x7fc0;
+		y_vres_byte = y_vres_byte >> 6;
+		y_vres_byte = y_vres_byte & 0x1ff;
+
+		*y_size = y_hres_byte +\
+			(y_vres_byte * y_hres_roundup) + 1;
+	} else {
+		*y_size = y_hres_roundup * y_vres_roundup;
+	}
+
+	/* scale tile count to bytes: one tile = 8 KiB */
+	*y_size = *(y_size) << 13;
+
+	/* chroma plane covers half the vertical resolution */
+	cb_hres_byte = img_hres - 1;
+	cb_vres_byte = (img_vres >> 1) - 1;
+	cb_hres_roundup = ((cb_hres_byte >> 4) >> 3) + 1;
+	cb_vres_roundup = ((cb_vres_byte >> 4) >> 2) + 1;
+	if ((cb_vres_byte & 0x20) == 0) {
+		cb_hres_byte = cb_hres_byte & 0x7f00;
+		cb_hres_byte = cb_hres_byte >> 8;
+		cb_hres_byte = cb_hres_byte & 0x7f;
+
+		cb_vres_byte = cb_vres_byte & 0x7fc0;
+		cb_vres_byte = cb_vres_byte >> 6;
+		cb_vres_byte = cb_vres_byte & 0x1ff;
+
+		*cb_size = cb_hres_byte + (cb_vres_byte * cb_hres_roundup) + 1;
+	} else {
+		*cb_size = cb_hres_roundup * cb_vres_roundup;
+	}
+	*cb_size = (*cb_size) << 13;
+
+}
+
+/*
+ * open() for a FIMC video node.  Takes one in_use reference, allocates the
+ * per-open private data, and -- for the first opener only -- powers up and
+ * resets the block (when not runtime-PM managed) and caches the LCD
+ * geometry needed by FIFO output.  For output mode a free context slot is
+ * claimed; otherwise the context id mirrors the open count.
+ *
+ * Fix vs. original: failures of the s3cfb geometry ioctls jumped straight
+ * to resource_busy, leaking prv_data and the in_use reference; they now
+ * unwind through fb_err -> kzalloc_err.
+ */
+static int fimc_open(struct file *filp)
+{
+	struct fimc_control *ctrl;
+	struct s3c_platform_fimc *pdata;
+	struct fimc_prv_data *prv_data;
+	int in_use;
+	int ret;
+	int i;
+
+	ctrl = video_get_drvdata(video_devdata(filp));
+	pdata = to_fimc_plat(ctrl->dev);
+
+	mutex_lock(&ctrl->lock);
+
+	in_use = atomic_read(&ctrl->in_use);
+	/* NOTE(review): '>' admits FIMC_MAX_CTXS + 1 opens; '>=' may be the
+	 * intent -- behavior kept as-is pending confirmation */
+	if (in_use > FIMC_MAX_CTXS) {
+		ret = -EBUSY;
+		goto resource_busy;
+	} else {
+		atomic_inc(&ctrl->in_use);
+		fimc_warn("FIMC%d %d opened.\n",
+				ctrl->id, atomic_read(&ctrl->in_use));
+	}
+	in_use = atomic_read(&ctrl->in_use);
+
+	prv_data = kzalloc(sizeof(struct fimc_prv_data), GFP_KERNEL);
+	if (!prv_data) {
+		fimc_err("%s: not enough memory\n", __func__);
+		ret = -ENOMEM;
+		goto kzalloc_err;
+	}
+
+	if (in_use == 1) {
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+		/* no runtime PM: gate the clock on and reset the block now */
+		if (pdata->clk_on)
+			pdata->clk_on(to_platform_device(ctrl->dev),
+					&ctrl->clk);
+
+		if (pdata->hw_ver == 0x40)
+			fimc_hw_reset_camera(ctrl);
+
+		/* Apply things to interface register */
+		fimc_hwset_reset(ctrl);
+#endif
+		ctrl->fb.open_fifo = s3cfb_open_fifo;
+		ctrl->fb.close_fifo = s3cfb_close_fifo;
+
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_GET_LCD_WIDTH,
+					(unsigned long)&ctrl->fb.lcd_hres);
+		if (ret < 0) {
+			fimc_err("Fail: S3CFB_GET_LCD_WIDTH\n");
+			goto fb_err;
+		}
+
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_GET_LCD_HEIGHT,
+					(unsigned long)&ctrl->fb.lcd_vres);
+		if (ret < 0) {
+			fimc_err("Fail: S3CFB_GET_LCD_HEIGHT\n");
+			goto fb_err;
+		}
+
+		ctrl->mem.curr = ctrl->mem.base;
+		ctrl->status = FIMC_STREAMOFF;
+
+	}
+
+	prv_data->ctrl = ctrl;
+	if (prv_data->ctrl->out != NULL) {
+		/* output mode: claim the first unused context slot */
+		for (i = 0; i < FIMC_MAX_CTXS; i++)
+			if (prv_data->ctrl->out->ctx_used[i] == false) {
+				prv_data->ctx_id = i;
+				prv_data->ctrl->out->ctx_used[i] = true;
+				break;
+			}
+	} else
+		prv_data->ctx_id = in_use - 1;
+
+	filp->private_data = prv_data;
+
+	mutex_unlock(&ctrl->lock);
+
+	return 0;
+
+fb_err:
+	kfree(prv_data);
+
+kzalloc_err:
+	atomic_dec(&ctrl->in_use);
+
+resource_busy:
+	mutex_unlock(&ctrl->lock);
+	return ret;
+}
+
+/*
+ * release() for a FIMC video node.  Drops the in_use reference, stops any
+ * active capture or output streaming, frees queued buffers, returns the
+ * runtime-PM reference on last close, and disables the framebuffer window
+ * if this instance enabled it.
+ *
+ * Fix vs. original: ctx was computed from ctrl->out->ctx[] before
+ * ctrl->out was checked for NULL, which is undefined-behavior pointer
+ * arithmetic on a NULL pointer whenever the node was opened for capture.
+ * The lookup now happens only inside the output-mode branch.
+ */
+static int fimc_release(struct file *filp)
+{
+	struct fimc_prv_data *prv_data =
+				(struct fimc_prv_data *)filp->private_data;
+	struct fimc_control *ctrl = prv_data->ctrl;
+	struct fimc_capinfo *cap;
+
+	int ctx_id = prv_data->ctx_id;
+	struct s3c_platform_fimc *pdata;
+	struct fimc_overlay_buf *buf;
+	struct mm_struct *mm = current->mm;
+	struct fimc_ctx *ctx = NULL;
+	int ret = 0, i;
+
+	pdata = to_fimc_plat(ctrl->dev);
+
+	atomic_dec(&ctrl->in_use);
+
+	if (ctrl->cap && (ctrl->status != FIMC_STREAMOFF))
+		fimc_streamoff_capture((void *)ctrl);
+
+	/* FIXME: turning off actual working camera */
+	if (ctrl->cam && ctrl->id != FIMC2) {
+		/* Unload the subdev (camera sensor) module,
+		 * reset related status flags */
+		fimc_release_subdev(ctrl);
+	}
+
+	if (atomic_read(&ctrl->in_use) == 0) {
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+		if (pdata->clk_off) {
+			pdata->clk_off(to_platform_device(ctrl->dev),
+					&ctrl->clk);
+			ctrl->power_status = FIMC_POWER_OFF;
+		}
+#endif
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+/* #ifdef SYSMMU_FIMC */
+		if (ctrl->power_status == FIMC_POWER_ON) {
+			pm_runtime_put_sync(ctrl->dev);
+		}
+/* #endif */
+#endif
+
+	}
+	if (ctrl->out) {
+		/* safe only here: ctrl->out is known non-NULL */
+		ctx = &ctrl->out->ctx[ctx_id];
+
+		if (ctx->status != FIMC_STREAMOFF) {
+			ret = fimc_outdev_stop_streaming(ctrl, ctx);
+			if (ret < 0) {
+				fimc_err("Fail: fimc_stop_streaming\n");
+				return -EINVAL;
+			}
+
+			ret = fimc_init_in_queue(ctrl, ctx);
+			if (ret < 0) {
+				fimc_err("Fail: fimc_init_in_queue\n");
+				return -EINVAL;
+			}
+
+			ret = fimc_init_out_queue(ctrl, ctx);
+			if (ret < 0) {
+				fimc_err("Fail: fimc_init_out_queue\n");
+				return -EINVAL;
+			}
+
+			/* Make all buffers DQUEUED state. */
+			for (i = 0; i < FIMC_OUTBUFS; i++) {
+				ctx->src[i].state = VIDEOBUF_IDLE;
+				ctx->src[i].flags = V4L2_BUF_FLAG_MAPPED;
+			}
+
+			if (ctx->overlay.mode == FIMC_OVLY_DMA_AUTO) {
+				/* rewind the bump allocator and drop the
+				 * auto-allocated destination buffers */
+				ctrl->mem.curr = ctx->dst[0].base[FIMC_ADDR_Y];
+
+				for (i = 0; i < FIMC_OUTBUFS; i++) {
+					ctx->dst[i].base[FIMC_ADDR_Y] = 0;
+					ctx->dst[i].length[FIMC_ADDR_Y] = 0;
+
+					ctx->dst[i].base[FIMC_ADDR_CB] = 0;
+					ctx->dst[i].length[FIMC_ADDR_CB] = 0;
+
+					ctx->dst[i].base[FIMC_ADDR_CR] = 0;
+					ctx->dst[i].length[FIMC_ADDR_CR] = 0;
+				}
+			}
+
+			ctx->status = FIMC_STREAMOFF;
+		}
+
+		ctx->is_requested = 0;
+		buf = &ctx->overlay.buf;
+		for (i = 0; i < FIMC_OUTBUFS; i++) {
+			if (buf->vir_addr[i]) {
+				ret = do_munmap(mm, buf->vir_addr[i],
+						buf->size[i]);
+				if (ret < 0)
+					fimc_err("%s: do_munmap fail\n",
+							__func__);
+			}
+		}
+
+		/* reset inq & outq of context */
+		for (i = 0; i < FIMC_OUTBUFS; i++) {
+			ctx->inq[i] = -1;
+			ctx->outq[i] = -1;
+		}
+
+		if (atomic_read(&ctrl->in_use) == 0) {
+			ctrl->status = FIMC_STREAMOFF;
+			fimc_outdev_init_idxs(ctrl);
+
+			ctrl->mem.curr = ctrl->mem.base;
+
+			kfree(ctrl->out);
+			ctrl->out = NULL;
+
+			kfree(filp->private_data);
+			filp->private_data = NULL;
+		} else {
+			ctrl->out->ctx_used[ctx_id] = false;
+		}
+	}
+
+	if (ctrl->cap) {
+		cap = ctrl->cap;
+		ctrl->mem.curr = ctrl->mem.base;
+		kfree(filp->private_data);
+		filp->private_data = NULL;
+		if (pdata->hw_ver >= 0x51)
+			INIT_LIST_HEAD(&cap->outgoing_q);
+		for (i = 0; i < FIMC_CAPBUFS; i++) {
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 0);
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 1);
+			fimc_dma_free(ctrl, &ctrl->cap->bufs[i], 2);
+		}
+		kfree(ctrl->cap);
+		ctrl->cap = NULL;
+	}
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	/* ensure all deferred pm_runtime_put work has drained */
+	flush_workqueue(ctrl->fimc_irq_wq);
+#endif
+
+	/*
+	 * Close window for FIMC if window is enabled.
+	 */
+	if (ctrl->fb.is_enable == 1) {
+		fimc_warn("WIN_OFF for FIMC%d\n", ctrl->id);
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_OFF,
+						(unsigned long)NULL);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_SET_WIN_OFF) fail\n");
+			return -EINVAL;
+		}
+
+		ctrl->fb.is_enable = 0;
+	}
+
+	fimc_warn("FIMC%d %d released.\n",
+			ctrl->id, atomic_read(&ctrl->in_use));
+
+	return 0;
+}
+
+/* V4L2 file operations shared by every FIMC video node; ioctls are
+ * dispatched through video_ioctl2 into fimc_v4l2_ops. */
+static const struct v4l2_file_operations fimc_fops = {
+ .owner = THIS_MODULE,
+ .open = fimc_open,
+ .release = fimc_release,
+ .ioctl = video_ioctl2,
+ .read = fimc_read,
+ .write = fimc_write,
+ .mmap = fimc_mmap,
+ .poll = fimc_poll,
+};
+
+/* video_device .release callback: frees the device structure.
+ * NOTE(review): fimc_video_device below is a static array, yet this
+ * kfree()s the pointer -- confirm the registered nodes are heap copies
+ * before this callback can ever run on them. */
+static void fimc_vdev_release(struct video_device *vdev)
+{
+ kfree(vdev);
+}
+
+/* Per-instance video_device templates; all share fimc_fops and
+ * fimc_v4l2_ops.  The 4th instance exists only on EXYNOS4 SoCs. */
+struct video_device fimc_video_device[FIMC_DEVICES] = {
+ [0] = {
+ .fops = &fimc_fops,
+ .ioctl_ops = &fimc_v4l2_ops,
+ .release = fimc_vdev_release,
+ },
+ [1] = {
+ .fops = &fimc_fops,
+ .ioctl_ops = &fimc_v4l2_ops,
+ .release = fimc_vdev_release,
+ },
+ [2] = {
+ .fops = &fimc_fops,
+ .ioctl_ops = &fimc_v4l2_ops,
+ .release = fimc_vdev_release,
+ },
+#ifdef CONFIG_ARCH_EXYNOS4
+ [3] = {
+ .fops = &fimc_fops,
+ .ioctl_ops = &fimc_v4l2_ops,
+ .release = fimc_vdev_release,
+ },
+#endif
+};
+
+/*
+ * One-time global initialization: walk the platform camera table,
+ * wire each camera's MCLK to its source clock, and publish the cameras
+ * in fimc_dev.  Returns 0 on success or -EINVAL on any clock failure.
+ *
+ * FIX: the reference taken on srclk by clk_get() was never released --
+ * neither on the mclk clk_get() failure path nor after a successful
+ * clk_set_parent() -- leaking one srclk reference per camera.
+ */
+static int fimc_init_global(struct platform_device *pdev)
+{
+ struct fimc_control *ctrl;
+ struct s3c_platform_fimc *pdata;
+ struct s3c_platform_camera *cam;
+ struct clk *srclk;
+ int id, i;
+
+ pdata = to_fimc_plat(&pdev->dev);
+ id = pdev->id;
+ ctrl = get_fimc_ctrl(id);
+
+ /* Registering external camera modules. re-arrange order to be sure */
+ for (i = 0; i < FIMC_MAXCAMS; i++) {
+ cam = pdata->camera[i];
+ if (!cam)
+ break;
+ /* WriteBack doesn't need clock setting */
+ if ((cam->id == CAMERA_WB) || (cam->id == CAMERA_WB_B)) {
+ fimc_dev->camera[i] = cam;
+ fimc_dev->camera_isvalid[i] = 1;
+ fimc_dev->camera[i]->initialized = 0;
+ continue;
+ }
+
+ /* source clk for MCLK*/
+ srclk = clk_get(&pdev->dev, cam->srclk_name);
+ if (IS_ERR(srclk)) {
+ fimc_err("%s: failed to get srclk source\n", __func__);
+ return -EINVAL;
+ }
+
+ /* mclk */
+ cam->clk = clk_get(&pdev->dev, cam->clk_name);
+ if (IS_ERR(cam->clk)) {
+ fimc_err("%s: failed to get mclk source\n", __func__);
+ clk_put(srclk); /* FIX: drop the srclk lookup ref */
+ return -EINVAL;
+ }
+
+ if (clk_set_parent(cam->clk, srclk)) {
+ dev_err(&pdev->dev, "unable to set parent %s of clock %s.\n",
+ srclk->name, cam->clk->name);
+ clk_put(srclk);
+ clk_put(cam->clk);
+ return -EINVAL;
+ }
+
+ /* FIX: the parent is latched in the mclk now; release the
+ * lookup reference so the refcount stays balanced. */
+ clk_put(srclk);
+
+ /* Assign camera device to fimc */
+ fimc_dev->camera[i] = cam;
+ fimc_dev->camera_isvalid[i] = 1;
+ fimc_dev->camera[i]->initialized = 0;
+ }
+
+ fimc_dev->mclk_status = CAM_MCLK_OFF;
+ fimc_dev->active_camera = -1;
+ fimc_dev->initialized = 1;
+
+ return 0;
+}
+
+/* sysfs 'log_level' show handler: renders ctrl->log as a '|'-separated
+ * list of enabled flag names.  NOTE(review): strcat() onto buf assumes
+ * the sysfs core hands over a zeroed PAGE_SIZE buffer -- true for
+ * sysfs, but worth keeping in mind if this is ever reused. */
+static int fimc_show_log_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fimc_control *ctrl;
+ struct platform_device *pdev;
+ int id = -1;
+
+ char temp[150];
+
+ pdev = to_platform_device(dev);
+ id = pdev->id;
+ ctrl = get_fimc_ctrl(id);
+
+ sprintf(temp, "\t");
+ strcat(buf, temp);
+ if (ctrl->log & FIMC_LOG_DEBUG) {
+ sprintf(temp, "FIMC_LOG_DEBUG | ");
+ strcat(buf, temp);
+ }
+
+ if (ctrl->log & FIMC_LOG_INFO_L2) {
+ sprintf(temp, "FIMC_LOG_INFO_L2 | ");
+ strcat(buf, temp);
+ }
+
+ if (ctrl->log & FIMC_LOG_INFO_L1) {
+ sprintf(temp, "FIMC_LOG_INFO_L1 | ");
+ strcat(buf, temp);
+ }
+
+ if (ctrl->log & FIMC_LOG_WARN) {
+ sprintf(temp, "FIMC_LOG_WARN | ");
+ strcat(buf, temp);
+ }
+
+ if (ctrl->log & FIMC_LOG_ERR) {
+ sprintf(temp, "FIMC_LOG_ERR\n");
+ strcat(buf, temp);
+ }
+
+ return strlen(buf);
+}
+
+/*
+ * sysfs 'log_level' store handler: strips whitespace from the input,
+ * then enables every FIMC_LOG_* flag named in it.  Unknown input
+ * prints a usage summary.  Always consumes the whole write (len).
+ *
+ * FIX: the whitespace-stripping loop appended into msg[150] with no
+ * bound while sysfs input can be up to PAGE_SIZE bytes -- a stack
+ * buffer overflow.  The copy is now capped at sizeof(msg) - 1.
+ */
+static int fimc_store_log_level(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct fimc_control *ctrl;
+ struct platform_device *pdev;
+
+ const char *p = buf;
+ char msg[150] = {0, };
+ size_t msg_len = 0;
+ int id = -1;
+ u32 match = 0;
+
+ pdev = to_platform_device(dev);
+ id = pdev->id;
+ ctrl = get_fimc_ctrl(id);
+
+ /* Copy non-space characters, bounded to the msg buffer. */
+ while (*p != '\0' && msg_len < sizeof(msg) - 1) {
+ if (!isspace(*p))
+ msg[msg_len++] = *p;
+ p++;
+ }
+
+ ctrl->log = 0;
+ printk(KERN_INFO "FIMC.%d log level is set as below.\n", id);
+
+ if (strstr(msg, "FIMC_LOG_ERR") != NULL) {
+ ctrl->log |= FIMC_LOG_ERR;
+ match = 1;
+ printk(KERN_INFO "\tFIMC_LOG_ERR\n");
+ }
+
+ if (strstr(msg, "FIMC_LOG_WARN") != NULL) {
+ ctrl->log |= FIMC_LOG_WARN;
+ match = 1;
+ printk(KERN_INFO "\tFIMC_LOG_WARN\n");
+ }
+
+ if (strstr(msg, "FIMC_LOG_INFO_L1") != NULL) {
+ ctrl->log |= FIMC_LOG_INFO_L1;
+ match = 1;
+ printk(KERN_INFO "\tFIMC_LOG_INFO_L1\n");
+ }
+
+ if (strstr(msg, "FIMC_LOG_INFO_L2") != NULL) {
+ ctrl->log |= FIMC_LOG_INFO_L2;
+ match = 1;
+ printk(KERN_INFO "\tFIMC_LOG_INFO_L2\n");
+ }
+
+ if (strstr(msg, "FIMC_LOG_DEBUG") != NULL) {
+ ctrl->log |= FIMC_LOG_DEBUG;
+ match = 1;
+ printk(KERN_INFO "\tFIMC_LOG_DEBUG\n");
+ }
+
+ if (!match) {
+ printk(KERN_INFO "FIMC_LOG_ERR \t: Error condition.\n");
+ printk(KERN_INFO "FIMC_LOG_WARN \t: WARNING condition.\n");
+ printk(KERN_INFO "FIMC_LOG_INFO_L1 \t: V4L2 API without QBUF, DQBUF.\n");
+ printk(KERN_INFO "FIMC_LOG_INFO_L2 \t: V4L2 API QBUF, DQBUF.\n");
+ printk(KERN_INFO "FIMC_LOG_DEBUG \t: Queue status report.\n");
+ }
+
+ return len;
+}
+
+/* /sys/devices/.../log_level : read/write the per-FIMC log mask. */
+static DEVICE_ATTR(log_level, 0644, \
+ fimc_show_log_level,
+ fimc_store_log_level);
+
+/* sysfs 'range_mode' show handler: reports whether the controller is
+ * in narrow or wide range mode (anything other than NARROW prints
+ * WIDE).  Assumes the zeroed sysfs buffer, as in fimc_show_log_level. */
+static int fimc_show_range_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fimc_control *ctrl;
+ struct platform_device *pdev;
+ int id = -1;
+
+ char temp[150];
+
+ pdev = to_platform_device(dev);
+ id = pdev->id;
+ ctrl = get_fimc_ctrl(id);
+
+ sprintf(temp, "\t");
+ strcat(buf, temp);
+ if (ctrl->range == FIMC_RANGE_NARROW) {
+ sprintf(temp, "FIMC_RANGE_NARROW\n");
+ strcat(buf, temp);
+ } else {
+ sprintf(temp, "FIMC_RANGE_WIDE\n");
+ strcat(buf, temp);
+ }
+
+ return strlen(buf);
+}
+
+/*
+ * sysfs 'range_mode' store handler: selects FIMC_RANGE_WIDE or
+ * FIMC_RANGE_NARROW from the written keyword (NARROW wins if both are
+ * present).  NOTE(review): unmatched input leaves ctrl->range zeroed --
+ * confirm 0 is a meaningful range value, or consider keeping the old one.
+ *
+ * FIX: as in fimc_store_log_level, the whitespace-stripping loop could
+ * overflow msg[150] on a long sysfs write; the copy is now bounded.
+ */
+static int fimc_store_range_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct fimc_control *ctrl;
+ struct platform_device *pdev;
+
+ const char *p = buf;
+ char msg[150] = {0, };
+ size_t msg_len = 0;
+ int id = -1;
+ u32 match = 0;
+
+ pdev = to_platform_device(dev);
+ id = pdev->id;
+ ctrl = get_fimc_ctrl(id);
+
+ /* Copy non-space characters, bounded to the msg buffer. */
+ while (*p != '\0' && msg_len < sizeof(msg) - 1) {
+ if (!isspace(*p))
+ msg[msg_len++] = *p;
+ p++;
+ }
+
+ ctrl->range = 0;
+ printk(KERN_INFO "FIMC.%d range mode is set as below.\n", id);
+
+ if (strstr(msg, "FIMC_RANGE_WIDE") != NULL) {
+ ctrl->range = FIMC_RANGE_WIDE;
+ match = 1;
+ printk(KERN_INFO "\tFIMC_RANGE_WIDE\n");
+ }
+
+ if (strstr(msg, "FIMC_RANGE_NARROW") != NULL) {
+ ctrl->range = FIMC_RANGE_NARROW;
+ match = 1;
+ printk(KERN_INFO "\tFIMC_RANGE_NARROW\n");
+ }
+
+ return len;
+}
+
+/* /sys/devices/.../range_mode : read/write the FIMC range mode. */
+static DEVICE_ATTR(range_mode, 0644, \
+ fimc_show_range_mode,
+ fimc_store_range_mode);
+
+/*
+ * Platform probe: allocates the shared fimc_dev singleton on first
+ * call, registers the controller, the v4l2 device and the video node,
+ * creates the sysfs attributes and (with runtime PM) the IRQ workqueue.
+ *
+ * FIXES:
+ *  - 'buf' was used for the workqueue name without being declared,
+ *    which cannot compile under EXYNOS_DEV_PD + PM_RUNTIME; it is now
+ *    declared, and filled with a bounded snprintf().
+ *  - error paths now remove already-created sysfs attributes.
+ *  - fimc_dev is NULLed after kfree() so a later probe cannot see a
+ *    dangling pointer.
+ */
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+ struct s3c_platform_fimc *pdata;
+ struct fimc_control *ctrl;
+ int ret;
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ char buf[32]; /* FIX: was used below without a declaration */
+#endif
+
+ if (!fimc_dev) {
+ fimc_dev = kzalloc(sizeof(*fimc_dev), GFP_KERNEL);
+ if (!fimc_dev) {
+ dev_err(&pdev->dev, "%s: not enough memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ ctrl = fimc_register_controller(pdev);
+ if (!ctrl) {
+ printk(KERN_ERR "%s: cannot register fimc\n", __func__);
+ goto err_alloc;
+ }
+
+ pdata = to_fimc_plat(&pdev->dev);
+ if ((ctrl->id == FIMC0) && (pdata->cfg_gpio))
+ pdata->cfg_gpio(pdev);
+
+ /* V4L2 device-subdev registration */
+ ret = v4l2_device_register(&pdev->dev, &ctrl->v4l2_dev);
+ if (ret) {
+ fimc_err("%s: v4l2 device register failed\n", __func__);
+ goto err_fimc;
+ }
+ ctrl->vd->v4l2_dev = &ctrl->v4l2_dev;
+
+ /* things to initialize once */
+ if (!fimc_dev->initialized) {
+ ret = fimc_init_global(pdev);
+ if (ret)
+ goto err_v4l2;
+ }
+
+ /* video device register */
+ ret = video_register_device(ctrl->vd, VFL_TYPE_GRABBER, ctrl->id);
+ if (ret) {
+ fimc_err("%s: cannot register video driver\n", __func__);
+ goto err_v4l2;
+ }
+
+ video_set_drvdata(ctrl->vd, ctrl);
+
+#ifdef CONFIG_VIDEO_FIMC_RANGE_WIDE
+ ctrl->range = FIMC_RANGE_WIDE;
+#else
+ ctrl->range = FIMC_RANGE_NARROW;
+#endif
+
+ ret = device_create_file(&(pdev->dev), &dev_attr_log_level);
+ if (ret < 0) {
+ fimc_err("failed to add sysfs entries for log level\n");
+ goto err_global;
+ }
+ ret = device_create_file(&(pdev->dev), &dev_attr_range_mode);
+ if (ret < 0) {
+ fimc_err("failed to add sysfs entries for range mode\n");
+ goto err_attr_log; /* FIX: tear down log_level too */
+ }
+ printk(KERN_INFO "FIMC%d registered successfully\n", ctrl->id);
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ ctrl->power_status = FIMC_POWER_OFF;
+ pm_runtime_enable(&pdev->dev);
+
+ /* FIX: bounded snprintf instead of sprintf */
+ snprintf(buf, sizeof(buf), "fimc%d_iqr_wq_name", ctrl->id);
+ ctrl->fimc_irq_wq = create_workqueue(buf);
+
+ if (ctrl->fimc_irq_wq == NULL) {
+ fimc_err("Cannot create workqueue for fimc driver\n");
+ goto err_attr_range;
+ }
+
+ INIT_WORK(&ctrl->work_struct, s3c_fimc_irq_work);
+ atomic_set(&ctrl->irq_cnt, 0);
+#endif
+
+ return 0;
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+err_attr_range:
+ device_remove_file(&(pdev->dev), &dev_attr_range_mode);
+#endif
+err_attr_log:
+ device_remove_file(&(pdev->dev), &dev_attr_log_level);
+
+err_global:
+ video_unregister_device(ctrl->vd);
+
+err_v4l2:
+ v4l2_device_unregister(&ctrl->v4l2_dev);
+
+err_fimc:
+ fimc_unregister_controller(pdev);
+
+err_alloc:
+ /* NOTE(review): freeing the shared fimc_dev while a sibling FIMC
+ * instance may already be using it looks unsafe -- confirm a later
+ * probe cannot fail after an earlier one succeeded. */
+ kfree(fimc_dev);
+ fimc_dev = NULL; /* FIX: don't leave a dangling global */
+ return -EINVAL;
+
+}
+
+/*
+ * Platform remove: unregisters the controller, removes the sysfs
+ * attributes created in probe, frees the global device context and
+ * disables runtime PM.
+ *
+ * FIX: probe creates both 'log_level' and 'range_mode' attributes,
+ * but only 'log_level' was removed here -- 'range_mode' leaked.
+ */
+static int fimc_remove(struct platform_device *pdev)
+{
+ fimc_unregister_controller(pdev);
+
+ device_remove_file(&(pdev->dev), &dev_attr_log_level);
+ device_remove_file(&(pdev->dev), &dev_attr_range_mode);
+
+ kfree(fimc_dev);
+ fimc_dev = NULL;
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ pm_runtime_disable(&pdev->dev);
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/* Put one output-mode context to sleep: stop streaming if it is live
+ * and encode the pre-suspend state into ctx->status (FIMC_ON_SLEEP /
+ * FIMC_ON_IDLE_SLEEP / FIMC_OFF_SLEEP) so resume can restore it. */
+static inline void fimc_suspend_out_ctx(struct fimc_control *ctrl,
+ struct fimc_ctx *ctx)
+{
+ switch (ctx->overlay.mode) {
+ case FIMC_OVLY_DMA_AUTO: /* fall through */
+ case FIMC_OVLY_DMA_MANUAL: /* fall through */
+ case FIMC_OVLY_NONE_MULTI_BUF: /* fall through */
+ case FIMC_OVLY_NONE_SINGLE_BUF:
+ if (ctx->status == FIMC_STREAMON) {
+ /* a non-empty incoming queue means frames are lost */
+ if (ctx->inq[0] != -1)
+ fimc_err("%s : %d in queue unstable\n",
+ __func__, __LINE__);
+
+ fimc_outdev_stop_streaming(ctrl, ctx);
+ ctx->status = FIMC_ON_SLEEP;
+ } else if (ctx->status == FIMC_STREAMON_IDLE) {
+ fimc_outdev_stop_streaming(ctrl, ctx);
+ ctx->status = FIMC_ON_IDLE_SLEEP;
+ } else {
+ ctx->status = FIMC_OFF_SLEEP;
+ }
+
+ break;
+ case FIMC_OVLY_FIFO:
+ if (ctx->status == FIMC_STREAMON) {
+ if (ctx->inq[0] != -1)
+ fimc_err("%s: %d in queue unstable\n",
+ __func__, __LINE__);
+
+ if ((ctrl->out->idxs.next.idx != -1) ||
+ (ctrl->out->idxs.prev.idx != -1))
+ fimc_err("%s: %d FIMC unstable\n",
+ __func__, __LINE__);
+
+ fimc_outdev_stop_streaming(ctrl, ctx);
+ ctx->status = FIMC_ON_SLEEP;
+ } else {
+ ctx->status = FIMC_OFF_SLEEP;
+ }
+
+ break;
+ case FIMC_OVLY_NOT_FIXED:
+ ctx->status = FIMC_OFF_SLEEP;
+ break;
+ }
+}
+
+/* Suspend all output-mode contexts and derive the controller-wide
+ * status: any streaming context wins over idle, idle over off.
+ * Always returns 0. */
+static inline int fimc_suspend_out(struct fimc_control *ctrl)
+{
+ struct fimc_ctx *ctx;
+ int i, on_sleep = 0, idle_sleep = 0, off_sleep = 0;
+
+ for (i = 0; i < FIMC_MAX_CTXS; i++) {
+ ctx = &ctrl->out->ctx[i];
+ fimc_suspend_out_ctx(ctrl, ctx);
+
+ switch (ctx->status) {
+ case FIMC_ON_SLEEP:
+ on_sleep++;
+ break;
+ case FIMC_ON_IDLE_SLEEP:
+ idle_sleep++;
+ break;
+ case FIMC_OFF_SLEEP:
+ off_sleep++;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (on_sleep)
+ ctrl->status = FIMC_ON_SLEEP;
+ else if (idle_sleep)
+ ctrl->status = FIMC_ON_IDLE_SLEEP;
+ else
+ ctrl->status = FIMC_OFF_SLEEP;
+
+ /* force full context reprogramming after resume */
+ ctrl->out->last_ctx = -1;
+
+ return 0;
+}
+
+/* Suspend a capture-mode controller.  For writeback sources the frame
+ * counter is saved and capture is stopped; for a real camera on FIMC0
+ * the sensor is powered down and its MCLK gated.  Always returns 0. */
+static inline int fimc_suspend_cap(struct fimc_control *ctrl)
+{
+ struct fimc_global *fimc = get_fimc_dev();
+
+ fimc_dbg("%s\n", __func__);
+
+ if (ctrl->cam->id == CAMERA_WB || ctrl->cam->id == CAMERA_WB_B) {
+ fimc_dbg("%s\n", __func__);
+ /* remember the buffer sequence so resume can restore it */
+ ctrl->suspend_framecnt = fimc_hwget_output_buf_sequence(ctrl);
+ fimc_streamoff_capture((void *)ctrl);
+ fimc_info1("%s : framecnt_seq : %d\n",
+ __func__, ctrl->suspend_framecnt);
+ } else {
+ if (ctrl->id == FIMC0 && ctrl->cam->initialized) {
+ ctrl->cam->initialized = 0;
+
+ v4l2_subdev_call(ctrl->cam->sd, core, s_power, 0);
+
+ if (ctrl->cam->cam_power)
+ ctrl->cam->cam_power(0);
+
+ /* shutdown the MCLK */
+ clk_disable(ctrl->cam->clk);
+ fimc->mclk_status = CAM_MCLK_OFF;
+ }
+ }
+ ctrl->power_status = FIMC_POWER_OFF;
+
+ return 0;
+}
+
+/* System-suspend entry: dispatch to the output or capture suspend
+ * helper depending on which mode the controller is in, then gate the
+ * clock if the device is open (unless runtime PM owns the clocks). */
+int fimc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct fimc_control *ctrl;
+ struct s3c_platform_fimc *pdata;
+ int id;
+
+ id = pdev->id;
+ ctrl = get_fimc_ctrl(id);
+ pdata = to_fimc_plat(ctrl->dev);
+
+ if (ctrl->out)
+ fimc_suspend_out(ctrl);
+
+ else if (ctrl->cap)
+ fimc_suspend_cap(ctrl);
+ else
+ ctrl->status = FIMC_OFF_SLEEP;
+
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+ if (atomic_read(&ctrl->in_use) && pdata->clk_off)
+ pdata->clk_off(pdev, &ctrl->clk);
+#endif
+
+ return 0;
+}
+
+/*
+ * dev_pm_ops .suspend adapter: wraps fimc_suspend() for the power
+ * domain framework.
+ *
+ * FIX: the result of fimc_suspend() was computed and then discarded;
+ * propagate it instead of unconditionally returning 0.
+ */
+int fimc_suspend_pd(struct device *dev)
+{
+ struct platform_device *pdev;
+ int ret;
+ pm_message_t state;
+
+ state.event = 0;
+ pdev = to_platform_device(dev);
+ ret = fimc_suspend(pdev, state);
+
+ return ret;
+}
+
+/* Restore one output-mode context after resume, reversing the state
+ * recorded by fimc_suspend_out_ctx(): FIFO contexts restart streaming,
+ * DMA/non-overlay contexts are reprogrammed and left STREAMON_IDLE,
+ * and OFF_SLEEP contexts simply return to STREAMOFF. */
+static inline void fimc_resume_out_ctx(struct fimc_control *ctrl,
+ struct fimc_ctx *ctx)
+{
+ int index = -1, ret = -1;
+
+ switch (ctx->overlay.mode) {
+ case FIMC_OVLY_FIFO:
+ if (ctx->status == FIMC_ON_SLEEP) {
+ ctx->status = FIMC_READY_ON;
+
+ ret = fimc_outdev_set_ctx_param(ctrl, ctx);
+ if (ret < 0)
+ fimc_err("Fail: fimc_outdev_set_ctx_param\n");
+
+#if defined(CONFIG_VIDEO_IPC)
+ if (ctx->pix.field == V4L2_FIELD_INTERLACED_TB)
+ ipc_start();
+#endif
+ /* resume from the buffer that was active at suspend */
+ index = ctrl->out->idxs.active.idx;
+ fimc_outdev_set_src_addr(ctrl, ctx->src[index].base);
+
+ ret = fimc_start_fifo(ctrl, ctx);
+ if (ret < 0)
+ fimc_err("Fail: fimc_start_fifo\n");
+
+ ctx->status = FIMC_STREAMON;
+ } else if (ctx->status == FIMC_OFF_SLEEP) {
+ ctx->status = FIMC_STREAMOFF;
+ } else {
+ fimc_err("%s: Abnormal (%d)\n", __func__, ctx->status);
+ }
+
+ break;
+ case FIMC_OVLY_DMA_AUTO:
+ if (ctx->status == FIMC_ON_IDLE_SLEEP) {
+ fimc_outdev_resume_dma(ctrl, ctx);
+ ret = fimc_outdev_set_ctx_param(ctrl, ctx);
+ if (ret < 0)
+ fimc_err("Fail: fimc_outdev_set_ctx_param\n");
+
+ ctx->status = FIMC_STREAMON_IDLE;
+ } else if (ctx->status == FIMC_OFF_SLEEP) {
+ ctx->status = FIMC_STREAMOFF;
+ } else {
+ fimc_err("%s: Abnormal (%d)\n", __func__, ctx->status);
+ }
+
+ break;
+ case FIMC_OVLY_DMA_MANUAL:
+ if (ctx->status == FIMC_ON_IDLE_SLEEP) {
+ ret = fimc_outdev_set_ctx_param(ctrl, ctx);
+ if (ret < 0)
+ fimc_err("Fail: fimc_outdev_set_ctx_param\n");
+
+ ctx->status = FIMC_STREAMON_IDLE;
+
+ } else if (ctx->status == FIMC_OFF_SLEEP) {
+ ctx->status = FIMC_STREAMOFF;
+ } else {
+ fimc_err("%s: Abnormal (%d)\n", __func__, ctx->status);
+ }
+
+ break;
+ case FIMC_OVLY_NONE_SINGLE_BUF: /* fall through */
+ case FIMC_OVLY_NONE_MULTI_BUF:
+ if (ctx->status == FIMC_ON_IDLE_SLEEP) {
+ ret = fimc_outdev_set_ctx_param(ctrl, ctx);
+ if (ret < 0)
+ fimc_err("Fail: fimc_outdev_set_ctx_param\n");
+
+ ctx->status = FIMC_STREAMON_IDLE;
+ } else if (ctx->status == FIMC_OFF_SLEEP) {
+ ctx->status = FIMC_STREAMOFF;
+ } else {
+ fimc_err("%s: Abnormal (%d)\n", __func__, ctx->status);
+ }
+
+ break;
+ default:
+ ctx->status = FIMC_STREAMOFF;
+ break;
+ }
+}
+
+/* Resume all output-mode contexts.  Temporarily powers the CAM power
+ * domain on (if it was off) while each context is reprogrammed with
+ * its clock gated around the call, then restores the domain state and
+ * recomputes ctrl->status from the union of context states.
+ * Always returns 0. */
+static inline int fimc_resume_out(struct fimc_control *ctrl)
+{
+ struct fimc_ctx *ctx;
+ int i;
+ u32 state = 0;
+ u32 timeout;
+ u32 tmp;
+ struct s3c_platform_fimc *pdata;
+
+ pdata = to_fimc_plat(ctrl->dev);
+
+ /* read current CAM power-domain state */
+ tmp = __raw_readl(S5P_PMU_CAM_CONF + 0x4) & S5P_INT_LOCAL_PWR_EN;
+
+ if (tmp != S5P_INT_LOCAL_PWR_EN) {
+ __raw_writel(S5P_INT_LOCAL_PWR_EN, S5P_PMU_CAM_CONF);
+
+ /* Wait max 1ms */
+ timeout = 10;
+ while ((__raw_readl(S5P_PMU_CAM_CONF + 0x4) & S5P_INT_LOCAL_PWR_EN)
+ != S5P_INT_LOCAL_PWR_EN) {
+ if (timeout == 0) {
+ printk(KERN_ERR "Power domain CAM enable failed.\n");
+ break;
+ }
+ timeout--;
+ udelay(100);
+ }
+ }
+
+ for (i = 0; i < FIMC_MAX_CTXS; i++) {
+ ctx = &ctrl->out->ctx[i];
+
+ if (pdata->clk_on) {
+ pdata->clk_on(to_platform_device(ctrl->dev),
+ &ctrl->clk);
+ }
+
+ fimc_resume_out_ctx(ctrl, ctx);
+
+ if (pdata->clk_off) {
+ pdata->clk_off(to_platform_device(ctrl->dev),
+ &ctrl->clk);
+ }
+
+ switch (ctx->status) {
+ case FIMC_STREAMON:
+ state |= FIMC_STREAMON;
+ break;
+ case FIMC_STREAMON_IDLE:
+ state |= FIMC_STREAMON_IDLE;
+ break;
+ case FIMC_STREAMOFF:
+ state |= FIMC_STREAMOFF;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* power the domain back off only if we powered it on above */
+ if (tmp != S5P_INT_LOCAL_PWR_EN) {
+ __raw_writel(0, S5P_PMU_CAM_CONF);
+
+ /* Wait max 1ms */
+ timeout = 10;
+ while (__raw_readl(S5P_PMU_CAM_CONF + 0x4) & S5P_INT_LOCAL_PWR_EN) {
+ if (timeout == 0) {
+ printk(KERN_ERR "Power domain CAM disable failed.\n");
+ break;
+ }
+ timeout--;
+ udelay(100);
+ }
+ }
+
+ if ((state & FIMC_STREAMON) == FIMC_STREAMON)
+ ctrl->status = FIMC_STREAMON;
+ else if ((state & FIMC_STREAMON_IDLE) == FIMC_STREAMON_IDLE)
+ ctrl->status = FIMC_STREAMON_IDLE;
+ else
+ ctrl->status = FIMC_STREAMOFF;
+
+ return 0;
+}
+
+/* Resume a capture-mode controller: power the CAM domain on, then for
+ * writeback sources restore the saved buffer sequence and restart
+ * capture, or for a real camera on FIMC0 re-enable MCLK and power the
+ * sensor back up.  Always returns 0. */
+static inline int fimc_resume_cap(struct fimc_control *ctrl)
+{
+ struct fimc_global *fimc = get_fimc_dev();
+ int tmp;
+ u32 timeout;
+
+ fimc_dbg("%s\n", __func__);
+
+ __raw_writel(S5P_INT_LOCAL_PWR_EN, S5P_PMU_CAM_CONF);
+ /* Wait max 1ms */
+ timeout = 10;
+ while ((__raw_readl(S5P_PMU_CAM_CONF + 0x4) & S5P_INT_LOCAL_PWR_EN)
+ != S5P_INT_LOCAL_PWR_EN) {
+ if (timeout == 0) {
+ printk(KERN_ERR "Power domain CAM enable failed.\n");
+ break;
+ }
+ timeout--;
+ udelay(100);
+ }
+
+ if (ctrl->cam->id == CAMERA_WB || ctrl->cam->id == CAMERA_WB_B) {
+ fimc_info1("%s : framecnt_seq : %d\n",
+ __func__, ctrl->suspend_framecnt);
+ /* restore the sequence saved in fimc_suspend_cap() */
+ fimc_hwset_output_buf_sequence_all(ctrl,
+ ctrl->suspend_framecnt);
+ tmp = fimc_hwget_output_buf_sequence(ctrl);
+ fimc_info1("%s : real framecnt_seq : %d\n", __func__, tmp);
+
+ fimc_streamon_capture((void *)ctrl);
+ } else {
+ if (ctrl->id == FIMC0 && ctrl->cam->initialized == 0) {
+ clk_set_rate(ctrl->cam->clk, ctrl->cam->clk_rate);
+ clk_enable(ctrl->cam->clk);
+ fimc->mclk_status = CAM_MCLK_ON;
+ fimc_info1("clock for camera: %d\n", ctrl->cam->clk_rate);
+
+ if (ctrl->cam->cam_power)
+ ctrl->cam->cam_power(1);
+
+ v4l2_subdev_call(ctrl->cam->sd, core, s_power, 1);
+
+ ctrl->cam->initialized = 1;
+ }
+ }
+ /* fimc_streamon_capture((void *)ctrl); */
+ ctrl->power_status = FIMC_POWER_ON;
+
+ return 0;
+}
+
+/* System-resume entry: re-enable the clock if the device is open, then
+ * dispatch to the output or capture resume helper.  Mirror image of
+ * fimc_suspend(). */
+int fimc_resume(struct platform_device *pdev)
+{
+ struct fimc_control *ctrl;
+ struct s3c_platform_fimc *pdata;
+ int id = pdev->id;
+
+ ctrl = get_fimc_ctrl(id);
+ pdata = to_fimc_plat(ctrl->dev);
+
+ if (atomic_read(&ctrl->in_use) && pdata->clk_on)
+ pdata->clk_on(pdev, &ctrl->clk);
+
+ if (ctrl->out)
+ fimc_resume_out(ctrl);
+
+ else if (ctrl->cap)
+ fimc_resume_cap(ctrl);
+ else
+ ctrl->status = FIMC_STREAMOFF;
+
+ return 0;
+}
+
+/*
+ * dev_pm_ops .resume adapter: wraps fimc_resume() for the power
+ * domain framework.
+ *
+ * FIX: the result of fimc_resume() was stored and then discarded;
+ * propagate it instead of unconditionally returning 0.
+ */
+int fimc_resume_pd(struct device *dev)
+{
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = to_platform_device(dev);
+ ret = fimc_resume(pdev);
+ return ret;
+}
+
+
+#else
+#define fimc_suspend NULL
+#define fimc_resume NULL
+#endif
+
+/* Runtime-suspend an output (m2m) controller: just gate its clock and,
+ * if that succeeds, mark the power state off.  Always returns 0. */
+static int fimc_runtime_suspend_out(struct fimc_control *ctrl)
+{
+ struct s3c_platform_fimc *pdata;
+ int ret;
+
+ pdata = to_fimc_plat(ctrl->dev);
+
+ if (pdata->clk_off) {
+ ret = pdata->clk_off(to_platform_device(ctrl->dev), &ctrl->clk);
+ if (ret == 0)
+ ctrl->power_status = FIMC_POWER_OFF;
+ }
+
+ return 0;
+}
+/* Runtime-suspend a capture controller: stop any active capture, gate
+ * the FIMC clock, and for writeback inputs disable the corresponding
+ * pxl_async clock.  Returns -ENODEV if no camera is attached. */
+static int fimc_runtime_suspend_cap(struct fimc_control *ctrl)
+{
+ struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+ struct platform_device *pdev = to_platform_device(ctrl->dev);
+ struct clk *pxl_async = NULL;
+ int ret = 0;
+ fimc_dbg("%s FIMC%d\n", __func__, ctrl->id);
+
+ /* mark transitional state while we tear down */
+ ctrl->power_status = FIMC_POWER_SUSPEND;
+
+ if (ctrl->cap && (ctrl->status != FIMC_STREAMOFF)) {
+ fimc_streamoff_capture((void *)ctrl);
+ ctrl->status = FIMC_STREAMOFF;
+ }
+
+ if (pdata->clk_off) {
+ ret = pdata->clk_off(pdev, &ctrl->clk);
+ if (ret == 0)
+ ctrl->power_status = FIMC_POWER_OFF;
+ }
+
+ fimc_dbg("%s\n", __func__);
+
+ if (!ctrl->cam) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ if (ctrl->cam->id == CAMERA_WB) {
+ fimc_info1("%s : writeback 0 suspend\n", __func__);
+ pxl_async = clk_get(&pdev->dev, "pxl_async0");
+ if (IS_ERR(pxl_async)) {
+ dev_err(&pdev->dev, "failed to get pxl_async\n");
+ return -ENODEV;
+ }
+
+ clk_disable(pxl_async);
+ clk_put(pxl_async);
+ } else if (ctrl->cam->id == CAMERA_WB_B) {
+ fimc_info1("%s : writeback 1 suspend\n", __func__);
+ pxl_async = clk_get(&pdev->dev, "pxl_async1");
+ if (IS_ERR(pxl_async)) {
+ dev_err(&pdev->dev, "failed to get pxl_async\n");
+ return -ENODEV;
+ }
+
+ clk_disable(pxl_async);
+ clk_put(pxl_async);
+ }
+
+
+ return 0;
+}
+/* Runtime PM suspend callback: dispatch to the m2m or capture helper,
+ * then stash the register base in fimc_dev->backup_regs and NULL
+ * ctrl->regs so other paths can detect the powered-off state.
+ * Always returns 0 (helper errors are only logged). */
+static int fimc_runtime_suspend(struct device *dev)
+{
+ struct fimc_control *ctrl;
+ struct platform_device *pdev;
+ int id;
+
+ pdev = to_platform_device(dev);
+ id = pdev->id;
+ ctrl = get_fimc_ctrl(id);
+
+ fimc_dbg("%s FIMC%d\n", __func__, ctrl->id);
+
+ if (ctrl->out) {
+ fimc_info1("%s: fimc m2m\n", __func__);
+ fimc_runtime_suspend_out(ctrl);
+ } else if (ctrl->cap) {
+ fimc_info1("%s: fimc capture\n", __func__);
+ fimc_runtime_suspend_cap(ctrl);
+ } else
+ fimc_err("%s : invalid fimc control\n", __func__);
+
+ fimc_dev->backup_regs[id] = ctrl->regs;
+ ctrl->regs = NULL;
+
+ return 0;
+}
+
+/* Runtime-resume a capture controller: re-enable the pxl_async clock
+ * for writeback inputs.  Returns -ENODEV if no camera is attached or
+ * the clock lookup fails. */
+static int fimc_runtime_resume_cap(struct fimc_control *ctrl)
+{
+ struct platform_device *pdev = to_platform_device(ctrl->dev);
+ struct clk *pxl_async = NULL;
+ fimc_dbg("%s\n", __func__);
+
+ if (!ctrl->cam) {
+ fimc_err("%s: No capture device.\n", __func__);
+ return -ENODEV;
+ }
+
+ if (ctrl->cam->id == CAMERA_WB) {
+ fimc_info1("%s : writeback 0 resume\n", __func__);
+ pxl_async = clk_get(&pdev->dev, "pxl_async0");
+ if (IS_ERR(pxl_async)) {
+ dev_err(&pdev->dev, "failed to get pxl_async\n");
+ return -ENODEV;
+ }
+
+ clk_enable(pxl_async);
+ clk_put(pxl_async);
+ } else if (ctrl->cam->id == CAMERA_WB_B) {
+ fimc_info1("%s : writeback 1 resume\n", __func__);
+ pxl_async = clk_get(&pdev->dev, "pxl_async1");
+ if (IS_ERR(pxl_async)) {
+ dev_err(&pdev->dev, "failed to get pxl_async\n");
+ return -ENODEV;
+ }
+
+ clk_enable(pxl_async);
+ clk_put(pxl_async);
+ }
+
+ return 0;
+}
+/* Runtime PM resume callback: restore the register base saved by
+ * fimc_runtime_suspend(), ungate the clock, and for capture mode
+ * re-enable the writeback clocks.  m2m mode needs no further work.
+ * Always returns 0. */
+static int fimc_runtime_resume(struct device *dev)
+{
+ struct fimc_control *ctrl;
+ struct s3c_platform_fimc *pdata;
+ struct platform_device *pdev;
+ int id, ret = 0;
+
+ pdev = to_platform_device(dev);
+ id = pdev->id;
+ ctrl = get_fimc_ctrl(id);
+
+ ctrl->regs = fimc_dev->backup_regs[id];
+ fimc_dev->backup_regs[id] = NULL;
+
+ fimc_dbg("%s\n", __func__);
+
+ pdata = to_fimc_plat(ctrl->dev);
+ if (pdata->clk_on) {
+ ret = pdata->clk_on(pdev, &ctrl->clk);
+ if (ret == 0)
+ ctrl->power_status = FIMC_POWER_ON;
+ }
+
+ /* if status is FIMC_PROBE, not need to know differlence of out or
+ * cap */
+
+ if (ctrl->out) {
+ /* do not need to sub function in m2m mode */
+ fimc_info1("%s: fimc m2m\n", __func__);
+ } else if (ctrl->cap) {
+ fimc_info1("%s: fimc cap\n", __func__);
+ fimc_runtime_resume_cap(ctrl);
+ } else {
+ fimc_err("%s: runtime resume error\n", __func__);
+ }
+ return 0;
+}
+/* PM callbacks used when the device sits in a power domain with
+ * runtime PM enabled (see fimc_driver below). */
+static const struct dev_pm_ops fimc_pm_ops = {
+ .suspend = fimc_suspend_pd,
+ .resume = fimc_resume_pd,
+ .runtime_suspend = fimc_runtime_suspend,
+ .runtime_resume = fimc_runtime_resume,
+};
+
+/* Platform driver: legacy suspend/resume hooks are used only when the
+ * power-domain + runtime-PM combination is not configured; otherwise
+ * fimc_pm_ops takes over. */
+static struct platform_driver fimc_driver = {
+ .probe = fimc_probe,
+ .remove = fimc_remove,
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+ .suspend = fimc_suspend,
+ .resume = fimc_resume,
+#endif
+ .driver = {
+ .name = FIMC_NAME,
+ .owner = THIS_MODULE,
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+ .pm = &fimc_pm_ops,
+#else
+ .pm = NULL,
+#endif
+
+ },
+};
+
+/* Module init: register the platform driver.  NOTE(review): the
+ * return value of platform_driver_register() is ignored -- a failed
+ * registration is silently reported as success. */
+static int fimc_register(void)
+{
+ platform_driver_register(&fimc_driver);
+
+ return 0;
+}
+
+/* Module exit: unregister the platform driver. */
+static void fimc_unregister(void)
+{
+ platform_driver_unregister(&fimc_driver);
+}
+
+/* late_initcall defers registration to the last init level --
+ * presumably so the framebuffer/clock providers FIMC depends on are
+ * already up; confirm against the board init ordering. */
+late_initcall(fimc_register);
+module_exit(fimc_unregister);
+
+MODULE_AUTHOR("Dongsoo, Kim <dongsoo45.kim@samsung.com>");
+MODULE_AUTHOR("Jinsung, Yang <jsgood.yang@samsung.com>");
+MODULE_AUTHOR("Jonghun, Han <jonghun.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung Camera Interface (FIMC) driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/fimc/fimc_output.c b/drivers/media/video/samsung/fimc/fimc_output.c
new file mode 100644
index 0000000..895cb7f
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc_output.c
@@ -0,0 +1,3274 @@
+/* linux/drivers/media/video/samsung/fimc/fimc_output.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * V4L2 Output device support file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/videodev2.h>
+#include <linux/videodev2_exynos_media.h>
+#include <linux/videodev2_exynos_camera.h>
+#include <media/videobuf-core.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/mman.h>
+#include <plat/media.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include "fimc.h"
+#include "fimc-ipc.h"
+
+/* Classify a V4L2 fourcc into the FIMC color-space buckets (RGB,
+ * YUV420, YUV422).  Any unrecognized format falls through to
+ * FIMC_YUV444 -- it is the catch-all, not an error. */
+static __u32 fimc_get_pixel_format_type(__u32 pixelformat)
+{
+ switch (pixelformat) {
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_RGB565:
+ return FIMC_RGB;
+
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV12M:
+ case V4L2_PIX_FMT_NV12T:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ return FIMC_YUV420;
+
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_YUV422P:
+ return FIMC_YUV422;
+
+ default:
+ return FIMC_YUV444;
+ }
+}
+
+/* Program the input DMA base addresses, bracketed by the
+ * address-change disable/enable bits.  Skips silently when ctrl->regs
+ * is NULL, i.e. the block is powered down (see fimc_runtime_suspend). */
+void fimc_outdev_set_src_addr(struct fimc_control *ctrl, dma_addr_t *base)
+{
+ if (ctrl && (ctrl->regs == NULL)) {
+ fimc_dbg("%s FIMC%d power is off: skip to set config\n",
+ __func__, ctrl->id);
+ return;
+ }
+ fimc_hwset_addr_change_disable(ctrl);
+ fimc_hwset_input_address(ctrl, base);
+ fimc_hwset_addr_change_enable(ctrl);
+}
+
+/* Kick the camera interface for output mode: start the scaler, enable
+ * capture (bypass off) and start the input DMA.  Takes void* because
+ * it is also used as a callback (see fimc_outdev_stop_fifo). */
+int fimc_outdev_start_camif(void *param)
+{
+ struct fimc_control *ctrl = (struct fimc_control *)param;
+
+ fimc_hwset_start_scaler(ctrl);
+ fimc_hwset_enable_capture(ctrl, 0); /* bypass disable */
+ fimc_hwset_start_input_dma(ctrl);
+
+ return 0;
+}
+
+/* Reverse of fimc_outdev_start_camif: stop input DMA, autoload,
+ * scaler and capture.  A NULL register base means the block is
+ * already powered off, so there is nothing to stop. */
+static int fimc_outdev_stop_camif(void *param)
+{
+ struct fimc_control *ctrl = (struct fimc_control *)param;
+
+ if (ctrl->regs == 0) {
+ fimc_err("%s: power offed", __func__);
+ return 0;
+ }
+ fimc_hwset_stop_input_dma(ctrl);
+ fimc_hwset_disable_autoload(ctrl);
+ fimc_hwset_stop_scaler(ctrl);
+ fimc_hwset_disable_capture(ctrl);
+
+ return 0;
+}
+
+/* Close the FIMD FIFO path.  The shutdown sequence is hardware-
+ * revision specific: 0x40 (S5PC100) lets the framebuffer driver call
+ * us back to stop the camif, while 0x43/0x50+ close the FIFO first
+ * and then stop the camif with explicit waits.  Always returns 0;
+ * FIFO-close failures are only logged. */
+static int fimc_outdev_stop_fifo(struct fimc_control *ctrl,
+ struct fimc_ctx *ctx)
+{
+ struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+ int ret = -1;
+
+ fimc_dbg("%s: called\n", __func__);
+
+ if (pdata->hw_ver == 0x40) { /* to support C100 */
+ ret = ctrl->fb.close_fifo(ctrl->id, fimc_outdev_stop_camif,
+ (void *)ctrl);
+ if (ret < 0)
+ fimc_err("FIMD FIFO close fail\n");
+ } else if ((pdata->hw_ver == 0x43) || (pdata->hw_ver >= 0x50)) {
+ ret = ctrl->fb.close_fifo(ctrl->id, NULL, NULL);
+ if (ret < 0)
+ fimc_err("FIMD FIFO close fail\n");
+ fimc_hw_wait_winoff(ctrl);
+ fimc_outdev_stop_camif(ctrl);
+ fimc_hw_wait_stop_input_dma(ctrl);
+#if defined(CONFIG_VIDEO_IPC)
+ if (ctx->pix.field == V4L2_FIELD_INTERLACED_TB)
+ ipc_stop();
+#endif
+ }
+
+ return 0;
+}
+
+/* Stop streaming on an output context.  FIFO mode tears down the
+ * FIMD FIFO synchronously; DMA modes just mark the context READY_OFF
+ * (the interrupt path completes the transition); non-overlay modes
+ * additionally wait for the in-flight one-shot to finish.  Always
+ * returns 0 except when ctrl->out vanished during the timed wait. */
+int fimc_outdev_stop_streaming(struct fimc_control *ctrl, struct fimc_ctx *ctx)
+{
+ int ret = 0;
+
+ fimc_dbg("%s: called\n", __func__);
+
+ switch (ctx->overlay.mode) {
+ case FIMC_OVLY_FIFO:
+ ctx->status = FIMC_READY_OFF;
+ fimc_outdev_stop_fifo(ctrl, ctx);
+ break;
+ case FIMC_OVLY_DMA_AUTO: /* fall through */
+ case FIMC_OVLY_DMA_MANUAL:
+ if (ctx->status == FIMC_STREAMON_IDLE)
+ ctx->status = FIMC_STREAMOFF;
+ else
+ ctx->status = FIMC_READY_OFF;
+ break;
+ case FIMC_OVLY_NONE_SINGLE_BUF: /* fall through */
+ case FIMC_OVLY_NONE_MULTI_BUF:
+ if (ctx->status == FIMC_STREAMON_IDLE)
+ ctx->status = FIMC_STREAMOFF;
+ else
+ ctx->status = FIMC_READY_OFF;
+
+ /* wait for the ISR to drive the context to STREAMOFF */
+ ret = wait_event_timeout(ctrl->wq,
+ (ctx->status == FIMC_STREAMOFF),
+ FIMC_ONESHOT_TIMEOUT);
+ if (ret == 0) {
+ if (ctrl->out == NULL) {
+ fimc_err("%s: ctrl->out is changed to null\n",
+ __func__);
+ return -EINVAL;
+ }
+ fimc_dump_context(ctrl, ctx);
+ fimc_err("fail %s: %d\n", __func__, ctx->ctx_num);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* Round the destination width up to the next multiple of 16 --
+ * presumably a DMA/window alignment requirement of the hardware;
+ * confirm against the FIMC datasheet.  Always returns 0. */
+static int fimc_outdev_dma_auto_dst_resize(struct v4l2_rect *org)
+{
+ if (org->width % 16)
+ org->width = org->width + 16 - (org->width % 16);
+
+ return 0;
+}
+
+/* Re-establish the DMA overlay window on the framebuffer after
+ * resume: recompute the FIMD rectangle, reconfigure the window path,
+ * size and position through s3cfb direct ioctls, and point the window
+ * at the first queued destination buffer.  The explicit WIN_ON is
+ * intentionally left out (see the #if 0 note below).
+ * Returns 0 on success, -EINVAL on any ioctl failure or empty outq. */
+int fimc_outdev_resume_dma(struct fimc_control *ctrl, struct fimc_ctx *ctx)
+{
+ struct v4l2_rect fimd_rect, fimd_rect_virtual;
+ struct fb_var_screeninfo var;
+ struct s3cfb_user_window window;
+ int ret = -1, idx;
+ u32 id = ctrl->id;
+
+ memset(&fimd_rect, 0, sizeof(struct v4l2_rect));
+ ret = fimc_fimd_rect(ctrl, ctx, &fimd_rect);
+ if (ret < 0) {
+ fimc_err("fimc_fimd_rect fail\n");
+ return -EINVAL;
+ }
+
+ /* Support any size */
+ memcpy(&fimd_rect_virtual, &fimd_rect, sizeof(fimd_rect));
+ fimc_outdev_dma_auto_dst_resize(&fimd_rect_virtual);
+
+ /* Get WIN var_screeninfo */
+ ret = s3cfb_direct_ioctl(id, FBIOGET_VSCREENINFO,
+ (unsigned long)&var);
+ if (ret < 0) {
+ fimc_err("direct_ioctl(FBIOGET_VSCREENINFO) fail\n");
+ return -EINVAL;
+ }
+
+ /* window path : DMA */
+ ret = s3cfb_direct_ioctl(id, S3CFB_SET_WIN_PATH, DATA_PATH_DMA);
+ if (ret < 0) {
+ fimc_err("direct_ioctl(S3CFB_SET_WIN_PATH) fail\n");
+ return -EINVAL;
+ }
+
+ /* Don't allocate the memory. */
+ ret = s3cfb_direct_ioctl(id, S3CFB_SET_WIN_MEM, DMA_MEM_OTHER);
+ if (ret < 0) {
+ fimc_err("direct_ioctl(S3CFB_SET_WIN_MEM) fail\n");
+ return -EINVAL;
+ }
+
+ /* Update WIN size */
+ var.xres_virtual = fimd_rect_virtual.width;
+ var.yres_virtual = fimd_rect_virtual.height;
+ var.xres = fimd_rect.width;
+ var.yres = fimd_rect.height;
+
+ ret = s3cfb_direct_ioctl(id, FBIOPUT_VSCREENINFO,
+ (unsigned long)&var);
+ if (ret < 0) {
+ fimc_err("direct_ioctl(FBIOPUT_VSCREENINFO) fail\n");
+ return -EINVAL;
+ }
+
+ /* Update WIN position */
+ window.x = fimd_rect.left;
+ window.y = fimd_rect.top;
+ ret = s3cfb_direct_ioctl(id, S3CFB_WIN_POSITION,
+ (unsigned long)&window);
+ if (ret < 0) {
+ fimc_err("direct_ioctl(S3CFB_WIN_POSITION) fail\n");
+ return -EINVAL;
+ }
+
+ idx = ctx->outq[0];
+ if (idx == -1) {
+ fimc_err("out going queue is empty.\n");
+ return -EINVAL;
+ }
+
+ ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_ADDR,
+ (unsigned long)ctx->dst[idx].base[FIMC_ADDR_Y]);
+ if (ret < 0) {
+ fimc_err("direct_ioctl(S3CFB_SET_WIN_ADDR) fail\n");
+ return -EINVAL;
+ }
+
+#if 0
+ /* Remarked for better screen display
+ * when dynamic screen size change is requested
+ */
+ ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_ON,
+ (unsigned long)NULL);
+ if (ret < 0) {
+ fimc_err("direct_ioctl(S3CFB_SET_WIN_ON) fail\n");
+ return -EINVAL;
+ }
+#endif
+
+ ctrl->fb.is_enable = 1;
+
+ return 0;
+}
+
+/* Reset all source-buffer bookkeeping of a context: every buffer goes
+ * back to IDLE with cleared flags, and the incoming/outgoing index
+ * queues are emptied (-1 marks a free slot). */
+static void fimc_init_out_buf(struct fimc_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < FIMC_OUTBUFS; i++) {
+ ctx->src[i].state = VIDEOBUF_IDLE;
+ ctx->src[i].flags = 0x0;
+
+ ctx->inq[i] = -1;
+ ctx->outq[i] = -1;
+ }
+}
+
+/*
+ * fimc_outdev_set_src_buf - carve FIMC_OUTBUFS source (input DMA) buffers
+ * out of the driver's memory region and record per-plane base/length for
+ * each buffer in @ctx->src[].
+ *
+ * The per-buffer size is derived from the source pixel format and
+ * page-aligned.  Returns 0 on success, -EINVAL on an unsupported format
+ * or when the region cannot hold all buffers, -ENOMEM when the CMA pool
+ * is too small (CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM only).
+ */
+static int fimc_outdev_set_src_buf(struct fimc_control *ctrl,
+					struct fimc_ctx *ctx)
+{
+	u32 width = ctx->pix.width;
+	u32 height = ctx->pix.height;
+	u32 format = ctx->pix.pixelformat;
+	u32 y_size = width * height;
+	u32 cb_size = 0, cr_size = 0;
+	u32 i, size;
+	dma_addr_t *curr = &ctrl->mem.curr;
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+	int err;
+	struct cma_info mem_info;
+#endif
+
+	/* Compute one page-aligned buffer size from the pixel format. */
+	switch (format) {
+	case V4L2_PIX_FMT_RGB32:
+		size = PAGE_ALIGN(y_size << 2);
+		break;
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_YUYV:
+		size = PAGE_ALIGN(y_size << 1);
+		break;
+	case V4L2_PIX_FMT_YUV420:
+		/* 3-plane 4:2:0: each chroma plane is a quarter of luma. */
+		cb_size = y_size >> 2;
+		cr_size = y_size >> 2;
+		size = PAGE_ALIGN(y_size + cb_size + cr_size);
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		/* 2-plane 4:2:0: interleaved CbCr plane is half of luma. */
+		cb_size = y_size >> 1;
+		size = PAGE_ALIGN(y_size + cb_size);
+		break;
+	case V4L2_PIX_FMT_NV12T:
+		/* Tiled NV12: plane sizes come from the tiling helper. */
+		fimc_get_nv12t_size(width, height, &y_size, &cb_size);
+		size = PAGE_ALIGN(y_size + cb_size);
+		break;
+	default:
+		fimc_err("%s: Invalid pixelformt : %d\n", __func__, format);
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+	/* CMA path: allocate the whole buffer array fresh instead of
+	 * consuming the static reservation. */
+	ctrl->mem.size = size * FIMC_OUTBUFS;
+	err = cma_info(&mem_info, ctrl->dev, 0);
+	printk(KERN_DEBUG "%s : [cma_info] start_addr : 0x%x, end_addr : 0x%x, "
+			"total_size : 0x%x, free_size : 0x%x req_size : 0x%x\n",
+			__func__, mem_info.lower_bound, mem_info.upper_bound,
+			mem_info.total_size, mem_info.free_size,
+			ctrl->mem.size);
+	if (err || (mem_info.free_size < ctrl->mem.size)) {
+		fimc_err("%s: get cma info failed\n", __func__);
+		ctrl->mem.size = 0;
+		ctrl->mem.base = 0;
+		return -ENOMEM;
+	} else {
+		/* NOTE(review): the cma_alloc() result is not checked for
+		 * failure before use — verify against cma_alloc()'s error
+		 * convention. */
+		ctrl->mem.base = (dma_addr_t)cma_alloc
+			(ctrl->dev, ctrl->cma_name, (size_t)ctrl->mem.size, 0);
+	}
+
+	fimc_dbg("%s : ctrl->mem.base = 0x%x\n", __func__, ctrl->mem.base);
+	fimc_dbg("%s : ctrl->mem.size = 0x%x\n", __func__, ctrl->mem.size);
+
+	ctrl->mem.curr = ctrl->mem.base;
+#endif
+
+	/* All FIMC_OUTBUFS buffers must fit inside the region. */
+	if ((*curr + size * FIMC_OUTBUFS) > (ctrl->mem.base + ctrl->mem.size)) {
+		fimc_err("%s: Reserved memory is not sufficient\n", __func__);
+		fimc_err("ctrl->mem.base = 0x%x\n", ctrl->mem.base);
+		fimc_err("ctrl->mem.size = 0x%x\n", ctrl->mem.size);
+		fimc_err("*curr = 0x%x\n", *curr);
+		fimc_err("size = 0x%x\n", size);
+		fimc_err("FIMC_OUTBUFS = 0x%x\n", FIMC_OUTBUFS);
+		return -EINVAL;
+	}
+
+	/* Initialize source buffer addr */
+	switch (format) {
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+	case V4L2_PIX_FMT_RGB32:	/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_YUYV:
+		/* Single-plane formats: only the Y slot is used. */
+		for (i = 0; i < FIMC_OUTBUFS; i++) {
+			ctx->src[i].base[FIMC_ADDR_Y] = *curr;
+			ctx->src[i].length[FIMC_ADDR_Y] = size;
+			ctx->src[i].base[FIMC_ADDR_CB] = 0;
+			ctx->src[i].length[FIMC_ADDR_CB] = 0;
+			ctx->src[i].base[FIMC_ADDR_CR] = 0;
+			ctx->src[i].length[FIMC_ADDR_CR] = 0;
+			*curr += size;
+		}
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		/* 2-plane: CbCr plane follows Y within the same buffer. */
+		for (i = 0; i < FIMC_OUTBUFS; i++) {
+			ctx->src[i].base[FIMC_ADDR_Y] = *curr;
+			ctx->src[i].length[FIMC_ADDR_Y] = y_size;
+			ctx->src[i].base[FIMC_ADDR_CB] = *curr + y_size;
+			ctx->src[i].length[FIMC_ADDR_CB] = cb_size;
+			ctx->src[i].base[FIMC_ADDR_CR] = 0;
+			ctx->src[i].length[FIMC_ADDR_CR] = 0;
+			*curr += size;
+		}
+		break;
+	case V4L2_PIX_FMT_NV12T:
+		/* Tiled 2-plane: same layout, tiled plane sizes. */
+		for (i = 0; i < FIMC_OUTBUFS; i++) {
+			ctx->src[i].base[FIMC_ADDR_Y] = *curr;
+			ctx->src[i].base[FIMC_ADDR_CB] = *curr + y_size;
+			ctx->src[i].length[FIMC_ADDR_Y] = y_size;
+			ctx->src[i].length[FIMC_ADDR_CB] = cb_size;
+			ctx->src[i].base[FIMC_ADDR_CR] = 0;
+			ctx->src[i].length[FIMC_ADDR_CR] = 0;
+			*curr += size;
+		}
+		break;
+	case V4L2_PIX_FMT_YUV420:
+		/* 3-plane: Y, then Cb, then Cr, contiguously. */
+		for (i = 0; i < FIMC_OUTBUFS; i++) {
+			ctx->src[i].base[FIMC_ADDR_Y] = *curr;
+			ctx->src[i].base[FIMC_ADDR_CB] = *curr + y_size;
+			ctx->src[i].base[FIMC_ADDR_CR] = *curr + y_size + cb_size;
+			ctx->src[i].length[FIMC_ADDR_Y] = y_size;
+			ctx->src[i].length[FIMC_ADDR_CB] = cb_size;
+			ctx->src[i].length[FIMC_ADDR_CR] = cr_size;
+			*curr += size;
+		}
+		break;
+
+	default:
+		fimc_err("%s: Invalid pixelformt : %d\n", __func__, format);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_outdev_set_dst_buf - carve FIMC_OUTBUFS destination (output DMA)
+ * buffers out of the driver's memory region.
+ *
+ * Each buffer is sized for a full LCD frame at 4 bytes per pixel
+ * (page-aligned); only the Y slot of each buffer is populated.
+ * Returns 0 on success, -EINVAL when the region is too small, -ENOMEM
+ * when the CMA pool cannot satisfy the request.
+ */
+static int fimc_outdev_set_dst_buf(struct fimc_control *ctrl,
+				   struct fimc_ctx *ctx)
+{
+	dma_addr_t *curr = &ctrl->mem.curr;
+	dma_addr_t end;
+	u32 width = ctrl->fb.lcd_hres;
+	u32 height = ctrl->fb.lcd_vres;
+	u32 i, size;
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+	int err;
+	struct cma_info mem_info;
+#endif
+
+	end = ctrl->mem.base + ctrl->mem.size;
+	/* One LCD frame at 32bpp per buffer. */
+	size = PAGE_ALIGN(width * height * 4);
+
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+	/* CMA path: allocate the buffer array fresh from the CMA pool. */
+	ctrl->mem.size = size * FIMC_OUTBUFS;
+	err = cma_info(&mem_info, ctrl->dev, 0);
+	printk(KERN_DEBUG "%s : [cma_info] start_addr : 0x%x, end_addr : 0x%x, "
+			"total_size : 0x%x, free_size : 0x%x req_size=0x%x\n",
+			__func__, mem_info.lower_bound, mem_info.upper_bound,
+			mem_info.total_size, mem_info.free_size,
+			ctrl->mem.size);
+	if (err || (mem_info.free_size < ctrl->mem.size)) {
+		fimc_err("%s: get cma info failed\n", __func__);
+		ctrl->mem.size = 0;
+		ctrl->mem.base = 0;
+		return -ENOMEM;
+	} else {
+		/* NOTE(review): cma_alloc() failure is not checked — verify
+		 * against cma_alloc()'s error convention. */
+		ctrl->mem.base = (dma_addr_t)cma_alloc
+			(ctrl->dev, ctrl->cma_name,
+			 (size_t) (size * FIMC_OUTBUFS), 0);
+	}
+
+	fimc_dbg("%s : ctrl->mem.base = 0x%x\n", __func__, ctrl->mem.base);
+	fimc_dbg("%s : ctrl->mem.size = 0x%x\n", __func__, ctrl->mem.size);
+
+	end = ctrl->mem.base + (size * FIMC_OUTBUFS);
+	ctrl->mem.curr = ctrl->mem.base;
+#endif
+
+	/* All FIMC_OUTBUFS buffers must fit before the region's end. */
+	if ((*curr + (size * FIMC_OUTBUFS)) > end) {
+		fimc_err("%s: Reserved memory is not sufficient\n", __func__);
+		fimc_err("ctrl->mem.base = 0x%x\n", ctrl->mem.base);
+		fimc_err("ctrl->mem.size = 0x%x\n", ctrl->mem.size);
+		fimc_err("*curr = 0x%x\n", *curr);
+		fimc_err("size = 0x%x\n", size);
+		fimc_err("FIMC_OUTBUFS = 0x%x\n", FIMC_OUTBUFS);
+		return -EINVAL;
+	}
+
+	/* Initialize destination buffer addr */
+	for (i = 0; i < FIMC_OUTBUFS; i++) {
+		ctx->dst[i].base[FIMC_ADDR_Y] = *curr;
+		ctx->dst[i].length[FIMC_ADDR_Y] = size;
+		ctx->dst[i].base[FIMC_ADDR_CB] = 0;
+		ctx->dst[i].length[FIMC_ADDR_CB] = 0;
+		ctx->dst[i].base[FIMC_ADDR_CR] = 0;
+		ctx->dst[i].length[FIMC_ADDR_CR] = 0;
+		*curr += size;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate and store the requested rotation for this context.
+ * Only 0/90/180/270 degrees are accepted; anything else is rejected
+ * with -EINVAL.  @ctrl is unused but kept for call-site symmetry.
+ */
+static int fimc_set_rot_degree(struct fimc_control *ctrl,
+			       struct fimc_ctx *ctx,
+			       int degree)
+{
+	if (degree != 0 && degree != 90 && degree != 180 && degree != 270) {
+		fimc_err("Invalid rotate value : %d\n", degree);
+		return -EINVAL;
+	}
+
+	ctx->rotate = degree;
+
+	return 0;
+}
+
+/*
+ * fimc_outdev_check_param - sanity-check an output context before start.
+ *
+ * Verifies that the destination window fits inside the bound rectangle
+ * for the chosen overlay mode (swapping width/height when the mapped
+ * rotation includes a 90-degree component), that the context is in a
+ * startable state, and that at most one context uses an exclusive
+ * (FIFO/DMA) overlay mode.  Returns 0 on success, -EINVAL on a geometry
+ * error, -EBUSY when FIMC is running or another exclusive context exists.
+ */
+int fimc_outdev_check_param(struct fimc_control *ctrl,
+			    struct fimc_ctx *ctx)
+{
+	struct v4l2_rect dst, bound;
+	u32 rot = 0;
+	int ret = 0, i, exclusive = 0;
+
+	rot = fimc_mapping_rot_flip(ctx->rotate, ctx->flip);
+	dst.top = ctx->win.w.top;
+	dst.left = ctx->win.w.left;
+	dst.width = ctx->win.w.width;
+	dst.height = ctx->win.w.height;
+
+	/* Bound rectangle: LCD for FIFO/DMA modes, fbuf for NONE modes. */
+	switch (ctx->overlay.mode) {
+	case FIMC_OVLY_FIFO:		/* fall through */
+	case FIMC_OVLY_DMA_AUTO:	/* fall through */
+	case FIMC_OVLY_DMA_MANUAL:
+		if (rot & FIMC_ROT) {
+			/* 90/270 degrees: window is checked against the
+			 * rotated LCD geometry. */
+			bound.width = ctrl->fb.lcd_vres;
+			bound.height = ctrl->fb.lcd_hres;
+		} else {
+			bound.width = ctrl->fb.lcd_hres;
+			bound.height = ctrl->fb.lcd_vres;
+		}
+		break;
+	case FIMC_OVLY_NONE_SINGLE_BUF:	/* fall through */
+	case FIMC_OVLY_NONE_MULTI_BUF:
+		bound.width = ctx->fbuf.fmt.width;
+		bound.height = ctx->fbuf.fmt.height;
+		break;
+
+	default:
+		fimc_err("%s: invalid ovelay mode.\n", __func__);
+		return -EINVAL;
+	}
+
+	if ((dst.left + dst.width) > bound.width) {
+		fimc_err("Horizontal position setting is failed\n");
+		fimc_err("\tleft = %d, width = %d, bound width = %d,\n",
+				dst.left, dst.width, bound.width);
+		ret = -EINVAL;
+	} else if ((dst.top + dst.height) > bound.height) {
+		fimc_err("Vertical position setting is failed\n");
+		fimc_err("\ttop = %d, height = %d, bound height = %d,\n",
+				dst.top, dst.height, bound.height);
+		ret = -EINVAL;
+	}
+
+	/* Reconfiguration is only allowed from an idle/stopped state. */
+	if ((ctx->status != FIMC_STREAMOFF) &&
+	    (ctx->status != FIMC_READY_ON) &&
+	    (ctx->status != FIMC_STREAMON_IDLE) &&
+	    (ctx->status != FIMC_ON_IDLE_SLEEP)) {
+		fimc_err("%s: FIMC is running\n", __func__);
+		return -EBUSY;
+	}
+
+	/* check other open instance */
+	for (i = 0; i < FIMC_MAX_CTXS; i++) {
+		switch (ctrl->out->ctx[i].overlay.mode) {
+		case FIMC_OVLY_FIFO:		/* fall through */
+		case FIMC_OVLY_DMA_AUTO:	/* fall through */
+		case FIMC_OVLY_DMA_MANUAL:
+			exclusive++;
+			break;
+		case FIMC_OVLY_NONE_SINGLE_BUF:	/* fall through */
+		case FIMC_OVLY_NONE_MULTI_BUF:	/* fall through */
+		case FIMC_OVLY_NOT_FIXED:
+			break;
+		}
+	}
+
+	/* This context is counted above too, so >1 means a conflict. */
+	if (exclusive > 1) {
+		for (i = 0; i < FIMC_MAX_CTXS; i++)
+			fimc_err("%s: ctx %d mode = %d", __func__, i,
+					ctrl->out->ctx[i].overlay.mode);
+		return -EBUSY;
+	}
+
+	return ret;
+}
+
+/*
+ * Program the input-side format registers: burst count, colorspace,
+ * YUV/RGB layout, scan field, extended RGB and address style, all
+ * derived from the source pixel format and field order.
+ */
+static void fimc_outdev_set_src_format(struct fimc_control *ctrl,
+				       u32 pixfmt, enum v4l2_field field)
+{
+	fimc_hwset_input_burst_cnt(ctrl, 4);
+	fimc_hwset_input_colorspace(ctrl, pixfmt);
+	fimc_hwset_input_yuv(ctrl, pixfmt);
+	fimc_hwset_input_rgb(ctrl, pixfmt);
+	/* "intput" is the helper's actual (misspelled) exported name. */
+	fimc_hwset_intput_field(ctrl, field);
+	fimc_hwset_ext_rgb(ctrl, 1);
+	fimc_hwset_input_addr_style(ctrl, pixfmt);
+}
+
+/*
+ * Program the output-side format registers (colorspace, YUV/RGB layout,
+ * scan mode and address style) from the destination pixel format.
+ */
+static void fimc_outdev_set_dst_format(struct fimc_control *ctrl,
+				       struct v4l2_pix_format *pixfmt)
+{
+	fimc_hwset_output_colorspace(ctrl, pixfmt->pixelformat);
+	fimc_hwset_output_yuv(ctrl, pixfmt->pixelformat);
+	fimc_hwset_output_rgb(ctrl, pixfmt->pixelformat);
+	fimc_hwset_output_scan(ctrl, pixfmt);
+	fimc_hwset_output_addr_style(ctrl, pixfmt->pixelformat);
+}
+
+/*
+ * fimc_outdev_set_format - program source and destination formats for
+ * the output path.
+ *
+ * The destination format depends on the overlay mode: non-destructive
+ * modes (FIFO/DMA) render RGB32 progressive frames, or YUV444
+ * interlaced when the source is V4L2_FIELD_INTERLACED_TB; destructive
+ * (NONE) modes take the format directly from the user's framebuffer.
+ */
+static void fimc_outdev_set_format(struct fimc_control *ctrl,
+				   struct fimc_ctx *ctx)
+{
+	struct v4l2_pix_format pixfmt;
+	memset(&pixfmt, 0, sizeof(pixfmt));
+
+	fimc_outdev_set_src_format(ctrl, ctx->pix.pixelformat, ctx->pix.field);
+
+	switch (ctx->overlay.mode) {
+	case FIMC_OVLY_FIFO:		/* fall through */
+	case FIMC_OVLY_DMA_AUTO:	/* fall through */
+	case FIMC_OVLY_DMA_MANUAL:	/* Non-destructive overlay mode */
+		if (ctx->pix.field == V4L2_FIELD_NONE) {
+			pixfmt.pixelformat = V4L2_PIX_FMT_RGB32;
+			pixfmt.field = V4L2_FIELD_NONE;
+		} else if (ctx->pix.field == V4L2_FIELD_INTERLACED_TB) {
+			pixfmt.pixelformat = V4L2_PIX_FMT_YUV444;
+			pixfmt.field = V4L2_FIELD_INTERLACED_TB;
+		} else if (ctx->pix.field == V4L2_FIELD_ANY) {
+			/* Treat "any" like progressive. */
+			pixfmt.pixelformat = V4L2_PIX_FMT_RGB32;
+			pixfmt.field = V4L2_FIELD_NONE;
+		}
+
+		break;
+	case FIMC_OVLY_NONE_SINGLE_BUF:	/* fall through */
+	case FIMC_OVLY_NONE_MULTI_BUF:	/* Destructive overlay mode */
+		pixfmt.pixelformat = ctx->fbuf.fmt.pixelformat;
+		pixfmt.field = V4L2_FIELD_NONE;
+
+		break;
+	default:
+		fimc_err("Invalid overlay mode %d\n", ctx->overlay.mode);
+		break;
+	}
+
+	fimc_outdev_set_dst_format(ctrl, &pixfmt);
+}
+
+/*
+ * Route the data path for output mode: input always comes from memory
+ * (MSDMA); the LCD FIFO link and auto-reload are enabled only for the
+ * FIFO overlay mode and explicitly disabled otherwise.
+ */
+static void fimc_outdev_set_path(struct fimc_control *ctrl,
+				 struct fimc_ctx *ctx)
+{
+	/* source path */
+	fimc_hwset_input_source(ctrl, FIMC_SRC_MSDMA);
+
+	if (ctx->overlay.mode != FIMC_OVLY_FIFO) {
+		fimc_hwset_disable_lcdfifo(ctrl);
+		fimc_hwset_disable_autoload(ctrl);
+		return;
+	}
+
+	fimc_hwset_enable_lcdfifo(ctrl);
+	fimc_hwset_enable_autoload(ctrl);
+}
+
+/*
+ * Program rotation and flip: FIFO overlay rotates on the input stage
+ * (output stage neutral); every other mode rotates on the output stage
+ * (input stage neutral).
+ */
+static void fimc_outdev_set_rot(struct fimc_control *ctrl,
+				struct fimc_ctx *ctx)
+{
+	u32 in_rot = 0, in_flip = 0;
+	u32 out_rot = 0, out_flip = 0;
+
+	if (ctx->overlay.mode == FIMC_OVLY_FIFO) {
+		in_rot = ctx->rotate;
+		in_flip = ctx->flip;
+	} else {
+		out_rot = ctx->rotate;
+		out_flip = ctx->flip;
+	}
+
+	fimc_hwset_input_rot(ctrl, in_rot, in_flip);
+	fimc_hwset_input_flip(ctrl, in_rot, in_flip);
+	fimc_hwset_output_rot_flip(ctrl, out_rot, out_flip);
+}
+
+/*
+ * Program the source DMA offsets from the crop window positioned inside
+ * the full source image bounds.
+ */
+static void fimc_outdev_set_src_dma_offset(struct fimc_control *ctrl,
+					   struct fimc_ctx *ctx)
+{
+	struct v4l2_rect src_bound, src_crop;
+	u32 fmt = ctx->pix.pixelformat;
+
+	src_bound.width = ctx->pix.width;
+	src_bound.height = ctx->pix.height;
+
+	src_crop.left = ctx->crop.left;
+	src_crop.top = ctx->crop.top;
+	src_crop.width = ctx->crop.width;
+	src_crop.height = ctx->crop.height;
+
+	fimc_hwset_input_offset(ctrl, fmt, &src_bound, &src_crop);
+}
+
+/*
+ * fimc4x_outdev_check_src_size - validate source geometry against the
+ * FIMC 4.x hardware constraints.
+ *
+ * @real: cropped (transferred) source rectangle
+ * @org:  full source image rectangle
+ *
+ * Two regimes: when the input rotator is used (FIFO overlay with 90/270
+ * rotation) the height/width constraints are swapped relative to the
+ * non-rotated path.  Returns 0 when all constraints hold, -EINVAL
+ * otherwise (with a message naming the violated constraint).
+ */
+static int fimc4x_outdev_check_src_size(struct fimc_control *ctrl,
+					struct fimc_ctx *ctx,
+					struct v4l2_rect *real,
+					struct v4l2_rect *org)
+{
+	u32 rot = ctx->rotate;
+
+	if ((ctx->overlay.mode == FIMC_OVLY_FIFO) &&
+	    ((rot == 90) || (rot == 270))) {
+		/* Input Rotator */
+		if (real->height % 16) {
+			fimc_err("SRC Real_H(%d): multiple of 16 !\n",
+					real->height);
+			return -EINVAL;
+		}
+
+		if (ctx->sc.pre_hratio) {
+			if (real->height % (ctx->sc.pre_hratio * 4)) {
+				fimc_err("SRC Real_H(%d): multiple of "
+						"4*pre_hratio(%d)\n",
+						real->height,
+						ctx->sc.pre_hratio);
+				return -EINVAL;
+			}
+		}
+
+		if (ctx->sc.pre_vratio) {
+			if (real->width % ctx->sc.pre_vratio) {
+				fimc_err("SRC Real_W(%d): multiple of "
+						"pre_vratio(%d)\n",
+						real->width,
+						ctx->sc.pre_vratio);
+				return -EINVAL;
+			}
+		}
+
+		if (real->height < 16) {
+			fimc_err("SRC Real_H(%d): Min 16\n", real->height);
+			return -EINVAL;
+		}
+		if (real->width < 8) {
+			fimc_err("SRC Real_W(%d): Min 8\n", real->width);
+			return -EINVAL;
+		}
+	} else {
+		/* No Input Rotator */
+		if (real->height < 8) {
+			fimc_err("SRC Real_H(%d): Min 8\n", real->height);
+			return -EINVAL;
+		}
+
+		if (real->width < 16) {
+			fimc_err("SRC Real_W(%d): Min 16\n", real->width);
+			return -EINVAL;
+		}
+
+		if (real->width > ctrl->limit->real_w_no_rot) {
+			fimc_err("SRC REAL_W(%d): Real_W <= %d\n", real->width,
+					ctrl->limit->real_w_no_rot);
+			return -EINVAL;
+		}
+	}
+
+	/* The crop must fit inside the original image. */
+	if (org->height < real->height) {
+		fimc_err("SRC Org_H(%d): larger than Real_H(%d)\n",
+				org->height, real->height);
+		return -EINVAL;
+	}
+
+	if (org->width < real->width) {
+		fimc_err("SRC Org_W: Org_W(%d) >= Real_W(%d)\n", org->width,
+				real->width);
+		return -EINVAL;
+	}
+
+	/* Common pre-scaler divisibility constraints. */
+	if (ctx->sc.pre_vratio) {
+		if (real->height % ctx->sc.pre_vratio) {
+			fimc_err("SRC Real_H(%d): multi of pre_vratio(%d)!\n",
+					real->height, ctx->sc.pre_vratio);
+			return -EINVAL;
+		}
+	}
+
+	if (real->width % 16) {
+		fimc_err("SRC Real_W(%d): multiple of 16 !\n", real->width);
+		return -EINVAL;
+	}
+
+	if (ctx->sc.pre_hratio) {
+		if (real->width % (ctx->sc.pre_hratio * 4)) {
+			fimc_err("SRC Real_W(%d): "
+					"multiple of 4 * pre_hratio(%d)!\n",
+					real->width, ctx->sc.pre_hratio);
+			return -EINVAL;
+		}
+	}
+
+	if (org->width % 16) {
+		fimc_err("SRC Org_W(%d): multiple of 16\n", org->width);
+		return -EINVAL;
+	}
+
+	if (org->height < 8) {
+		fimc_err("SRC Org_H(%d): Min 8\n", org->height);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc50_outdev_check_src_size - validate source geometry against the
+ * FIMC 5.0 hardware constraints.
+ *
+ * @real: cropped (transferred) source rectangle
+ * @org:  full source image rectangle
+ *
+ * The alignment requirements depend on the pixel format and on the scan
+ * order (interlaced vs progressive).  Returns 0 when all constraints
+ * hold, -EINVAL otherwise.
+ *
+ * Fix: the width-alignment error paths previously printed real->width
+ * while labelling it "SRC Real_H"; the messages now say "SRC Real_W".
+ */
+static int fimc50_outdev_check_src_size(struct fimc_control *ctrl,
+					struct fimc_ctx *ctx,
+					struct v4l2_rect *real,
+					struct v4l2_rect *org)
+{
+	u32 rot = ctx->rotate;
+	u32 pixelformat = ctx->pix.pixelformat;
+
+	if ((ctx->overlay.mode == FIMC_OVLY_FIFO) &&
+	    ((rot == 90) || (rot == 270))) {
+		/* Input Rotator */
+		switch (pixelformat) {
+		case V4L2_PIX_FMT_YUV422P:	/* fall through */
+		case V4L2_PIX_FMT_YVU420:
+			if (real->height % 2) {
+				fimc_err("SRC Real_H(%d): multiple of 2\n",
+						real->height);
+				return -EINVAL;
+			}
+			break;
+		default:
+			break;
+		}
+
+		if (real->height < 16) {
+			fimc_err("SRC Real_H(%d): Min 16\n", real->height);
+			return -EINVAL;
+		}
+		if (real->width < 8) {
+			fimc_err("SRC Real_W(%d): Min 8\n", real->width);
+			return -EINVAL;
+		}
+	} else {
+		/* No Input Rotator */
+		if (real->height < 8) {
+			fimc_err("SRC Real_H(%d): Min 8\n", real->height);
+			return -EINVAL;
+		}
+
+		if (real->width < 16) {
+			fimc_err("SRC Real_W(%d): Min 16\n", real->width);
+			return -EINVAL;
+		}
+
+		if (real->width > ctrl->limit->real_w_no_rot) {
+			fimc_err("SRC REAL_W(%d): Real_W <= %d\n", real->width,
+					ctrl->limit->real_w_no_rot);
+			return -EINVAL;
+		}
+	}
+
+	/* The crop must fit inside the original image. */
+	if (org->height < real->height) {
+		fimc_err("SRC Org_H: larger than Real_H, "
+				"org %dx%d, real %dx%d\n",
+				org->width, org->height,
+				real->width, real->height);
+		return -EINVAL;
+	}
+
+	if (org->width < real->width) {
+		fimc_err("SRC Org_W: Org_W(%d) >= Real_W(%d)\n",
+				org->width, real->width);
+		return -EINVAL;
+	}
+
+	/* Format-specific alignment, stricter for interlaced sources. */
+	if (ctx->pix.field == V4L2_FIELD_INTERLACED_TB) {
+		switch (pixelformat) {
+		case V4L2_PIX_FMT_YUV444:	/* fall through */
+		case V4L2_PIX_FMT_RGB32:
+			if (real->height % 2) {
+				fimc_err("SRC Real_H(%d): multiple of 2\n",
+						real->height);
+				return -EINVAL;
+			}
+			break;
+		case V4L2_PIX_FMT_YUV422P:
+			if (real->height % 2) {
+				fimc_err("SRC Real_H(%d): multiple of 2\n",
+						real->height);
+				return -EINVAL;
+			} else if (real->width % 2) {
+				fimc_err("SRC Real_W(%d): multiple of 2\n",
+						real->width);
+				return -EINVAL;
+			}
+			break;
+		case V4L2_PIX_FMT_YVU420:
+			if (real->height % 4) {
+				fimc_err("SRC Real_H(%d): multiple of 4\n",
+						real->height);
+				return -EINVAL;
+			} else if (real->width % 2) {
+				fimc_err("SRC Real_W(%d): multiple of 2\n",
+						real->width);
+				return -EINVAL;
+			}
+			break;
+		default:
+			break;
+		}
+	} else if (ctx->pix.field == V4L2_FIELD_NONE) {
+		if (pixelformat == V4L2_PIX_FMT_YUV422P) {
+			if (real->height % 2) {
+				fimc_err("SRC Real_H(%d): multiple of 2\n",
+						real->height);
+				return -EINVAL;
+			} else if (real->width % 2) {
+				fimc_err("SRC Real_W(%d): multiple of 2\n",
+						real->width);
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Validate the source DMA geometry (crop rectangle vs. full image) with
+ * the size checker matching this FIMC hardware revision, then program
+ * the original and real input sizes.  Returns 0 on success or the
+ * negative error from the size check.
+ */
+static int fimc_outdev_set_src_dma_size(struct fimc_control *ctrl,
+					struct fimc_ctx *ctx)
+{
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	struct v4l2_rect real, org;
+	int ret;
+
+	real.width = ctx->crop.width;
+	real.height = ctx->crop.height;
+	org.width = ctx->pix.width;
+	org.height = ctx->pix.height;
+
+	/* FIMC 5.0+ and 4.x have different geometry constraints. */
+	ret = (pdata->hw_ver >= 0x50) ?
+		fimc50_outdev_check_src_size(ctrl, ctx, &real, &org) :
+		fimc4x_outdev_check_src_size(ctrl, ctx, &real, &org);
+	if (ret < 0)
+		return ret;
+
+	fimc_hwset_org_input_size(ctrl, org.width, org.height);
+	fimc_hwset_real_input_size(ctrl, real.width, real.height);
+
+	return 0;
+}
+
+/*
+ * fimc_outdev_set_dst_dma_offset - program destination DMA offsets.
+ *
+ * Remaps the destination window and its bound rectangle into the
+ * rotated coordinate space (the output rotator rotates the frame, so
+ * the window position must be expressed in post-rotation coordinates).
+ * For the non-destructive modes (FIFO/DMA) the offsets are forced to
+ * zero because those modes render into a dedicated full buffer.
+ */
+static void fimc_outdev_set_dst_dma_offset(struct fimc_control *ctrl,
+					   struct fimc_ctx *ctx)
+{
+	struct v4l2_rect bound, win;
+	struct v4l2_rect *w = &ctx->win.w;
+	u32 pixfmt = ctx->fbuf.fmt.pixelformat;
+
+	memset(&bound, 0, sizeof(bound));
+	memset(&win, 0, sizeof(win));
+
+	switch (ctx->rotate) {
+	case 0:
+		bound.width = ctx->fbuf.fmt.width;
+		bound.height = ctx->fbuf.fmt.height;
+
+		win.left = w->left;
+		win.top = w->top;
+		win.width = w->width;
+		win.height = w->height;
+
+		break;
+	case 90:
+		/* 90 degrees: bound is transposed, window rotated CW. */
+		bound.width = ctx->fbuf.fmt.height;
+		bound.height = ctx->fbuf.fmt.width;
+
+		win.left = ctx->fbuf.fmt.height - (w->height + w->top);
+		win.top = w->left;
+		win.width = w->height;
+		win.height = w->width;
+
+		break;
+	case 180:
+		/* 180 degrees: window mirrored in both axes. */
+		bound.width = ctx->fbuf.fmt.width;
+		bound.height = ctx->fbuf.fmt.height;
+
+		win.left = ctx->fbuf.fmt.width - (w->left + w->width);
+		win.top = ctx->fbuf.fmt.height - (w->top + w->height);
+		win.width = w->width;
+		win.height = w->height;
+
+		break;
+	case 270:
+		/* 270 degrees: bound transposed, window rotated CCW.
+		 * NOTE(review): uses ctx->win.w.top directly while the
+		 * other cases go through the 'w' alias — same value,
+		 * just inconsistent style. */
+		bound.width = ctx->fbuf.fmt.height;
+		bound.height = ctx->fbuf.fmt.width;
+
+		win.left = ctx->win.w.top;
+		win.top = ctx->fbuf.fmt.width - (w->left + w->width);
+		win.width = w->height;
+		win.height = w->width;
+
+		break;
+	default:
+		fimc_err("Rotation degree is invalid(%d)\n", ctx->rotate);
+		break;
+	}
+
+	switch (ctx->overlay.mode) {
+	case FIMC_OVLY_FIFO:		/* fall through */
+	case FIMC_OVLY_DMA_AUTO:	/* fall through */
+	case FIMC_OVLY_DMA_MANUAL:
+		/* Dedicated buffer: no offset into a shared framebuffer. */
+		memset(&bound, 0, sizeof(bound));
+		memset(&win, 0, sizeof(win));
+		fimc_hwset_output_offset(ctrl, pixfmt, &bound, &win);
+		break;
+	default:
+		fimc_hwset_output_offset(ctrl, pixfmt, &bound, &win);
+		break;
+	}
+
+	fimc_dbg("bound:width(%d), height(%d)\n", bound.width, bound.height);
+	fimc_dbg("win:width(%d), height(%d)\n", win.width, win.height);
+	fimc_dbg("win:top(%d), left(%d)\n", win.top, win.left);
+}
+
+/*
+ * fimc_outdev_check_dst_size - validate destination geometry against
+ * hardware limits.
+ *
+ * @real: destination window actually written
+ * @org:  full destination frame
+ *
+ * YUV420 targets need an even height.  When the output rotator is in
+ * use (non-FIFO overlay with 90/270 rotation) the window is compared
+ * against the frame with axes swapped and checked against the rotated
+ * height limit; otherwise the plain limits apply.  FIFO overlay skips
+ * the non-rotator branch entirely.  Returns 0 or -EINVAL.
+ */
+static int fimc_outdev_check_dst_size(struct fimc_control *ctrl,
+				      struct fimc_ctx *ctx,
+				      struct v4l2_rect *real,
+				      struct v4l2_rect *org)
+{
+	u32 rot = ctx->rotate;
+	__u32 pixel_type;
+
+	pixel_type = fimc_get_pixel_format_type(ctx->fbuf.fmt.pixelformat);
+
+	if (FIMC_YUV420 == pixel_type && real->height % 2) {
+		fimc_err("DST Real_H(%d): even number for YUV420 formats\n",
+				real->height);
+		return -EINVAL;
+	}
+
+	if ((ctx->overlay.mode != FIMC_OVLY_FIFO) &&
+	    ((rot == 90) || (rot == 270))) {
+		/* Use Output Rotator */
+		if (org->height < real->width) {
+			fimc_err("DST Org_H: Org_H(%d) >= Real_W(%d)\n",
+					org->height, real->width);
+			return -EINVAL;
+		}
+
+		if (org->width < real->height) {
+			fimc_err("DST Org_W: Org_W(%d) >= Real_H(%d)\n",
+					org->width, real->height);
+			return -EINVAL;
+		}
+
+		if (real->height > ctrl->limit->trg_h_rot) {
+			fimc_err("DST REAL_H: Real_H(%d) <= %d\n", real->height,
+					ctrl->limit->trg_h_rot);
+			return -EINVAL;
+		}
+	} else if (ctx->overlay.mode != FIMC_OVLY_FIFO) {
+		/* No Output Rotator */
+		if (org->height < 8) {
+			fimc_err("DST Org_H(%d): Min 8\n", org->height);
+			return -EINVAL;
+		}
+
+		if (org->height < real->height) {
+			fimc_err("DST Org_H: Org_H(%d) >= Real_H(%d)\n",
+					org->height, real->height);
+			return -EINVAL;
+		}
+		/*
+		if (org->width % 8) {
+			fimc_err("DST Org_W: multiple of 8\n");
+			return -EINVAL;
+		}*/
+
+		if (org->width < real->width) {
+			fimc_err("DST Org_W: Org_W(%d) >= Real_W(%d)\n",
+					org->width, real->width);
+			return -EINVAL;
+		}
+
+		if (real->height > ctrl->limit->trg_h_no_rot) {
+			fimc_err("DST REAL_H: Real_H(%d) <= %d\n", real->height,
+					ctrl->limit->trg_h_no_rot);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_outdev_set_dst_dma_size - derive and program destination sizes.
+ *
+ * The "real" rectangle is the window actually written and the "org"
+ * rectangle is the full frame it lives in; both depend on the overlay
+ * mode (user framebuffer for NONE modes, the window itself for DMA
+ * modes, the LCD for FIFO) and are swapped for 90/270 rotation.
+ * After validation all four output size registers are programmed.
+ * Returns 0 on success or the negative error from the size check.
+ */
+static int fimc_outdev_set_dst_dma_size(struct fimc_control *ctrl,
+					struct fimc_ctx *ctx)
+{
+	struct v4l2_rect org, real;
+	int ret = -1;
+
+	memset(&org, 0, sizeof(org));
+	memset(&real, 0, sizeof(real));
+
+	switch (ctx->overlay.mode) {
+	case FIMC_OVLY_NONE_MULTI_BUF:	/* fall through */
+	case FIMC_OVLY_NONE_SINGLE_BUF:
+		/* Destructive modes: frame is the user's framebuffer. */
+		real.width = ctx->win.w.width;
+		real.height = ctx->win.w.height;
+
+		switch (ctx->rotate) {
+		case 0:		/* fall through */
+		case 180:
+			org.width = ctx->fbuf.fmt.width;
+			org.height = ctx->fbuf.fmt.height;
+			break;
+		case 90:	/* fall through */
+		case 270:
+			org.width = ctx->fbuf.fmt.height;
+			org.height = ctx->fbuf.fmt.width;
+			break;
+		default:
+			fimc_err("Rotation degree is invalid(%d)\n",
+					ctx->rotate);
+			break;
+		}
+
+		break;
+
+	case FIMC_OVLY_DMA_MANUAL:	/* fall through */
+	case FIMC_OVLY_DMA_AUTO:
+		/* DMA modes: the frame is exactly the window. */
+		real.width = ctx->win.w.width;
+		real.height = ctx->win.w.height;
+
+		switch (ctx->rotate) {
+		case 0:		/* fall through */
+		case 180:
+			org.width = ctx->win.w.width;
+			org.height = ctx->win.w.height;
+			break;
+		case 90:	/* fall through */
+		case 270:
+			org.width = ctx->win.w.height;
+			org.height = ctx->win.w.width;
+			break;
+		default:
+			fimc_err("Rotation degree is invalid(%d)\n",
+					ctx->rotate);
+			break;
+		}
+
+		break;
+	case FIMC_OVLY_FIFO:
+		/* FIFO mode: the frame is the LCD panel. */
+		switch (ctx->rotate) {
+		case 0:		/* fall through */
+		case 180:
+			real.width = ctx->win.w.width;
+			real.height = ctx->win.w.height;
+			org.width = ctrl->fb.lcd_hres;
+			org.height = ctrl->fb.lcd_vres;
+			break;
+		case 90:	/* fall through */
+		case 270:
+			real.width = ctx->win.w.height;
+			real.height = ctx->win.w.width;
+			org.width = ctrl->fb.lcd_vres;
+			org.height = ctrl->fb.lcd_hres;
+			break;
+
+		default:
+			fimc_err("Rotation degree is invalid(%d)\n",
+					ctx->rotate);
+			break;
+		}
+
+		break;
+	default:
+		break;
+	}
+
+	/* DMA_AUTO resizes the full frame to hardware preference. */
+	if (ctx->overlay.mode == FIMC_OVLY_DMA_AUTO)
+		fimc_outdev_dma_auto_dst_resize(&org);
+
+	fimc_dbg("DST org: width(%d), height(%d)\n", org.width, org.height);
+	fimc_dbg("DST real: width(%d), height(%d)\n", real.width, real.height);
+
+	ret = fimc_outdev_check_dst_size(ctrl, ctx, &real, &org);
+	if (ret < 0)
+		return ret;
+
+	fimc_hwset_output_size(ctrl, real.width, real.height);
+	fimc_hwset_output_area(ctrl, real.width, real.height);
+	fimc_hwset_org_output_size(ctrl, org.width, org.height);
+	fimc_hwset_ext_output_size(ctrl, real.width, real.height);
+
+	return 0;
+}
+
+/*
+ * fimc_outdev_calibrate_scale_info - compute the scaler's effective
+ * source and destination rectangles.
+ *
+ * With the output rotator (every mode but FIFO) the scaler sees the
+ * unrotated crop and window; with the input rotator (FIFO mode and
+ * 90/270 rotation) both rectangles are transposed because rotation
+ * happens before scaling.
+ */
+static void fimc_outdev_calibrate_scale_info(struct fimc_control *ctrl,
+					     struct fimc_ctx *ctx,
+					     struct v4l2_rect *src,
+					     struct v4l2_rect *dst)
+{
+	if (ctx->overlay.mode != FIMC_OVLY_FIFO) {
+		/* OUTPUT ROTATOR */
+		src->width = ctx->crop.width;
+		src->height = ctx->crop.height;
+		dst->width = ctx->win.w.width;
+		dst->height = ctx->win.w.height;
+	} else {
+		/* INPUT ROTATOR */
+		switch (ctx->rotate) {
+		case 0:		/* fall through */
+		case 180:
+			src->width = ctx->crop.width;
+			src->height = ctx->crop.height;
+			dst->width = ctx->win.w.width;
+			dst->height = ctx->win.w.height;
+			break;
+		case 90:	/* fall through */
+		case 270:
+			/* Rotation precedes scaling: transpose both. */
+			src->width = ctx->crop.height;
+			src->height = ctx->crop.width;
+			dst->width = ctx->win.w.height;
+			dst->height = ctx->win.w.width;
+			break;
+		default:
+			fimc_err("Rotation degree is invalid(%d)\n",
+					ctx->rotate);
+			break;
+		}
+	}
+
+	fimc_dbg("src->width(%d), src->height(%d)\n", src->width, src->height);
+	fimc_dbg("dst->width(%d), dst->height(%d)\n", dst->width, dst->height);
+}
+
+/*
+ * fimc_outdev_check_scaler - validate scaler pre-stage output width and
+ * the per-format width-alignment of both scaler ends.
+ *
+ * The source width must be a multiple of the format's pixel grouping
+ * (1 for RGB32, 2 for 16bpp formats, 8 for 4:2:0 formats); the same
+ * rule applies to the destination width, whose effective format is
+ * RGB32 for non-destructive overlay modes and the user framebuffer
+ * format otherwise.  Returns 0, -EDOM on an alignment/limit violation,
+ * or -EINVAL on an unknown format.
+ *
+ * Fix: the destination-width error message previously said "source
+ * width"; it now says "destination width".
+ */
+static int fimc_outdev_check_scaler(struct fimc_control *ctrl,
+				    struct fimc_ctx *ctx,
+				    struct v4l2_rect *src,
+				    struct v4l2_rect *dst)
+{
+	u32 pixels = 0, dstfmt = 0;
+
+	/* Check scaler limitation */
+	if (ctx->sc.pre_dst_width > ctrl->limit->pre_dst_w) {
+		fimc_err("MAX PreDstWidth(%d) is %d\n", ctx->sc.pre_dst_width,
+				ctrl->limit->pre_dst_w);
+		return -EDOM;
+	}
+
+	/* SRC width double boundary check */
+	switch (ctx->pix.pixelformat) {
+	case V4L2_PIX_FMT_RGB32:
+		pixels = 1;
+		break;
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_RGB565:
+		pixels = 2;
+		break;
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_YVU420:	/* fall through */
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV21:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:
+		pixels = 8;
+		break;
+	default:
+		fimc_err("Invalid color format(0x%x)\n", ctx->pix.pixelformat);
+		return -EINVAL;
+	}
+
+	if (src->width % pixels) {
+		fimc_err("source width(%d) multiple of %d pixels\n", src->width,
+				pixels);
+		return -EDOM;
+	}
+
+	/* DST width double boundary check */
+	switch (ctx->overlay.mode) {
+	case FIMC_OVLY_FIFO:		/* fall through */
+	case FIMC_OVLY_DMA_AUTO:	/* fall through */
+	case FIMC_OVLY_DMA_MANUAL:
+		/* Non-destructive modes always render RGB32. */
+		dstfmt = V4L2_PIX_FMT_RGB32;
+		break;
+	case FIMC_OVLY_NONE_SINGLE_BUF:	/* fall through */
+	case FIMC_OVLY_NONE_MULTI_BUF:
+		dstfmt = ctx->fbuf.fmt.pixelformat;
+		break;
+	default:
+		break;
+	}
+
+	switch (dstfmt) {
+	case V4L2_PIX_FMT_RGB32:
+		pixels = 1;
+		break;
+	case V4L2_PIX_FMT_RGB565:
+		pixels = 2;
+		break;
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_YVU420:	/* fall through */
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV12M:	/* fall through */
+	case V4L2_PIX_FMT_NV21:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:
+		pixels = 8;
+		break;
+	default:
+		fimc_err("Invalid color format(0x%x)\n", dstfmt);
+		return -EINVAL;
+	}
+
+	if (dst->width % pixels) {
+		fimc_err("destination width(%d) multiple of %d pixels\n",
+				dst->width, pixels);
+		return -EDOM;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_outdev_set_scaler - compute and program pre- and main-scaler
+ * ratios for the output path.
+ *
+ * Derives pre-scaler ratios/shift factors from the calibrated src/dst
+ * rectangles, then the main-scaler ratios (14-bit precision on FIMC
+ * 5.0+, 8-bit before).  A square source downscaled by 1.5x-2x gets its
+ * pre-ratio forced to 2 in that axis.  Returns 0 on success or a
+ * negative error from the factor computation / scaler check.
+ */
+static int fimc_outdev_set_scaler(struct fimc_control *ctrl,
+				  struct fimc_ctx *ctx)
+{
+	struct v4l2_rect src, dst;
+	int ret = 0;
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+	memset(&src, 0, sizeof(src));
+	memset(&dst, 0, sizeof(dst));
+
+	fimc_outdev_calibrate_scale_info(ctrl, ctx, &src, &dst);
+
+	ret = fimc_get_scaler_factor(src.width, dst.width,
+			&ctx->sc.pre_hratio, &ctx->sc.hfactor);
+	if (ret < 0) {
+		fimc_err("Fail : Out of Width scale range(%d, %d)\n",
+				src.width, dst.width);
+		return ret;
+	}
+
+	ret = fimc_get_scaler_factor(src.height, dst.height,
+			&ctx->sc.pre_vratio, &ctx->sc.vfactor);
+	if (ret < 0) {
+		fimc_err("Fail : Out of Height scale range(%d, %d)\n",
+				src.height, dst.height);
+		return ret;
+	}
+
+	/* Square source shrunk by 1.5x-2x: force pre-ratio 2. */
+	if (src.width == src.height) {
+		if ((src.width * 10 / dst.width) >= 15 &&
+		    (src.width * 10 / dst.width) < 20) {
+			ctx->sc.pre_hratio = 2;
+			ctx->sc.hfactor = 1;
+		}
+		if ((src.height * 10 / dst.height) >= 15 &&
+		    (src.height * 10 / dst.height) < 20) {
+			ctx->sc.pre_vratio = 2;
+			ctx->sc.vfactor = 1;
+		}
+	}
+
+	ctx->sc.pre_dst_width = src.width / ctx->sc.pre_hratio;
+	ctx->sc.pre_dst_height = src.height / ctx->sc.pre_vratio;
+
+	/* Main-scaler ratio precision differs per hardware revision. */
+	if (pdata->hw_ver >= 0x50) {
+		ctx->sc.main_hratio = (src.width << 14) /
+					(dst.width << ctx->sc.hfactor);
+		ctx->sc.main_vratio = (src.height << 14) /
+					(dst.height << ctx->sc.vfactor);
+	} else {
+		ctx->sc.main_hratio = (src.width << 8) /
+					(dst.width << ctx->sc.hfactor);
+		ctx->sc.main_vratio = (src.height << 8) /
+					(dst.height << ctx->sc.vfactor);
+	}
+
+	fimc_dbg("pre_hratio(%d), hfactor(%d), pre_vratio(%d), vfactor(%d)\n",
+			ctx->sc.pre_hratio, ctx->sc.hfactor,
+			ctx->sc.pre_vratio, ctx->sc.vfactor);
+
+
+	fimc_dbg("pre_dst_width(%d), main_hratio(%d), "
+			"pre_dst_height(%d), main_vratio(%d)\n",
+			ctx->sc.pre_dst_width, ctx->sc.main_hratio,
+			ctx->sc.pre_dst_height, ctx->sc.main_vratio);
+
+	ctx->sc.bypass = 0;	/* Input DMA cannot support scaler bypass. */
+	ctx->sc.scaleup_h = (dst.width >= src.width) ? 1 : 0;
+	ctx->sc.scaleup_v = (dst.height >= src.height) ? 1 : 0;
+	ctx->sc.shfactor = 10 - (ctx->sc.hfactor + ctx->sc.vfactor);
+
+	/* Older hardware has extra width-alignment constraints. */
+	if (pdata->hw_ver < 0x50) {
+		ret = fimc_outdev_check_scaler(ctrl, ctx, &src, &dst);
+		if (ret < 0)
+			return ret;
+	}
+
+	fimc_hwset_prescaler(ctrl, &ctx->sc);
+	fimc_hwset_scaler(ctrl, &ctx->sc);
+
+	return 0;
+}
+
+/*
+ * fimc_outdev_set_ctx_param - program the whole output pipeline for one
+ * context: reset, format, path, rotation, DMA geometry, scaler, and
+ * (with CONFIG_VIDEO_IPC) the interlace-to-progressive converter for
+ * FIFO/interlaced sources.  Skips all hardware access when the block
+ * is powered down.  Returns 0 on success or a negative error.
+ *
+ * Fix: the CONFIG_VIDEO_IPC locals were declared after the power-check
+ * statement (declaration-after-statement, rejected by kernel style);
+ * the declarations now precede all statements, behavior unchanged.
+ */
+int fimc_outdev_set_ctx_param(struct fimc_control *ctrl, struct fimc_ctx *ctx)
+{
+	int ret;
+#if defined(CONFIG_VIDEO_IPC)
+	u32 use_ipc = 0;
+	struct v4l2_rect src, dst;
+#endif
+
+	/* Skip register programming while the block is powered off. */
+	if (ctrl && (ctrl->regs == NULL)) {
+		fimc_dbg("%s FIMC%d power is off: skip to set config\n",
+				__func__, ctrl->id);
+		return 0;
+	}
+
+#if defined(CONFIG_VIDEO_IPC)
+	memset(&src, 0, sizeof(src));
+	memset(&dst, 0, sizeof(dst));
+#endif
+
+	fimc_hwset_sw_reset(ctrl);
+
+	if ((ctrl->status == FIMC_READY_ON) ||
+	    (ctrl->status == FIMC_STREAMON_IDLE))
+		fimc_hwset_enable_irq(ctrl, 0, 1);
+
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	fimc_hwset_output_buf_sequence_all(ctrl, FRAME_SEQ);
+#endif
+
+	fimc_outdev_set_format(ctrl, ctx);
+	fimc_outdev_set_path(ctrl, ctx);
+	fimc_outdev_set_rot(ctrl, ctx);
+
+	fimc_outdev_set_src_dma_offset(ctrl, ctx);
+	ret = fimc_outdev_set_src_dma_size(ctrl, ctx);
+	if (ret < 0)
+		return ret;
+
+	fimc_outdev_set_dst_dma_offset(ctrl, ctx);
+
+	ret = fimc_outdev_set_dst_dma_size(ctrl, ctx);
+	if (ret < 0)
+		return ret;
+
+	ret = fimc_outdev_set_scaler(ctrl, ctx);
+	if (ret < 0)
+		return ret;
+
+#if defined(CONFIG_VIDEO_IPC)
+	/* Interlaced FIFO output goes through the IPC deinterlacer. */
+	if (ctx->overlay.mode == FIMC_OVLY_FIFO)
+		if (ctx->pix.field == V4L2_FIELD_INTERLACED_TB)
+			use_ipc = 1;
+
+	if (use_ipc) {
+		fimc_outdev_calibrate_scale_info(ctrl, ctx, &src, &dst);
+		ret = ipc_init(dst.width, dst.height/2, IPC_2D);
+		if (ret < 0)
+			return ret;
+	}
+#endif
+
+	return 0;
+}
+
+/*
+ * fimc_fimd_rect - map the context's destination window into FIMD (LCD)
+ * coordinates for the given rotation.
+ *
+ * 0 degrees is a direct copy; 90/180/270 mirror and/or transpose the
+ * window within the LCD resolution.  Returns 0 on success, -EINVAL for
+ * an unsupported rotation value.  (Unreachable `break` statements after
+ * `return` removed.)
+ */
+int fimc_fimd_rect(const struct fimc_control *ctrl,
+		   const struct fimc_ctx *ctx,
+		   struct v4l2_rect *fimd_rect)
+{
+	switch (ctx->rotate) {
+	case 0:
+		fimd_rect->left = ctx->win.w.left;
+		fimd_rect->top = ctx->win.w.top;
+		fimd_rect->width = ctx->win.w.width;
+		fimd_rect->height = ctx->win.w.height;
+
+		break;
+
+	case 90:
+		fimd_rect->left = ctrl->fb.lcd_hres -
+				(ctx->win.w.top + ctx->win.w.height);
+		fimd_rect->top = ctx->win.w.left;
+		fimd_rect->width = ctx->win.w.height;
+		fimd_rect->height = ctx->win.w.width;
+
+		break;
+
+	case 180:
+		fimd_rect->left = ctrl->fb.lcd_hres -
+				(ctx->win.w.left + ctx->win.w.width);
+		fimd_rect->top = ctrl->fb.lcd_vres -
+				(ctx->win.w.top + ctx->win.w.height);
+		fimd_rect->width = ctx->win.w.width;
+		fimd_rect->height = ctx->win.w.height;
+
+		break;
+
+	case 270:
+		fimd_rect->left = ctx->win.w.top;
+		fimd_rect->top = ctrl->fb.lcd_vres -
+				(ctx->win.w.left + ctx->win.w.width);
+		fimd_rect->width = ctx->win.w.height;
+		fimd_rect->height = ctx->win.w.width;
+
+		break;
+
+	default:
+		fimc_err("Rotation degree is invalid(%d)\n", ctx->rotate);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_start_fifo - configure the FIMD window for FIFO (direct) output
+ * and open the FIFO link.
+ *
+ * Maps the destination window into LCD coordinates, selects the FIFO
+ * (progressive) or IPC (interlaced) data path, detaches the window from
+ * DMA memory, programs window size/position, then opens the FIFO with
+ * fimc_outdev_start_camif as the start callback.  Returns 0 on success,
+ * -EINVAL on any ioctl failure.
+ */
+int fimc_start_fifo(struct fimc_control *ctrl, struct fimc_ctx *ctx)
+{
+	struct v4l2_rect fimd_rect;
+	struct fb_var_screeninfo var;
+	struct s3cfb_user_window window;
+	int ret = -1;
+	u32 id = ctrl->id;
+
+	memset(&fimd_rect, 0, sizeof(struct v4l2_rect));
+	ret = fimc_fimd_rect(ctrl, ctx, &fimd_rect);
+	if (ret < 0) {
+		fimc_err("fimc_fimd_rect fail\n");
+		return -EINVAL;
+	}
+
+	/* Get WIN var_screeninfo */
+	ret = s3cfb_direct_ioctl(id, FBIOGET_VSCREENINFO,
+						(unsigned long)&var);
+	if (ret < 0) {
+		fimc_err("direct_ioctl(FBIOGET_VSCREENINFO) fail\n");
+		return -EINVAL;
+	}
+
+	/* Don't allocate the memory. */
+	/* NOTE(review): if pix.field is neither NONE nor INTERLACED_TB,
+	 * neither branch runs and the stale ret from the previous ioctl
+	 * is re-checked — confirm callers guarantee one of these fields. */
+	if (ctx->pix.field == V4L2_FIELD_NONE)
+		ret = s3cfb_direct_ioctl(id,
+				S3CFB_SET_WIN_PATH, DATA_PATH_FIFO);
+	else if (ctx->pix.field == V4L2_FIELD_INTERLACED_TB)
+		ret = s3cfb_direct_ioctl(id,
+				S3CFB_SET_WIN_PATH, DATA_PATH_IPC);
+	if (ret < 0) {
+		fimc_err("direct_ioctl(S3CFB_SET_WIN_PATH) fail\n");
+		return -EINVAL;
+	}
+
+	ret = s3cfb_direct_ioctl(id, S3CFB_SET_WIN_MEM, DMA_MEM_NONE);
+	if (ret < 0) {
+		fimc_err("direct_ioctl(S3CFB_SET_WIN_MEM) fail\n");
+		return -EINVAL;
+	}
+
+	/* FIFO path drives pixels directly: no DMA window address. */
+	ret = s3cfb_direct_ioctl(id, S3CFB_SET_WIN_ADDR, 0x00000000);
+	if (ret < 0) {
+		fimc_err("direct_ioctl(S3CFB_SET_WIN_ADDR) fail\n");
+		return -EINVAL;
+	}
+
+	/* Update WIN size */
+	var.xres_virtual = fimd_rect.width;
+	var.yres_virtual = fimd_rect.height;
+	var.xres = fimd_rect.width;
+	var.yres = fimd_rect.height;
+	ret = s3cfb_direct_ioctl(id, FBIOPUT_VSCREENINFO,
+						(unsigned long)&var);
+	if (ret < 0) {
+		fimc_err("direct_ioctl(FBIOPUT_VSCREENINFO) fail\n");
+		return -EINVAL;
+	}
+
+	/* Update WIN position */
+	window.x = fimd_rect.left;
+	window.y = fimd_rect.top;
+	ret = s3cfb_direct_ioctl(id, S3CFB_WIN_POSITION,
+			(unsigned long)&window);
+	if (ret < 0) {
+		fimc_err("direct_ioctl(S3CFB_WIN_POSITION) fail\n");
+		return -EINVAL;
+	}
+
+	/* Open WIN FIFO */
+	ret = ctrl->fb.open_fifo(id, 0, fimc_outdev_start_camif, (void *)ctrl);
+	if (ret < 0) {
+		fimc_err("FIMD FIFO close fail\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_outdev_overlay_buf - mmap every output buffer into user space.
+ *
+ * Records each destination buffer's size and physical base in the overlay
+ * buffer table, then maps it via do_mmap(); overlay.req_idx tracks which
+ * buffer a concurrent mmap handler should serve, and is reset to -1 when
+ * all mappings are done. Returns 0 on success, -EINVAL if a map fails.
+ */
+int fimc_outdev_overlay_buf(struct file *filp,
+			    struct fimc_control *ctrl,
+			    struct fimc_ctx *ctx)
+{
+	struct fimc_overlay_buf *ovl = &ctx->overlay.buf;
+	int idx;
+
+	for (idx = 0; idx < FIMC_OUTBUFS; idx++) {
+		ctx->overlay.req_idx = idx;
+		ovl->size[idx] = ctx->dst[idx].length[0];
+		ovl->phy_addr[idx] = ctx->dst[idx].base[0];
+		ovl->vir_addr[idx] = do_mmap(filp, 0, ovl->size[idx],
+					     PROT_READ|PROT_WRITE,
+					     MAP_SHARED, 0);
+		if (ovl->vir_addr[idx] == -EINVAL) {
+			fimc_err("%s: fail\n", __func__);
+			return -EINVAL;
+		}
+
+		fimc_dbg("idx : %d, size(0x%08x), phy_addr(0x%08x), "
+				"vir_addr(0x%08x)\n", idx, ovl->size[idx],
+				ovl->phy_addr[idx], ovl->vir_addr[idx]);
+	}
+
+	ctx->overlay.req_idx = -1;
+
+	return 0;
+}
+
+/*
+ * fimc_reqbufs_output - VIDIOC_REQBUFS handler for the output (m2m) path.
+ *
+ * count == 0 releases buffers: resets the memory cursor, frees CMA memory
+ * (when configured), and munmaps any overlay user mappings.
+ * count > 0 allocates/initializes source buffers (MMAP) or just arms the
+ * USERPTR index. Returns 0, or -EBUSY while streaming / already requested.
+ */
+int fimc_reqbufs_output(void *fh, struct v4l2_requestbuffers *b)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	struct fimc_ctx *ctx;
+	struct fimc_overlay_buf *buf;
+	struct mm_struct *mm = current->mm;
+	enum fimc_overlay_mode mode;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	int ret = -1, i;
+
+	ctx = &ctrl->out->ctx[ctx_id];
+	buf = &ctx->overlay.buf;
+	mode = ctx->overlay.mode;
+
+	fimc_info1("%s: called\n", __func__);
+
+	/* Buffers may only be (re)negotiated while streaming is off. */
+	if (ctx->status != FIMC_STREAMOFF && b->count != 0) {
+		fimc_dump_context(ctrl, ctx);
+		fimc_err("%s: FIMC is running\n", __func__);
+		return -EBUSY;
+	}
+
+	if (ctx->is_requested == 1 && b->count != 0) {
+		fimc_err("Buffers were already requested\n");
+		return -EBUSY;
+	}
+
+	/* Clamp to the driver's fixed buffer pool size. */
+	if (b->count > FIMC_OUTBUFS) {
+		fimc_warn("The buffer count is modified by driver "
+				"from %d to %d\n", b->count, FIMC_OUTBUFS);
+		b->count = FIMC_OUTBUFS;
+	}
+
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+	fimc_hwset_output_buf_sequence_all(ctrl, FRAME_SEQ);
+#endif
+
+	fimc_init_out_buf(ctx);
+	ctx->is_requested = 0;
+
+	if (b->count == 0) {
+		/* Release path: rewind allocator and drop user mappings. */
+		ctrl->mem.curr = ctrl->mem.base;
+		ctx->status = FIMC_STREAMOFF;
+#ifdef CONFIG_VIDEO_SAMSUNG_USE_DMA_MEM
+		if (ctrl->mem.base && b->memory == V4L2_MEMORY_MMAP) {
+			cma_free(ctrl->mem.base);
+			ctrl->mem.base = 0;
+			ctrl->mem.size = 0;
+		}
+#endif
+		switch (ctx->overlay.mode) {
+		case FIMC_OVLY_DMA_AUTO:		/* fall through */
+		case FIMC_OVLY_DMA_MANUAL:
+			for (i = 0; i < FIMC_OUTBUFS; i++) {
+				if (buf->vir_addr[i]) {
+					ret = do_munmap(mm,
+							buf->vir_addr[i],
+							buf->size[i]);
+					if (ret < 0)
+						fimc_err("%s: do_munmap fail. "
+						"vir_addr[%d](0x%08x)\n",
+						__func__, i, buf->vir_addr[i]);
+				}
+			}
+			break;
+		default:
+			break;
+		}
+	} else {
+		/* initialize source buffers */
+		if (b->memory == V4L2_MEMORY_MMAP) {
+			ret = fimc_outdev_set_src_buf(ctrl, ctx);
+			ctx->overlay.req_idx = FIMC_MMAP_IDX;
+			if (ret)
+				return ret;
+		} else if (b->memory == V4L2_MEMORY_USERPTR) {
+			/* USERPTR addresses arrive later, at QBUF time. */
+			if (mode == FIMC_OVLY_DMA_AUTO ||
+					mode == FIMC_OVLY_NOT_FIXED)
+				ctx->overlay.req_idx = FIMC_USERPTR_IDX;
+		}
+
+		ctx->is_requested = 1;
+	}
+
+	ctx->buf_num = b->count;
+
+	return 0;
+}
+
+/*
+ * fimc_querybuf_output - VIDIOC_QUERYBUF handler for the output path.
+ *
+ * Reports the flags, synthetic mmap offset (index * PAGE_SIZE) and
+ * page-aligned total length (Y + Cb + Cr planes) of one source buffer.
+ * Only valid while streaming is off. Returns 0, -EBUSY, or -EINVAL.
+ */
+int fimc_querybuf_output(void *fh, struct v4l2_buffer *b)
+{
+	struct fimc_prv_data *prv = (struct fimc_prv_data *)fh;
+	struct fimc_control *ctrl = prv->ctrl;
+	struct fimc_ctx *ctx = &ctrl->out->ctx[prv->ctx_id];
+	u32 total_len;
+
+	fimc_info1("%s: called\n", __func__);
+
+	if (ctx->status != FIMC_STREAMOFF) {
+		fimc_err("%s: FIMC is running\n", __func__);
+		return -EBUSY;
+	}
+
+	if (b->index >= ctx->buf_num) {
+		fimc_err("The index is out of bounds. You requested %d buffers."
+			"But requested index is %d\n", ctx->buf_num, b->index);
+		return -EINVAL;
+	}
+
+	b->flags = ctx->src[b->index].flags;
+	b->m.offset = b->index * PAGE_SIZE;
+	total_len = ctx->src[b->index].length[FIMC_ADDR_Y]
+		+ ctx->src[b->index].length[FIMC_ADDR_CB]
+		+ ctx->src[b->index].length[FIMC_ADDR_CR];
+	b->length = PAGE_ALIGN(total_len);
+
+	return 0;
+}
+
+/*
+ * fimc_g_ctrl_output - VIDIOC_G_CTRL handler for the output path.
+ *
+ * Reads back rotation, flip state, overlay virtual addresses, reserved
+ * memory info and the FIMC hardware version. Only valid while streaming
+ * is off. Returns 0, -EBUSY, or -EINVAL on an unknown control id.
+ */
+int fimc_g_ctrl_output(void *fh, struct v4l2_control *c)
+{
+	struct fimc_ctx *ctx;
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	ctx = &ctrl->out->ctx[ctx_id];
+
+	if (ctx->status != FIMC_STREAMOFF) {
+		fimc_err("%s: FIMC is running\n", __func__);
+		return -EBUSY;
+	}
+
+	switch (c->id) {
+	case V4L2_CID_ROTATION:
+		c->value = ctx->rotate;
+		break;
+
+	case V4L2_CID_HFLIP:
+		/* BUG FIX: ctx->flip stores FIMC_*FLIP bits, not V4L2
+		 * control ids. fimc_s_ctrl_output() maps V4L2_CID_HFLIP
+		 * to FIMC_YFLIP, so test that same bit here instead of
+		 * masking with the (unrelated) control id value. */
+		if (ctx->flip & FIMC_YFLIP)
+			c->value = 1;
+		else
+			c->value = 0;
+		break;
+
+	case V4L2_CID_VFLIP:
+		/* BUG FIX: matches the FIMC_XFLIP bit set by s_ctrl. */
+		if (ctx->flip & FIMC_XFLIP)
+			c->value = 1;
+		else
+			c->value = 0;
+		break;
+
+	case V4L2_CID_OVERLAY_VADDR0:
+		c->value = ctx->overlay.buf.vir_addr[0];
+		break;
+
+	case V4L2_CID_OVERLAY_VADDR1:
+		c->value = ctx->overlay.buf.vir_addr[1];
+		break;
+
+	case V4L2_CID_OVERLAY_VADDR2:
+		c->value = ctx->overlay.buf.vir_addr[2];
+		break;
+
+	case V4L2_CID_OVERLAY_AUTO:
+		if (ctx->overlay.mode == FIMC_OVLY_DMA_AUTO)
+			c->value = 1;
+		else
+			c->value = 0;
+		break;
+
+	case V4L2_CID_RESERVED_MEM_BASE_ADDR:
+		c->value = ctrl->mem.base;
+		break;
+
+	case V4L2_CID_RESERVED_MEM_SIZE:
+		/* return KB size */
+		c->value = (ctrl->mem.size) / 1024;
+		break;
+
+	case V4L2_CID_FIMC_VERSION:
+		c->value = pdata->hw_ver;
+		break;
+
+	default:
+		fimc_err("Invalid control id: %d\n", c->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_set_dst_info - copy user-supplied destination buffer descriptors
+ * into the context, zeroing the unused tail of the pool.
+ *
+ * @fimc_buf points at an array of at least ctx->buf_num entries.
+ * Always returns 0.
+ */
+static int fimc_set_dst_info(struct fimc_control *ctrl,
+			     struct fimc_ctx *ctx,
+			     struct fimc_buf *fimc_buf)
+{
+	struct fimc_buf *buf;
+	int i;
+
+	for (i = 0; i < ctx->buf_num; i++) {
+		buf = &fimc_buf[i];
+		ctx->dst[i].base[FIMC_ADDR_Y] = buf->base[FIMC_ADDR_Y];
+		ctx->dst[i].length[FIMC_ADDR_Y] = buf->length[FIMC_ADDR_Y];
+
+		ctx->dst[i].base[FIMC_ADDR_CB] = buf->base[FIMC_ADDR_CB];
+		ctx->dst[i].length[FIMC_ADDR_CB] = buf->length[FIMC_ADDR_CB];
+
+		ctx->dst[i].base[FIMC_ADDR_CR] = buf->base[FIMC_ADDR_CR];
+		ctx->dst[i].length[FIMC_ADDR_CR] = buf->length[FIMC_ADDR_CR];
+	}
+
+	/* Clear the slots beyond the requested count. */
+	for (i = ctx->buf_num; i < FIMC_OUTBUFS; i++) {
+		ctx->dst[i].base[FIMC_ADDR_Y] = 0;
+		ctx->dst[i].length[FIMC_ADDR_Y] = 0;
+
+		ctx->dst[i].base[FIMC_ADDR_CB] = 0;
+		ctx->dst[i].length[FIMC_ADDR_CB] = 0;
+
+		ctx->dst[i].base[FIMC_ADDR_CR] = 0;
+		ctx->dst[i].length[FIMC_ADDR_CR] = 0;
+	}
+
+	/* for debugging */
+	for (i = 0; i < FIMC_OUTBUFS; i++) {
+		fimc_dbg("dst[%d]: base[0]=0x%08x, size[0]=0x%08x\n",
+				i, ctx->dst[i].base[0], ctx->dst[i].length[0]);
+
+		/* BUG FIX: planes 1 and 2 previously both printed base[1]
+		 * and length[2] (copy-paste error); use matching indices. */
+		fimc_dbg("dst[%d]: base[1]=0x%08x, size[1]=0x%08x\n",
+				i, ctx->dst[i].base[1], ctx->dst[i].length[1]);
+
+		fimc_dbg("dst[%d]: base[2]=0x%08x, size[2]=0x%08x\n",
+				i, ctx->dst[i].base[2], ctx->dst[i].length[2]);
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_cache_flush - flush the outer (L2) cache for a buffer's planes.
+ *
+ * If the combined plane length exceeds L2_FLUSH_ALL, a full L2 flush is
+ * cheaper than per-range flushes; otherwise each non-NULL plane is
+ * flushed by physical range.
+ *
+ * NOTE(review): the original code had a third branch for lengths above
+ * L1_FLUSH_ALL that was token-identical to the per-range fallback, so it
+ * has been collapsed (no behavior change). It looks like an L1 flush may
+ * have been intended there — confirm against the hardware requirements.
+ */
+void fimc_cache_flush(struct fimc_buf *buf)
+{
+	size_t length = 0;
+	int i;
+
+	for (i = 0; i < 3; i++)
+		length += buf->length[i];
+
+	if (length > (unsigned long) L2_FLUSH_ALL) {
+		outer_flush_all();	/* L2 */
+	} else {
+		for (i = 0; i < 3; i++) {
+			phys_addr_t start = buf->base[i];
+			phys_addr_t end = buf->base[i] +
+					  buf->length[i] - 1;
+
+			/* Skip planes that were never set. */
+			if (!start)
+				continue;
+
+			outer_flush_range(start, end);	/* L2 */
+		}
+	}
+}
+
+/*
+ * fimc_s_ctrl_output - VIDIOC_S_CTRL handler for the output path.
+ *
+ * Sets rotation, flips, overlay mode, destination buffer info, or reads
+ * back physical source addresses (the GET_PHY_* ids reuse c->value as an
+ * input index and overwrite it with the address). Only valid while
+ * streaming is off. Returns 0 or a negative errno.
+ */
+int fimc_s_ctrl_output(struct file *filp, void *fh, struct v4l2_control *c)
+{
+	struct fimc_ctx *ctx;
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	int ret = 0;
+
+	ctx = &ctrl->out->ctx[ctx_id];
+	if (ctx->status != FIMC_STREAMOFF) {
+		fimc_err("%s: FIMC is running\n", __func__);
+		return -EBUSY;
+	}
+
+	switch (c->id) {
+	case V4L2_CID_ROTATION:
+		ret = fimc_set_rot_degree(ctrl, ctx, c->value);
+
+		break;
+	case V4L2_CID_HFLIP:
+		/* NOTE(review): HFLIP maps to FIMC_YFLIP (and VFLIP to
+		 * FIMC_XFLIP below). This looks swapped but may be the
+		 * intended hardware axis naming — confirm before changing. */
+		if (c->value)
+			ctx->flip |= FIMC_YFLIP;
+		else
+			ctx->flip &= ~FIMC_YFLIP;
+
+		break;
+	case V4L2_CID_VFLIP:
+		if (c->value)
+			ctx->flip |= FIMC_XFLIP;
+		else
+			ctx->flip &= ~FIMC_XFLIP;
+
+		break;
+	case V4L2_CID_OVERLAY_AUTO:
+		if (c->value == 1) {
+			ctx->overlay.mode = FIMC_OVLY_DMA_AUTO;
+		} else {
+			/* Manual overlay: allocate destination buffers and
+			 * map them into the caller's address space now. */
+			ctx->overlay.mode = FIMC_OVLY_DMA_MANUAL;
+			ret = fimc_outdev_set_dst_buf(ctrl, ctx);
+			fimc_outdev_overlay_buf(filp, ctrl, ctx);
+		}
+
+		break;
+	case V4L2_CID_OVLY_MODE:
+		ctx->overlay.mode = c->value;
+
+		break;
+	case V4L2_CID_DST_INFO:
+		/* c->value carries a user pointer to a fimc_buf array. */
+		ret = fimc_set_dst_info(ctrl, ctx,
+				(struct fimc_buf *)c->value);
+		break;
+	case V4L2_CID_GET_PHY_SRC_YADDR:
+		c->value = ctx->src[c->value].base[FIMC_ADDR_Y];
+		break;
+	case V4L2_CID_GET_PHY_SRC_CADDR:
+		c->value = ctx->src[c->value].base[FIMC_ADDR_CB];
+		break;
+	default:
+		fimc_err("Invalid control id: %d\n", c->id);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * fimc_cropcap_output - VIDIOC_CROPCAP handler for the output path.
+ *
+ * Derives the crop bounds from the pixel format: YUV formats use the
+ * scaler's maximum source size, RGB formats use the LCD resolution
+ * (swapped when a rotating flip is active). Fills in bounds, default
+ * rect, and a fixed 16:9 pixel aspect. Returns 0, -EBUSY, or -EINVAL.
+ */
+int fimc_cropcap_output(void *fh, struct v4l2_cropcap *a)
+{
+	struct fimc_prv_data *prv = (struct fimc_prv_data *)fh;
+	struct fimc_control *ctrl = prv->ctrl;
+	struct fimc_ctx *ctx = &ctrl->out->ctx[prv->ctx_id];
+	u32 rot_flip, bound_w = 0, bound_h = 0, fmt;
+
+	fimc_info1("%s: called\n", __func__);
+
+	fmt = ctx->pix.pixelformat;
+	if (ctx->status != FIMC_STREAMOFF) {
+		fimc_err("%s: FIMC is running\n", __func__);
+		return -EBUSY;
+	}
+
+	rot_flip = fimc_mapping_rot_flip(ctx->rotate, ctx->flip);
+	switch (fmt) {
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV21:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:	/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_YVU420:	/* fall through */
+		bound_w = FIMC_SRC_MAX_W;
+		bound_h = FIMC_SRC_MAX_H;
+		break;
+	case V4L2_PIX_FMT_RGB32:	/* fall through */
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+		if (rot_flip & FIMC_ROT) {	/* Landscape mode */
+			bound_w = ctrl->fb.lcd_vres;
+			bound_h = ctrl->fb.lcd_hres;
+		} else {			/* Portrait */
+			bound_w = ctrl->fb.lcd_hres;
+			bound_h = ctrl->fb.lcd_vres;
+		}
+
+		break;
+	default:
+		fimc_warn("Supported format : V4L2_PIX_FMT_YUYV, "
+				"V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_NV12T, "
+				"V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_RGB565\n");
+		return -EINVAL;
+	}
+
+	/* crop bounds */
+	ctx->cropcap.bounds.left = 0;
+	ctx->cropcap.bounds.top = 0;
+	ctx->cropcap.bounds.width = bound_w;
+	ctx->cropcap.bounds.height = bound_h;
+
+	/* crop default values */
+	ctx->cropcap.defrect.left = 0;
+	ctx->cropcap.defrect.top = 0;
+	ctx->cropcap.defrect.width = bound_w;
+	ctx->cropcap.defrect.height = bound_h;
+
+	/* crop pixel aspect: fixed 16:9 (meaning unverified upstream). */
+	ctx->cropcap.pixelaspect.numerator = 16;
+	ctx->cropcap.pixelaspect.denominator = 9;
+
+	a->bounds = ctx->cropcap.bounds;
+	a->defrect = ctx->cropcap.defrect;
+	a->pixelaspect = ctx->cropcap.pixelaspect;
+
+	return 0;
+}
+
+/*
+ * fimc_g_crop_output - VIDIOC_G_CROP handler for the output path.
+ *
+ * Copies the context's current crop rectangle under the v4l2 lock.
+ * Always returns 0.
+ */
+int fimc_g_crop_output(void *fh, struct v4l2_crop *a)
+{
+	struct fimc_prv_data *prv = (struct fimc_prv_data *)fh;
+	struct fimc_control *ctrl = prv->ctrl;
+	struct fimc_ctx *ctx = &ctrl->out->ctx[prv->ctx_id];
+
+	fimc_info1("%s: called\n", __func__);
+
+	mutex_lock(&ctrl->v4l2_lock);
+	a->c.left   = ctx->crop.left;
+	a->c.top    = ctx->crop.top;
+	a->c.width  = ctx->crop.width;
+	a->c.height = ctx->crop.height;
+	mutex_unlock(&ctrl->v4l2_lock);
+
+	return 0;
+}
+
+/*
+ * fimc_s_crop_output - VIDIOC_S_CROP handler for the output path.
+ *
+ * Validates the requested source crop rectangle (non-negative, within
+ * FIMC_SRC_MAX_W/H, fully inside the bound rect) and stores it in the
+ * context. Only valid while streaming is off. Returns 0, -EBUSY, or
+ * -EINVAL.
+ */
+int fimc_s_crop_output(void *fh, struct v4l2_crop *a)
+{
+	struct fimc_ctx *ctx;
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+
+	fimc_info1("%s: called: left(%d), top(%d), width(%d), height(%d),\n",
+			__func__, a->c.left, a->c.top, a->c.width, a->c.height);
+
+	ctx = &ctrl->out->ctx[ctx_id];
+	if (ctx->status != FIMC_STREAMOFF) {
+		fimc_err("%s: FIMC is running\n", __func__);
+		return -EBUSY;
+	}
+
+	/* Check arguments : width and height */
+	if ((a->c.width < 0) || (a->c.height < 0)) {
+		fimc_err("The crop rect must be bigger than 0\n");
+		fimc_err("width = %d, height = %d\n", a->c.width, a->c.height);
+		return -EINVAL;
+	}
+
+	if ((a->c.width > FIMC_SRC_MAX_W) || (a->c.height > FIMC_SRC_MAX_H)) {
+		fimc_err("The crop width/height must be smaller than "
+			"%d and %d\n", FIMC_SRC_MAX_W, FIMC_SRC_MAX_H);
+		fimc_err("width = %d, height = %d\n", a->c.width, a->c.height);
+		return -EINVAL;
+	}
+
+	/* Check arguments : left and top */
+	if ((a->c.left < 0) || (a->c.top < 0)) {
+		fimc_err("The crop left, top must be bigger than 0\n");
+		fimc_err("left = %d, top = %d\n", a->c.left, a->c.top);
+		return -EINVAL;
+	}
+
+	if ((a->c.left > FIMC_SRC_MAX_W) || (a->c.top > FIMC_SRC_MAX_H)) {
+		fimc_err("The crop left, top must be smaller than %d, %d\n",
+			FIMC_SRC_MAX_W, FIMC_SRC_MAX_H);
+		fimc_err("left = %d, top = %d\n", a->c.left, a->c.top);
+		return -EINVAL;
+	}
+
+	if ((a->c.left + a->c.width) > FIMC_SRC_MAX_W) {
+		fimc_err("The crop rect must be in bound rect\n");
+		fimc_err("left = %d, width = %d\n", a->c.left, a->c.width);
+		return -EINVAL;
+	}
+
+	if ((a->c.top + a->c.height) > FIMC_SRC_MAX_H) {
+		fimc_err("The crop rect must be in bound rect\n");
+		/* BUG FIX: this message previously labeled the height
+		 * value as "width". */
+		fimc_err("top = %d, height = %d\n", a->c.top, a->c.height);
+		return -EINVAL;
+	}
+
+	ctx->crop.left = a->c.left;
+	ctx->crop.top = a->c.top;
+	ctx->crop.width = a->c.width;
+	ctx->crop.height = a->c.height;
+
+	return 0;
+}
+
+/*
+ * fimc_streamon_output - VIDIOC_STREAMON handler for the output path.
+ *
+ * Pins down the overlay mode if still undecided, allocates destination
+ * buffers for the DMA-auto mode, validates the context parameters, and
+ * advances both context and controller to FIMC_READY_ON. Returns the
+ * result of the parameter check (negative on failure).
+ */
+int fimc_streamon_output(void *fh)
+{
+	struct fimc_prv_data *prv = (struct fimc_prv_data *)fh;
+	struct fimc_control *ctrl = prv->ctrl;
+	struct fimc_ctx *ctx = &ctrl->out->ctx[prv->ctx_id];
+	int ret;
+
+	fimc_info1("%s: called\n", __func__);
+
+	if (ctx->overlay.mode == FIMC_OVLY_NOT_FIXED)
+		ctx->overlay.mode = FIMC_OVLY_MODE;
+
+	/* initialize destination buffers */
+	if (ctx->overlay.mode == FIMC_OVLY_DMA_AUTO) {
+		ret = fimc_outdev_set_dst_buf(ctrl, ctx);
+		if (ret)
+			return ret;
+	}
+
+	ret = fimc_outdev_check_param(ctrl, ctx);
+	if (ret < 0) {
+		fimc_err("Fail: fimc_outdev_check_param\n");
+		return ret;
+	}
+
+	ctx->status = FIMC_READY_ON;
+	if (ctrl->status == FIMC_STREAMOFF)
+		ctrl->status = FIMC_READY_ON;
+
+	return ret;
+}
+
+/*
+ * fimc_outdev_init_idxs - reset the prev/active/next buffer index
+ * tracking to the "no buffer" sentinel (-1) for both ctx and idx.
+ */
+void fimc_outdev_init_idxs(struct fimc_control *ctrl)
+{
+	ctrl->out->idxs.prev.ctx   = ctrl->out->idxs.prev.idx   = -1;
+	ctrl->out->idxs.active.ctx = ctrl->out->idxs.active.idx = -1;
+	ctrl->out->idxs.next.ctx   = ctrl->out->idxs.next.idx   = -1;
+}
+
+/*
+ * fimc_streamoff_output - VIDIOC_STREAMOFF handler for the output path.
+ *
+ * Marks the context off first (so the DMA irq handler ignores it), tears
+ * down the framebuffer window for DMA overlay modes, drains the in/out
+ * queues, resets buffer states, and — once every context is off — resets
+ * the controller-wide index tracking. Returns 0 or -EINVAL.
+ */
+int fimc_streamoff_output(void *fh)
+{
+	struct fimc_ctx *ctx;
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	int ret = -1, i = 0, off_cnt = 0;
+	struct s3cfb_user_window window;
+	fimc_info1("%s: called\n", __func__);
+
+	ctx = &ctrl->out->ctx[ctx_id];
+	/* Move it to here to ignore fimc_irq_out_dma operation. */
+	ctx->status = FIMC_STREAMOFF;
+
+	if (ctx->overlay.mode == FIMC_OVLY_DMA_AUTO ||
+			ctx->overlay.mode == FIMC_OVLY_DMA_MANUAL) {
+		/* Need some delay to waiting reamined operation */
+		msleep(100);
+
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_OFF,
+				(unsigned long)NULL);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_SET_WIN_OFF) fail\n");
+			return -EINVAL;
+		}
+
+		/* reset WIN position */
+		memset(&window, 0, sizeof(window));
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_WIN_POSITION,
+				(unsigned long)&window);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_WIN_POSITION) fail\n");
+			return -EINVAL;
+		}
+
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_ADDR, 0x00000000);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_SET_WIN_ADDR) fail\n");
+			return -EINVAL;
+		}
+
+		ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_SET_WIN_MEM, DMA_MEM_NONE);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_SET_WIN_MEM) fail\n");
+			return -EINVAL;
+		}
+
+		ctrl->fb.is_enable = 0;
+	}
+
+	ret = fimc_init_in_queue(ctrl, ctx);
+	if (ret < 0) {
+		fimc_err("Fail: fimc_init_in_queue\n");
+		return -EINVAL;
+	}
+
+	ret = fimc_init_out_queue(ctrl, ctx);
+	if (ret < 0) {
+		fimc_err("Fail: fimc_init_out_queue\n");
+		return -EINVAL;
+	}
+
+	/* Make all buffers DQUEUED state. */
+	for (i = 0; i < FIMC_OUTBUFS; i++) {
+		ctx->src[i].state = VIDEOBUF_IDLE;
+		ctx->src[i].flags = V4L2_BUF_FLAG_MAPPED;
+	}
+
+	if (ctrl->out->last_ctx == ctx->ctx_num)
+		ctrl->out->last_ctx = -1;
+
+	if (ctx->overlay.mode == FIMC_OVLY_DMA_AUTO) {
+		/* Rewind the allocator to this context's first dst buffer
+		 * and clear the destination descriptors. */
+		ctrl->mem.curr = ctx->dst[0].base[FIMC_ADDR_Y];
+		for (i = 0; i < FIMC_OUTBUFS; i++) {
+			ctx->dst[i].base[FIMC_ADDR_Y] = 0;
+			ctx->dst[i].length[FIMC_ADDR_Y] = 0;
+
+			ctx->dst[i].base[FIMC_ADDR_CB] = 0;
+			ctx->dst[i].length[FIMC_ADDR_CB] = 0;
+
+			ctx->dst[i].base[FIMC_ADDR_CR] = 0;
+			ctx->dst[i].length[FIMC_ADDR_CR] = 0;
+		}
+	}
+
+	/* check all ctx to change ctrl->status from streamon to streamoff */
+	for (i = 0; i < FIMC_MAX_CTXS; i++) {
+		if (ctrl->out->ctx[i].status == FIMC_STREAMOFF)
+			off_cnt++;
+	}
+
+	if (off_cnt == FIMC_MAX_CTXS) {
+		ctrl->status = FIMC_STREAMOFF;
+		fimc_outdev_init_idxs(ctrl);
+	}
+
+#if (!defined(CONFIG_EXYNOS_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+	/* Without runtime PM the camif must be stopped explicitly here. */
+	if (off_cnt == FIMC_MAX_CTXS) {
+		ctrl->status = FIMC_STREAMOFF;
+		fimc_outdev_init_idxs(ctrl);
+		fimc_outdev_stop_camif(ctrl);
+	}
+#endif
+
+	return 0;
+}
+
+/*
+ * fimc_output_set_dst_addr - program destination plane base addresses
+ * into every enabled hardware output buffer slot.
+ *
+ * For NONE_SINGLE_BUF mode the planes are derived from the user fbuf
+ * base plus computed plane sizes; otherwise the per-index dst descriptors
+ * are used. Returns 0 or -EINVAL on an unsupported pixel format.
+ */
+int fimc_output_set_dst_addr(struct fimc_control *ctrl,
+				struct fimc_ctx *ctx, int idx)
+{
+	struct fimc_buf_set buf_set;	/* destination addr */
+	u32 format = ctx->fbuf.fmt.pixelformat;
+	u32 width = ctx->fbuf.fmt.width;
+	u32 height = ctx->fbuf.fmt.height;
+	u32 y_size = width * height;
+	u32 c_size = y_size >> 2;
+	int i, cfg;
+	u32 rot = ctx->rotate;
+
+	memset(&buf_set, 0x00, sizeof(buf_set));
+
+	if (V4L2_PIX_FMT_NV12T == format)
+		fimc_get_nv12t_size(width, height, &y_size, &c_size);
+
+	switch (format) {
+	case V4L2_PIX_FMT_RGB32:
+	case V4L2_PIX_FMT_RGB565:
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_YVYU:		/* fall through */
+	case V4L2_PIX_FMT_VYUY:		/* fall through */
+		/* Single-plane formats: only the Y slot is used. */
+		if (ctx->overlay.mode == FIMC_OVLY_NONE_SINGLE_BUF)
+			buf_set.base[FIMC_ADDR_Y] =
+				(dma_addr_t)ctx->fbuf.base;
+		else
+			buf_set.base[FIMC_ADDR_Y] =
+				ctx->dst[idx].base[FIMC_ADDR_Y];
+		break;
+	case V4L2_PIX_FMT_YUV420:
+		/* Three contiguous planes: Y, then Cb, then Cr. */
+		if (ctx->overlay.mode == FIMC_OVLY_NONE_SINGLE_BUF) {
+			buf_set.base[FIMC_ADDR_Y] =
+				(dma_addr_t)ctx->fbuf.base;
+			buf_set.base[FIMC_ADDR_CB] =
+				buf_set.base[FIMC_ADDR_Y] + y_size;
+			buf_set.base[FIMC_ADDR_CR] =
+				buf_set.base[FIMC_ADDR_CB] + c_size;
+		} else {
+			buf_set.base[FIMC_ADDR_Y] =
+				ctx->dst[idx].base[FIMC_ADDR_Y];
+			buf_set.base[FIMC_ADDR_CB] =
+				ctx->dst[idx].base[FIMC_ADDR_CB];
+			buf_set.base[FIMC_ADDR_CR] =
+				ctx->dst[idx].base[FIMC_ADDR_CR];
+		}
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV16:
+	case V4L2_PIX_FMT_NV61:
+		/* Two planes: Y plus interleaved chroma. */
+		if (ctx->overlay.mode == FIMC_OVLY_NONE_SINGLE_BUF) {
+			buf_set.base[FIMC_ADDR_Y] =
+				(dma_addr_t)ctx->fbuf.base;
+			buf_set.base[FIMC_ADDR_CB] =
+				buf_set.base[FIMC_ADDR_Y] + y_size;
+		} else {
+			buf_set.base[FIMC_ADDR_Y] =
+				ctx->dst[idx].base[FIMC_ADDR_Y];
+			buf_set.base[FIMC_ADDR_CB] =
+				ctx->dst[idx].base[FIMC_ADDR_CB];
+		}
+		break;
+	case V4L2_PIX_FMT_NV12T:
+		if (ctx->overlay.mode == FIMC_OVLY_NONE_SINGLE_BUF) {
+			/* Tiled sizes depend on post-rotation orientation. */
+			if (rot == 0 || rot == 180)
+				fimc_get_nv12t_size(width, height, &y_size, &c_size);
+			else
+				fimc_get_nv12t_size(height, width, &y_size, &c_size);
+			buf_set.base[FIMC_ADDR_Y] = (dma_addr_t)ctx->fbuf.base;
+			buf_set.base[FIMC_ADDR_CB] = buf_set.base[FIMC_ADDR_Y] + y_size;
+		} else {
+			buf_set.base[FIMC_ADDR_Y] =
+				ctx->dst[idx].base[FIMC_ADDR_Y];
+			buf_set.base[FIMC_ADDR_CB] =
+				ctx->dst[idx].base[FIMC_ADDR_CB];
+		}
+		break;
+	default:
+		fimc_err("%s: Invalid pixelformt : %d\n", \
+				__func__, format);
+		return -EINVAL;
+	}
+
+	cfg = fimc_hwget_output_buf_sequence(ctrl);
+
+	/* Write the addresses into every enabled hw buffer slot. */
+	for (i = 0; i < FIMC_PHYBUFS; i++) {
+		if (check_bit(cfg, i))
+			fimc_hwset_output_address(ctrl, &buf_set, i);
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_outdev_start_operation - start the camera interface and record
+ * the active buffer/context indices, all under the output spinlock so
+ * the irq handler sees a consistent state.
+ *
+ * Returns 0 on success, -EINVAL if the camif fails to start.
+ */
+static int fimc_outdev_start_operation(struct fimc_control *ctrl,
+				       struct fimc_ctx *ctx, int idx)
+{
+	int ret = 0;
+	unsigned long spin_flags;
+
+	spin_lock_irqsave(&ctrl->out->slock, spin_flags);
+	ret = fimc_outdev_start_camif(ctrl);
+	if (ret < 0) {
+		/* BUG FIX: the error path previously returned while still
+		 * holding slock with interrupts saved — release it first. */
+		spin_unlock_irqrestore(&ctrl->out->slock, spin_flags);
+		fimc_err("Fail: fimc_start_camif\n");
+		return -EINVAL;
+	}
+
+	ctrl->out->idxs.active.idx = idx;
+	ctrl->out->idxs.active.ctx = ctx->ctx_num;
+
+	ctrl->status = FIMC_STREAMON;
+	ctx->status = FIMC_STREAMON;
+	spin_unlock_irqrestore(&ctrl->out->slock, spin_flags);
+
+	return ret;
+}
+
+/*
+ * fimc_qbuf_output_single_buf - queue one buffer in NONE_SINGLE_BUF
+ * overlay mode: derive all destination plane addresses from the user
+ * fbuf base, program them into every enabled hw slot, and start the
+ * camif. Returns 0 or -EINVAL.
+ */
+static int fimc_qbuf_output_single_buf(struct fimc_control *ctrl,
+				       struct fimc_ctx *ctx,
+				       int idx)
+{
+	struct fimc_buf_set buf_set;	/* destination addr */
+	u32 format = ctx->fbuf.fmt.pixelformat;
+	u32 width = ctx->fbuf.fmt.width;
+	u32 height = ctx->fbuf.fmt.height;
+	u32 y_size = width * height;
+	u32 c_size = y_size >> 2;
+	int ret = -1, i, cfg;
+	u32 rot = ctx->rotate;
+
+	fimc_outdev_set_src_addr(ctrl, ctx->src[idx].base);
+
+	memset(&buf_set, 0x00, sizeof(buf_set));
+
+	switch (format) {
+	case V4L2_PIX_FMT_RGB32:
+	case V4L2_PIX_FMT_RGB565:
+	case V4L2_PIX_FMT_YUYV:
+		buf_set.base[FIMC_ADDR_Y] = (dma_addr_t)ctx->fbuf.base;
+		break;
+	case V4L2_PIX_FMT_YUV420:
+		buf_set.base[FIMC_ADDR_Y] = (dma_addr_t)ctx->fbuf.base;
+		buf_set.base[FIMC_ADDR_CB] = buf_set.base[FIMC_ADDR_Y] + y_size;
+		buf_set.base[FIMC_ADDR_CR] = buf_set.base[FIMC_ADDR_CB] + c_size;
+		break;
+	case V4L2_PIX_FMT_YVU420:
+		/* Cr precedes Cb for YVU420. */
+		buf_set.base[FIMC_ADDR_Y] = (dma_addr_t)ctx->fbuf.base;
+		buf_set.base[FIMC_ADDR_CR] = buf_set.base[FIMC_ADDR_Y] + y_size;
+		buf_set.base[FIMC_ADDR_CB] = buf_set.base[FIMC_ADDR_CR] + c_size;
+		break;
+	case V4L2_PIX_FMT_NV12:
+	case V4L2_PIX_FMT_NV21:
+		buf_set.base[FIMC_ADDR_Y] = (dma_addr_t)ctx->fbuf.base;
+		buf_set.base[FIMC_ADDR_CB] = buf_set.base[FIMC_ADDR_Y] + y_size;
+		break;
+	case V4L2_PIX_FMT_NV12M:
+		buf_set.base[FIMC_ADDR_Y] = (dma_addr_t)ctx->fbuf.base;
+		/* BUG FIX: ALIGN() takes the power-of-two alignment itself,
+		 * not a mask — PAGE_SIZE - 1 produced a misaligned chroma
+		 * plane address. */
+		buf_set.base[FIMC_ADDR_CB] =
+			ALIGN(buf_set.base[FIMC_ADDR_Y] + y_size, PAGE_SIZE);
+		break;
+	case V4L2_PIX_FMT_NV12T:
+		/* Tiled sizes depend on post-rotation orientation. */
+		if (rot == 0 || rot == 180)
+			fimc_get_nv12t_size(width, height, &y_size, &c_size);
+		else
+			fimc_get_nv12t_size(height, width, &y_size, &c_size);
+		buf_set.base[FIMC_ADDR_Y] = (dma_addr_t)ctx->fbuf.base;
+		buf_set.base[FIMC_ADDR_CB] = buf_set.base[FIMC_ADDR_Y] + y_size;
+		break;
+	default:
+		fimc_err("%s: Invalid pixelformt : %d\n", __func__, format);
+		return -EINVAL;
+	}
+	cfg = fimc_hwget_output_buf_sequence(ctrl);
+
+	for (i = 0; i < FIMC_PHYBUFS; i++) {
+		if (check_bit(cfg, i))
+			fimc_hwset_output_address(ctrl, &buf_set, i);
+	}
+
+	ret = fimc_outdev_start_operation(ctrl, ctx, idx);
+	if (ret < 0) {
+		fimc_err("Fail: fimc_start_operation\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_qbuf_output_multi_buf - queue one buffer in NONE_MULTI_BUF
+ * overlay mode: plane addresses come from the per-index destination
+ * descriptors. Programs every enabled hw slot and starts the camif.
+ * Returns 0 or -EINVAL.
+ */
+static int fimc_qbuf_output_multi_buf(struct fimc_control *ctrl,
+				      struct fimc_ctx *ctx,
+				      int idx)
+{
+	struct fimc_buf_set dst_addr;	/* destination addr */
+	struct fimc_buf_set *desc = &ctx->dst[idx];
+	u32 fmt = ctx->fbuf.fmt.pixelformat;
+	int slot, seq, rc;
+
+	fimc_outdev_set_src_addr(ctrl, ctx->src[idx].base);
+
+	memset(&dst_addr, 0x00, sizeof(dst_addr));
+
+	switch (fmt) {
+	case V4L2_PIX_FMT_RGB32:
+	case V4L2_PIX_FMT_RGB565:
+	case V4L2_PIX_FMT_YUYV:
+		/* single plane */
+		dst_addr.base[FIMC_ADDR_Y] = desc->base[FIMC_ADDR_Y];
+		break;
+	case V4L2_PIX_FMT_YUV420:
+		/* three planes */
+		dst_addr.base[FIMC_ADDR_Y] = desc->base[FIMC_ADDR_Y];
+		dst_addr.base[FIMC_ADDR_CB] = desc->base[FIMC_ADDR_CB];
+		dst_addr.base[FIMC_ADDR_CR] = desc->base[FIMC_ADDR_CR];
+		break;
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV21:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:
+		/* two planes */
+		dst_addr.base[FIMC_ADDR_Y] = desc->base[FIMC_ADDR_Y];
+		dst_addr.base[FIMC_ADDR_CB] = desc->base[FIMC_ADDR_CB];
+		break;
+	default:
+		fimc_err("%s: Invalid pixelformt : %d\n", __func__, fmt);
+		return -EINVAL;
+	}
+
+	seq = fimc_hwget_output_buf_sequence(ctrl);
+
+	for (slot = 0; slot < FIMC_PHYBUFS; slot++) {
+		if (check_bit(seq, slot))
+			fimc_hwset_output_address(ctrl, &dst_addr, slot);
+	}
+
+	rc = fimc_outdev_start_operation(ctrl, ctx, idx);
+	if (rc < 0) {
+		fimc_err("Fail: fimc_start_operation\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_qbuf_output_dma_auto - queue one buffer in DMA-auto overlay mode.
+ *
+ * On the first queue (FIMC_READY_ON) the framebuffer window is configured
+ * for the DMA path, then execution deliberately falls through to the
+ * steady-state branch, which programs source/destination addresses and
+ * starts the camif. Returns 0 or -EINVAL.
+ */
+static int fimc_qbuf_output_dma_auto(struct fimc_control *ctrl,
+				     struct fimc_ctx *ctx,
+				     int idx)
+{
+	struct fb_var_screeninfo var;
+	struct s3cfb_user_window window;
+	struct v4l2_rect fimd_rect, fimd_rect_virtual;
+	struct fimc_buf_set buf_set;	/* destination addr */
+	u32 id = ctrl->id;
+	int ret = -1, i, cfg;
+
+	switch (ctx->status) {
+	case FIMC_READY_ON:
+		/* First queue: set up the FIMD window before streaming. */
+		memset(&fimd_rect, 0, sizeof(struct v4l2_rect));
+		ret = fimc_fimd_rect(ctrl, ctx, &fimd_rect);
+		if (ret < 0) {
+			fimc_err("fimc_fimd_rect fail\n");
+			return -EINVAL;
+		}
+
+		/* Support any size */
+		memcpy(&fimd_rect_virtual, &fimd_rect, sizeof(fimd_rect));
+		fimc_outdev_dma_auto_dst_resize(&fimd_rect_virtual);
+
+		/* Get WIN var_screeninfo */
+		ret = s3cfb_direct_ioctl(id, FBIOGET_VSCREENINFO,
+						(unsigned long)&var);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(FBIOGET_VSCREENINFO) fail\n");
+			return -EINVAL;
+		}
+		/* window path : DMA */
+		ret = s3cfb_direct_ioctl(id, S3CFB_SET_WIN_PATH,
+						DATA_PATH_DMA);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_SET_WIN_PATH) fail\n");
+			return -EINVAL;
+		}
+
+		/* Don't allocate the memory. */
+		ret = s3cfb_direct_ioctl(id, S3CFB_SET_WIN_MEM, DMA_MEM_OTHER);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_SET_WIN_MEM) fail\n");
+			return -EINVAL;
+		}
+
+		/* Update WIN size */
+		var.xres_virtual = fimd_rect_virtual.width;
+		var.yres_virtual = fimd_rect_virtual.height;
+		var.xres = fimd_rect.width;
+		var.yres = fimd_rect.height;
+
+		ret = s3cfb_direct_ioctl(id, FBIOPUT_VSCREENINFO,
+						(unsigned long)&var);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(FBIOPUT_VSCREENINFO) fail\n");
+			return -EINVAL;
+		}
+
+		/* Update WIN position */
+		window.x = fimd_rect.left;
+		window.y = fimd_rect.top;
+		ret = s3cfb_direct_ioctl(id, S3CFB_WIN_POSITION,
+				(unsigned long)&window);
+		if (ret < 0) {
+			fimc_err("direct_ioctl(S3CFB_WIN_POSITION) fail\n");
+			return -EINVAL;
+		}
+
+		/* fall through */
+
+	case FIMC_STREAMON_IDLE:
+		/* Steady state: program addresses and start the camif. */
+		fimc_outdev_set_src_addr(ctrl, ctx->src[idx].base);
+
+		memset(&buf_set, 0x00, sizeof(buf_set));
+		buf_set.base[FIMC_ADDR_Y] = ctx->dst[idx].base[FIMC_ADDR_Y];
+		cfg = fimc_hwget_output_buf_sequence(ctrl);
+
+		for (i = 0; i < FIMC_PHYBUFS; i++) {
+			if (check_bit(cfg, i))
+				fimc_hwset_output_address(ctrl, &buf_set, i);
+		}
+
+		ret = fimc_outdev_start_operation(ctrl, ctx, idx);
+		if (ret < 0) {
+			fimc_err("Fail: fimc_start_operation\n");
+			return -EINVAL;
+		}
+		break;
+
+	default:
+		/* Already streaming: the irq handler picks the buffer up. */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_qbuf_output_dma_manual - queue one buffer in DMA-manual overlay
+ * mode: program source and Y destination addresses, mark the context
+ * active/streaming, then start the camif. Returns 0 or -EINVAL.
+ */
+static int fimc_qbuf_output_dma_manual(struct fimc_control *ctrl,
+				       struct fimc_ctx *ctx,
+				       int idx)
+{
+	struct fimc_buf_set dst_addr;	/* destination addr */
+	int slot, seq, rc;
+
+	fimc_outdev_set_src_addr(ctrl, ctx->src[idx].base);
+
+	memset(&dst_addr, 0x00, sizeof(dst_addr));
+	dst_addr.base[FIMC_ADDR_Y] = ctx->dst[idx].base[FIMC_ADDR_Y];
+	seq = fimc_hwget_output_buf_sequence(ctrl);
+
+	for (slot = 0; slot < FIMC_PHYBUFS; slot++) {
+		if (check_bit(seq, slot))
+			fimc_hwset_output_address(ctrl, &dst_addr, slot);
+	}
+
+	ctrl->out->idxs.active.idx = idx;
+	ctrl->out->idxs.active.ctx = ctx->ctx_num;
+
+	ctrl->status = FIMC_STREAMON;
+	ctx->status = FIMC_STREAMON;
+
+	rc = fimc_outdev_start_camif(ctrl);
+	if (rc < 0) {
+		fimc_err("Fail: fimc_start_camif\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_qbuf_output_fifo - queue one buffer in FIFO overlay mode:
+ * optionally kick the IPC de-interlacer, program the source address,
+ * mark the context active/streaming, and open the FIMD FIFO path.
+ * Returns 0 or -EINVAL.
+ */
+static int fimc_qbuf_output_fifo(struct fimc_control *ctrl,
+				 struct fimc_ctx *ctx,
+				 int idx)
+{
+	int rc;
+
+#if defined(CONFIG_VIDEO_IPC)
+	/* Interlaced content goes through the IPC de-interlacer. */
+	if (ctx->pix.field == V4L2_FIELD_INTERLACED_TB)
+		ipc_start();
+#endif
+
+	fimc_outdev_set_src_addr(ctrl, ctx->src[idx].base);
+
+	ctrl->out->idxs.active.idx = idx;
+	ctrl->out->idxs.active.ctx = ctx->ctx_num;
+
+	ctrl->status = FIMC_STREAMON;
+	ctx->status = FIMC_STREAMON;
+
+	rc = fimc_start_fifo(ctrl, ctx);
+	if (rc < 0) {
+		fimc_err("Fail: fimc_start_fifo\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_update_in_queue_addr - store USERPTR plane addresses for source
+ * buffer @idx, swapping the chroma planes for YVU420 (which carries
+ * V before U). Returns 0, or -EINVAL for an out-of-range index.
+ */
+static int fimc_update_in_queue_addr(struct fimc_control *ctrl,
+				     struct fimc_ctx *ctx,
+				     u32 idx, dma_addr_t *addr)
+{
+	dma_addr_t cb_base, cr_base;
+
+	if (idx >= FIMC_OUTBUFS) {
+		fimc_err("%s: Failed (ctx=%d)\n", __func__, ctx->ctx_num);
+		return -EINVAL;
+	}
+
+	if (ctx->pix.pixelformat == V4L2_PIX_FMT_YVU420) {
+		cb_base = addr[FIMC_ADDR_CR];
+		cr_base = addr[FIMC_ADDR_CB];
+	} else {
+		cb_base = addr[FIMC_ADDR_CB];
+		cr_base = addr[FIMC_ADDR_CR];
+	}
+
+	ctx->src[idx].base[FIMC_ADDR_Y] = addr[FIMC_ADDR_Y];
+	ctx->src[idx].base[FIMC_ADDR_CB] = cb_base;
+	ctx->src[idx].base[FIMC_ADDR_CR] = cr_base;
+
+	return 0;
+}
+
+/*
+ * fimc_qbuf_output - VIDIOC_QBUF handler for the output path.
+ *
+ * Validates the buffer, pushes it onto the incoming queue (taking a
+ * runtime-PM reference that is dropped again on any failure), and — when
+ * the controller is idle — immediately pops a buffer and dispatches it
+ * according to the context's overlay mode. Returns 0 or a negative errno.
+ */
+int fimc_qbuf_output(void *fh, struct v4l2_buffer *b)
+{
+	struct fimc_buf *buf = (struct fimc_buf *)b->m.userptr;
+	struct fimc_ctx *ctx;
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	int idx, ctx_num;
+	int ret = -1;
+
+	ctx = &ctrl->out->ctx[ctx_id];
+	fimc_info2("ctx(%d) queued idx = %d\n", ctx->ctx_num, b->index);
+	if (ctx->status == FIMC_STREAMOFF) {
+		fimc_err("[ctx=%d] %s:: can not queue bause status "
+				"is FIMC_STREAMOFF status)\n",
+				ctx->ctx_num, __func__);
+		return ret;
+	}
+
+	if (b->index >= ctx->buf_num) {
+		fimc_err("[ctx=%d] The index is out of bounds. "
+			"You requested %d buffers. "
+			"But you set the index as %d\n",
+			ctx_id, ctx->buf_num, b->index);
+		return -EINVAL;
+	}
+
+	/* Check the buffer state if the state is VIDEOBUF_IDLE. */
+	if (ctx->src[b->index].state != VIDEOBUF_IDLE) {
+		fimc_err("[ctx=%d] The index(%d) buffer must be "
+				"dequeued state(%d)\n",
+				ctx_id, b->index, ctx->src[b->index].state);
+		return -EINVAL;
+	}
+
+	if ((ctrl->status == FIMC_READY_ON) ||
+	    (ctrl->status == FIMC_STREAMON) ||
+	    (ctrl->status == FIMC_STREAMON_IDLE)) {
+		/* USERPTR buffers deliver their plane addresses at QBUF. */
+		if (b->memory == V4L2_MEMORY_USERPTR) {
+			ret = fimc_update_in_queue_addr(ctrl, ctx, b->index, buf->base);
+			if (ret < 0)
+				return ret;
+		}
+
+#if defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)
+		/* Hold a PM reference for the queued buffer; released at
+		 * err_routine on failure. */
+		pm_runtime_get_sync(ctrl->dev);
+#endif
+
+		/* Attach the buffer to the incoming queue. */
+		ret = fimc_push_inq(ctrl, ctx, b->index);
+		if (ret < 0) {
+			fimc_err("Fail: fimc_push_inq (ctx=%d ctrl>status=%d "
+					"ctx->status=%d q_idx=%d)\n", ctx_id,
+					ctrl->status, ctx->status, b->index);
+#if defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)
+			pm_runtime_put_sync(ctrl->dev);
+#endif
+			return -EINVAL;
+		}
+	} else
+		fimc_err("[ctx=%d] qbuf[%d]: not call fimc_push_inq: "
+				"ctrl->status=%d ctx->status=%d q_idx=%d\n",
+				ctx_id, __LINE__, ctrl->status, ctx->status,
+				b->index);
+
+	if ((ctrl->status == FIMC_READY_ON) ||
+	    (ctrl->status == FIMC_STREAMON_IDLE)) {
+		/* Idle hardware: dispatch the next queued buffer now. */
+		ret = fimc_pop_inq(ctrl, &ctx_num, &idx);
+		if (ret < 0) {
+			fimc_err("Fail: fimc_pop_inq (ctx=%d ctrl>status=%d "
+					"ctx->status=%d ret=%d)\n",
+					ctx_id, ctrl->status, ctx->status, ret);
+			ret = -EINVAL;
+			goto err_routine;
+		}
+
+		if (ctrl->regs == NULL) {
+			fimc_err("%s:FIMC%d power is off!!! (ctx=%d)\n",
+					__func__, ctrl->id, ctx_id);
+			return -EINVAL;
+		}
+
+		/* The popped buffer may belong to a different context. */
+		ctx = &ctrl->out->ctx[ctx_num];
+		if (ctx_num != ctrl->out->last_ctx) {
+			/* Context switch: reprogram the hw parameters. */
+			ctrl->out->last_ctx = ctx->ctx_num;
+			ret = fimc_outdev_set_ctx_param(ctrl, ctx);
+			if (ret < 0) {
+				ctx->src[b->index].state = VIDEOBUF_IDLE;
+				ctrl->out->last_ctx = -1;
+				fimc_err("Fail: fimc_outdev_set_ctx_param (ctx=%d)\n",
+						ctx_id);
+				ret = -EINVAL;
+				goto err_routine;
+			}
+		}
+
+		switch (ctx->overlay.mode) {
+		case FIMC_OVLY_FIFO:
+			ret = fimc_qbuf_output_fifo(ctrl, ctx, idx);
+			break;
+		case FIMC_OVLY_DMA_AUTO:
+			ret = fimc_qbuf_output_dma_auto(ctrl, ctx, idx);
+			break;
+		case FIMC_OVLY_DMA_MANUAL:
+			ret = fimc_qbuf_output_dma_manual(ctrl, ctx, idx);
+			break;
+		case FIMC_OVLY_NONE_SINGLE_BUF:
+			ret = fimc_qbuf_output_single_buf(ctrl, ctx, idx);
+			break;
+		case FIMC_OVLY_NONE_MULTI_BUF:
+			ret = fimc_qbuf_output_multi_buf(ctrl, ctx, idx);
+			break;
+		default:
+			break;
+		}
+	}
+
+err_routine:
+#if defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)
+	if (ret < 0)
+		pm_runtime_put_sync(ctrl->dev);
+#endif
+	return ret;
+}
+
+/*
+ * fimc_recover_output - error-recovery path for a stalled output pipeline.
+ *
+ * When runtime PM is compiled in, powers the block up long enough to stop
+ * CAMIF and clear any pending FIMC interrupt.  On builds without
+ * CONFIG_EXYNOS_DEV_PD/CONFIG_PM_RUNTIME only the software state below is
+ * reset.  Finally drops this context from the active index pair and marks
+ * both controller and context FIMC_STREAMON_IDLE so streaming can resume.
+ * Called from the dqbuf timeout path.
+ */
+void fimc_recover_output(struct fimc_control *ctrl, struct fimc_ctx *ctx)
+{
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	pm_runtime_get_sync(ctrl->dev);
+/* fimc_sfr_dump(ctrl);*/
+	fimc_outdev_stop_camif(ctrl);
+	fimc_hwset_clear_irq(ctrl);
+	pm_runtime_put_sync(ctrl->dev);
+#endif
+
+	/* Forget this context if it owns the currently active buffer. */
+	if (ctrl->out->idxs.active.ctx == ctx->ctx_num) {
+		ctrl->out->idxs.active.ctx = -1;
+		ctrl->out->idxs.active.idx = -1;
+	}
+
+	ctrl->status = FIMC_STREAMON_IDLE;
+	ctx->status = FIMC_STREAMON_IDLE;
+
+	return;
+}
+
+/*
+ * fimc_dqbuf_output - dequeue a completed output buffer for user space.
+ *
+ * Tries to pop the outgoing queue immediately; if empty, sleeps up to
+ * FIMC_DQUEUE_TIMEOUT for the ISR to complete a buffer and retries once.
+ * On timeout the pipeline is recovered and -EAGAIN is returned.
+ *
+ * NOTE(review): wait_event_timeout() never returns -ERESTARTSYS (only the
+ * _interruptible_ variants do), so the -ERESTARTSYS branch below looks
+ * unreachable; if it were taken, b->index would be set to -1 and the
+ * negative ret returned as-is - confirm intent.
+ * NOTE(review): the pm_runtime_put_sync() calls in the error paths
+ * presumably balance a pm_runtime_get_sync() taken in the qbuf path -
+ * verify the pairing on every path.
+ */
+int fimc_dqbuf_output(void *fh, struct v4l2_buffer *b)
+{
+	struct fimc_ctx *ctx;
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	int idx = -1, ret = -1;
+
+	ctx = &ctrl->out->ctx[ctx_id];
+	ret = fimc_pop_outq(ctrl, ctx, &idx);
+	if (ret < 0) {
+		ret = wait_event_timeout(ctrl->wq, (ctx->outq[0] != -1),
+							FIMC_DQUEUE_TIMEOUT);
+		if (ret == 0) {
+			fimc_dump_context(ctrl, ctx);
+			fimc_recover_output(ctrl, ctx);
+			pm_runtime_put_sync(ctrl->dev);
+			fimc_err("[0] out_queue is empty\n");
+			return -EAGAIN;
+		} else if (ret == -ERESTARTSYS) {
+			fimc_print_signal(ctrl);
+			pm_runtime_put_sync(ctrl->dev);
+		} else {
+			/* Normal case */
+			ret = fimc_pop_outq(ctrl, ctx, &idx);
+			if (ret < 0) {
+				fimc_err("[1] out_queue is empty\n");
+				fimc_dump_context(ctrl, ctx);
+				return -EINVAL;
+			}
+		}
+	}
+
+	b->index = idx;
+
+	fimc_info2("ctx(%d) dqueued idx = %d\n", ctx->ctx_num, b->index);
+
+	return ret;
+}
+
+/*
+ * fimc_g_fmt_vid_out - VIDIOC_G_FMT handler for the output device.
+ *
+ * Lazily allocates and initializes the per-controller output state
+ * (ctrl->out) on first use: spinlocks, shared/per-context queues, the
+ * prev/active/next index bookkeeping and the ctx_used table (one slot per
+ * current opener).  Then reports the pixel format cached in the caller's
+ * context.
+ *
+ * Returns 0 on success, -ENOMEM when the output state cannot be allocated.
+ */
+int fimc_g_fmt_vid_out(struct file *filp, void *fh, struct v4l2_format *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	struct fimc_outinfo *out = ctrl->out;
+	struct fimc_ctx *ctx;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	int i, j;
+	int in_use;
+
+	fimc_info1("%s: called\n", __func__);
+
+	if (!out) {
+		out = kzalloc(sizeof(*out), GFP_KERNEL);
+		if (!out) {
+			fimc_err("%s: no memory for outdev info\n", __func__);
+			return -ENOMEM;
+		}
+		ctrl->out = out;
+
+		/* init: struct fimc_outinfo */
+		out->last_ctx = -1;
+
+		spin_lock_init(&ctrl->out->lock_in);
+		spin_lock_init(&ctrl->out->lock_out);
+		spin_lock_init(&ctrl->out->slock);
+
+		/* Shared incoming queue starts empty. */
+		for (i = 0; i < FIMC_INQUEUES; i++) {
+			ctrl->out->inq[i].ctx = -1;
+			ctrl->out->inq[i].idx = -1;
+		}
+
+		/* Every context starts stopped with empty private queues. */
+		for (i = 0; i < FIMC_MAX_CTXS; i++) {
+			ctx = &ctrl->out->ctx[i];
+			ctx->ctx_num = i;
+			ctx->overlay.mode = FIMC_OVLY_NOT_FIXED;
+			ctx->status = FIMC_STREAMOFF;
+
+			for (j = 0; j < FIMC_OUTBUFS; j++) {
+				ctx->inq[j] = -1;
+				ctx->outq[j] = -1;
+			}
+		}
+
+		ctrl->out->idxs.prev.ctx = -1;
+		ctrl->out->idxs.prev.idx = -1;
+		ctrl->out->idxs.active.ctx = -1;
+		ctrl->out->idxs.active.idx = -1;
+		ctrl->out->idxs.next.ctx = -1;
+		ctrl->out->idxs.next.idx = -1;
+
+		/* Mark one ctx_used slot per opener that already exists. */
+		in_use = atomic_read(&ctrl->in_use);
+		for (i = 0; i < in_use; i++)
+			ctrl->out->ctx_used[i] = true;
+		for (i = in_use; i < FIMC_MAX_CTXS; i++)
+			ctrl->out->ctx_used[i] = false;
+	}
+
+	f->fmt.pix = ctrl->out->ctx[ctx_id].pix;
+
+	return 0;
+}
+
+/*
+ * fimc_try_fmt_vid_out - validate/adjust an output pixel format.
+ *
+ * Rejects format changes while the context is streaming.  For supported
+ * formats, fills in bytesperline/sizeimage and resets the context crop to
+ * the full frame.  Unsupported formats are overwritten with
+ * V4L2_PIX_FMT_RGB32 and -EINVAL is returned.
+ *
+ * NOTE(review): returning -EINVAL after adjusting the format deviates from
+ * the usual V4L2 TRY_FMT convention (adjust and return 0) - confirm that
+ * callers rely on the error return.
+ */
+int fimc_try_fmt_vid_out(struct file *filp, void *fh, struct v4l2_format *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	struct fimc_ctx *ctx;
+	u32 format = f->fmt.pix.pixelformat;
+
+	fimc_info1("%s: called. width(%d), height(%d)\n", __func__,
+			f->fmt.pix.width, f->fmt.pix.height);
+
+	ctx = &ctrl->out->ctx[ctx_id];
+	if (ctx->status != FIMC_STREAMOFF) {
+		fimc_err("%s: FIMC is running\n", __func__);
+		return -EBUSY;
+	}
+
+	/* Check pixel format */
+	switch (format) {
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV21:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:	/* fall through */
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_YVU420:	/* fall through */
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_RGB32:	/* fall through */
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+		break;
+	default:
+		fimc_warn("Supported format : V4L2_PIX_FMT_YUYV, "
+				"V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_NV12T, "
+				"V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_RGB565\n");
+		fimc_warn("Changed format : V4L2_PIX_FMT_RGB32\n");
+		f->fmt.pix.pixelformat = V4L2_PIX_FMT_RGB32;
+		return -EINVAL;
+	}
+
+	/* Fill the return value: bytesperline follows bytes-per-pixel. */
+	switch (format) {
+	case V4L2_PIX_FMT_RGB32:
+		f->fmt.pix.bytesperline = f->fmt.pix.width << 2;
+		break;
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_YVU420:	/* fall through */
+	case V4L2_PIX_FMT_RGB565:
+		f->fmt.pix.bytesperline = f->fmt.pix.width << 1;
+		break;
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV21:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:
+		f->fmt.pix.bytesperline = (f->fmt.pix.width * 3) >> 1;
+		break;
+
+	default:
+		/* dummy value*/
+		f->fmt.pix.bytesperline = f->fmt.pix.width;
+	}
+
+	f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+	/* Reset the crop to cover the whole frame. */
+	ctx->crop.left = 0;
+	ctx->crop.top = 0;
+	ctx->crop.width = f->fmt.pix.width;
+	ctx->crop.height = f->fmt.pix.height;
+
+	return 0;
+}
+
+/*
+ * fimc_s_fmt_vid_out - set the output pixel format for this context.
+ *
+ * Refuses to change the format while streaming; otherwise validates the
+ * request through fimc_try_fmt_vid_out() and stores the (possibly
+ * adjusted) format in the context.
+ */
+int fimc_s_fmt_vid_out(struct file *filp, void *fh, struct v4l2_format *f)
+{
+	struct fimc_prv_data *prv = (struct fimc_prv_data *)fh;
+	struct fimc_ctx *out_ctx = &prv->ctrl->out->ctx[prv->ctx_id];
+	int err;
+
+	fimc_info1("%s: called\n", __func__);
+
+	/* Format changes are only legal while the stream is off. */
+	if (out_ctx->status != FIMC_STREAMOFF) {
+		fimc_dump_context(prv->ctrl, out_ctx);
+		fimc_err("%s: FIMC is running\n", __func__);
+		return -EBUSY;
+	}
+
+	err = fimc_try_fmt_vid_out(filp, fh, f);
+	if (err < 0)
+		return err;
+
+	/* Latch the validated format into the context. */
+	out_ctx->pix = f->fmt.pix;
+
+	return err;
+}
+
+/*
+ * fimc_init_in_queue - flush @ctx's buffers from the incoming queues.
+ *
+ * Empties the context's private incoming queue and removes this context's
+ * entries from the shared incoming queue, compacting the surviving entries
+ * (those belonging to other contexts) back to the front in their original
+ * order.  Runs under lock_in.
+ */
+int fimc_init_in_queue(struct fimc_control *ctrl, struct fimc_ctx *ctx)
+{
+	struct fimc_idx swap_queue[FIMC_INQUEUES];
+	int swap_cnt = 0, i;
+	unsigned long spin_flags;
+
+	spin_lock_irqsave(&ctrl->out->lock_in, spin_flags);
+
+	/* init incoming queue */
+	for (i = 0; i < FIMC_OUTBUFS; i++)
+		ctx->inq[i] = -1;
+
+	/* init common incoming queue, saving other contexts' entries */
+	for (i = 0; i < FIMC_INQUEUES; i++) {
+		if (ctrl->out->inq[i].ctx != ctx->ctx_num) {
+			swap_queue[swap_cnt].ctx = ctrl->out->inq[i].ctx;
+			swap_queue[swap_cnt].idx = ctrl->out->inq[i].idx;
+			swap_cnt++;
+		}
+
+		ctrl->out->inq[i].ctx = -1;
+		ctrl->out->inq[i].idx = -1;
+	}
+
+	/* restore common incoming queue (compacted to the front) */
+	for (i = 0; i < swap_cnt; i++) {
+		ctrl->out->inq[i].ctx = swap_queue[i].ctx;
+		ctrl->out->inq[i].idx = swap_queue[i].idx;
+	}
+
+	spin_unlock_irqrestore(&ctrl->out->lock_in, spin_flags);
+
+	return 0;
+}
+
+/*
+ * fimc_init_out_queue - empty @ctx's outgoing queue under lock_out.
+ */
+int fimc_init_out_queue(struct fimc_control *ctrl, struct fimc_ctx *ctx)
+{
+	unsigned long spin_flags;
+	int i;
+
+	spin_lock_irqsave(&ctrl->out->lock_out, spin_flags);
+
+	/* Init outgoing queue (all slots empty). */
+	for (i = 0; i < FIMC_OUTBUFS; i++)
+		ctx->outq[i] = -1;
+
+	spin_unlock_irqrestore(&ctrl->out->lock_out, spin_flags);
+
+	return 0;
+}
+
+/*
+ * fimc_push_inq - queue buffer @idx at the head of the incoming queues.
+ *
+ * Inserts @idx into both the context-private queue and the shared
+ * controller queue, shifting existing entries one slot toward the tail,
+ * and marks the source buffer VIDEOBUF_QUEUED.
+ *
+ * NOTE(review): the queue-full test below runs before lock_in is taken,
+ * so a concurrent push could still overwrite the last slot - confirm that
+ * callers serialize qbuf for a given controller.
+ */
+int fimc_push_inq(struct fimc_control *ctrl, struct fimc_ctx *ctx, int idx)
+{
+	struct fimc_idx swap_common_inq[FIMC_INQUEUES];
+	int swap_queue[FIMC_OUTBUFS];
+	int i;
+	unsigned long spin_flags;
+
+	fimc_dbg("%s: idx = %d\n", __func__, idx);
+
+	if (ctrl->out->inq[FIMC_INQUEUES-1].idx != -1) {
+		fimc_err("FULL: common incoming queue(%d)\n",
+				ctrl->out->inq[FIMC_INQUEUES-1].idx);
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&ctrl->out->lock_in, spin_flags);
+
+	/* ctx own incoming queue */
+	/* Backup original queue */
+	for (i = 0; i < FIMC_OUTBUFS; i++)
+		swap_queue[i] = ctx->inq[i];
+
+	/* Attach new idx */
+	ctx->inq[0] = idx;
+	ctx->src[idx].state = VIDEOBUF_QUEUED;
+	ctx->src[idx].flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED;
+
+	/* Shift the original queue */
+	for (i = 1; i < FIMC_OUTBUFS; i++)
+		ctx->inq[i] = swap_queue[i-1];
+
+	/* Common incoming queue */
+	/* Backup original queue */
+	for (i = 0; i < FIMC_INQUEUES; i++) {
+		swap_common_inq[i].ctx = ctrl->out->inq[i].ctx;
+		swap_common_inq[i].idx = ctrl->out->inq[i].idx;
+	}
+
+	/* Attach new idx */
+	ctrl->out->inq[0].ctx = ctx->ctx_num;
+	ctrl->out->inq[0].idx = idx;
+
+	/* Shift the original queue */
+	for (i = 1; i < FIMC_INQUEUES; i++) {
+		ctrl->out->inq[i].ctx = swap_common_inq[i-1].ctx;
+		ctrl->out->inq[i].idx = swap_common_inq[i-1].idx;
+	}
+
+	spin_unlock_irqrestore(&ctrl->out->lock_in, spin_flags);
+
+	return 0;
+}
+
+/*
+ * fimc_pop_inq - dequeue the oldest entry from the incoming queues.
+ *
+ * Scans the shared queue from the tail for the oldest valid entry,
+ * returning its owner through @ctx_num and its index through @idx, then
+ * removes the matching oldest entry from that context's private queue and
+ * marks the buffer VIDEOBUF_ACTIVE.  A disagreement between the two
+ * queues is only logged.  Returns -EINVAL when either queue is empty.
+ */
+int fimc_pop_inq(struct fimc_control *ctrl, int *ctx_num, int *idx)
+{
+	struct fimc_ctx *ctx;
+	unsigned long spin_flags;
+	int i, ret = 0;
+	int ctx_idx = -1;
+
+	spin_lock_irqsave(&ctrl->out->lock_in, spin_flags);
+
+	/* find valid index from common incoming queue */
+	for (i = (FIMC_INQUEUES-1); i >= 0; i--) {
+		if (ctrl->out->inq[i].ctx != -1) {
+			*ctx_num = ctrl->out->inq[i].ctx;
+			*idx = ctrl->out->inq[i].idx;
+			ctrl->out->inq[i].ctx = -1;
+			ctrl->out->inq[i].idx = -1;
+			break;
+		}
+	}
+
+	/* common incoming queue is empty. */
+	if (i < 0) {
+		spin_unlock_irqrestore(&ctrl->out->lock_in, spin_flags);
+		return -EINVAL;
+	}
+
+	/* find valid index from incoming queue. */
+	ctx = &ctrl->out->ctx[*ctx_num];
+	for (i = (FIMC_OUTBUFS-1); i >= 0; i--) {
+		if (ctx->inq[i] != -1) {
+			ctx_idx = ctx->inq[i];
+			ctx->inq[i] = -1;
+			ctx->src[ctx_idx].state = VIDEOBUF_ACTIVE;
+			ctx->src[ctx_idx].flags = V4L2_BUF_FLAG_MAPPED;
+			break;
+		}
+	}
+
+	/* The two queues should always agree on the oldest buffer. */
+	if (*idx != ctx_idx)
+		fimc_err("common inq(%d) vs inq(%d) mismatch\n", *idx, ctx_idx);
+
+	/* incoming queue is empty. */
+	if (i < 0)
+		ret = -EINVAL;
+	else
+		fimc_dbg("%s: index = %d\n", __func__, *idx);
+
+	spin_unlock_irqrestore(&ctrl->out->lock_in, spin_flags);
+
+	return ret;
+}
+
+/*
+ * fimc_push_outq - insert @idx at the head of @ctx's outgoing queue.
+ *
+ * Older entries are shifted one slot toward the tail, and the buffer is
+ * marked VIDEOBUF_DONE so user space can dequeue it.  Runs under lock_out.
+ */
+int fimc_push_outq(struct fimc_control *ctrl, struct fimc_ctx *ctx, int idx)
+{
+	unsigned long irq_flags;
+	int slot;
+
+	fimc_dbg("%s: index = %d\n", __func__, idx);
+
+	spin_lock_irqsave(&ctrl->out->lock_out, irq_flags);
+
+	/* Shift the existing entries toward the tail, in place. */
+	for (slot = FIMC_OUTBUFS - 1; slot > 0; slot--)
+		ctx->outq[slot] = ctx->outq[slot - 1];
+
+	/* The new index takes the head slot; mark the buffer done. */
+	ctx->outq[0] = idx;
+	ctx->src[idx].state = VIDEOBUF_DONE;
+	ctx->src[idx].flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE;
+
+	spin_unlock_irqrestore(&ctrl->out->lock_out, irq_flags);
+
+	return 0;
+}
+
+/*
+ * fimc_pop_outq - take the oldest completed buffer off the outgoing queue.
+ *
+ * Scans from the tail for the last valid index, returns it through @idx
+ * and marks the buffer VIDEOBUF_IDLE.  Returns -EINVAL when the queue is
+ * empty.  Runs under lock_out.
+ */
+int fimc_pop_outq(struct fimc_control *ctrl, struct fimc_ctx *ctx, int *idx)
+{
+	unsigned long spin_flags;
+	int i, ret = 0;
+
+	spin_lock_irqsave(&ctrl->out->lock_out, spin_flags);
+
+	/* Find last valid idx in outgoing queue. */
+	for (i = (FIMC_OUTBUFS-1); i >= 0; i--) {
+		if (ctx->outq[i] != -1) {
+			*idx = ctx->outq[i];
+			ctx->outq[i] = -1;
+			ctx->src[*idx].state = VIDEOBUF_IDLE;
+			ctx->src[*idx].flags = V4L2_BUF_FLAG_MAPPED;
+			break;
+		}
+	}
+
+	/* outgoing queue is empty. */
+	if (i < 0) {
+		ret = -EINVAL;
+		fimc_dbg("%s: outgoing queue : %d, %d, %d\n", __func__,
+				ctx->outq[0], ctx->outq[1], ctx->outq[2]);
+	} else
+		fimc_dbg("%s: idx = %d\n", __func__, *idx);
+
+
+	spin_unlock_irqrestore(&ctrl->out->lock_out, spin_flags);
+
+	return ret;
+}
+
+/*
+ * fimc_dump_context - log the queue and index state for debugging.
+ *
+ * Prints the controller/context status, the shared incoming queue, the
+ * context's private in/out queues and the prev/active/next index pairs.
+ */
+void fimc_dump_context(struct fimc_control *ctrl, struct fimc_ctx *ctx)
+{
+	int i = 0;
+
+	fimc_err("ctx%d, ctrl->status: %d, ctx->status: %d\n",
+			ctx->ctx_num, ctrl->status, ctx->status);
+
+	for (i = 0; i < FIMC_INQUEUES; i++)
+		fimc_err("ctrl->inq[%d]: ctx(%d) idx(%d)\n",
+			i, ctrl->out->inq[i].ctx, ctrl->out->inq[i].idx);
+
+	for (i = 0; i < FIMC_OUTBUFS; i++)
+		fimc_err("inq[%d] = %d\n", i, ctx->inq[i]);
+
+	for (i = 0; i < FIMC_OUTBUFS; i++)
+		fimc_err("outq[%d] = %d\n", i, ctx->outq[i]);
+
+	fimc_err("state : prev.ctx(%d), prev.idx(%d) "
+		 "active.ctx(%d), active.idx(%d) "
+		 "next.ctx(%d), next.idx(%d)\n",
+		 ctrl->out->idxs.prev.ctx, ctrl->out->idxs.prev.idx,
+		 ctrl->out->idxs.active.ctx, ctrl->out->idxs.active.idx,
+		 ctrl->out->idxs.next.ctx, ctrl->out->idxs.next.idx);
+}
+
+/*
+ * fimc_print_signal - debug-log the pending signal masks of current.
+ *
+ * NOTE(review): both branches print the same values; only the leading
+ * '.' vs ':' marker distinguishes whether a signal is actually pending.
+ */
+void fimc_print_signal(struct fimc_control *ctrl)
+{
+	if (signal_pending(current)) {
+		fimc_dbg(".pend=%.8lx shpend=%.8lx\n",
+			current->pending.signal.sig[0],
+			current->signal->shared_pending.signal.sig[0]);
+	} else {
+		fimc_dbg(":pend=%.8lx shpend=%.8lx\n",
+			current->pending.signal.sig[0],
+			current->signal->shared_pending.signal.sig[0]);
+	}
+}
diff --git a/drivers/media/video/samsung/fimc/fimc_overlay.c b/drivers/media/video/samsung/fimc/fimc_overlay.c
new file mode 100644
index 0000000..743decc
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc_overlay.c
@@ -0,0 +1,287 @@
+/* linux/drivers/media/video/samsung/fimc/fimc_overlay.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * V4L2 Overlay device support file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <plat/media.h>
+
+#include "fimc.h"
+
+/*
+ * fimc_try_fmt_overlay - clamp the overlay window to the LCD resolution.
+ *
+ * Non-overlay destination modes (NONE_SINGLE_BUF / NONE_MULTI_BUF) pass
+ * through untouched.  Otherwise the window is limited against the LCD
+ * extents, swapping the horizontal/vertical limits when the configured
+ * rotation transposes the image.  Always returns 0; out-of-range
+ * dimensions are clamped, never rejected.
+ */
+int fimc_try_fmt_overlay(struct file *filp, void *fh, struct v4l2_format *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	struct fimc_ctx *ctx;
+
+	u32 is_rotate = 0;
+	ctx = &ctrl->out->ctx[ctx_id];
+
+	fimc_info1("%s: top(%d) left(%d) width(%d) height(%d)\n", __func__,
+			f->fmt.win.w.top, f->fmt.win.w.left,
+			f->fmt.win.w.width, f->fmt.win.w.height);
+
+	if (ctx->overlay.mode == FIMC_OVLY_NONE_SINGLE_BUF ||
+		(ctx->overlay.mode == FIMC_OVLY_NONE_MULTI_BUF))
+		return 0;
+
+	/* Check Overlay Size : Overlay size must be smaller than LCD size. */
+	is_rotate = fimc_mapping_rot_flip(ctx->rotate, ctx->flip);
+	if (is_rotate & FIMC_ROT) {		/* Landscape mode */
+		if (f->fmt.win.w.width > ctrl->fb.lcd_vres) {
+			fimc_warn("The width is changed %d -> %d\n",
+				f->fmt.win.w.width, ctrl->fb.lcd_vres);
+			f->fmt.win.w.width = ctrl->fb.lcd_vres;
+		}
+
+		if (f->fmt.win.w.height > ctrl->fb.lcd_hres) {
+			fimc_warn("The height is changed %d -> %d\n",
+				f->fmt.win.w.height, ctrl->fb.lcd_hres);
+			f->fmt.win.w.height = ctrl->fb.lcd_hres;
+		}
+	} else {				/* Portrait mode */
+		if (f->fmt.win.w.width > ctrl->fb.lcd_hres) {
+			fimc_warn("The width is changed %d -> %d\n",
+				f->fmt.win.w.width, ctrl->fb.lcd_hres);
+			f->fmt.win.w.width = ctrl->fb.lcd_hres;
+		}
+
+		if (f->fmt.win.w.height > ctrl->fb.lcd_vres) {
+			fimc_warn("The height is changed %d -> %d\n",
+				f->fmt.win.w.height, ctrl->fb.lcd_vres);
+			f->fmt.win.w.height = ctrl->fb.lcd_vres;
+		}
+	}
+
+	return 0;
+}
+
+/* Report the overlay window currently cached in this context. */
+int fimc_g_fmt_vid_overlay(struct file *filp, void *fh, struct v4l2_format *f)
+{
+	struct fimc_prv_data *prv = (struct fimc_prv_data *)fh;
+	struct fimc_ctx *ovl_ctx = &prv->ctrl->out->ctx[prv->ctx_id];
+
+	fimc_info1("%s: called\n", __func__);
+
+	/* Hand back a copy of the stored overlay window. */
+	f->fmt.win = ovl_ctx->win;
+
+	return 0;
+}
+
+/*
+ * fimc_check_pos - verify that only the window position changes.
+ *
+ * While streaming the overlay may be moved but not resized; reject any
+ * request whose width or height differs from the current window.
+ */
+static int fimc_check_pos(struct fimc_control *ctrl,
+			  struct fimc_ctx *ctx,
+			  struct v4l2_format *f)
+{
+	if (ctx->win.w.width != f->fmt.win.w.width) {
+		fimc_err("%s: cannot change width(%d,%d)\n", __func__,
+			 ctx->win.w.width, f->fmt.win.w.width);
+		return -EINVAL;
+	}
+
+	if (ctx->win.w.height != f->fmt.win.w.height) {
+		fimc_err("%s: cannot change height(%d,%d)\n", __func__,
+			 ctx->win.w.height, f->fmt.win.w.height);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_change_fifo_position - move the FIMD window in FIFO overlay mode.
+ *
+ * Translates the context's overlay window into FIMD coordinates and
+ * issues S3CFB_WIN_POSITION through the framebuffer direct-ioctl
+ * interface.  Returns 0 on success, -EINVAL on any failure.
+ */
+static int fimc_change_fifo_position(struct fimc_control *ctrl,
+				     struct fimc_ctx *ctx) {
+	struct v4l2_rect fimd_rect;
+	struct s3cfb_user_window window;
+	int ret = -1;
+
+	memset(&fimd_rect, 0, sizeof(struct v4l2_rect));
+
+	ret = fimc_fimd_rect(ctrl, ctx, &fimd_rect);
+	if (ret < 0) {
+		fimc_err("fimc_fimd_rect fail\n");
+		return -EINVAL;
+	}
+
+	/* Update WIN position */
+	window.x = fimd_rect.left;
+	window.y = fimd_rect.top;
+	ret = s3cfb_direct_ioctl(ctrl->id, S3CFB_WIN_POSITION,
+			(unsigned long)&window);
+	if (ret < 0) {
+		fimc_err("direct_ioctl(S3CFB_WIN_POSITION) fail\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_s_fmt_vid_overlay - set the overlay window for this context.
+ *
+ * While streaming, only a position change is allowed: the size is
+ * validated by fimc_check_pos() and the FIMD window is then moved.
+ * While stopped, the window is simply clamped and stored.  Any other
+ * context state is rejected with -EBUSY.
+ */
+int fimc_s_fmt_vid_overlay(struct file *filp, void *fh, struct v4l2_format *f)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	struct fimc_ctx *ctx;
+	int ret = -1;
+	ctx = &ctrl->out->ctx[ctx_id];
+
+	fimc_info1("%s: called\n", __func__);
+
+	switch (ctx->status) {
+	case FIMC_STREAMON:
+		ret = fimc_check_pos(ctrl, ctx, f);
+		if (ret < 0) {
+			fimc_err("When FIMC is running, "
+					"you can only move the position.\n");
+			return -EBUSY;
+		}
+
+		ret = fimc_try_fmt_overlay(filp, fh, f);
+		if (ret < 0)
+			return ret;
+
+		ctx->win = f->fmt.win;
+		fimc_change_fifo_position(ctrl, ctx);
+
+		break;
+	case FIMC_STREAMOFF:
+		ret = fimc_try_fmt_overlay(filp, fh, f);
+		if (ret < 0)
+			return ret;
+		ctx->win = f->fmt.win;
+
+		break;
+
+	default:
+		/* NOTE(review): the two error lines below are redundant. */
+		fimc_err("FIMC is running\n");
+		fimc_err("%s::FIMC is running(%d)\n", __func__, ctx->status);
+		return -EBUSY;
+	}
+
+	return ret;
+}
+
+/*
+ * fimc_g_fbuf - report the framebuffer parameters of this context.
+ *
+ * Copies the cached framebuffer settings into @fb, recomputing
+ * bytesperline from the pixel format's bytes-per-pixel.
+ *
+ * NOTE(review): this "get" handler also writes the recomputed
+ * bytesperline back into ctx->fbuf - confirm the side effect is intended.
+ * Formats not listed in the switch fall through with bpp = 1.
+ */
+int fimc_g_fbuf(struct file *filp, void *fh, struct v4l2_framebuffer *fb)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	struct fimc_ctx *ctx;
+	u32 bpp = 1, format;
+
+	ctx = &ctrl->out->ctx[ctx_id];
+
+	fimc_info1("%s: called\n", __func__);
+
+	fb->capability = ctx->fbuf.capability;
+	fb->flags = 0;
+	fb->base = ctx->fbuf.base;
+
+	fb->fmt.width = ctx->fbuf.fmt.width;
+	fb->fmt.height = ctx->fbuf.fmt.height;
+	fb->fmt.pixelformat = ctx->fbuf.fmt.pixelformat;
+	format = ctx->fbuf.fmt.pixelformat;
+
+	switch (format) {
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_YVU420:	/* fall through */
+	case V4L2_PIX_FMT_NV12:
+		bpp = 1;
+		break;
+	case V4L2_PIX_FMT_RGB565:
+		bpp = 2;
+		break;
+	case V4L2_PIX_FMT_RGB32:
+		bpp = 4;
+		break;
+	}
+
+	ctx->fbuf.fmt.bytesperline = fb->fmt.width * bpp;
+	fb->fmt.bytesperline = ctx->fbuf.fmt.bytesperline;
+	fb->fmt.sizeimage = ctx->fbuf.fmt.sizeimage;
+	fb->fmt.colorspace = V4L2_COLORSPACE_SMPTE170M;
+	fb->fmt.priv = 0;
+
+	return 0;
+}
+
+/*
+ * fimc_s_fbuf - configure the destination framebuffer for this context.
+ *
+ * In NONE_MULTI_BUF mode the requested format is stored as-is.  Otherwise
+ * a non-NULL fb->base selects NONE_SINGLE_BUF mode with the given format,
+ * while a NULL base returns the overlay mode to NOT_FIXED.
+ *
+ * NOTE(review): the two format switches differ only in YVU420, which is
+ * handled in the single-buffer path but not the multi-buffer one; since
+ * bpp is pre-initialized to 1 the computed bytesperline is the same
+ * either way.
+ */
+int fimc_s_fbuf(struct file *filp, void *fh, struct v4l2_framebuffer *fb)
+{
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
+	struct fimc_ctx *ctx;
+	u32 bpp = 1;
+	u32 format = fb->fmt.pixelformat;
+	ctx = &ctrl->out->ctx[ctx_id];
+
+	fimc_info1("%s: called. width(%d), height(%d)\n",
+			__func__, fb->fmt.width, fb->fmt.height);
+
+	ctx->fbuf.capability = V4L2_FBUF_CAP_EXTERNOVERLAY;
+	ctx->fbuf.flags = 0;
+	ctx->fbuf.base = fb->base;
+
+	if (ctx->overlay.mode == FIMC_OVLY_NONE_MULTI_BUF) {
+		ctx->fbuf.fmt.width = fb->fmt.width;
+		ctx->fbuf.fmt.height = fb->fmt.height;
+		ctx->fbuf.fmt.pixelformat = fb->fmt.pixelformat;
+
+		switch (format) {
+		case V4L2_PIX_FMT_NV21:		/* fall through */
+		case V4L2_PIX_FMT_YUV420:	/* fall through */
+		case V4L2_PIX_FMT_NV12:
+			bpp = 1;
+			break;
+		case V4L2_PIX_FMT_RGB565:
+			bpp = 2;
+			break;
+		case V4L2_PIX_FMT_RGB32:
+			bpp = 4;
+			break;
+		}
+
+		ctx->fbuf.fmt.bytesperline = fb->fmt.width * bpp;
+		ctx->fbuf.fmt.sizeimage = fb->fmt.sizeimage;
+		ctx->fbuf.fmt.colorspace = V4L2_COLORSPACE_SMPTE170M;
+		ctx->fbuf.fmt.priv = 0;
+	} else if (fb->base) {
+		ctx->fbuf.fmt.width = fb->fmt.width;
+		ctx->fbuf.fmt.height = fb->fmt.height;
+		ctx->fbuf.fmt.pixelformat = fb->fmt.pixelformat;
+
+		switch (format) {
+		case V4L2_PIX_FMT_NV21:		/* fall through */
+		case V4L2_PIX_FMT_YUV420:	/* fall through */
+		case V4L2_PIX_FMT_YVU420:	/* fall through */
+		case V4L2_PIX_FMT_NV12:
+			bpp = 1;
+			break;
+		case V4L2_PIX_FMT_RGB565:
+			bpp = 2;
+			break;
+		case V4L2_PIX_FMT_RGB32:
+			bpp = 4;
+			break;
+		}
+
+		ctx->fbuf.fmt.bytesperline = fb->fmt.width * bpp;
+		ctx->fbuf.fmt.sizeimage = fb->fmt.sizeimage;
+		ctx->fbuf.fmt.colorspace = V4L2_COLORSPACE_SMPTE170M;
+		ctx->fbuf.fmt.priv = 0;
+
+		ctx->overlay.mode = FIMC_OVLY_NONE_SINGLE_BUF;
+	} else {
+		/* NULL base: no destination yet, mode must be re-chosen. */
+		ctx->overlay.mode = FIMC_OVLY_NOT_FIXED;
+	}
+
+	return 0;
+}
diff --git a/drivers/media/video/samsung/fimc/fimc_regs.c b/drivers/media/video/samsung/fimc/fimc_regs.c
new file mode 100644
index 0000000..332e5db
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc_regs.c
@@ -0,0 +1,2119 @@
+/* linux/drivers/media/video/samsung/fimc/fimc_regs.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register interface file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/videodev2.h>
+#include <linux/videodev2_exynos_media.h>
+#include <linux/videodev2_exynos_camera.h>
+#include <linux/io.h>
+#include <mach/map.h>
+#include <plat/regs-fimc.h>
+#include <plat/fimc.h>
+
+#include "fimc.h"
+
+/* struct fimc_limit: Limits for FIMC */
+/*
+ * Per-revision scaler/rotator size limits, indexed by FIMC device number.
+ * NOTE(review): the FIMC 5.1 table initializes four entries while the
+ * others initialize three; any remaining FIMC_DEVICES slots stay
+ * zero-filled - confirm FIMC_DEVICES matches each SoC's device count.
+ */
+struct fimc_limit fimc40_limits[FIMC_DEVICES] = {
+	{
+		.pre_dst_w	= 3264,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 3264,
+		.trg_h_rot	= 1280,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1280,
+	}, {
+		.pre_dst_w	= 1280,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 1280,
+		.trg_h_rot	= 8192,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 768,
+	}, {
+		.pre_dst_w	= 1440,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 1440,
+		.trg_h_rot	= 0,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 0,
+	},
+};
+
+struct fimc_limit fimc43_limits[FIMC_DEVICES] = {
+	{
+		.pre_dst_w	= 4224,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 4224,
+		.trg_h_rot	= 1920,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1920,
+	}, {
+		.pre_dst_w	= 4224,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 4224,
+		.trg_h_rot	= 1920,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1920,
+	}, {
+		.pre_dst_w	= 1920,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 1920,
+		.trg_h_rot	= 1280,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1280,
+	},
+};
+
+struct fimc_limit fimc50_limits[FIMC_DEVICES] = {
+	{
+		.pre_dst_w	= 4224,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 4224,
+		.trg_h_rot	= 1920,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1920,
+	}, {
+		.pre_dst_w	= 4224,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 4224,
+		.trg_h_rot	= 1920,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1920,
+	}, {
+		.pre_dst_w	= 1920,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 1920,
+		.trg_h_rot	= 1280,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1280,
+	},
+};
+
+struct fimc_limit fimc51_limits[FIMC_DEVICES] = {
+	{
+		.pre_dst_w	= 4224,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 4224,
+		.trg_h_rot	= 1920,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1920,
+	}, {
+		.pre_dst_w	= 4224,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 4224,
+		.trg_h_rot	= 1920,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1920,
+	}, {
+		.pre_dst_w	= 4224,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 4224,
+		.trg_h_rot	= 1920,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1920,
+	}, {
+
+		.pre_dst_w	= 1920,
+		.bypass_w	= 8192,
+		.trg_h_no_rot	= 1920,
+		.trg_h_rot	= 1280,
+		.real_w_no_rot	= 8192,
+		.real_h_rot	= 1280,
+	},
+};
+
+/*
+ * fimc_hwset_camera_source - program CISRCFMT from the active camera.
+ *
+ * Always selects ITU601 8-bit mode, applies the camera's Y/C order and
+ * (for ITU cameras) its bus format, and programs the source size - taken
+ * from the FIMC-IS subdev format when one is bound, otherwise from the
+ * camera platform data.
+ *
+ * NOTE(review): unlike the sibling hwset helpers, ctrl->cam is used here
+ * without a NULL check - confirm callers guarantee an active camera.
+ */
+int fimc_hwset_camera_source(struct fimc_control *ctrl)
+{
+	struct s3c_platform_camera *cam = ctrl->cam;
+	u32 cfg = 0;
+
+	/* for now, we support only ITU601 8 bit mode */
+	cfg |= S3C_CISRCFMT_ITU601_8BIT;
+	cfg |= cam->order422;
+
+	if (cam->type == CAM_TYPE_ITU)
+		cfg |= cam->fmt;
+
+	if (ctrl->is.sd) {
+		cfg |= S3C_CISRCFMT_SOURCEHSIZE(ctrl->is.fmt.width);
+		cfg |= S3C_CISRCFMT_SOURCEVSIZE(ctrl->is.fmt.height);
+	} else {
+		cfg |= S3C_CISRCFMT_SOURCEHSIZE(cam->width);
+		cfg |= S3C_CISRCFMT_SOURCEVSIZE(cam->height);
+	}
+
+	writel(cfg, ctrl->regs + S3C_CISRCFMT);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_camera_change_source - reprogram CISRCFMT for a zoom change.
+ *
+ * Identical to fimc_hwset_camera_source() except that, when an FIMC-IS
+ * subdev is bound, the zoom_in_width/height values are used as the
+ * source size.
+ */
+int fimc_hwset_camera_change_source(struct fimc_control *ctrl)
+{
+	struct s3c_platform_camera *cam = ctrl->cam;
+	u32 cfg = 0;
+
+	/* for now, we support only ITU601 8 bit mode */
+	cfg |= S3C_CISRCFMT_ITU601_8BIT;
+	cfg |= cam->order422;
+
+	if (cam->type == CAM_TYPE_ITU)
+		cfg |= cam->fmt;
+
+	if (ctrl->is.sd) {
+		cfg |= S3C_CISRCFMT_SOURCEHSIZE(ctrl->is.zoom_in_width);
+		cfg |= S3C_CISRCFMT_SOURCEVSIZE(ctrl->is.zoom_in_height);
+	} else {
+		cfg |= S3C_CISRCFMT_SOURCEHSIZE(cam->width);
+		cfg |= S3C_CISRCFMT_SOURCEVSIZE(cam->height);
+	}
+
+	writel(cfg, ctrl->regs + S3C_CISRCFMT);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_enable_irq - enable the FIMC interrupt in CIGCTRL.
+ *
+ * @overflow: also enable the FIFO-overflow interrupt.
+ * @level: use level-triggered instead of pulse interrupt signalling.
+ */
+int fimc_hwset_enable_irq(struct fimc_control *ctrl, int overflow, int level)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIGCTRL);
+
+	cfg &= ~(S3C_CIGCTRL_IRQ_OVFEN | S3C_CIGCTRL_IRQ_LEVEL);
+	cfg |= S3C_CIGCTRL_IRQ_ENABLE;
+
+	if (overflow)
+		cfg |= S3C_CIGCTRL_IRQ_OVFEN;
+
+	if (level)
+		cfg |= S3C_CIGCTRL_IRQ_LEVEL;
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+
+/* Mask FIMC interrupts: clear the enable and overflow-enable bits. */
+int fimc_hwset_disable_irq(struct fimc_control *ctrl)
+{
+	u32 gctrl;
+
+	gctrl = readl(ctrl->regs + S3C_CIGCTRL);
+	gctrl &= ~(S3C_CIGCTRL_IRQ_OVFEN | S3C_CIGCTRL_IRQ_ENABLE);
+	writel(gctrl, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+
+/* Acknowledge the pending FIMC interrupt via the IRQ_CLR bit. */
+int fimc_hwset_clear_irq(struct fimc_control *ctrl)
+{
+	u32 gctrl;
+
+	gctrl = readl(ctrl->regs + S3C_CIGCTRL);
+	gctrl |= S3C_CIGCTRL_IRQ_CLR;
+	writel(gctrl, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+
+/* Program the target-area register with the given pixel count. */
+int fimc_hwset_output_area_size(struct fimc_control *ctrl, u32 size)
+{
+	writel(S3C_CITAREA_TARGET_AREA(size), ctrl->regs + S3C_CITAREA);
+
+	return 0;
+}
+
+
+/*
+ * fimc_hwset_image_effect - program CIIMGEFF from ctrl->fe.
+ *
+ * When the image effect is disabled the register is simply cleared.
+ * Otherwise the effect is placed before/after the scaler, the effect
+ * type (fin) is set, and for the arbitrary-CbCr effect the Cb/Cr
+ * patterns are programmed before enabling the block.
+ */
+int fimc_hwset_image_effect(struct fimc_control *ctrl)
+{
+	u32 cfg = 0;
+
+	if (ctrl->fe.ie_on) {
+		if (ctrl->fe.ie_after_sc)
+			cfg |= S3C_CIIMGEFF_IE_SC_AFTER;
+
+		cfg |= S3C_CIIMGEFF_FIN(ctrl->fe.fin);
+
+		if (ctrl->fe.fin == FIMC_EFFECT_FIN_ARBITRARY_CBCR) {
+			cfg |= S3C_CIIMGEFF_PAT_CB(ctrl->fe.pat_cb)
+				| S3C_CIIMGEFF_PAT_CR(ctrl->fe.pat_cr);
+		}
+
+		cfg |= S3C_CIIMGEFF_IE_ENABLE;
+	}
+
+	writel(cfg, ctrl->regs + S3C_CIIMGEFF);
+
+	return 0;
+}
+
+/*
+ * fimc_reset_cfg - restore a table of FIMC registers to default values.
+ *
+ * Each table row is { register offset, reset value }.  The loop bound
+ * sizeof(cfg) / 8 equals the row count because every row is two u32s;
+ * ARRAY_SIZE(cfg) would express the same intent more robustly.
+ */
+static void fimc_reset_cfg(struct fimc_control *ctrl)
+{
+	int i;
+	u32 cfg[][2] = {
+		{ 0x018, 0x00000000 }, { 0x01c, 0x00000000 },
+		{ 0x020, 0x00000000 }, { 0x024, 0x00000000 },
+		{ 0x028, 0x00000000 }, { 0x02c, 0x00000000 },
+		{ 0x030, 0x00000000 }, { 0x034, 0x00000000 },
+		{ 0x038, 0x00000000 }, { 0x03c, 0x00000000 },
+		{ 0x040, 0x00000000 }, { 0x044, 0x00000000 },
+		{ 0x048, 0x00000000 }, { 0x04c, 0x00000000 },
+		{ 0x050, 0x00000000 }, { 0x054, 0x00000000 },
+		{ 0x058, 0x18000000 }, { 0x05c, 0x00000000 },
+		{ 0x064, 0x00000000 },
+		{ 0x0c0, 0x00000000 }, { 0x0c4, 0xffffffff },
+		{ 0x0d0, 0x00100080 }, { 0x0d4, 0x00000000 },
+		{ 0x0d8, 0x00000000 }, { 0x0dc, 0x00000000 },
+		{ 0x0f8, 0x00000000 }, { 0x0fc, 0x04000000 },
+		{ 0x168, 0x00000000 }, { 0x16c, 0x00000000 },
+		{ 0x170, 0x00000000 }, { 0x174, 0x00000000 },
+		{ 0x178, 0x00000000 }, { 0x17c, 0x00000000 },
+		{ 0x180, 0x00000000 }, { 0x184, 0x00000000 },
+		{ 0x188, 0x00000000 }, { 0x18c, 0x00000000 },
+		{ 0x194, 0x0000001e },
+	};
+
+	for (i = 0; i < sizeof(cfg) / 8; i++)
+		writel(cfg[i][1], ctrl->regs + cfg[i][0]);
+}
+
+/*
+ * fimc_hwset_reset - full software reset of the FIMC block.
+ *
+ * Forces ITU601 8-bit mode (required before reset), pulses the SWRST bit
+ * with a 1 ms settle delay, clears ITU601 again for ITU656 cameras
+ * (CISRCFMT[31] must be 0 in that mode), and finally restores the
+ * register defaults via fimc_reset_cfg().
+ */
+int fimc_hwset_reset(struct fimc_control *ctrl)
+{
+	u32 cfg = 0;
+
+	cfg = readl(ctrl->regs + S3C_CISRCFMT);
+	cfg |= S3C_CISRCFMT_ITU601_8BIT;
+	writel(cfg, ctrl->regs + S3C_CISRCFMT);
+
+	/* s/w reset */
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg |= (S3C_CIGCTRL_SWRST);
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+	mdelay(1);
+
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg &= ~S3C_CIGCTRL_SWRST;
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	/* in case of ITU656, CISRCFMT[31] should be 0 */
+	if ((ctrl->cap != NULL) && (ctrl->cam != NULL)) {
+		if (ctrl->cam->fmt == ITU_656_YCBCR422_8BIT) {
+			cfg = readl(ctrl->regs + S3C_CISRCFMT);
+			cfg &= ~S3C_CISRCFMT_ITU601_8BIT;
+			writel(cfg, ctrl->regs + S3C_CISRCFMT);
+		}
+	}
+
+	fimc_reset_cfg(ctrl);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_sw_reset - soft-reset the FIMC block.
+ *
+ * Polls the ENVID status up to FIMC_SWRST_RETRIES times (100 us apart) so
+ * the reset is not issued while the input DMA is still running, then
+ * pulses the SWRST bit in CIGCTRL.  ITU601 8-bit mode is forced in
+ * CISRCFMT beforehand, as required before a reset.
+ *
+ * Always returns 0; a reset during DMA activity is only logged.
+ * (Fixes checkpatch-style `if(` spacing and names the magic retry count.)
+ */
+int fimc_hwset_sw_reset(struct fimc_control *ctrl)
+{
+	enum { FIMC_SWRST_RETRIES = 10 };
+	u32 cfg = 0;
+	u32 status;
+	int i;
+
+	/* Wait for the capture input DMA to go idle before resetting. */
+	for (i = 0; i < FIMC_SWRST_RETRIES; i++) {
+		cfg = readl(ctrl->regs + S3C_CISTATUS);
+		status = S3C_CISTATUS_GET_ENVID_STATUS(cfg);
+		if (status == 0)
+			break;
+		udelay(100);
+	}
+
+	if (i == FIMC_SWRST_RETRIES)
+		fimc_err("%s: SWRST is called while Input DMA RUNNING\n", __func__);
+
+	cfg = readl(ctrl->regs + S3C_CISRCFMT);
+	cfg |= S3C_CISRCFMT_ITU601_8BIT;
+	writel(cfg, ctrl->regs + S3C_CISRCFMT);
+
+	/* s/w reset: pulse the SWRST bit. */
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg |= (S3C_CIGCTRL_SWRST);
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg &= ~S3C_CIGCTRL_SWRST;
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+
+/* Select HCLK or SCLK as the FIMC source clock in the MISC register. */
+int fimc_hwset_clksrc(struct fimc_control *ctrl, int src_clk)
+{
+	u32 misc;
+
+	misc = readl(ctrl->regs + S3C_MISC_FIMC);
+	misc &= ~S3C_CLKSRC_HCLK_MASK;
+
+	switch (src_clk) {
+	case FIMC_HCLK:
+		misc |= S3C_CLKSRC_HCLK;
+		break;
+	case FIMC_SCLK:
+		misc |= S3C_CLKSRC_SCLK;
+		break;
+	default:
+		/* unknown source: leave the clock-source field cleared */
+		break;
+	}
+
+	writel(misc, ctrl->regs + S3C_MISC_FIMC);
+	return 0;
+}
+
+/*
+ * fimc_hwget_overflow_state - detect and clear a FIFO overflow condition.
+ *
+ * Checks the Y/Cb/Cr overflow flags in CISTATUS; if any is set, pulses
+ * the corresponding clear bits in CIWDOFST and logs the event.
+ *
+ * Returns 1 when an overflow was detected (and cleared), 0 otherwise.
+ */
+int fimc_hwget_overflow_state(struct fimc_control *ctrl)
+{
+	u32 cfg, status, flag;
+
+	status = readl(ctrl->regs + S3C_CISTATUS);
+	flag = S3C_CISTATUS_OVFIY | S3C_CISTATUS_OVFICB | S3C_CISTATUS_OVFICR;
+
+	if (status & flag) {
+		/* Pulse the overflow-clear bits: set them... */
+		cfg = readl(ctrl->regs + S3C_CIWDOFST);
+		cfg |= (S3C_CIWDOFST_CLROVFIY | S3C_CIWDOFST_CLROVFICB |
+			S3C_CIWDOFST_CLROVFICR);
+		writel(cfg, ctrl->regs + S3C_CIWDOFST);
+
+		/* ...then clear them again. */
+		cfg = readl(ctrl->regs + S3C_CIWDOFST);
+		cfg &= ~(S3C_CIWDOFST_CLROVFIY | S3C_CIWDOFST_CLROVFICB |
+			S3C_CIWDOFST_CLROVFICR);
+		writel(cfg, ctrl->regs + S3C_CIWDOFST);
+
+		/* Log message spelling fixed ("is occured" -> "occurred"). */
+		printk(KERN_INFO "FIMC%d overflow occurred, status 0x%x\n",
+				ctrl->id, status);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_camera_offset - program the camera window offsets.
+ *
+ * Converts the crop window stored in @ctrl->cam into the left/top
+ * (CIWDOFST) and right/bottom (CIWDOFST2) offsets and enables window
+ * offsetting.
+ *
+ * Returns 0 on success, -ENODEV when no camera is active.
+ */
+int fimc_hwset_camera_offset(struct fimc_control *ctrl)
+{
+	struct s3c_platform_camera *cam = ctrl->cam;
+	struct v4l2_rect *rect;
+	u32 cfg, h1, h2, v1, v2;
+
+	if (!cam) {
+		fimc_err("%s: no active camera\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Derive the window only after the NULL check; it was previously
+	 * computed from `cam` before the check, defeating it. */
+	rect = &cam->window;
+
+	h1 = rect->left;
+	h2 = cam->width - rect->width - rect->left;
+	v1 = rect->top;
+	v2 = cam->height - rect->height - rect->top;
+
+	cfg = readl(ctrl->regs + S3C_CIWDOFST);
+	cfg &= ~(S3C_CIWDOFST_WINHOROFST_MASK | S3C_CIWDOFST_WINVEROFST_MASK);
+	cfg |= S3C_CIWDOFST_WINHOROFST(h1);
+	cfg |= S3C_CIWDOFST_WINVEROFST(v1);
+	cfg |= S3C_CIWDOFST_WINOFSEN;
+	writel(cfg, ctrl->regs + S3C_CIWDOFST);
+
+	cfg = 0;
+	cfg |= S3C_CIWDOFST2_WINHOROFST2(h2);
+	cfg |= S3C_CIWDOFST2_WINVEROFST2(v2);
+	writel(cfg, ctrl->regs + S3C_CIWDOFST2);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_camera_polarity - program signal-inversion bits in CIGCTRL.
+ *
+ * Clears all four polarity bits, then sets PCLK/VSYNC/HREF/HSYNC
+ * inversion according to the camera platform data.
+ * Returns -ENODEV when no camera is active.
+ */
+int fimc_hwset_camera_polarity(struct fimc_control *ctrl)
+{
+	struct s3c_platform_camera *cam = ctrl->cam;
+	u32 cfg;
+
+	if (!cam) {
+		fimc_err("%s: no active camera\n", __func__);
+		return -ENODEV;
+	}
+
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+
+	cfg &= ~(S3C_CIGCTRL_INVPOLPCLK | S3C_CIGCTRL_INVPOLVSYNC |
+		 S3C_CIGCTRL_INVPOLHREF | S3C_CIGCTRL_INVPOLHSYNC);
+
+	if (cam->inv_pclk)
+		cfg |= S3C_CIGCTRL_INVPOLPCLK;
+
+	if (cam->inv_vsync)
+		cfg |= S3C_CIGCTRL_INVPOLVSYNC;
+
+	if (cam->inv_href)
+		cfg |= S3C_CIGCTRL_INVPOLHREF;
+
+	if (cam->inv_hsync)
+		cfg |= S3C_CIGCTRL_INVPOLHSYNC;
+
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc40_hwset_camera_type - select the camera interface on FIMC 4.0.
+ *
+ * MIPI cameras route through the CSIS (the bus format is written to
+ * CSIIMGFMT); ITU cameras select port A or B and switch the input mux to
+ * the ITU interface.
+ * Returns -EINVAL for any other bus type, -ENODEV without a camera.
+ */
+int fimc40_hwset_camera_type(struct fimc_control *ctrl)
+{
+	struct s3c_platform_camera *cam = ctrl->cam;
+	u32 cfg;
+
+	if (!cam) {
+		fimc_err("%s: no active camera\n", __func__);
+		return -ENODEV;
+	}
+
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg &= ~(S3C_CIGCTRL_TESTPATTERN_MASK | S3C_CIGCTRL_SELCAM_ITU_MASK |
+		S3C_CIGCTRL_SELCAM_FIMC_MASK);
+
+	/* Interface selection */
+	if (cam->type == CAM_TYPE_MIPI) {
+		cfg |= S3C_CIGCTRL_SELCAM_FIMC_MIPI;
+		writel(cam->fmt, ctrl->regs + S3C_CSIIMGFMT);
+	} else if (cam->type == CAM_TYPE_ITU) {
+		if (cam->id == CAMERA_PAR_A)
+			cfg |= S3C_CIGCTRL_SELCAM_ITU_A;
+		else
+			cfg |= S3C_CIGCTRL_SELCAM_ITU_B;
+		/* switch to ITU interface */
+		cfg |= S3C_CIGCTRL_SELCAM_FIMC_ITU;
+	} else {
+		fimc_err("%s: invalid camera bus type selected\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc43_hwset_camera_type - select the camera bus (FIMC v4.3/4.5 layout).
+ * Adds writeback (LCD FIFO) selection on top of the 4.0 MIPI/ITU choices.
+ * Only MIPI channel A is supported on this revision. JPEG/interleaved
+ * capture uses a user-defined MIPI packet format.
+ * Returns 0, -ENODEV without a camera, or -EINVAL for an unknown bus type.
+ */
+int fimc43_hwset_camera_type(struct fimc_control *ctrl)
+{
+	struct s3c_platform_camera *cam = ctrl->cam;
+	u32 cfg;
+
+	if (!cam) {
+		fimc_err("%s: no active camera\n", __func__);
+		return -ENODEV;
+	}
+
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg &= ~(S3C_CIGCTRL_TESTPATTERN_MASK | S3C_CIGCTRL_SELCAM_ITU_MASK |
+		S3C_CIGCTRL_SELCAM_MIPI_MASK | S3C_CIGCTRL_SELCAM_FIMC_MASK |
+		S3C_CIGCTRL_SELWB_CAMIF_MASK);
+
+	/* Interface selection */
+	if (cam->id == CAMERA_WB) {
+		cfg |= S3C_CIGCTRL_SELWB_CAMIF_WRITEBACK;
+	} else if (cam->type == CAM_TYPE_MIPI) {
+		cfg |= S3C_CIGCTRL_SELCAM_FIMC_MIPI;
+
+		/* C110/V210 Support only MIPI A support */
+		cfg |= S3C_CIGCTRL_SELCAM_MIPI_A;
+
+		/* FIXME: Temporary MIPI CSIS Data 32 bit aligned */
+		if (ctrl->cap->fmt.pixelformat == V4L2_PIX_FMT_JPEG ||
+			ctrl->cap->fmt.pixelformat == V4L2_PIX_FMT_INTERLEAVED)
+			writel((MIPI_USER_DEF_PACKET_1 | (0x1 << 8)),
+					ctrl->regs + S3C_CSIIMGFMT);
+		else
+			writel(cam->fmt | (0x1 << 8), ctrl->regs + S3C_CSIIMGFMT);
+	} else if (cam->type == CAM_TYPE_ITU) {
+		if (cam->id == CAMERA_PAR_A)
+			cfg |= S3C_CIGCTRL_SELCAM_ITU_A;
+		else
+			cfg |= S3C_CIGCTRL_SELCAM_ITU_B;
+		/* switch to ITU interface */
+		cfg |= S3C_CIGCTRL_SELCAM_FIMC_ITU;
+	} else {
+		fimc_err("%s: invalid camera bus type selected\n", __func__);
+		return -EINVAL;
+	}
+
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc51_hwset_camera_type - select the camera bus (FIMC v5.1 layout).
+ * Extends the 4.3 layout with a second writeback channel (A/B) and
+ * selectable MIPI channel A/B. ISP-attached cameras use writeback B.
+ * Returns 0, -ENODEV without a camera, or -EINVAL for an unknown bus type.
+ */
+int fimc51_hwset_camera_type(struct fimc_control *ctrl)
+{
+	struct s3c_platform_camera *cam = ctrl->cam;
+	u32 cfg;
+
+	if (!cam) {
+		fimc_err("%s: no active camera\n", __func__);
+		return -ENODEV;
+	}
+
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg &= ~(S3C_CIGCTRL_TESTPATTERN_MASK | S3C_CIGCTRL_SELCAM_ITU_MASK |
+		S3C_CIGCTRL_SELCAM_MIPI_MASK | S3C_CIGCTRL_SELCAM_FIMC_MASK |
+		S3C_CIGCTRL_SELWB_CAMIF_MASK | S3C_CIGCTRL_SELWRITEBACK_MASK);
+
+	/* Interface selection */
+	if (cam->id == CAMERA_WB) {
+		cfg |= S3C_CIGCTRL_SELWB_CAMIF_WRITEBACK;
+		cfg |= S3C_CIGCTRL_SELWRITEBACK_A;
+	} else if (cam->id == CAMERA_WB_B || cam->use_isp) {
+		cfg |= S3C_CIGCTRL_SELWB_CAMIF_WRITEBACK;
+		cfg |= S3C_CIGCTRL_SELWRITEBACK_B;
+	} else if (cam->type == CAM_TYPE_MIPI) {
+		cfg |= S3C_CIGCTRL_SELCAM_FIMC_MIPI;
+
+		/* V310 Support MIPI A/B support */
+		if (cam->id == CAMERA_CSI_C)
+			cfg |= S3C_CIGCTRL_SELCAM_MIPI_A;
+		else
+			cfg |= S3C_CIGCTRL_SELCAM_MIPI_B;
+
+		/* FIXME: Temporary MIPI CSIS Data 32 bit aligned */
+		if (ctrl->cap->fmt.pixelformat == V4L2_PIX_FMT_JPEG ||
+			ctrl->cap->fmt.pixelformat == V4L2_PIX_FMT_INTERLEAVED)
+			writel((MIPI_USER_DEF_PACKET_1 | (0x1 << 8)),
+					ctrl->regs + S3C_CSIIMGFMT);
+		else
+			writel(cam->fmt | (0x1 << 8), ctrl->regs + S3C_CSIIMGFMT);
+	} else if (cam->type == CAM_TYPE_ITU) {
+		if (cam->id == CAMERA_PAR_A)
+			cfg |= S3C_CIGCTRL_SELCAM_ITU_A;
+		else
+			cfg |= S3C_CIGCTRL_SELCAM_ITU_B;
+		/* switch to ITU interface */
+		cfg |= S3C_CIGCTRL_SELCAM_FIMC_ITU;
+	} else {
+		fimc_err("%s: invalid camera bus type selected\n", __func__);
+		return -EINVAL;
+	}
+
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+/*
+ * fimc_hwset_camera_type - select the camera interface for this FIMC.
+ *
+ * Dispatches to the revision-specific helper chosen by the platform
+ * hardware version; unknown revisions fall back to the 4.3 layout
+ * (unchanged from the original dispatch table).
+ *
+ * Fix: the helpers report real failures (-ENODEV when no camera is
+ * attached, -EINVAL for an unknown bus type) which the original
+ * dispatcher discarded by always returning 0. Their result is now
+ * propagated so callers can see configuration failures.
+ */
+int fimc_hwset_camera_type(struct fimc_control *ctrl)
+{
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+	switch (pdata->hw_ver) {
+	case 0x40:
+		return fimc40_hwset_camera_type(ctrl);
+	case 0x51:
+		return fimc51_hwset_camera_type(ctrl);
+	case 0x43:
+	case 0x45:
+	default:
+		return fimc43_hwset_camera_type(ctrl);
+	}
+}
+
+
+/*
+ * fimc_hwset_jpeg_mode - toggle the CAM_JPEG bit in CIGCTRL.
+ * Always returns 0.
+ */
+int fimc_hwset_jpeg_mode(struct fimc_control *ctrl, bool enable)
+{
+	u32 cfg;
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+
+	if (enable)
+		cfg |= S3C_CIGCTRL_CAM_JPEG;
+	else
+		cfg &= ~S3C_CIGCTRL_CAM_JPEG;
+
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_output_size - program the target (output) width/height
+ * fields of CITRGFMT, preserving the other bits. Always returns 0.
+ */
+int fimc_hwset_output_size(struct fimc_control *ctrl, int width, int height)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CITRGFMT);
+
+	cfg &= ~(S3C_CITRGFMT_TARGETH_MASK | S3C_CITRGFMT_TARGETV_MASK);
+
+	cfg |= S3C_CITRGFMT_TARGETHSIZE(width);
+	cfg |= S3C_CITRGFMT_TARGETVSIZE(height);
+
+	writel(cfg, ctrl->regs + S3C_CITRGFMT);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_output_colorspace - select the output color format.
+ *
+ * On hardware newer than v4.0 the YUV444 output is controlled by a
+ * dedicated CIEXTEN bit (set and return early for YUV444, cleared
+ * otherwise). All other formats map onto the CITRGFMT OUTFORMAT field:
+ * RGB, 1-plane YCbCr422, multi-plane YCbCr422 or YCbCr420.
+ * JPEG/interleaved leaves the field cleared.
+ *
+ * NOTE(review): an unrecognized pixelformat only logs an error and still
+ * writes the cleared field back; it does not return an error code.
+ * Always returns 0.
+ */
+int fimc_hwset_output_colorspace(struct fimc_control *ctrl, u32 pixelformat)
+{
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	u32 cfg;
+
+	if (pdata->hw_ver != 0x40) {
+		if (pixelformat == V4L2_PIX_FMT_YUV444) {
+			cfg = readl(ctrl->regs + S3C_CIEXTEN);
+			cfg |= S3C_CIEXTEN_YUV444_OUT;
+			writel(cfg, ctrl->regs + S3C_CIEXTEN);
+
+			return 0;
+		} else {
+			cfg = readl(ctrl->regs + S3C_CIEXTEN);
+			cfg &= ~S3C_CIEXTEN_YUV444_OUT;
+			writel(cfg, ctrl->regs + S3C_CIEXTEN);
+		}
+	}
+
+	cfg = readl(ctrl->regs + S3C_CITRGFMT);
+	cfg &= ~S3C_CITRGFMT_OUTFORMAT_MASK;
+
+	switch (pixelformat) {
+	case V4L2_PIX_FMT_JPEG:
+	case V4L2_PIX_FMT_INTERLEAVED:
+		break;
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+	case V4L2_PIX_FMT_RGB32:
+		cfg |= S3C_CITRGFMT_OUTFORMAT_RGB;
+		break;
+
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_VYUY:		/* fall through */
+	case V4L2_PIX_FMT_YVYU:
+		cfg |= S3C_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+		break;
+
+	case V4L2_PIX_FMT_NV16:		/* fall through */
+	case V4L2_PIX_FMT_NV61:		/* fall through */
+	case V4L2_PIX_FMT_YUV422P:
+		cfg |= S3C_CITRGFMT_OUTFORMAT_YCBCR422;
+		break;
+
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_YVU420:	/* fall through */
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV12M:	/* fall through */
+	case V4L2_PIX_FMT_NV12T:	/* fall through */
+	case V4L2_PIX_FMT_NV21:
+		cfg |= S3C_CITRGFMT_OUTFORMAT_YCBCR420;
+		break;
+
+	default:
+		fimc_err("%s: invalid pixel format : %d\n",
+				__func__, pixelformat);
+		break;
+	}
+
+	writel(cfg, ctrl->regs + S3C_CITRGFMT);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_output_rot_flip - program output rotation/mirroring.
+ * Maps the (rot, flip) pair through fimc_mapping_rot_flip() and sets the
+ * 90-degree rotate and X/Y mirror bits in CITRGFMT. Always returns 0.
+ */
+int fimc_hwset_output_rot_flip(struct fimc_control *ctrl, u32 rot, u32 flip)
+{
+	u32 cfg, val;
+
+	cfg = readl(ctrl->regs + S3C_CITRGFMT);
+	cfg &= ~S3C_CITRGFMT_FLIP_MASK;
+	cfg &= ~S3C_CITRGFMT_OUTROT90_CLOCKWISE;
+
+	val = fimc_mapping_rot_flip(rot, flip);
+
+	if (val & FIMC_ROT)
+		cfg |= S3C_CITRGFMT_OUTROT90_CLOCKWISE;
+
+	if (val & FIMC_XFLIP)
+		cfg |= S3C_CITRGFMT_FLIP_X_MIRROR;
+
+	if (val & FIMC_YFLIP)
+		cfg |= S3C_CITRGFMT_FLIP_Y_MIRROR;
+
+	writel(cfg, ctrl->regs + S3C_CITRGFMT);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_output_area - write the target area (width * height, in
+ * pixels) to CITAREA. Always returns 0.
+ */
+int fimc_hwset_output_area(struct fimc_control *ctrl, u32 width, u32 height)
+{
+	u32 cfg = 0;
+
+	cfg = S3C_CITAREA_TARGET_AREA(width * height);
+	writel(cfg, ctrl->regs + S3C_CITAREA);
+
+	return 0;
+}
+
+/* Enable the last-frame IRQ bit in CIOCTRL. Always returns 0. */
+int fimc_hwset_enable_lastirq(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIOCTRL);
+
+	cfg |= S3C_CIOCTRL_LASTIRQ_ENABLE;
+	writel(cfg, ctrl->regs + S3C_CIOCTRL);
+
+	return 0;
+}
+
+/* Disable the last-frame IRQ bit in CIOCTRL. Always returns 0. */
+int fimc_hwset_disable_lastirq(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIOCTRL);
+
+	cfg &= ~S3C_CIOCTRL_LASTIRQ_ENABLE;
+	writel(cfg, ctrl->regs + S3C_CIOCTRL);
+
+	return 0;
+}
+
+/* Enable the last-end bit in CIOCTRL. Always returns 0. */
+int fimc_hwset_enable_lastend(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIOCTRL);
+
+	cfg |= S3C_CIOCTRL_LASTENDEN;
+	writel(cfg, ctrl->regs + S3C_CIOCTRL);
+
+	return 0;
+}
+
+/* Disable the last-end bit in CIOCTRL. Always returns 0. */
+int fimc_hwset_disable_lastend(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIOCTRL);
+
+	cfg &= ~S3C_CIOCTRL_LASTENDEN;
+	writel(cfg, ctrl->regs + S3C_CIOCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_prescaler - program the pre-scaler ratio and destination size.
+ * The shift factor is 10 minus the sum of the horizontal and vertical
+ * pre-scale factors, per the CISCPRERATIO register layout.
+ * Always returns 0.
+ */
+int fimc_hwset_prescaler(struct fimc_control *ctrl, struct fimc_scaler *sc)
+{
+	u32 cfg = 0, shfactor;
+
+	shfactor = 10 - (sc->hfactor + sc->vfactor);
+
+	cfg |= S3C_CISCPRERATIO_SHFACTOR(shfactor);
+	cfg |= S3C_CISCPRERATIO_PREHORRATIO(sc->pre_hratio);
+	cfg |= S3C_CISCPRERATIO_PREVERRATIO(sc->pre_vratio);
+
+	writel(cfg, ctrl->regs + S3C_CISCPRERATIO);
+
+	cfg = 0;
+	cfg |= S3C_CISCPREDST_PREDSTWIDTH(sc->pre_dst_width);
+	cfg |= S3C_CISCPREDST_PREDSTHEIGHT(sc->pre_dst_height);
+
+	writel(cfg, ctrl->regs + S3C_CISCPREDST);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_output_address - program the output DMA base addresses for
+ * buffer slot @id. For YVU420 capture the chroma planes are swapped
+ * (Cr plane written to the Cb address register and vice versa) so the
+ * hardware produces the V-before-U plane order. Always returns 0.
+ */
+int fimc_hwset_output_address(struct fimc_control *ctrl,
+				struct fimc_buf_set *bs, int id)
+{
+	writel(bs->base[FIMC_ADDR_Y], ctrl->regs + S3C_CIOYSA(id));
+
+	if (ctrl->cap && ctrl->cap->fmt.pixelformat == V4L2_PIX_FMT_YVU420) {
+		writel(bs->base[FIMC_ADDR_CR], ctrl->regs + S3C_CIOCBSA(id));
+		writel(bs->base[FIMC_ADDR_CB], ctrl->regs + S3C_CIOCRSA(id));
+	} else {
+		writel(bs->base[FIMC_ADDR_CB], ctrl->regs + S3C_CIOCBSA(id));
+		writel(bs->base[FIMC_ADDR_CR], ctrl->regs + S3C_CIOCRSA(id));
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_output_yuv - program output component order and plane count.
+ * Sets the CIOCTRL 422-order, 2-plane order and plane-count fields for
+ * the given pixel format; RGB formats instead set the alpha field.
+ * NOTE(review): there is no default case, so an unrecognized format
+ * silently writes back the cleared fields. Always returns 0.
+ */
+int fimc_hwset_output_yuv(struct fimc_control *ctrl, u32 pixelformat)
+{
+	u32 cfg;
+
+	cfg = readl(ctrl->regs + S3C_CIOCTRL);
+	cfg &= ~(S3C_CIOCTRL_ORDER2P_MASK | S3C_CIOCTRL_ORDER422_MASK |
+		S3C_CIOCTRL_YCBCR_PLANE_MASK);
+
+	switch (pixelformat) {
+	/* 1 plane formats */
+	case V4L2_PIX_FMT_YUYV:
+		cfg |= S3C_CIOCTRL_ORDER422_YCBYCR;
+		break;
+
+	case V4L2_PIX_FMT_UYVY:
+		cfg |= S3C_CIOCTRL_ORDER422_CBYCRY;
+		break;
+
+	case V4L2_PIX_FMT_VYUY:
+		cfg |= S3C_CIOCTRL_ORDER422_CRYCBY;
+		break;
+
+	case V4L2_PIX_FMT_YVYU:
+		cfg |= S3C_CIOCTRL_ORDER422_YCRYCB;
+		break;
+
+	/* 2 plane formats */
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV12M:	/* fall through */
+	case V4L2_PIX_FMT_NV12T:	/* fall through */
+	case V4L2_PIX_FMT_NV16:
+		cfg |= S3C_CIOCTRL_ORDER2P_LSB_CBCR;
+		cfg |= S3C_CIOCTRL_YCBCR_2PLANE;
+		break;
+
+	case V4L2_PIX_FMT_NV21:		/* fall through */
+	case V4L2_PIX_FMT_NV61:
+		cfg |= S3C_CIOCTRL_ORDER2P_LSB_CRCB;
+		cfg |= S3C_CIOCTRL_YCBCR_2PLANE;
+		break;
+
+	/* 3 plane formats */
+	case V4L2_PIX_FMT_YUV422P:	/* fall through */
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+		cfg |= S3C_CIOCTRL_YCBCR_3PLANE;
+		break;
+
+	/* Set alpha value to 0xff */
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+	case V4L2_PIX_FMT_RGB32:
+		cfg |= (0xff << 4);
+		break;
+	}
+
+	writel(cfg, ctrl->regs + S3C_CIOCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_output_scan - select interlaced vs progressive output.
+ * FIMC v4.0 supports neither interlaced nor weave output, so it is a
+ * no-op there. Weave output is only enabled for capture with an
+ * INTERLACED_TB field order. Always returns 0.
+ */
+int fimc_hwset_output_scan(struct fimc_control *ctrl,
+			   struct v4l2_pix_format *fmt)
+{
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	u32 cfg;
+
+	/* nothing to do: FIMC40 not supported interlaced and weave output */
+	if (pdata->hw_ver == 0x40)
+		return 0;
+
+	cfg = readl(ctrl->regs + S3C_CISCCTRL);
+	cfg &= ~S3C_CISCCTRL_SCAN_MASK;
+
+	if (fmt->field == V4L2_FIELD_INTERLACED ||
+		fmt->field == V4L2_FIELD_INTERLACED_TB)
+		cfg |= S3C_CISCCTRL_INTERLACE;
+	else
+		cfg |= S3C_CISCCTRL_PROGRESSIVE;
+
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	cfg = readl(ctrl->regs + S3C_CIOCTRL);
+	cfg &= ~S3C_CIOCTRL_WEAVE_MASK;
+
+	if ((ctrl->cap) && (fmt->field == V4L2_FIELD_INTERLACED_TB))
+		cfg |= S3C_CIOCTRL_WEAVE_OUT;
+
+	writel(cfg, ctrl->regs + S3C_CIOCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_input_rot - program input-path 90-degree rotation.
+ * Only the rotate bit is handled here; input mirroring is done by
+ * fimc_hwset_input_flip(). Always returns 0.
+ */
+int fimc_hwset_input_rot(struct fimc_control *ctrl, u32 rot, u32 flip)
+{
+	u32 cfg, val;
+
+	cfg = readl(ctrl->regs + S3C_CITRGFMT);
+	cfg &= ~S3C_CITRGFMT_INROT90_CLOCKWISE;
+
+	val = fimc_mapping_rot_flip(rot, flip);
+
+	if (val & FIMC_ROT)
+		cfg |= S3C_CITRGFMT_INROT90_CLOCKWISE;
+
+	writel(cfg, ctrl->regs + S3C_CITRGFMT);
+
+	return 0;
+}
+
+/*
+ * fimc40_hwset_scaler - program the main scaler (FIMC v4.0 layout).
+ * Sets bypass, scale-up direction flags, the main H/V ratios and the
+ * wide-range CSC selection in CISCCTRL. Always returns 0.
+ */
+int fimc40_hwset_scaler(struct fimc_control *ctrl, struct fimc_scaler *sc)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+
+	cfg &= ~(S3C_CISCCTRL_SCALERBYPASS |
+		S3C_CISCCTRL_SCALEUP_H | S3C_CISCCTRL_SCALEUP_V |
+		S3C_CISCCTRL_MAIN_V_RATIO_MASK |
+		S3C_CISCCTRL_MAIN_H_RATIO_MASK |
+		S3C_CISCCTRL_CSCR2Y_WIDE |
+		S3C_CISCCTRL_CSCY2R_WIDE);
+
+	if (ctrl->range == FIMC_RANGE_WIDE)
+		cfg |= (S3C_CISCCTRL_CSCR2Y_WIDE | S3C_CISCCTRL_CSCY2R_WIDE);
+
+	if (sc->bypass)
+		cfg |= S3C_CISCCTRL_SCALERBYPASS;
+
+	if (sc->scaleup_h)
+		cfg |= S3C_CISCCTRL_SCALEUP_H;
+
+	if (sc->scaleup_v)
+		cfg |= S3C_CISCCTRL_SCALEUP_V;
+
+	cfg |= S3C_CISCCTRL_MAINHORRATIO(sc->main_hratio);
+	cfg |= S3C_CISCCTRL_MAINVERRATIO(sc->main_vratio);
+
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc43_hwset_scaler - program the main scaler (FIMC v4.3/4.5 layout).
+ * Same as the 4.0 path, plus the extended ratio fields in CIEXTEN.
+ * Always returns 0.
+ */
+int fimc43_hwset_scaler(struct fimc_control *ctrl, struct fimc_scaler *sc)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+	u32 cfg_ext = readl(ctrl->regs + S3C_CIEXTEN);
+
+	cfg &= ~(S3C_CISCCTRL_SCALERBYPASS |
+		S3C_CISCCTRL_SCALEUP_H | S3C_CISCCTRL_SCALEUP_V |
+		S3C_CISCCTRL_MAIN_V_RATIO_MASK |
+		S3C_CISCCTRL_MAIN_H_RATIO_MASK |
+		S3C_CISCCTRL_CSCR2Y_WIDE |
+		S3C_CISCCTRL_CSCY2R_WIDE);
+
+	if (ctrl->range == FIMC_RANGE_WIDE)
+		cfg |= (S3C_CISCCTRL_CSCR2Y_WIDE | S3C_CISCCTRL_CSCY2R_WIDE);
+
+	if (sc->bypass)
+		cfg |= S3C_CISCCTRL_SCALERBYPASS;
+
+	if (sc->scaleup_h)
+		cfg |= S3C_CISCCTRL_SCALEUP_H;
+
+	if (sc->scaleup_v)
+		cfg |= S3C_CISCCTRL_SCALEUP_V;
+
+	cfg |= S3C_CISCCTRL_MAINHORRATIO(sc->main_hratio);
+	cfg |= S3C_CISCCTRL_MAINVERRATIO(sc->main_vratio);
+
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	cfg_ext &= ~S3C_CIEXTEN_MAINHORRATIO_EXT_MASK;
+	cfg_ext &= ~S3C_CIEXTEN_MAINVERRATIO_EXT_MASK;
+
+	cfg_ext |= S3C_CIEXTEN_MAINHORRATIO_EXT(sc->main_hratio);
+	cfg_ext |= S3C_CIEXTEN_MAINVERRATIO_EXT(sc->main_vratio);
+
+	writel(cfg_ext, ctrl->regs + S3C_CIEXTEN);
+
+	return 0;
+}
+
+/*
+ * fimc50_hwset_scaler - program the main scaler (FIMC v5.0/5.1 layout).
+ * Like the 4.3 path, but the main CISCCTRL ratio fields take the ratio
+ * shifted right by 6 bits; the low bits go into the CIEXTEN extension
+ * fields. Always returns 0.
+ */
+int fimc50_hwset_scaler(struct fimc_control *ctrl, struct fimc_scaler *sc)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+	u32 cfg_ext = readl(ctrl->regs + S3C_CIEXTEN);
+
+	cfg &= ~(S3C_CISCCTRL_SCALERBYPASS |
+		S3C_CISCCTRL_SCALEUP_H | S3C_CISCCTRL_SCALEUP_V |
+		S3C_CISCCTRL_MAIN_V_RATIO_MASK |
+		S3C_CISCCTRL_MAIN_H_RATIO_MASK |
+		S3C_CISCCTRL_CSCR2Y_WIDE |
+		S3C_CISCCTRL_CSCY2R_WIDE);
+
+	if (ctrl->range == FIMC_RANGE_WIDE)
+		cfg |= (S3C_CISCCTRL_CSCR2Y_WIDE | S3C_CISCCTRL_CSCY2R_WIDE);
+
+	if (sc->bypass)
+		cfg |= S3C_CISCCTRL_SCALERBYPASS;
+
+	if (sc->scaleup_h)
+		cfg |= S3C_CISCCTRL_SCALEUP_H;
+
+	if (sc->scaleup_v)
+		cfg |= S3C_CISCCTRL_SCALEUP_V;
+
+	cfg |= S3C_CISCCTRL_MAINHORRATIO((sc->main_hratio >> 6));
+	cfg |= S3C_CISCCTRL_MAINVERRATIO((sc->main_vratio >> 6));
+
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	cfg_ext &= ~S3C_CIEXTEN_MAINHORRATIO_EXT_MASK;
+	cfg_ext &= ~S3C_CIEXTEN_MAINVERRATIO_EXT_MASK;
+
+	cfg_ext |= S3C_CIEXTEN_MAINHORRATIO_EXT(sc->main_hratio);
+	cfg_ext |= S3C_CIEXTEN_MAINVERRATIO_EXT(sc->main_vratio);
+
+	writel(cfg_ext, ctrl->regs + S3C_CIEXTEN);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_scaler - route main-scaler programming to the helper that
+ * matches this FIMC hardware revision. Revisions 0x43/0x45 and any
+ * unknown revision use the 4.3 layout. Always returns 0.
+ */
+int fimc_hwset_scaler(struct fimc_control *ctrl, struct fimc_scaler *sc)
+{
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	int hw_ver = pdata->hw_ver;
+
+	if (hw_ver == 0x40)
+		fimc40_hwset_scaler(ctrl, sc);
+	else if (hw_ver == 0x50 || hw_ver == 0x51)
+		fimc50_hwset_scaler(ctrl, sc);
+	else
+		/* 0x43, 0x45 and anything unrecognized use the 4.3 layout */
+		fimc43_hwset_scaler(ctrl, sc);
+
+	return 0;
+}
+
+
+/* Set the scaler-bypass bit in CISCCTRL. Always returns 0. */
+int fimc_hwset_scaler_bypass(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+
+	cfg |= S3C_CISCCTRL_SCALERBYPASS;
+
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	return 0;
+}
+
+/* Enable the LCD FIFO output path in CISCCTRL. Always returns 0. */
+int fimc_hwset_enable_lcdfifo(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+
+	cfg |= S3C_CISCCTRL_LCDPATHEN_FIFO;
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	return 0;
+}
+
+/* Disable the LCD FIFO output path in CISCCTRL. Always returns 0. */
+int fimc_hwset_disable_lcdfifo(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+
+	cfg &= ~S3C_CISCCTRL_LCDPATHEN_FIFO;
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	return 0;
+}
+
+/* Return the hardware frame counter extracted from CISTATUS. */
+int fimc_hwget_frame_count(struct fimc_control *ctrl)
+{
+	return S3C_CISTATUS_GET_FRAME_COUNT(readl(ctrl->regs + S3C_CISTATUS));
+}
+
+/*
+ * fimc_hwget_frame_end - wait (up to ~100 ms / 20 jiffies at HZ=200) for
+ * the frame-end status bit. The bit is first cleared, then polled; the
+ * code clears it by writing the register with the bit masked out, which
+ * presumably is the write-to-clear convention of this status register —
+ * TODO confirm against the FIMC datasheet.
+ * Always returns 0, even on timeout.
+ */
+int fimc_hwget_frame_end(struct fimc_control *ctrl)
+{
+	unsigned long timeo = jiffies;
+	u32 cfg;
+
+	timeo += 20;	/* waiting for 100ms */
+
+	cfg = readl(ctrl->regs + S3C_CISTATUS);
+	cfg &= ~S3C_CISTATUS_FRAMEEND;
+	writel(cfg, ctrl->regs + S3C_CISTATUS);
+	while (time_before(jiffies, timeo)) {
+		cfg = readl(ctrl->regs + S3C_CISTATUS);
+		if (S3C_CISTATUS_GET_FRAME_END(cfg)) {
+			cfg &= ~S3C_CISTATUS_FRAMEEND;
+			writel(cfg, ctrl->regs + S3C_CISTATUS);
+			break;
+		}
+		cond_resched();
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_hwget_last_frame_end - wait (up to ~100 ms) for the last-capture-end
+ * status bit, clearing it once seen. Always returns 0, even on timeout.
+ */
+int fimc_hwget_last_frame_end(struct fimc_control *ctrl)
+{
+	unsigned long timeo = jiffies;
+	u32 cfg;
+
+	timeo += 20;	/* waiting for 100ms */
+	while (time_before(jiffies, timeo)) {
+		cfg = readl(ctrl->regs + S3C_CISTATUS);
+
+		if (S3C_CISTATUS_GET_LAST_CAPTURE_END(cfg)) {
+			cfg &= ~S3C_CISTATUS_LASTCAPTUREEND;
+			writel(cfg, ctrl->regs + S3C_CISTATUS);
+			break;
+		}
+		cond_resched();
+	}
+
+	return 0;
+}
+
+/* Set the scaler-start bit in CISCCTRL. Always returns 0. */
+int fimc_hwset_start_scaler(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+
+	cfg |= S3C_CISCCTRL_SCALERSTART;
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	return 0;
+}
+
+/* Clear the scaler-start bit in CISCCTRL. Always returns 0. */
+int fimc_hwset_stop_scaler(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+
+	cfg &= ~S3C_CISCCTRL_SCALERSTART;
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_input_rgb - select the input RGB format (RGB888 for RGB32,
+ * RGB565 otherwise when requested). Other pixel formats leave the field
+ * cleared. Always returns 0.
+ */
+int fimc_hwset_input_rgb(struct fimc_control *ctrl, u32 pixelformat)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+	cfg &= ~S3C_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+	if (pixelformat == V4L2_PIX_FMT_RGB32)
+		cfg |= S3C_CISCCTRL_INRGB_FMT_RGB888;
+	else if (pixelformat == V4L2_PIX_FMT_RGB565)
+		cfg |= S3C_CISCCTRL_INRGB_FMT_RGB565;
+
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_intput_field - program the input field mode in MSCTRL.
+ * (The "intput" typo is in the exported name and is kept for ABI
+ * compatibility with existing callers.)
+ * No-op on FIMC v4.0. NONE selects normal mode, INTERLACED_TB selects
+ * weave mode; other field values leave the mask cleared.
+ * Always returns 0.
+ */
+int fimc_hwset_intput_field(struct fimc_control *ctrl, enum v4l2_field field)
+{
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	u32 cfg;
+
+	if (pdata->hw_ver == 0x40)
+		return 0;
+
+	cfg = readl(ctrl->regs + S3C_MSCTRL);
+	cfg &= ~S3C_MSCTRL_FIELD_MASK;
+
+	if (field == V4L2_FIELD_NONE)
+		cfg |= S3C_MSCTRL_FIELD_NORMAL;
+	else if (field == V4L2_FIELD_INTERLACED_TB)
+		cfg |= S3C_MSCTRL_FIELD_WEAVE;
+
+	writel(cfg, ctrl->regs + S3C_MSCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_output_rgb - select the output RGB format (RGB888 for RGB32,
+ * RGB565 otherwise when requested). Always returns 0.
+ */
+int fimc_hwset_output_rgb(struct fimc_control *ctrl, u32 pixelformat)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+	cfg &= ~S3C_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+	if (pixelformat == V4L2_PIX_FMT_RGB32)
+		cfg |= S3C_CISCCTRL_OUTRGB_FMT_RGB888;
+	else if (pixelformat == V4L2_PIX_FMT_RGB565)
+		cfg |= S3C_CISCCTRL_OUTRGB_FMT_RGB565;
+
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	return 0;
+}
+
+/* Toggle the RGB-extension bit in CISCCTRL. Always returns 0. */
+int fimc_hwset_ext_rgb(struct fimc_control *ctrl, int enable)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CISCCTRL);
+	cfg &= ~S3C_CISCCTRL_EXTRGB_EXTENSION;
+
+	if (enable)
+		cfg |= S3C_CISCCTRL_EXTRGB_EXTENSION;
+
+	writel(cfg, ctrl->regs + S3C_CISCCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_enable_capture - start image capture in CIIMGCPT.
+ * When @bypass is zero the scaler path is also enabled; a non-zero
+ * bypass captures without the scaler. Always returns 0.
+ */
+int fimc_hwset_enable_capture(struct fimc_control *ctrl, u32 bypass)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIIMGCPT);
+	cfg &= ~S3C_CIIMGCPT_IMGCPTEN_SC;
+	cfg |= S3C_CIIMGCPT_IMGCPTEN;
+
+	if (!bypass)
+		cfg |= S3C_CIIMGCPT_IMGCPTEN_SC;
+
+	writel(cfg, ctrl->regs + S3C_CIIMGCPT);
+
+	return 0;
+}
+
+/* Stop image capture: clear both capture-enable bits in CIIMGCPT. */
+int fimc_hwset_disable_capture(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIIMGCPT);
+
+	cfg &= ~(S3C_CIIMGCPT_IMGCPTEN_SC | S3C_CIIMGCPT_IMGCPTEN);
+
+	writel(cfg, ctrl->regs + S3C_CIIMGCPT);
+
+	return 0;
+}
+
+/*
+ * fimc_wait_disable_capture - poll (up to ~200 ms / 40 jiffies) until the
+ * capture-enable and scaler-start status bits in CISTATUS have all
+ * dropped, sleeping 5 ms between polls. Returns immediately when ctrl or
+ * its capture context is missing.
+ * NOTE(review): the logged value is (elapsed jiffies - 20) converted to
+ * ms, not the raw elapsed time — presumably intentional offset; confirm
+ * before relying on the printout.
+ */
+void fimc_wait_disable_capture(struct fimc_control *ctrl)
+{
+	unsigned long timeo = jiffies + 40;	/* timeout of 200 ms */
+	u32 cfg;
+	if (!ctrl || !ctrl->cap)
+		return;
+	while (time_before(jiffies, timeo)) {
+		cfg = readl(ctrl->regs + S3C_CISTATUS);
+
+		if (0 == (cfg & S3C_CISTATUS_IMGCPTEN) \
+			&& 0 == (cfg & S3C_CISTATUS_IMGCPTENSC) \
+			&& 0 == (cfg & S3C_CISTATUS_SCALERSTART))
+			break;
+		msleep(5);
+	}
+	fimc_info2("IMGCPTEN: Wait time = %d ms\n" \
+			, jiffies_to_msecs(jiffies - timeo + 20));
+	return;
+}
+
+/*
+ * fimc_hwset_input_address - program the input DMA base addresses
+ * (Y/Cb/Cr planes) into slot 0. Always returns 0.
+ */
+int fimc_hwset_input_address(struct fimc_control *ctrl, dma_addr_t *base)
+{
+	writel(base[FIMC_ADDR_Y], ctrl->regs + S3C_CIIYSA0);
+	writel(base[FIMC_ADDR_CB], ctrl->regs + S3C_CIICBSA0);
+	writel(base[FIMC_ADDR_CR], ctrl->regs + S3C_CIICRSA0);
+
+	return 0;
+}
+
+/* Enable input DMA auto-load in CIREAL_ISIZE. Always returns 0. */
+int fimc_hwset_enable_autoload(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIREAL_ISIZE);
+
+	cfg |= S3C_CIREAL_ISIZE_AUTOLOAD_ENABLE;
+
+	writel(cfg, ctrl->regs + S3C_CIREAL_ISIZE);
+
+	return 0;
+}
+
+/* Disable input DMA auto-load in CIREAL_ISIZE. Always returns 0. */
+int fimc_hwset_disable_autoload(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIREAL_ISIZE);
+
+	cfg &= ~S3C_CIREAL_ISIZE_AUTOLOAD_ENABLE;
+
+	writel(cfg, ctrl->regs + S3C_CIREAL_ISIZE);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_real_input_size - program the real input width/height
+ * fields of CIREAL_ISIZE, preserving the control bits. Always returns 0.
+ */
+int fimc_hwset_real_input_size(struct fimc_control *ctrl, u32 width, u32 height)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIREAL_ISIZE);
+	cfg &= ~(S3C_CIREAL_ISIZE_HEIGHT_MASK | S3C_CIREAL_ISIZE_WIDTH_MASK);
+
+	cfg |= S3C_CIREAL_ISIZE_WIDTH(width);
+	cfg |= S3C_CIREAL_ISIZE_HEIGHT(height);
+
+	writel(cfg, ctrl->regs + S3C_CIREAL_ISIZE);
+
+	return 0;
+}
+
+/* Allow DMA address changes: clear the ADDR_CH_DISABLE bit. Returns 0. */
+int fimc_hwset_addr_change_enable(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIREAL_ISIZE);
+
+	cfg &= ~S3C_CIREAL_ISIZE_ADDR_CH_DISABLE;
+
+	writel(cfg, ctrl->regs + S3C_CIREAL_ISIZE);
+
+	return 0;
+}
+
+/* Block DMA address changes: set the ADDR_CH_DISABLE bit. Returns 0. */
+int fimc_hwset_addr_change_disable(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIREAL_ISIZE);
+
+	cfg |= S3C_CIREAL_ISIZE_ADDR_CH_DISABLE;
+
+	writel(cfg, ctrl->regs + S3C_CIREAL_ISIZE);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_input_burst_cnt - program the input DMA successive/burst
+ * count in MSCTRL. Counts of 0 or greater than 4 are clamped to 4.
+ * Always returns 0.
+ */
+int fimc_hwset_input_burst_cnt(struct fimc_control *ctrl, u32 cnt)
+{
+	u32 cfg = readl(ctrl->regs + S3C_MSCTRL);
+	cfg &= ~S3C_MSCTRL_BURST_CNT_MASK;
+
+	if (cnt > 4)
+		cnt = 4;
+	else if (cnt == 0)
+		cnt = 4;
+
+	cfg |= S3C_MSCTRL_SUCCESSIVE_COUNT(cnt);
+	writel(cfg, ctrl->regs + S3C_MSCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_input_colorspace - select the input color format class in
+ * MSCTRL: YCbCr420, 1-plane YCbCr422, multi-plane YCbCr422 or RGB.
+ * Returns 0 on success, -EINVAL for an unsupported pixel format.
+ */
+int fimc_hwset_input_colorspace(struct fimc_control *ctrl, u32 pixelformat)
+{
+	u32 cfg = readl(ctrl->regs + S3C_MSCTRL);
+	cfg &= ~S3C_MSCTRL_INFORMAT_RGB;
+
+	/* Color format setting */
+	switch (pixelformat) {
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_YVU420:	/* fall through */
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV21:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:
+		cfg |= S3C_MSCTRL_INFORMAT_YCBCR420;
+		break;
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_YVYU:		/* fall through */
+	case V4L2_PIX_FMT_VYUY:
+		cfg |= S3C_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+		break;
+	case V4L2_PIX_FMT_NV16:		/* fall through */
+	case V4L2_PIX_FMT_NV61:
+		cfg |= S3C_MSCTRL_INFORMAT_YCBCR422;
+		break;
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+	case V4L2_PIX_FMT_RGB32:
+		cfg |= S3C_MSCTRL_INFORMAT_RGB;
+		break;
+	default:
+		fimc_err("%s: Invalid pixelformt : %d\n",
+				__func__, pixelformat);
+		return -EINVAL;
+	}
+
+	writel(cfg, ctrl->regs + S3C_MSCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_input_yuv - program input component order and chroma plane
+ * interleaving in MSCTRL for the given pixel format. RGB formats need no
+ * order bits; an unrecognized format only logs an error (the cleared
+ * fields are still written back and 0 is returned).
+ * Always returns 0.
+ */
+int fimc_hwset_input_yuv(struct fimc_control *ctrl, u32 pixelformat)
+{
+	u32 cfg = readl(ctrl->regs + S3C_MSCTRL);
+	cfg &= ~(S3C_MSCTRL_ORDER2P_SHIFT_MASK | S3C_MSCTRL_C_INT_IN_2PLANE |
+		S3C_MSCTRL_ORDER422_YCBYCR);
+
+	switch (pixelformat) {
+	case V4L2_PIX_FMT_YUV420:	/* fall through */
+	case V4L2_PIX_FMT_YVU420:
+		cfg |= S3C_MSCTRL_C_INT_IN_3PLANE;
+		break;
+	case V4L2_PIX_FMT_YUYV:
+		cfg |= S3C_MSCTRL_ORDER422_YCBYCR;
+		break;
+	case V4L2_PIX_FMT_UYVY:
+		cfg |= S3C_MSCTRL_ORDER422_CBYCRY;
+		break;
+	case V4L2_PIX_FMT_YVYU:
+		cfg |= S3C_MSCTRL_ORDER422_YCRYCB;
+		break;
+	case V4L2_PIX_FMT_VYUY:
+		cfg |= S3C_MSCTRL_ORDER422_CRYCBY;
+		break;
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:
+	case V4L2_PIX_FMT_NV16:
+		cfg |= S3C_MSCTRL_ORDER2P_LSB_CBCR;
+		cfg |= S3C_MSCTRL_C_INT_IN_2PLANE;
+		break;
+	case V4L2_PIX_FMT_NV21:
+	case V4L2_PIX_FMT_NV61:
+		cfg |= S3C_MSCTRL_ORDER2P_LSB_CRCB;
+		cfg |= S3C_MSCTRL_C_INT_IN_2PLANE;
+		break;
+	case V4L2_PIX_FMT_RGB565:	/* fall through */
+	case V4L2_PIX_FMT_RGB32:
+		break;
+	default:
+		fimc_err("%s: Invalid pixelformt : %d\n",
+				__func__, pixelformat);
+	}
+
+	writel(cfg, ctrl->regs + S3C_MSCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_input_flip - program input-path X/Y mirroring in MSCTRL.
+ * The rotate component of the mapping is handled separately by
+ * fimc_hwset_input_rot(). Always returns 0.
+ */
+int fimc_hwset_input_flip(struct fimc_control *ctrl, u32 rot, u32 flip)
+{
+	u32 cfg, val;
+
+	cfg = readl(ctrl->regs + S3C_MSCTRL);
+	cfg &= ~(S3C_MSCTRL_FLIP_X_MIRROR | S3C_MSCTRL_FLIP_Y_MIRROR);
+	val = fimc_mapping_rot_flip(rot, flip);
+
+	if (val & FIMC_XFLIP)
+		cfg |= S3C_MSCTRL_FLIP_X_MIRROR;
+
+	if (val & FIMC_YFLIP)
+		cfg |= S3C_MSCTRL_FLIP_Y_MIRROR;
+
+	writel(cfg, ctrl->regs + S3C_MSCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_input_source - select the input path in MSCTRL:
+ * memory (MSDMA) or external camera. Other paths leave the field
+ * cleared. Always returns 0.
+ */
+int fimc_hwset_input_source(struct fimc_control *ctrl, enum fimc_input path)
+{
+	u32 cfg = readl(ctrl->regs + S3C_MSCTRL);
+	cfg &= ~S3C_MSCTRL_INPUT_MASK;
+
+	if (path == FIMC_SRC_MSDMA)
+		cfg |= S3C_MSCTRL_INPUT_MEMORY;
+	else if (path == FIMC_SRC_CAM)
+		cfg |= S3C_MSCTRL_INPUT_EXTCAM;
+
+	writel(cfg, ctrl->regs + S3C_MSCTRL);
+
+	return 0;
+
+}
+
+/* Start input DMA: set the ENVID bit in MSCTRL. Always returns 0. */
+int fimc_hwset_start_input_dma(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_MSCTRL);
+	cfg |= S3C_MSCTRL_ENVID;
+
+	writel(cfg, ctrl->regs + S3C_MSCTRL);
+
+	return 0;
+}
+
+/* Stop input DMA: clear the ENVID bit in MSCTRL. Always returns 0. */
+int fimc_hwset_stop_input_dma(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_MSCTRL);
+	cfg &= ~S3C_MSCTRL_ENVID;
+
+	writel(cfg, ctrl->regs + S3C_MSCTRL);
+
+	return 0;
+}
+
+/* Wait for both frame-end and last-capture-end status (bounded polls). */
+void fimc_wait_stop_processing(struct fimc_control *ctrl)
+{
+	fimc_hwget_frame_end(ctrl);
+	fimc_hwget_last_frame_end(ctrl);
+}
+
+/*
+ * fimc_hwset_stop_processing - quiesce the pipeline: wait for the current
+ * frame, then stop the scaler, capture and input DMA, and wait again so
+ * the buffers are clean for subsequent DMA use.
+ */
+void fimc_hwset_stop_processing(struct fimc_control *ctrl)
+{
+	fimc_wait_stop_processing(ctrl);
+
+	fimc_hwset_stop_scaler(ctrl);
+	fimc_hwset_disable_capture(ctrl);
+	fimc_hwset_stop_input_dma(ctrl);
+
+	/* We need to wait for sometime after processing is stopped.
+	 * This is required for obtaining clean buffer for DMA processing. */
+	fimc_wait_stop_processing(ctrl);
+}
+
+/*
+ * fimc40_hwset_output_offset - program output DMA offsets (FIMC v4.0).
+ * Offsets are expressed in bytes for packed formats (hence the *4 / *2
+ * scaling for 32/16 bpp) and divided per-plane for subsampled chroma.
+ *
+ * NOTE(review): returns -EINVAL when there is no crop (full frame) —
+ * i.e. when there is nothing to program. Callers (the dispatch wrapper)
+ * deliberately ignore this result; do not propagate it as a failure.
+ */
+int fimc40_hwset_output_offset(struct fimc_control *ctrl, u32 pixelformat,
+			       struct v4l2_rect *bounds,
+			       struct v4l2_rect *crop)
+{
+	u32 cfg_y = 0, cfg_cb = 0, cfg_cr = 0;
+
+	if (!crop->left && !crop->top && (bounds->width == crop->width) &&
+		(bounds->height == crop->height))
+		return -EINVAL;
+
+	fimc_dbg("%s: left: %d, top: %d, width: %d, height: %d\n",
+		__func__, crop->left, crop->top, crop->width, crop->height);
+
+	switch (pixelformat) {
+	/* 1 plane, 32 bits per pixel */
+	case V4L2_PIX_FMT_RGB32:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left * 4);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		break;
+
+	/* 1 plane, 16 bits per pixel */
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_VYUY:		/* fall through */
+	case V4L2_PIX_FMT_YVYU:		/* fall through */
+	case V4L2_PIX_FMT_RGB565:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left * 2);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		break;
+
+	/* 2 planes, 16 bits per pixel */
+	case V4L2_PIX_FMT_NV16:		/* fall through */
+	case V4L2_PIX_FMT_NV61:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		cfg_cb |= S3C_CIOCBOFF_HORIZONTAL(crop->left / 2);
+		cfg_cb |= S3C_CIOCBOFF_VERTICAL(crop->top / 2);
+		break;
+
+	/* 2 planes, 12 bits per pixel */
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV12T:	/* fall through */
+	case V4L2_PIX_FMT_NV21:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		cfg_cb |= S3C_CIOCBOFF_HORIZONTAL(crop->left / 4);
+		cfg_cb |= S3C_CIOCBOFF_VERTICAL(crop->top / 4);
+		break;
+
+	/* 3 planes, 16 bits per pixel */
+	case V4L2_PIX_FMT_YUV422P:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		cfg_cb |= S3C_CIOCBOFF_HORIZONTAL(crop->left / 2);
+		cfg_cb |= S3C_CIOCBOFF_VERTICAL(crop->top / 2);
+		cfg_cr |= S3C_CIOCROFF_HORIZONTAL(crop->left / 2);
+		cfg_cr |= S3C_CIOCROFF_VERTICAL(crop->top / 2);
+		break;
+
+	/* 3 planes, 12 bits per pixel */
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		cfg_cb |= S3C_CIOCBOFF_HORIZONTAL(crop->left / 4);
+		cfg_cb |= S3C_CIOCBOFF_VERTICAL(crop->top / 4);
+		cfg_cr |= S3C_CIOCROFF_HORIZONTAL(crop->left / 4);
+		cfg_cr |= S3C_CIOCROFF_VERTICAL(crop->top / 4);
+		break;
+
+	default:
+		break;
+	}
+
+	writel(cfg_y, ctrl->regs + S3C_CIOYOFF);
+	writel(cfg_cb, ctrl->regs + S3C_CIOCBOFF);
+	writel(cfg_cr, ctrl->regs + S3C_CIOCROFF);
+
+	return 0;
+}
+
+/*
+ * fimc50_hwset_output_offset - program output DMA offsets (FIMC v5.0+).
+ * Unlike the v4.0 layout, all offsets here are written in pixels with no
+ * per-format byte scaling or chroma division, and the full-frame case is
+ * not special-cased (zero offsets are simply written). Always returns 0.
+ */
+int fimc50_hwset_output_offset(struct fimc_control *ctrl, u32 pixelformat,
+			       struct v4l2_rect *bounds,
+			       struct v4l2_rect *crop)
+{
+	u32 cfg_y = 0, cfg_cb = 0, cfg_cr = 0;
+
+	fimc_dbg("%s: left: %d, top: %d, width: %d, height: %d\n",
+		__func__, crop->left, crop->top, crop->width, crop->height);
+
+	switch (pixelformat) {
+	/* 1 plane, 32 bits per pixel */
+	case V4L2_PIX_FMT_RGB32:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		break;
+
+	/* 1 plane, 16 bits per pixel */
+	case V4L2_PIX_FMT_YUYV:		/* fall through */
+	case V4L2_PIX_FMT_UYVY:		/* fall through */
+	case V4L2_PIX_FMT_VYUY:		/* fall through */
+	case V4L2_PIX_FMT_YVYU:		/* fall through */
+	case V4L2_PIX_FMT_RGB565:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		break;
+
+	/* 2 planes, 16 bits per pixel */
+	case V4L2_PIX_FMT_NV16:		/* fall through */
+	case V4L2_PIX_FMT_NV61:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		cfg_cb |= S3C_CIOCBOFF_HORIZONTAL(crop->left);
+		cfg_cb |= S3C_CIOCBOFF_VERTICAL(crop->top);
+		break;
+
+	/* 2 planes, 12 bits per pixel */
+	case V4L2_PIX_FMT_NV12:		/* fall through */
+	case V4L2_PIX_FMT_NV12M:	/* fall through */
+	case V4L2_PIX_FMT_NV12T:	/* fall through */
+	case V4L2_PIX_FMT_NV21:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		cfg_cb |= S3C_CIOCBOFF_HORIZONTAL(crop->left);
+		cfg_cb |= S3C_CIOCBOFF_VERTICAL(crop->top);
+		break;
+
+	/* 3 planes, 16 bits per pixel */
+	case V4L2_PIX_FMT_YUV422P:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		cfg_cb |= S3C_CIOCBOFF_HORIZONTAL(crop->left);
+		cfg_cb |= S3C_CIOCBOFF_VERTICAL(crop->top);
+		cfg_cr |= S3C_CIOCROFF_HORIZONTAL(crop->left);
+		cfg_cr |= S3C_CIOCROFF_VERTICAL(crop->top);
+		break;
+
+	/* 3 planes, 12 bits per pixel */
+	case V4L2_PIX_FMT_YUV420:
+	case V4L2_PIX_FMT_YVU420:
+		cfg_y |= S3C_CIOYOFF_HORIZONTAL(crop->left);
+		cfg_y |= S3C_CIOYOFF_VERTICAL(crop->top);
+		cfg_cb |= S3C_CIOCBOFF_HORIZONTAL(crop->left);
+		cfg_cb |= S3C_CIOCBOFF_VERTICAL(crop->top);
+		cfg_cr |= S3C_CIOCROFF_HORIZONTAL(crop->left);
+		cfg_cr |= S3C_CIOCROFF_VERTICAL(crop->top);
+		break;
+
+	default:
+		break;
+	}
+
+	writel(cfg_y, ctrl->regs + S3C_CIOYOFF);
+	writel(cfg_cb, ctrl->regs + S3C_CIOCBOFF);
+	writel(cfg_cr, ctrl->regs + S3C_CIOCROFF);
+
+	return 0;
+}
+
+/*
+ * fimc_hwset_output_offset - dispatch output-offset programming by
+ * hardware revision (v5.0+ vs older). The helper results are ignored on
+ * purpose: the v4.0 helper returns -EINVAL for the full-frame/no-crop
+ * case, which is not an error for callers. Always returns 0.
+ */
+int fimc_hwset_output_offset(struct fimc_control *ctrl, u32 pixelformat,
+			     struct v4l2_rect *bounds,
+			     struct v4l2_rect *crop)
+{
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+	if (pdata->hw_ver >= 0x50)
+		fimc50_hwset_output_offset(ctrl, pixelformat, bounds, crop);
+	else
+		fimc40_hwset_output_offset(ctrl, pixelformat, bounds, crop);
+
+	return 0;
+}
+
+/*
+ * fimc40_hwset_input_offset - program input DMA offsets (FIMC v4.0).
+ * Offsets are only computed when a real crop is requested; otherwise
+ * zeros are written. Packed-format offsets are scaled to bytes (*2/*4);
+ * the 2-plane chroma vertical offset is halved for 420 subsampling.
+ * Always returns 0 (unsupported formats only log an error).
+ */
+int fimc40_hwset_input_offset(struct fimc_control *ctrl, u32 pixelformat,
+			      struct v4l2_rect *bounds,
+			      struct v4l2_rect *crop)
+{
+	u32 cfg_y = 0, cfg_cb = 0;
+
+	if (crop->left || crop->top ||
+		(bounds->width != crop->width) ||
+		(bounds->height != crop->height)) {
+		switch (pixelformat) {
+		case V4L2_PIX_FMT_YUYV:		/* fall through */
+		case V4L2_PIX_FMT_RGB565:
+			cfg_y |= S3C_CIIYOFF_HORIZONTAL(crop->left * 2);
+			cfg_y |= S3C_CIIYOFF_VERTICAL(crop->top);
+			break;
+		case V4L2_PIX_FMT_RGB32:
+			cfg_y |= S3C_CIIYOFF_HORIZONTAL(crop->left * 4);
+			cfg_y |= S3C_CIIYOFF_VERTICAL(crop->top);
+			break;
+		case V4L2_PIX_FMT_NV12:		/* fall through */
+		case V4L2_PIX_FMT_NV21:		/* fall through */
+		case V4L2_PIX_FMT_NV12T:
+			cfg_y |= S3C_CIIYOFF_HORIZONTAL(crop->left);
+			cfg_y |= S3C_CIIYOFF_VERTICAL(crop->top);
+			cfg_cb |= S3C_CIICBOFF_HORIZONTAL(crop->left);
+			cfg_cb |= S3C_CIICBOFF_VERTICAL(crop->top / 2);
+
+			break;
+		default:
+			fimc_err("%s: Invalid pixelformt : %d\n",
+					__func__, pixelformat);
+		}
+	}
+
+	writel(cfg_y, ctrl->regs + S3C_CIIYOFF);
+	writel(cfg_cb, ctrl->regs + S3C_CIICBOFF);
+
+	return 0;
+}
+
+/*
+ * fimc50_hwset_input_offset - program input DMA offsets (FIMC v5.0+).
+ * All offsets are written in pixels (no byte scaling or chroma division,
+ * unlike the v4.0 layout) and the Cr-plane offset register is also
+ * programmed for 3-plane formats. Zeros are written when no crop is
+ * requested. Always returns 0 (unsupported formats only log an error).
+ */
+int fimc50_hwset_input_offset(struct fimc_control *ctrl, u32 pixelformat,
+			      struct v4l2_rect *bounds,
+			      struct v4l2_rect *crop)
+{
+	u32 cfg_y = 0, cfg_cb = 0, cfg_cr = 0;
+
+	if (crop->left || crop->top ||
+		(bounds->width != crop->width) ||
+		(bounds->height != crop->height)) {
+		switch (pixelformat) {
+		case V4L2_PIX_FMT_YUYV:		/* fall through */
+		case V4L2_PIX_FMT_UYVY:		/* fall through */
+		case V4L2_PIX_FMT_YVYU:		/* fall through */
+		case V4L2_PIX_FMT_VYUY:		/* fall through */
+		case V4L2_PIX_FMT_RGB565:
+			cfg_y |= S3C_CIIYOFF_HORIZONTAL(crop->left);
+			cfg_y |= S3C_CIIYOFF_VERTICAL(crop->top);
+			break;
+		case V4L2_PIX_FMT_RGB32:
+			cfg_y |= S3C_CIIYOFF_HORIZONTAL(crop->left);
+			cfg_y |= S3C_CIIYOFF_VERTICAL(crop->top);
+			break;
+		case V4L2_PIX_FMT_NV12:		/* fall through*/
+		case V4L2_PIX_FMT_NV21:		/* fall through*/
+		case V4L2_PIX_FMT_NV12T:
+			cfg_y |= S3C_CIIYOFF_HORIZONTAL(crop->left);
+			cfg_y |= S3C_CIIYOFF_VERTICAL(crop->top);
+			cfg_cb |= S3C_CIICBOFF_HORIZONTAL(crop->left);
+			cfg_cb |= S3C_CIICBOFF_VERTICAL(crop->top);
+			break;
+		case V4L2_PIX_FMT_NV16:		/* fall through */
+		case V4L2_PIX_FMT_NV61:
+			cfg_y |= S3C_CIIYOFF_HORIZONTAL(crop->left);
+			cfg_y |= S3C_CIIYOFF_VERTICAL(crop->top);
+			cfg_cb |= S3C_CIICBOFF_HORIZONTAL(crop->left);
+			cfg_cb |= S3C_CIICBOFF_VERTICAL(crop->top);
+			break;
+		case V4L2_PIX_FMT_YUV420:
+		case V4L2_PIX_FMT_YVU420:
+			cfg_y |= S3C_CIIYOFF_HORIZONTAL(crop->left);
+			cfg_y |= S3C_CIIYOFF_VERTICAL(crop->top);
+			cfg_cb |= S3C_CIICBOFF_HORIZONTAL(crop->left);
+			cfg_cb |= S3C_CIICBOFF_VERTICAL(crop->top);
+			cfg_cr |= S3C_CIICROFF_HORIZONTAL(crop->left);
+			cfg_cr |= S3C_CIICROFF_VERTICAL(crop->top);
+			break;
+		default:
+			fimc_err("%s: Invalid pixelformt : %d\n",
+					__func__, pixelformat);
+		}
+	}
+
+	writel(cfg_y, ctrl->regs + S3C_CIIYOFF);
+	writel(cfg_cb, ctrl->regs + S3C_CIICBOFF);
+	writel(cfg_cr, ctrl->regs + S3C_CIICROFF);
+
+	return 0;
+}
+
+int fimc_hwset_input_offset(struct fimc_control *ctrl, u32 pixelformat,
+				struct v4l2_rect *bounds,
+				struct v4l2_rect *crop)
+{
+	/* Dispatch to the revision-specific input-offset programmer. */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+
+	if (pdata->hw_ver < 0x50)
+		fimc40_hwset_input_offset(ctrl, pixelformat, bounds, crop);
+	else
+		fimc50_hwset_input_offset(ctrl, pixelformat, bounds, crop);
+
+	return 0;
+}
+
+int fimc_hwset_org_input_size(struct fimc_control *ctrl, u32 width, u32 height)
+{
+	/* Program the original (pre-crop) input image dimensions. */
+	u32 cfg = S3C_ORGISIZE_HORIZONTAL(width) |
+		  S3C_ORGISIZE_VERTICAL(height);
+
+	writel(cfg, ctrl->regs + S3C_ORGISIZE);
+
+	return 0;
+}
+
+int fimc_hwset_org_output_size(struct fimc_control *ctrl, u32 width, u32 height)
+{
+	/*
+	 * Program the original output image size; on revisions newer than
+	 * v4.0 also pick the CSC equation: ITU-R BT.709 for HD-wide output,
+	 * BT.601 otherwise.
+	 */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	u32 cfg = 0;
+
+	cfg |= S3C_ORGOSIZE_HORIZONTAL(width);
+	cfg |= S3C_ORGOSIZE_VERTICAL(height);
+
+	writel(cfg, ctrl->regs + S3C_ORGOSIZE);
+
+	if (pdata->hw_ver != 0x40) {
+		/* CSC equation selection exists only on post-v4.0 hardware */
+		cfg = readl(ctrl->regs + S3C_CIGCTRL);
+		cfg &= ~S3C_CIGCTRL_CSC_MASK;
+
+		if (width >= FIMC_HD_WIDTH)
+			cfg |= S3C_CIGCTRL_CSC_ITU709;
+		else
+			cfg |= S3C_CIGCTRL_CSC_ITU601;
+
+		writel(cfg, ctrl->regs + S3C_CIGCTRL);
+	}
+
+	return 0;
+}
+
+int fimc_hwset_ext_output_size(struct fimc_control *ctrl, u32 width, u32 height)
+{
+	/* Update only the extension bits of the target output size. */
+	u32 cfg = readl(ctrl->regs + S3C_CIEXTEN);
+
+	cfg &= ~(S3C_CIEXTEN_TARGETH_EXT_MASK | S3C_CIEXTEN_TARGETV_EXT_MASK);
+	cfg |= S3C_CIEXTEN_TARGETH_EXT(width) |
+	       S3C_CIEXTEN_TARGETV_EXT(height);
+
+	writel(cfg, ctrl->regs + S3C_CIEXTEN);
+
+	return 0;
+}
+
+int fimc_hwset_input_addr_style(struct fimc_control *ctrl, u32 pixelformat)
+{
+	/* Select tiled (64x32) vs. linear DMA read mode by pixel format. */
+	u32 cfg = readl(ctrl->regs + S3C_CIDMAPARAM) &
+			~S3C_CIDMAPARAM_R_MODE_MASK;
+
+	cfg |= (pixelformat == V4L2_PIX_FMT_NV12T) ?
+		S3C_CIDMAPARAM_R_MODE_64X32 : S3C_CIDMAPARAM_R_MODE_LINEAR;
+
+	writel(cfg, ctrl->regs + S3C_CIDMAPARAM);
+
+	return 0;
+}
+
+int fimc_hwset_output_addr_style(struct fimc_control *ctrl, u32 pixelformat)
+{
+	/* Select tiled (64x32) vs. linear DMA write mode by pixel format. */
+	u32 cfg = readl(ctrl->regs + S3C_CIDMAPARAM) &
+			~S3C_CIDMAPARAM_W_MODE_MASK;
+
+	cfg |= (pixelformat == V4L2_PIX_FMT_NV12T) ?
+		S3C_CIDMAPARAM_W_MODE_64X32 : S3C_CIDMAPARAM_W_MODE_LINEAR;
+
+	writel(cfg, ctrl->regs + S3C_CIDMAPARAM);
+
+	return 0;
+}
+
+int fimc_hw_wait_winoff(struct fimc_control *ctrl)
+{
+	/*
+	 * Poll CISTATUS until the LCD window status clears, bounded by
+	 * FIMC_FIFOOFF_CNT reads.  No-op on FIMC v4.0.
+	 *
+	 * Fix: success is now judged by the final status value instead of
+	 * the loop counter.  The old "if (i < 1)" check returned a spurious
+	 * -EBUSY when the status cleared on the very last polling iteration
+	 * (counter exhausted but status already 0).
+	 */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	u32 cfg = readl(ctrl->regs + S3C_CISTATUS);
+	u32 status = S3C_CISTATUS_GET_LCD_STATUS(cfg);
+	int i = FIMC_FIFOOFF_CNT;
+
+	if (pdata->hw_ver == 0x40)
+		return 0;
+
+	while (status && i--) {
+		cfg = readl(ctrl->regs + S3C_CISTATUS);
+		status = S3C_CISTATUS_GET_LCD_STATUS(cfg);
+	}
+
+	if (status) {
+		fimc_err("Fail : %s\n", __func__);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+int fimc_hw_wait_stop_input_dma(struct fimc_control *ctrl)
+{
+	/*
+	 * Wait for the input DMA (MSCTRL) and then the ENVID (CISTATUS)
+	 * status bits to clear, each bounded by FIMC_FIFOOFF_CNT polls.
+	 * No-op on FIMC v4.0.
+	 *
+	 * Fix: success is now judged by the final status values rather than
+	 * the loop counters, so a status that clears exactly on the last
+	 * polling iteration is no longer misreported as -EBUSY.
+	 */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	u32 cfg, indma_status, envid_status;
+	int i = FIMC_FIFOOFF_CNT, j = FIMC_FIFOOFF_CNT;
+
+	if (pdata->hw_ver == 0x40)
+		return 0;
+
+	cfg = readl(ctrl->regs + S3C_MSCTRL);
+	indma_status = S3C_MSCTRL_GET_INDMA_STATUS(cfg);
+	while (indma_status && i--) {
+		cfg = readl(ctrl->regs + S3C_MSCTRL);
+		indma_status = S3C_MSCTRL_GET_INDMA_STATUS(cfg);
+	}
+
+	cfg = readl(ctrl->regs + S3C_CISTATUS);
+	envid_status = S3C_CISTATUS_GET_ENVID_STATUS(cfg);
+	while (envid_status && j--) {
+		cfg = readl(ctrl->regs + S3C_CISTATUS);
+		envid_status = S3C_CISTATUS_GET_ENVID_STATUS(cfg);
+	}
+
+	if (indma_status || envid_status) {
+		fimc_err("Fail : %s\n", __func__);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+int fimc_hwset_input_lineskip(struct fimc_control *ctrl)
+{
+	/*
+	 * Program the same line-skip count (from the scaler state) into the
+	 * Y, Cb and Cr input line-skip registers.  No-op on FIMC v4.0,
+	 * which has no line-skip registers.
+	 */
+	struct s3c_platform_fimc *pdata = to_fimc_plat(ctrl->dev);
+	u32 cfg = 0;
+
+	if (pdata->hw_ver == 0x40)
+		return 0;
+
+	cfg = S3C_CIILINESKIP(ctrl->sc.skipline);
+
+	writel(cfg, ctrl->regs + S3C_CIILINESKIP_Y);
+	writel(cfg, ctrl->regs + S3C_CIILINESKIP_CB);
+	writel(cfg, ctrl->regs + S3C_CIILINESKIP_CR);
+
+	return 0;
+}
+
+int fimc_hw_reset_camera(struct fimc_control *ctrl)
+{
+	/*
+	 * Pulse the camera-A reset line: drive CAMRST_A low, then high.
+	 * NOTE(review): there is no delay between the two writes — confirm
+	 * the attached sensor's reset pulse-width requirement is met.
+	 */
+	u32 cfg = 0;
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg &= ~S3C_CIGCTRL_CAMRST_A;
+
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg |= S3C_CIGCTRL_CAMRST_A;
+
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+
+	return 0;
+}
+
+/* Above FIMC v5.1 */
+int fimc_hwset_output_buf_sequence(struct fimc_control *ctrl, u32 shift, u32 enable)
+{
+	/*
+	 * Enable/disable one buffer slot in the output frame-count sequence
+	 * register (FIMC v5.1+).  @shift selects the slot bit, @enable is
+	 * treated as a boolean.
+	 *
+	 * Fix: mask @enable to a single bit before OR-ing it in; a caller
+	 * passing a value > 1 would previously corrupt neighboring slots.
+	 */
+	u32 cfg = readl(ctrl->regs + S3C_CIFCNTSEQ);
+	u32 mask = 0x00000001 << shift;
+
+	cfg &= ~mask;
+	cfg |= ((enable & 0x1) << shift);
+	writel(cfg, ctrl->regs + S3C_CIFCNTSEQ);
+	return 0;
+}
+
+int fimc_hwget_output_buf_sequence(struct fimc_control *ctrl)
+{
+	/* Return the raw output buffer-sequence bitmap (FIMC v5.1+). */
+	u32 cfg = readl(ctrl->regs + S3C_CIFCNTSEQ);
+	return cfg;
+}
+/* Above FIMC v5.1 */
+int fimc_hw_reset_output_buf_sequence(struct fimc_control *ctrl)
+{
+	/* Clear the whole output buffer-sequence bitmap. */
+	writel(0x0, ctrl->regs + S3C_CIFCNTSEQ);
+	return 0;
+}
+
+/* Write a complete buffer-sequence bitmap in one register access. */
+void fimc_hwset_output_buf_sequence_all(struct fimc_control *ctrl, u32 framecnt_seq)
+{
+	writel(framecnt_seq, ctrl->regs + S3C_CIFCNTSEQ);
+}
+
+/* Above FIMC v5.1 */
+int fimc_hwget_before_frame_count(struct fimc_control *ctrl)
+{
+	/* CISTATUS2[12:7] holds the FrameCnt_before field. */
+	u32 status2 = readl(ctrl->regs + S3C_CISTATUS2);
+
+	return (status2 & 0x00001f80) >> 7;
+}
+
+/* Above FIMC v5.1 */
+int fimc_hwget_present_frame_count(struct fimc_control *ctrl)
+{
+	/* CISTATUS2[5:0] holds the FrameCnt_present field. */
+	return readl(ctrl->regs + S3C_CISTATUS2) & 0x0000003f;
+}
+
+int fimc_hwget_check_framecount_sequence(struct fimc_control *ctrl, u32 frame)
+{
+	/*
+	 * Report whether 1-based frame number @frame is enabled in the
+	 * buffer-sequence bitmap.
+	 *
+	 * Fix: guard the shift.  The old code computed "frame - 1" and
+	 * shifted unconditionally, so frame == 0 underflowed to 0xffffffff
+	 * and any frame > 32 shifted by >= the type width — both undefined
+	 * behavior.  Out-of-range frames now report "disabled".
+	 */
+	u32 framecnt_seq = readl(ctrl->regs + S3C_CIFCNTSEQ);
+
+	if (frame < 1 || frame > 32)
+		return FIMC_FRAMECNT_SEQ_DISABLE;
+
+	if (framecnt_seq & (0x1 << (frame - 1)))
+		return FIMC_FRAMECNT_SEQ_ENABLE;
+	else
+		return FIMC_FRAMECNT_SEQ_DISABLE;
+}
+
+int fimc_hwset_sysreg_camblk_fimd0_wb(struct fimc_control *ctrl)
+{
+	/*
+	 * Route FIMD0 writeback to this FIMC instance via the camera-block
+	 * system register.  The selector field sits at bits [15:14] on
+	 * Exynos4210 and at [24:23] on later SoCs.
+	 */
+	u32 camblk_cfg = readl(SYSREG_CAMERA_BLK);
+
+	if (soc_is_exynos4210()) {
+		camblk_cfg &= (~(0x3 << 14));
+		camblk_cfg |= ctrl->id << 14;
+	} else {
+		camblk_cfg &= (~(0x3 << 23));
+		camblk_cfg |= ctrl->id << 23;
+	}
+
+	writel(camblk_cfg, SYSREG_CAMERA_BLK);
+
+	return 0;
+}
+
+int fimc_hwset_sysreg_camblk_fimd1_wb(struct fimc_control *ctrl)
+{
+	/* Route FIMD1 writeback to this FIMC: selector bits [11:10]. */
+	u32 cfg = readl(SYSREG_CAMERA_BLK);
+
+	cfg &= ~(0x3 << 10);
+	cfg |= ctrl->id << 10;
+	writel(cfg, SYSREG_CAMERA_BLK);
+
+	return 0;
+}
+
+int fimc_hwset_sysreg_camblk_isp_wb(struct fimc_control *ctrl)
+{
+	/*
+	 * Route the ISP writeback path to this FIMC: program the 3-bit
+	 * selector at camera-block bits [22:20], then pulse camera-block
+	 * bit 15 and ISP-block bit 7 low-high to latch the change.
+	 * NOTE(review): udelay(1000) is a 1 ms busy-wait, twice — confirm
+	 * whether a shorter delay or usleep_range() would suffice here.
+	 */
+	u32 camblk_cfg = readl(SYSREG_CAMERA_BLK);
+	u32 ispblk_cfg = readl(SYSREG_ISP_BLK);
+	camblk_cfg = camblk_cfg & (~(0x7 << 20));
+	if (ctrl->id == 0)
+		camblk_cfg = camblk_cfg | (0x1 << 20);
+	else if (ctrl->id == 1)
+		camblk_cfg = camblk_cfg | (0x2 << 20);
+	else if (ctrl->id == 2)
+		camblk_cfg = camblk_cfg | (0x4 << 20);
+	else if (ctrl->id == 3)
+		camblk_cfg = camblk_cfg | (0x7 << 20); /* FIXME*/
+	else
+		fimc_err("%s: not supported id : %d\n", __func__, ctrl->id);
+
+	camblk_cfg = camblk_cfg & (~(0x1 << 15));
+	writel(camblk_cfg, SYSREG_CAMERA_BLK);
+	udelay(1000);
+	camblk_cfg = camblk_cfg | (0x1 << 15);
+	writel(camblk_cfg, SYSREG_CAMERA_BLK);
+
+	ispblk_cfg = ispblk_cfg & (~(0x1 << 7));
+	writel(ispblk_cfg, SYSREG_ISP_BLK);
+	udelay(1000);
+	ispblk_cfg = ispblk_cfg | (0x1 << 7);
+	writel(ispblk_cfg, SYSREG_ISP_BLK);
+
+	return 0;
+}
+
+/*
+ * Set the CIGCTRL IRQ_END_DISABLE bit.
+ * NOTE(review): the function is named "enable" but sets a bit named
+ * "DISABLE" — verify against the datasheet which polarity actually
+ * enables the frame-end interrupt.
+ */
+void fimc_hwset_enable_frame_end_irq(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg |= S3C_CIGCTRL_IRQ_END_DISABLE;
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+}
+
+/* Clear the CIGCTRL IRQ_END_DISABLE bit (inverse of the enable helper). */
+void fimc_hwset_disable_frame_end_irq(struct fimc_control *ctrl)
+{
+	u32 cfg = readl(ctrl->regs + S3C_CIGCTRL);
+	cfg &= ~S3C_CIGCTRL_IRQ_END_DISABLE;
+	writel(cfg, ctrl->regs + S3C_CIGCTRL);
+}
+
+/* Clear all CISTATUS flags by writing zero. */
+void fimc_reset_status_reg(struct fimc_control *ctrl)
+{
+	writel(0x0, ctrl->regs + S3C_CISTATUS);
+}
+
+#if defined(CONFIG_ARCH_EXYNOS4)
+/* Dump a fixed set of FIMC special-function registers for debugging. */
+void fimc_sfr_dump(struct fimc_control *ctrl)
+{
+	static const u32 offs[] = {
+		0x0, 0x4, 0x8, 0x14, 0x18, 0x1C, 0x20, 0x24, 0x28, 0x2c, 0x30,
+		0x34, 0x38, 0x3c, 0x40, 0x44, 0x48, 0x4c, 0x50, 0x54, 0x58, 0x5c,
+		0x60, 0x64, 0x68, 0xc0, 0xc4, 0xc8, 0xd0, 0xd4, 0xd8, 0xdc, 0xec,
+		0xf0, 0xf4, 0xf8, 0xfc, 0x144, 0x148, 0x14c, 0x168, 0x16c, 0x170,
+		0x174, 0x178, 0x180, 0x184, 0x188, 0x18c, 0x194, 0x19c, 0x1a0, 0x1fc,
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(offs); i++)
+		printk("idx = 0x%x \tval 0x%08x \n", offs[i],
+			readl(ctrl->regs + offs[i]));
+}
+#endif
diff --git a/drivers/media/video/samsung/fimc/fimc_v4l2.c b/drivers/media/video/samsung/fimc/fimc_v4l2.c
new file mode 100644
index 0000000..510e2f1
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/fimc_v4l2.c
@@ -0,0 +1,327 @@
+/* linux/drivers/media/video/samsung/fimc/fimc_v4l2.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * V4L2 interface support file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/videodev2.h>
+#include <linux/videodev2_exynos_media.h>
+#include <linux/videodev2_exynos_camera.h>
+#include <media/v4l2-ioctl.h>
+#include <plat/fimc.h>
+
+#include "fimc.h"
+
+static int fimc_querycap(struct file *filp, void *fh,
+			 struct v4l2_capability *cap)
+{
+	/*
+	 * VIDIOC_QUERYCAP: report driver identity and capability flags.
+	 *
+	 * Fix: replace the unbounded strcpy()/sprintf() into the fixed-size
+	 * v4l2_capability arrays with strlcpy(), which bounds the copy and
+	 * guarantees NUL termination.
+	 */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	fimc_info1("%s: called\n", __func__);
+
+	strlcpy(cap->driver, "SEC FIMC Driver", sizeof(cap->driver));
+	strlcpy(cap->card, ctrl->vd->name, sizeof(cap->card));
+	strlcpy(cap->bus_info, "FIMC AHB-bus", sizeof(cap->bus_info));
+
+	cap->version = 0;
+	cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+				V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_STREAMING);
+
+	return 0;
+}
+
+static int fimc_reqbufs(struct file *filp, void *fh,
+			struct v4l2_requestbuffers *b)
+{
+	/* Route VIDIOC_REQBUFS to the capture or output implementation. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return fimc_reqbufs_capture(ctrl, b);
+	if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return fimc_reqbufs_output(fh, b);
+
+	fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and "
+		"V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n");
+	return -EINVAL;
+}
+
+static int fimc_querybuf(struct file *filp, void *fh, struct v4l2_buffer *b)
+{
+	/* Route VIDIOC_QUERYBUF to the capture or output implementation. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return fimc_querybuf_capture(ctrl, b);
+	if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return fimc_querybuf_output(fh, b);
+
+	fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and "
+		"V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n");
+	return -EINVAL;
+}
+
+static int fimc_g_ctrl(struct file *filp, void *fh, struct v4l2_control *c)
+{
+	/* Get a control via whichever path (capture/output) is active. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (ctrl->cap != NULL)
+		return fimc_g_ctrl_capture(ctrl, c);
+	if (ctrl->out != NULL)
+		return fimc_g_ctrl_output(fh, c);
+
+	fimc_err("%s: Invalid case\n", __func__);
+	return -EINVAL;
+}
+
+static int fimc_s_ctrl(struct file *filp, void *fh, struct v4l2_control *c)
+{
+	/* Set a control via whichever path (capture/output) is active. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (ctrl->cap != NULL)
+		return fimc_s_ctrl_capture(ctrl, c);
+	if (ctrl->out != NULL)
+		return fimc_s_ctrl_output(filp, fh, c);
+
+	fimc_err("%s: Invalid case\n", __func__);
+	return -EINVAL;
+}
+
+static int fimc_g_ext_ctrls(struct file *filp, void *fh, struct v4l2_ext_controls *c)
+{
+	/* Extended-control reads are implemented for capture only. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (ctrl->cap != NULL)
+		return fimc_g_ext_ctrls_capture(fh, c);
+
+	fimc_err("%s: Invalid case\n", __func__);
+	return -EINVAL;
+}
+
+static int fimc_s_ext_ctrls(struct file *filp, void *fh, struct v4l2_ext_controls *c)
+{
+	/* Set extended controls; only the capture path is implemented. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+	int ret = -1;
+
+	if (ctrl->cap != NULL) {
+		ret = fimc_s_ext_ctrls_capture(fh, c);
+	} else if (ctrl->out != NULL) {
+		/* How about "ret = fimc_s_ext_ctrls_output(fh, c);"? */
+		/* NOTE(review): this branch falls through with ret == -1
+		 * (-EPERM to user space) — confirm whether 0 or -EINVAL
+		 * was intended for the unimplemented output path. */
+	} else {
+		fimc_err("%s: Invalid case\n", __func__);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int fimc_cropcap(struct file *filp, void *fh, struct v4l2_cropcap *a)
+{
+	/* Route VIDIOC_CROPCAP to the capture or output implementation. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (a->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return fimc_cropcap_capture(ctrl, a);
+	if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return fimc_cropcap_output(fh, a);
+
+	fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and "
+		"V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n");
+	return -EINVAL;
+}
+
+static int fimc_g_crop(struct file *filp, void *fh, struct v4l2_crop *a)
+{
+	/* Route VIDIOC_G_CROP to the capture or output implementation. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (a->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return fimc_g_crop_capture(ctrl, a);
+	if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return fimc_g_crop_output(fh, a);
+
+	fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and "
+		"V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n");
+	return -EINVAL;
+}
+
+static int fimc_s_crop(struct file *filp, void *fh, struct v4l2_crop *a)
+{
+	/* Route VIDIOC_S_CROP to the capture or output implementation. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (a->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return fimc_s_crop_capture(ctrl, a);
+	if (a->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return fimc_s_crop_output(fh, a);
+
+	fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and "
+		"V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n");
+	return -EINVAL;
+}
+
+static int fimc_streamon(struct file *filp, void *fh, enum v4l2_buf_type i)
+{
+	/* Route VIDIOC_STREAMON to the capture or output implementation. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (i == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return fimc_streamon_capture(ctrl);
+	if (i == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return fimc_streamon_output(fh);
+
+	fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and "
+		"V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n");
+	return -EINVAL;
+}
+
+static int fimc_streamoff(struct file *filp, void *fh, enum v4l2_buf_type i)
+{
+	/* Route VIDIOC_STREAMOFF to the capture or output implementation. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (i == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return fimc_streamoff_capture(ctrl);
+	if (i == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return fimc_streamoff_output(fh);
+
+	fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and "
+		"V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n");
+	return -EINVAL;
+}
+
+static int fimc_qbuf(struct file *filp, void *fh, struct v4l2_buffer *b)
+{
+	/* Route VIDIOC_QBUF to the capture or output implementation. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return fimc_qbuf_capture(ctrl, b);
+	if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return fimc_qbuf_output(fh, b);
+
+	fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and "
+		"V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n");
+	return -EINVAL;
+}
+
+static int fimc_dqbuf(struct file *filp, void *fh, struct v4l2_buffer *b)
+{
+	/* Route VIDIOC_DQBUF to the capture or output implementation. */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+		return fimc_dqbuf_capture(ctrl, b);
+	if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+		return fimc_dqbuf_output(fh, b);
+
+	fimc_err("V4L2_BUF_TYPE_VIDEO_CAPTURE and "
+		"V4L2_BUF_TYPE_VIDEO_OUTPUT are only supported\n");
+	return -EINVAL;
+}
+
+static int fimc_log_status(struct file *filp, void *fh)
+{
+	/*
+	 * VIDIOC_LOG_STATUS: print driver state to the kernel log.
+	 *
+	 * Fix: framecnt_seq was declared unconditionally but used only
+	 * under CONFIG_ARCH_EXYNOS4, producing an unused-variable warning
+	 * on other configurations; the declaration now lives inside the
+	 * conditional block.
+	 */
+	struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
+
+	printk(KERN_INFO "fimc%d ctrl->status is %d\n", ctrl->id, ctrl->status);
+
+#if defined(CONFIG_ARCH_EXYNOS4)
+	{
+		int framecnt_seq = fimc_hwget_output_buf_sequence(ctrl);
+
+		printk(KERN_INFO "fimc(%d) framecnt_seq is %d\n", ctrl->id, framecnt_seq);
+		printk(KERN_INFO "fimc(%d) availble_buf is %d\n", ctrl->id, fimc_hwget_number_of_bits(framecnt_seq));
+
+		fimc_sfr_dump(ctrl);
+	}
+#endif
+	return 0;
+}
+
+/*
+ * V4L2 ioctl dispatch table for the FIMC video device.  Generic ioctls
+ * (reqbufs, qbuf, streamon, ...) route through the local type-dispatching
+ * wrappers above; format/input/parm ioctls go straight to the capture and
+ * output implementations.
+ */
+const struct v4l2_ioctl_ops fimc_v4l2_ops = {
+	.vidioc_querycap		= fimc_querycap,
+	.vidioc_reqbufs			= fimc_reqbufs,
+	.vidioc_querybuf		= fimc_querybuf,
+	.vidioc_g_ctrl			= fimc_g_ctrl,
+	.vidioc_g_ext_ctrls		= fimc_g_ext_ctrls,
+	.vidioc_s_ctrl			= fimc_s_ctrl,
+	.vidioc_s_ext_ctrls		= fimc_s_ext_ctrls,
+	.vidioc_cropcap			= fimc_cropcap,
+	.vidioc_g_crop			= fimc_g_crop,
+	.vidioc_s_crop			= fimc_s_crop,
+	.vidioc_streamon		= fimc_streamon,
+	.vidioc_streamoff		= fimc_streamoff,
+	.vidioc_qbuf			= fimc_qbuf,
+	.vidioc_dqbuf			= fimc_dqbuf,
+	.vidioc_enum_fmt_vid_cap	= fimc_enum_fmt_vid_capture,
+	.vidioc_g_fmt_vid_cap		= fimc_g_fmt_vid_capture,
+	.vidioc_s_fmt_vid_cap		= fimc_s_fmt_vid_capture,
+	.vidioc_s_fmt_type_private	= fimc_s_fmt_vid_private,
+	.vidioc_try_fmt_vid_cap		= fimc_try_fmt_vid_capture,
+	.vidioc_enum_input		= fimc_enum_input,
+	.vidioc_g_input			= fimc_g_input,
+	.vidioc_s_input			= fimc_s_input,
+	.vidioc_g_parm			= fimc_g_parm,
+	.vidioc_s_parm			= fimc_s_parm,
+	.vidioc_queryctrl		= fimc_queryctrl,
+	.vidioc_querymenu		= fimc_querymenu,
+	.vidioc_g_fmt_vid_out		= fimc_g_fmt_vid_out,
+	.vidioc_s_fmt_vid_out		= fimc_s_fmt_vid_out,
+	.vidioc_try_fmt_vid_out		= fimc_try_fmt_vid_out,
+	.vidioc_g_fbuf			= fimc_g_fbuf,
+	.vidioc_s_fbuf			= fimc_s_fbuf,
+	.vidioc_try_fmt_vid_overlay	= fimc_try_fmt_overlay,
+	.vidioc_g_fmt_vid_overlay	= fimc_g_fmt_vid_overlay,
+	.vidioc_s_fmt_vid_overlay	= fimc_s_fmt_vid_overlay,
+	.vidioc_enum_framesizes		= fimc_enum_framesizes,
+	.vidioc_enum_frameintervals	= fimc_enum_frameintervals,
+	.vidioc_log_status		= fimc_log_status,
+};
diff --git a/drivers/media/video/samsung/fimc/ipc.c b/drivers/media/video/samsung/fimc/ipc.c
new file mode 100644
index 0000000..4bcbd5d
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/ipc.c
@@ -0,0 +1,472 @@
+/* linux/drivers/media/video/samsung/fimc/ipc.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Driver file for Samsung IPC driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/memory.h>
+#include <plat/clock.h>
+#include <plat/regs-ipc.h>
+
+#include "fimc-ipc.h"
+#include "ipc_table.h"
+
+struct ipc_control *ipc;
+
+/* Commit the shadowed IPC register set to the active registers. */
+void shadow_update(void)
+{
+	writel(S3C_IPC_SHADOW_UPDATE_ENABLE,
+		ipc->regs + S3C_IPC_SHADOW_UPDATE);
+}
+
+/* Gate the IPC post-processing stage via the BYPASS register. */
+void ipc_enable_postprocessing(u32 onoff)
+{
+	u32 cfg = readl(ipc->regs + S3C_IPC_BYPASS);
+
+	if (!onoff)
+		cfg |= S3C_IPC_PP_BYPASS_DISABLE;
+	else
+		/* NOTE(review): asymmetric with the |= branch above —
+		 * expected "cfg &= ~S3C_IPC_PP_BYPASS_DISABLE"?  Verify
+		 * against the S3C_IPC_* register definitions. */
+		cfg &= S3C_IPC_PP_BYPASS_ENABLE;
+
+	writel(cfg, ipc->regs + S3C_IPC_BYPASS);
+
+	shadow_update();
+}
+
+/* Switch the IPC core on or off via the ENABLE register. */
+void ipc_enable(u32 onoff)
+{
+	u32 cfg = readl(ipc->regs + S3C_IPC_ENABLE);
+
+	if (!onoff)
+		/* NOTE(review): "&= S3C_IPC_OFF" only clears bits if the
+		 * constant is a mask with the enable bit zero — confirm. */
+		cfg &= S3C_IPC_OFF;
+	else
+		cfg |= S3C_IPC_ON;
+
+	writel(cfg, ipc->regs + S3C_IPC_ENABLE);
+}
+
+/*
+ * Trigger a software reset of the IPC block.
+ * Waits (unbounded) for any in-progress reset to clear before asserting
+ * a new one.  NOTE(review): the busy-wait has no timeout — a stuck
+ * SRESET bit would hang here.
+ */
+void ipc_reset(void)
+{
+	u32 cfg;
+
+	do {
+		cfg = readl(ipc->regs + S3C_IPC_SRESET);
+	} while ((cfg & S3C_IPC_SRESET_MASK));
+
+	writel(S3C_IPC_SRESET_ENABLE, ipc->regs + S3C_IPC_SRESET);
+}
+
+/* Bring the IPC up: post-processing stage first, then the core. */
+void ipc_start(void)
+{
+	ipc_enable_postprocessing(ON);
+	ipc_enable(ON);
+}
+
+/* Shut the IPC down, reset it, and release its clock. */
+void ipc_stop(void)
+{
+	ipc_enable_postprocessing(OFF);
+	ipc_enable(OFF);
+	ipc_reset();
+
+#if 1
+	/* Jonghun Han
+	 * After clk_disable, registers cannot be written even though the
+	 * clock is re-enabled.  Must be tested by the System Application
+	 * part.
+	 */
+	clk_disable(ipc->clk);
+#endif
+}
+
+/* Select which interlace field id the IPC starts from. */
+void ipc_field_id_control(enum ipc_field_id id)
+{
+	writel(id, ipc->regs + S3C_IPC_FIELD_ID);
+	shadow_update();
+}
+
+/*
+ * Configure field-id source selection and auto-toggling in the MODE
+ * register.  Two separate read-modify-write cycles are kept on purpose
+ * to match the original register access sequence.
+ */
+void ipc_field_id_mode(enum ipc_field_id_sel sel,
+		       enum ipc_field_id_togl toggle)
+{
+	u32 cfg;
+
+	cfg = readl(ipc->regs + S3C_IPC_MODE);
+	cfg |= S3C_IPC_FIELD_ID_SELECTION(sel);
+	writel(cfg, ipc->regs + S3C_IPC_MODE);
+
+	cfg = readl(ipc->regs + S3C_IPC_MODE);
+	cfg |= S3C_IPC_FIELD_ID_AUTO_TOGGLING(toggle);
+	writel(cfg, ipc->regs + S3C_IPC_MODE);
+
+	shadow_update();
+}
+
+/* Enable or disable 2D IPC mode in the MODE register. */
+void ipc_2d_enable(enum ipc_enoff onoff)
+{
+	u32 cfg;
+
+	cfg = readl(ipc->regs + S3C_IPC_MODE);
+	cfg &= ~S3C_IPC_2D_MASK;
+	cfg |= S3C_IPC_2D_CTRL(onoff);
+	writel(cfg, ipc->regs + S3C_IPC_MODE);
+
+	shadow_update();
+}
+
+/*
+ * Program the IPC operating mode: field handling, 2D on/off, and the
+ * H/V scaling ratios.  2D mode scales vertically; otherwise horizontal
+ * double-scaling is used.
+ */
+void ipc_set_mode(struct ipc_controlvariable con_var)
+{
+	u32 cfg = 0;
+
+	/* Enabled : 2D IPC, Disabled : horizontal double scaling */
+	ipc_field_id_control(IPC_BOTTOM_FIELD);
+	ipc_field_id_mode(CAM_FIELD_SIG, AUTO);
+	ipc_2d_enable(con_var.modeval);
+
+	if (con_var.modeval == IPC_2D)
+		cfg = IPC_2D_ENABLE;
+	else
+		cfg = IPC_HOR_SCALING_ENABLE;
+	writel(cfg, ipc->regs + S3C_IPC_H_RATIO);
+
+	cfg = IPC_2D_ENABLE;
+	writel(cfg, ipc->regs + S3C_IPC_V_RATIO);
+
+	shadow_update();
+}
+
+/* Program source and destination image dimensions. */
+void ipc_set_imgsize(struct ipc_source src, struct ipc_destination dst)
+{
+	writel(S3C_IPC_SRC_WIDTH_SET(src.srchsz),
+		ipc->regs + S3C_IPC_SRC_WIDTH);
+	writel(S3C_IPC_SRC_HEIGHT_SET(src.srcvsz),
+		ipc->regs + S3C_IPC_SRC_HEIGHT);
+
+	writel(S3C_IPC_DST_WIDTH_SET(dst.dsthsz),
+		ipc->regs + S3C_IPC_DST_WIDTH);
+	writel(S3C_IPC_DST_HEIGHT_SET(dst.dstvsz),
+		ipc->regs + S3C_IPC_DST_HEIGHT);
+
+	shadow_update();
+}
+
+void ipc_set_enhance_param(void)
+{
+	/* Reset all picture-enhancement parameters to neutral defaults. */
+	unsigned int idx;
+
+	for (idx = 0; idx < 8; idx++) {
+		ipc->enhance_var.brightness[idx] = 0x0;
+		ipc->enhance_var.contrast[idx] = 0x80;
+	}
+
+	ipc->enhance_var.saturation = 0x80;
+	ipc->enhance_var.sharpness = NO_EFFECT;
+	ipc->enhance_var.thhnoise = 0x5;
+	ipc->enhance_var.brightoffset = 0x0;
+}
+
+/* Program the per-line contrast coefficients (8 line-equation registers). */
+void ipc_set_contrast(u32 *contrast)
+{
+	u32 i, line_eq[8];
+
+	for (i = 0; i < 8; i++) {
+		line_eq[i] = readl(ipc->regs + (S3C_IPC_PP_LINE_EQ0 + 4 * i));
+		line_eq[i] &= ~S3C_IPC_PP_LINE_CONTRAST_MASK;
+		line_eq[i] |= S3C_IPC_PP_LINE_CONTRAST(contrast[i]);
+		writel(line_eq[i], ipc->regs + (S3C_IPC_PP_LINE_EQ0 + 4 * i));
+	}
+
+	shadow_update();
+}
+
+/* Program the per-line brightness coefficients (8 line-equation registers). */
+void ipc_set_brightness(u32 *brightness)
+{
+	u32 i, line_eq[8];
+
+	for (i = 0; i < 8; i++) {
+		line_eq[i] = readl(ipc->regs + (S3C_IPC_PP_LINE_EQ0 + 4 * i));
+		line_eq[i] &= ~S3C_IPC_PP_LINE_BRIGTHNESS_MASK;
+		line_eq[i] |= S3C_IPC_PP_LINE_BRIGHT(brightness[i]);
+		writel(line_eq[i], ipc->regs + (S3C_IPC_PP_LINE_EQ0 + 4 * i));
+	}
+
+	shadow_update();
+}
+
+/* Program the global brightness offset. */
+void ipc_set_bright_offset(u32 offset)
+{
+	writel(S3C_IPC_PP_BRIGHT_OFFSET_SET(offset),
+		ipc->regs + S3C_IPC_PP_BRIGHT_OFFSET);
+	shadow_update();
+}
+
+/* Program the saturation level. */
+void ipc_set_saturation(u32 saturation)
+{
+	writel(S3C_IPC_PP_SATURATION_SET(saturation),
+		ipc->regs + S3C_IPC_PP_SATURATION);
+	shadow_update();
+}
+
+void ipc_set_sharpness(enum ipc_sharpness sharpness, u32 threshold)
+{
+	/*
+	 * Map the abstract sharpness level to the 2-bit hardware code and
+	 * program it together with the horizontal-noise threshold.
+	 */
+	u32 sharpval;
+
+	if (sharpness == NO_EFFECT)
+		sharpval = 0x0;
+	else if (sharpness == MIN_EDGE)
+		sharpval = 0x1;
+	else if (sharpness == MODERATE_EDGE)
+		sharpval = 0x2;
+	else
+		sharpval = 0x3;
+
+	writel(S3C_IPC_PP_TH_HNOISE_SET(threshold) | sharpval,
+		ipc->regs + S3C_IPC_PP_SHARPNESS);
+
+	shadow_update();
+}
+
+/*
+ * Load a polyphase filter coefficient bank into the registers starting at
+ * @filter_reg.  @filter_coef points to a tap-major table of 16 phases x
+ * @tap signed coefficients; four coefficients are packed big-endian-style
+ * into each 32-bit register write, and taps are emitted in reverse order.
+ */
+void ipc_set_polyphase_filter(u32 filter_reg,
+		const s8 *filter_coef, u16 tap)
+{
+	u32 base;
+	u32 i, j;
+	u16 tmp_tap;
+	u8 *coef;
+
+	base = (u32)ipc->regs + filter_reg;
+	/* cast drops const/sign: bytes are packed, not arithmetically used */
+	coef = (u8 *)filter_coef;
+
+	for (i = 0; i < tap; i++) {
+		tmp_tap = tap - i - 1;
+
+		for (j = 0; j < 4; j++) {
+			writel(((coef[4 * j * tap + tmp_tap] << 24)
+				| (coef[(4 * j + 1) * tap + tmp_tap] << 16)
+				| (coef[(4 * j + 2) * tap + tmp_tap] << 8)
+				| (coef[(4 * j + 3) * tap + tmp_tap])), base);
+			base += 4;
+		}
+	}
+}
+
+/*
+ * Load the three coefficient banks for one scaling configuration:
+ * horizontal luma (8-tap), horizontal chroma (4-tap) and vertical
+ * luma (4-tap).  Each bank holds 16 phases x taps entries.
+ */
+void ipc_set_polyphase_filterset(enum ipc_filter_h_pp h_filter,
+				 enum ipc_filter_v_pp v_filter)
+{
+	ipc_set_polyphase_filter(S3C_IPC_POLY8_Y0_LL,
+			ipc_8tap_coef_y_h + h_filter * 16 * 8, 8);
+	ipc_set_polyphase_filter(S3C_IPC_POLY4_C0_LL,
+			ipc_4tap_coef_c_h + h_filter * 16 * 4, 4);
+	ipc_set_polyphase_filter(S3C_IPC_POLY4_Y0_LL,
+			ipc_4tap_coef_y_v + v_filter * 16 * 4, 4);
+}
+
+/* For the real interlace mode,
+ * the vertical ratio should be used after divided by 2.
+ * Because in the interlace mode,
+ * all the IPC output is used for FIMD display
+ * and it should be the same as one field of the progressive mode.
+ * Therefore the same filter coefficients should be used for
+ * the same final output video.
+ * When half of the interlace V_RATIO is same as the progressive V_RATIO,
+ * the final output video scale is same. (20051104,ishan)
+*/
+/*
+ * Pick the polyphase filter banks that match the currently programmed
+ * H/V scaling ratios (read back from the ratio registers) and load them.
+ * Ratios are fixed-point with 1.0 == (1 << 16).
+ */
+void ipc_set_filter(void)
+{
+	enum ipc_filter_h_pp h_filter;
+	enum ipc_filter_v_pp v_filter;
+	u32 h_ratio, v_ratio;
+
+	h_ratio = readl(ipc->regs + S3C_IPC_H_RATIO);
+	v_ratio = readl(ipc->regs + S3C_IPC_V_RATIO);
+
+	/* Horizontal Y 8 tap , Horizontal C 4 tap */
+	if (h_ratio <= (0x1 << 16))		/* 720 -> 720 or zoom in */
+		h_filter = IPC_PP_H_NORMAL;
+	else if (h_ratio <= (0x9 << 13))	/* 720 -> 640 */
+		h_filter = IPC_PP_H_8_9 ;
+	else if (h_ratio <= (0x1 << 17))	/* 2 -> 1 */
+		h_filter = IPC_PP_H_1_2;
+	else if (h_ratio <= (0x3 << 16))	/* 3 -> 1 */
+		h_filter = IPC_PP_H_1_3;
+	else					/* 4 -> 1 */
+		h_filter = IPC_PP_H_1_4;
+
+	/* Vertical Y 4 tap */
+	if (v_ratio <= (0x1 << 16))		/* 720 -> 720 or zoom in */
+		v_filter = IPC_PP_V_NORMAL;
+	else if (v_ratio <= (0x3 << 15))	/* 6 -> 5 */
+		v_filter = IPC_PP_V_5_6;
+	else if (v_ratio <= (0x1 << 17))	/* 2 -> 1 */
+		v_filter = IPC_PP_V_1_2;
+	else if (v_ratio <= (0x3 << 16))	/* 3 -> 1 */
+		v_filter = IPC_PP_V_1_3;
+	else					/* 4 -> 1 */
+		v_filter = IPC_PP_V_1_4;
+
+	ipc_set_polyphase_filterset(h_filter, v_filter);
+}
+
+/* Program the fixed pixel-rate control value. */
+void ipc_set_pixel_rate(void)
+{
+	writel(S3C_IPC_PEL_RATE_SET, ipc->regs + S3C_IPC_PEL_RATE_CTRL);
+	shadow_update();
+}
+
+/*
+ * Full IPC (re)initialization for a new source size and mode.
+ * Validates the source against the scaler input limits, derives the
+ * destination size (2D mode doubles the height, otherwise the width),
+ * then resets the block and programs mode, sizes, enhancement defaults,
+ * filter banks and pixel rate.  Returns 0 or -EINVAL on bad input size.
+ */
+int ipc_init(u32 src_width, u32 src_height, enum ipc_2d ipc2d)
+{
+	if (src_width > IN_SC_MAX_WIDTH || src_height > IN_SC_MAX_HEIGHT) {
+		ipc_err("IPC input size error\n");
+		ipc_stop();
+		return -EINVAL;
+	}
+
+	ipc->src.imghsz = src_width;
+	ipc->src.imgvsz = src_height;
+	ipc->src.srchsz = src_width;
+	ipc->src.srcvsz = src_height;
+
+	ipc->dst.scanmode = PROGRESSIVE;
+
+	if (ipc2d == IPC_2D) {
+		/* 2D (de-interlace) mode: output has doubled height */
+		ipc->dst.dsthsz = src_width;
+		ipc->dst.dstvsz = src_height * 2;
+	} else {
+		/* Horizontal double-scaling mode: output has doubled width */
+		ipc->dst.dsthsz = src_width * 2;
+		ipc->dst.dstvsz = src_height;
+	}
+
+	ipc->control_var.modeval = ipc2d;
+
+	clk_enable(ipc->clk);
+
+	ipc_reset();
+	ipc_enable(OFF);
+	ipc_enable_postprocessing(OFF);
+
+	ipc_set_mode(ipc->control_var);
+	ipc_set_imgsize(ipc->src, ipc->dst);
+
+	ipc_set_enhance_param();
+	ipc_set_contrast(ipc->enhance_var.contrast);
+	ipc_set_brightness(ipc->enhance_var.brightness);
+	ipc_set_bright_offset(ipc->enhance_var.brightoffset);
+	ipc_set_saturation(ipc->enhance_var.saturation);
+	ipc_set_sharpness(ipc->enhance_var.sharpness,
+			ipc->enhance_var.thhnoise);
+
+	ipc_set_filter();
+	ipc_set_pixel_rate();
+
+	return 0;
+}
+
+static int ipc_probe(struct platform_device *pdev)
+{
+	/*
+	 * Platform probe: allocate the driver state, acquire the clock,
+	 * and map the register block.
+	 *
+	 * Fixes: every error path previously leaked all resources acquired
+	 * so far (the allocation, the clock reference, the mem region) —
+	 * now unwound via goto cleanup.  kmalloc was also replaced with
+	 * kzalloc so the state starts zeroed.
+	 */
+	struct resource *res;
+	int ret;
+
+	ipc = kzalloc(sizeof(*ipc), GFP_KERNEL);
+	if (!ipc) {
+		ipc_err("no memory for configuration\n");
+		return -ENOMEM;
+	}
+	strcpy(ipc->name, IPC_NAME);
+
+	ipc->clk = clk_get(&pdev->dev, IPC_CLK_NAME);
+	if (IS_ERR(ipc->clk)) {
+		ipc_err("failed to get ipc clock source\n");
+		ret = -EINVAL;
+		goto err_free;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ipc_err("failed to get io memory region\n");
+		ret = -EINVAL;
+		goto err_clk;
+	}
+
+	res = request_mem_region(res->start, res->end - res->start + 1,
+			pdev->name);
+	if (!res) {
+		ipc_err("failed to request io memory region\n");
+		ret = -EINVAL;
+		goto err_clk;
+	}
+
+	/* ioremap for register block */
+	ipc->regs = ioremap(res->start, res->end - res->start + 1);
+	if (!ipc->regs) {
+		ipc_err("failed to remap io region\n");
+		ret = -EINVAL;
+		goto err_region;
+	}
+
+	printk(KERN_INFO "IPC probe success\n");
+
+	return 0;
+
+err_region:
+	release_mem_region(res->start, res->end - res->start + 1);
+err_clk:
+	clk_put(ipc->clk);
+err_free:
+	kfree(ipc);
+	ipc = NULL;
+	return ret;
+}
+
+static int ipc_remove(struct platform_device *pdev)
+{
+	/* Stop the hardware and free the driver state.
+	 * NOTE(review): the register mapping and mem region acquired in
+	 * probe are not released here — confirm whether that is intended. */
+	ipc_stop();
+	kfree(ipc);
+
+	return 0;
+}
+
+/* Power-management suspend hook: nothing to save. */
+int ipc_suspend(struct platform_device *dev, pm_message_t state)
+{
+	return 0;
+}
+
+/* Power-management resume hook: nothing to restore. */
+int ipc_resume(struct platform_device *dev)
+{
+	return 0;
+}
+
+/* Platform driver binding for the "s3c-ipc" device. */
+static struct platform_driver ipc_driver = {
+	.probe		= ipc_probe,
+	.remove		= ipc_remove,
+	.suspend	= ipc_suspend,
+	.resume		= ipc_resume,
+	.driver		= {
+		.name	= "s3c-ipc",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int ipc_register(void)
+{
+	/*
+	 * Module init hook.
+	 * Fix: the return value of platform_driver_register() was ignored,
+	 * so a failed registration still reported module-load success;
+	 * propagate it to the module loader.
+	 */
+	return platform_driver_register(&ipc_driver);
+}
+
+/* Module exit hook: drop the platform driver registration. */
+static void ipc_unregister(void)
+{
+	platform_driver_unregister(&ipc_driver);
+}
+
+module_init(ipc_register);
+module_exit(ipc_unregister);
+
+MODULE_AUTHOR("Jonghun, Han <jonghun.han@samsung.com>");
+MODULE_AUTHOR("Youngmok, Song <ym.song@samsung.com>");
+MODULE_DESCRIPTION("IPC support for FIMC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/fimc/ipc_table.h b/drivers/media/video/samsung/fimc/ipc_table.h
new file mode 100644
index 0000000..d0e8969
--- /dev/null
+++ b/drivers/media/video/samsung/fimc/ipc_table.h
@@ -0,0 +1,314 @@
+/* linux/drivers/media/video/samsung/fimc/ipc_table.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Header file for Samsung IPC driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __IPC_TABLE_H
+#define __IPC_TABLE_H __FILE__
+
/* Horizontal Y 8tap */
/*
 * Horizontal luma polyphase filter coefficients: 5 scaling modes
 * (NORMAL, 8/9, 1/2, 1/3, 1/4) x 16 phases x 8 taps per row.
 * NOTE(review): a non-static const array defined in a header will
 * produce duplicate symbols if this header is included by more than
 * one translation unit - confirm it is included exactly once.
 */
const s8 ipc_8tap_coef_y_h[] = {
	/* IPC_PP_H_NORMAL */
	0, 0, 0, 0, 127, 0, 0, 0,
	0, 1, -2, 8, 126, -6, 2, -1,
	0, 1, -5, 16, 125, -12, 4, -1,
	0, 2, -8, 25, 121, -16, 5, -1,
	-1, 3, -10, 35, 114, -18, 6, -1,
	-1, 4, -13, 46, 107, -20, 6, -1,
	-1, 5, -16, 57, 99, -21, 6, -1,
	-1, 5, -18, 68, 89, -20, 6, -1,
	-1, 6, -20, 79, 79, -20, 6, -1,
	-1, 6, -20, 89, 68, -18, 5, -1,
	-1, 6, -21, 99, 57, -16, 5, -1,
	-1, 6, -20, 107, 46, -13, 4, -1,
	-1, 6, -18, 114, 35, -10, 3, -1,
	-1, 5, -16, 121, 25, -8, 2, 0,
	-1, 4, -12, 125, 16, -5, 1, 0,
	-1, 2, -6, 126, 8, -2, 1, 0,

	/* IPC_PP_H_8_9 */
	0, 3, -7, 12, 112, 12, -7, 3,
	-1, 3, -9, 19, 113, 6, -5, 2,
	-1, 3, -11, 27, 111, 0, -3, 2,
	-1, 4, -13, 35, 108, -5, -1, 1,
	-1, 4, -14, 43, 104, -9, 0, 1,
	-1, 5, -16, 52, 99, -12, 1, 0,
	-1, 5, -17, 61, 92, -14, 2, 0,
	0, 4, -17, 69, 85, -16, 3, 0,
	0, 4, -17, 77, 77, -17, 4, 0,
	0, 3, -16, 85, 69, -17, 4, 0,
	0, 2, -14, 92, 61, -17, 5, -1,
	0, 1, -12, 99, 52, -16, 5, -1,
	1, 0, -9, 104, 43, -14, 4, -1,
	1, -1, -5, 108, 35, -13, 4, -1,
	2, -3, 0, 111, 27, -11, 3, -1,
	2, -5, 6, 113, 19, -9, 3, -1,

	/* IPC_PP_H_1_2 */
	0, -3, 0, 35, 64, 35, 0, -3,
	0, -3, 1, 38, 64, 32, -1, -3,
	0, -3, 2, 41, 63, 29, -2, -2,
	0, -4, 4, 43, 63, 27, -3, -2,
	0, -4, 5, 46, 62, 24, -3, -2,
	0, -4, 7, 49, 60, 21, -3, -2,
	-1, -4, 9, 51, 59, 19, -4, -1,
	-1, -4, 12, 53, 57, 16, -4, -1,
	-1, -4, 14, 55, 55, 14, -4, -1,
	-1, -4, 16, 57, 53, 12, -4, -1,
	-1, -4, 19, 59, 51, 9, -4, -1,
	-2, -3, 21, 60, 49, 7, -4, 0,
	-2, -3, 24, 62, 46, 5, -4, 0,
	-2, -3, 27, 63, 43, 4, -4, 0,
	-2, -2, 29, 63, 41, 2, -3, 0,
	-3, -1, 32, 64, 38, 1, -3, 0,

	/* IPC_PP_H_1_3 */
	0, 0, 10, 32, 44, 32, 10, 0,
	-1, 0, 11, 33, 45, 31, 9, 0,
	-1, 0, 12, 35, 45, 29, 8, 0,
	-1, 1, 13, 36, 44, 28, 7, 0,
	-1, 1, 15, 37, 44, 26, 6, 0,
	-1, 2, 16, 38, 43, 25, 5, 0,
	-1, 2, 18, 39, 43, 23, 5, -1,
	-1, 3, 19, 40, 42, 22, 4, -1,
	-1, 3, 21, 41, 41, 21, 3, -1,
	-1, 4, 22, 42, 40, 19, 3, -1,
	-1, 5, 23, 43, 39, 18, 2, -1,
	0, 5, 25, 43, 38, 16, 2, -1,
	0, 6, 26, 44, 37, 15, 1, -1,
	0, 7, 28, 44, 36, 13, 1, -1,
	0, 8, 29, 45, 35, 12, 0, -1,
	0, 9, 31, 45, 33, 11, 0, -1,

	/* IPC_PP_H_1_4 */
	0, 2, 13, 30, 38, 30, 13, 2,
	0, 3, 14, 30, 38, 29, 12, 2,
	0, 3, 15, 31, 38, 28, 11, 2,
	0, 4, 16, 32, 38, 27, 10, 1,
	0, 4, 17, 33, 37, 26, 10, 1,
	0, 5, 18, 34, 37, 24, 9, 1,
	0, 5, 19, 34, 37, 24, 8, 1,
	1, 6, 20, 35, 36, 22, 7, 1,
	1, 6, 21, 36, 36, 21, 6, 1,
	1, 7, 22, 36, 35, 20, 6, 1,
	1, 8, 24, 37, 34, 19, 5, 0,
	1, 9, 24, 37, 34, 18, 5, 0,
	1, 10, 26, 37, 33, 17, 4, 0,
	1, 10, 27, 38, 32, 16, 4, 0,
	2, 11, 28, 38, 31, 15, 3, 0,
	2, 12, 29, 38, 30, 14, 3, 0
};
+
/* Horizontal C 4tap */
/*
 * Horizontal chroma polyphase filter coefficients: 5 scaling modes
 * (NORMAL, 8/9, 1/2, 1/3, 1/4) x 16 phases x 4 taps per row.
 */
const s8 ipc_4tap_coef_c_h[] = {
	/* IPC_PP_H_NORMAL */
	0, 0, 127, 0,
	0, 5, 126, -3,
	-1, 11, 124, -6,
	-1, 19, 118, -8,
	-2, 27, 111, -8,
	-3, 37, 102, -8,
	-4, 48, 92, -8,
	-5, 59, 81, -7,
	-6, 70, 70, -6,
	-7, 81, 59, -5,
	-8, 92, 48, -4,
	-8, 102, 37, -3,
	-8, 111, 27, -2,
	-8, 118, 19, -1,
	-6, 124, 11, -1,
	-3, 126, 5, 0,

	/* IPC_PP_H_8_9 */
	0, 8, 112, 8,
	-1, 13, 113, 3,
	-2, 19, 111, 0,
	-2, 26, 107, -3,
	-3, 34, 101, -4,
	-3, 42, 94, -5,
	-4, 51, 86, -5,
	-5, 60, 78, -5,
	-5, 69, 69, -5,
	-5, 78, 60, -5,
	-5, 86, 51, -4,
	-5, 94, 42, -3,
	-4, 101, 34, -3,
	-3, 107, 26, -2,
	0, 111, 19, -2,
	3, 113, 13, -1,

	/* IPC_PP_H_1_2 */
	0, 26, 76, 26,
	0, 30, 76, 22,
	0, 34, 75, 19,
	1, 38, 73, 16,
	1, 43, 71, 13,
	2, 47, 69, 10,
	3, 51, 66, 8,
	4, 55, 63, 6,
	5, 59, 59, 5,
	6, 63, 55, 4,
	8, 66, 51, 3,
	10, 69, 47, 2,
	13, 71, 43, 1,
	16, 73, 38, 1,
	19, 75, 34, 0,
	22, 76, 30, 0,

	/* IPC_PP_H_1_3 */
	0, 30, 68, 30,
	2, 33, 66, 27,
	3, 36, 66, 23,
	3, 39, 65, 21,
	4, 43, 63, 18,
	5, 46, 62, 15,
	6, 49, 60, 13,
	8, 52, 57, 11,
	9, 55, 55, 9,
	11, 57, 52, 8,
	13, 60, 49, 6,
	15, 62, 46, 5,
	18, 63, 43, 4,
	21, 65, 39, 3,
	23, 66, 36, 3,
	27, 66, 33, 2,

	/* IPC_PP_H_1_4 */
	0, 31, 66, 31,
	3, 34, 63, 28,
	4, 37, 62, 25,
	4, 40, 62, 22,
	5, 43, 61, 19,
	6, 46, 59, 17,
	7, 48, 58, 15,
	9, 51, 55, 13,
	11, 53, 53, 11,
	13, 55, 51, 9,
	15, 58, 48, 7,
	17, 59, 46, 6,
	19, 61, 43, 5,
	22, 62, 40, 4,
	25, 62, 37, 4,
	28, 63, 34, 3,
};
+
+
/* Vertical Y 4tap */
/*
 * Vertical luma polyphase filter coefficients: 6 scaling modes
 * (NORMAL, 5/6, 3/4, 1/2, 1/3, 1/4) x 16 phases x 4 taps per row.
 * (The original section comment said "8tap", but each row has 4
 * coefficients, matching the ipc_4tap_ prefix.)
 */
const s8 ipc_4tap_coef_y_v[] = {
	/* IPC_PP_V_NORMAL */
	0, 0, 127, 0,
	0, 5, 126, -3,
	-1, 11, 124, -6,
	-1, 19, 118, -8,
	-2, 27, 111, -8,
	-3, 37, 102, -8,
	-4, 48, 92, -8,
	-5, 59, 81, -7,
	-6, 70, 70, -6,
	-7, 81, 59, -5,
	-8, 92, 48, -4,
	-8, 102, 37, -3,
	-8, 111, 27, -2,
	-8, 118, 19, -1,
	-6, 124, 11, -1,
	-3, 126, 5, 0,

	/* IPC_PP_V_5_6 */
	0, 11, 106, 11,
	-2, 16, 107, 7,
	-2, 22, 105, 3,
	-2, 29, 101, 0,
	-3, 36, 96, -1,
	-3, 44, 90, -3,
	-4, 52, 84, -4,
	-4, 60, 76, -4,
	-4, 68, 68, -4,
	-4, 76, 60, -4,
	-4, 84, 52, -4,
	-3, 90, 44, -3,
	-1, 96, 36, -3,
	0, 101, 29, -2,
	3, 105, 22, -2,
	7, 107, 16, -2,

	/* IPC_PP_V_3_4 */
	0, 15, 98, 15,
	-2, 21, 97, 12,
	-2, 26, 96, 8,
	-2, 32, 93, 5,
	-2, 39, 89, 2,
	-2, 46, 84, 0,
	-3, 53, 79, -1,
	-2, 59, 73, -2,
	-2, 66, 66, -2,
	-2, 73, 59, -2,
	-1, 79, 53, -3,
	0, 84, 46, -2,
	2, 89, 39, -2,
	5, 93, 32, -2,
	8, 96, 26, -2,
	12, 97, 21, -2,

	/* IPC_PP_V_1_2 */
	0, 26, 76, 26,
	0, 30, 76, 22,
	0, 34, 75, 19,
	1, 38, 73, 16,
	1, 43, 71, 13,
	2, 47, 69, 10,
	3, 51, 66, 8,
	4, 55, 63, 6,
	5, 59, 59, 5,
	6, 63, 55, 4,
	8, 66, 51, 3,
	10, 69, 47, 2,
	13, 71, 43, 1,
	16, 73, 38, 1,
	19, 75, 34, 0,
	22, 76, 30, 0,

	/* IPC_PP_V_1_3 */
	0, 30, 68, 30,
	2, 33, 66, 27,
	3, 36, 66, 23,
	3, 39, 65, 21,
	4, 43, 63, 18,
	5, 46, 62, 15,
	6, 49, 60, 13,
	8, 52, 57, 11,
	9, 55, 55, 9,
	11, 57, 52, 8,
	13, 60, 49, 6,
	15, 62, 46, 5,
	18, 63, 43, 4,
	21, 65, 39, 3,
	23, 66, 36, 3,
	27, 66, 33, 2,

	/* IPC_PP_V_1_4 */
	0, 31, 66, 31,
	3, 34, 63, 28,
	4, 37, 62, 25,
	4, 40, 62, 22,
	5, 43, 61, 19,
	6, 46, 59, 17,
	7, 48, 58, 15,
	9, 51, 55, 13,
	11, 53, 53, 11,
	13, 55, 51, 9,
	15, 58, 48, 7,
	17, 59, 46, 6,
	19, 61, 43, 5,
	22, 62, 40, 4,
	25, 62, 37, 4,
	28, 63, 34, 3
};
+
+#endif /* __IPC_TABLE_H */
diff --git a/drivers/media/video/samsung/fimg2d3x-exynos4/Kconfig b/drivers/media/video/samsung/fimg2d3x-exynos4/Kconfig
new file mode 100644
index 0000000..dccbc16
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x-exynos4/Kconfig
@@ -0,0 +1,22 @@
+# drivers/media/video/samsung/fimg2d3x/Kconfig
+#
+# Copyright (c) 2010 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+#
+# Licensed under GPLv2
+
+config VIDEO_FIMG2D3X
+ bool "Samsung Graphics 2D Driver"
+ select VIDEO_FIMG2D
+ depends on VIDEO_SAMSUNG && CPU_EXYNOS4210
+ default n
+ ---help---
+ This is a graphics 2D (FIMG2D 3.x) driver for Samsung ARM based SoC.
+
+config VIDEO_FIMG2D3X_DEBUG
+ bool "Enables FIMG2D debug messages"
+ depends on VIDEO_FIMG2D3X
+ default n
+ ---help---
+ This enables FIMG2D driver debug messages.
+
diff --git a/drivers/media/video/samsung/fimg2d3x-exynos4/Makefile b/drivers/media/video/samsung/fimg2d3x-exynos4/Makefile
new file mode 100644
index 0000000..a24f530
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x-exynos4/Makefile
@@ -0,0 +1,17 @@
+# drivers/media/video/samsung/fimg2d3x/Makefile
+#
+# Copyright (c) 2010 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+#
+# Licensed under GPLv2
+
+obj-y :=
+obj-m :=
+obj-n :=
+obj- :=
+
+obj-$(CONFIG_VIDEO_FIMG2D3X) += fimg2d_dev.o fimg2d_cache.o fimg2d3x_regs.o fimg2d_core.o
+
+ifeq ($(CONFIG_VIDEO_FIMG2D3X_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d.h b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d.h
new file mode 100644
index 0000000..2c2c07b
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d.h
@@ -0,0 +1,397 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d_3x.h
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SEC_FIMG2D_H_
+#define __SEC_FIMG2D_H_
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+
/* Size of the SFR block mapped by the driver */
#define G2D_SFR_SIZE 0x1000

#define TRUE (1)
#define FALSE (0)

#define G2D_MINOR 240

#define G2D_IOCTL_MAGIC 'G'

#define G2D_BLIT _IO(G2D_IOCTL_MAGIC, 0)
#define G2D_GET_VERSION _IO(G2D_IOCTL_MAGIC, 1)
#define G2D_GET_MEMORY _IOR(G2D_IOCTL_MAGIC, 2, unsigned int)
#define G2D_GET_MEMORY_SIZE _IOR(G2D_IOCTL_MAGIC, 3, unsigned int)
#define G2D_DMA_CACHE_CLEAN _IOWR(G2D_IOCTL_MAGIC, 4, struct g2d_dma_info)
#define G2D_DMA_CACHE_FLUSH _IOWR(G2D_IOCTL_MAGIC, 5, struct g2d_dma_info)
#define G2D_SYNC _IO(G2D_IOCTL_MAGIC, 6)
#define G2D_RESET _IO(G2D_IOCTL_MAGIC, 7)

/* Blit completion wait timeout (jiffies-based users scale it) */
#define G2D_TIMEOUT (1000)

#define G2D_MAX_WIDTH (2048)
#define G2D_MAX_HEIGHT (2048)

#define G2D_ALPHA_VALUE_MAX (255)

/* g2d_flag.render_mode bit fields */
#define G2D_POLLING (1<<0)
#define G2D_INTERRUPT (0<<0)
#define G2D_CACHE_OP (1<<1)
#define G2D_NONE_INVALIDATE (0<<1)
#define G2D_HYBRID_MODE (1<<2)

/* Page-table cacheability attributes (see g2d_check_pagetable()) */
#define G2D_PT_NOTVALID (0)
#define G2D_PT_CACHED (1)
#define G2D_PT_UNCACHED (2)

/*
 * Geometry helpers over a g2d_rect / g2d_clip passed BY VALUE.
 * Every argument use and every full expansion is parenthesized so the
 * macros compose safely inside larger expressions: the originals
 * expanded to bare `a + b - c` chains, which mis-associate under any
 * surrounding operator of higher precedence (e.g. a cast).
 */
#define GET_FRAME_SIZE(rect) (((rect).full_w) * ((rect).full_h) * ((rect).bytes_per_pixel))
#define GET_RECT_SIZE(rect) (((rect).full_w) * ((rect).h) * ((rect).bytes_per_pixel))
#define GET_REAL_SIZE(rect) (((rect).full_w) * ((rect).h) * ((rect).bytes_per_pixel))
#define GET_STRIDE(rect) (((rect).full_w) * ((rect).bytes_per_pixel))
#define GET_SPARE_BYTES(rect) (((rect).full_w - (rect).w) * (rect).bytes_per_pixel)
#define GET_START_ADDR(rect) ((rect).addr + (((rect).y * (rect).full_w) * (rect).bytes_per_pixel))
#define GET_REAL_START_ADDR(rect) (GET_START_ADDR(rect) + ((rect).x * (rect).bytes_per_pixel))
#define GET_REAL_END_ADDR(rect) (GET_START_ADDR(rect) + GET_RECT_SIZE(rect) - (((rect).full_w - ((rect).x + (rect).w)) * (rect).bytes_per_pixel))

#define GET_RECT_SIZE_C(rect, clip) (((rect).full_w) * ((clip).b - (clip).t) * ((rect).bytes_per_pixel))
#define GET_START_ADDR_C(rect, clip) ((rect).addr + (((clip).t * (rect).full_w) * (rect).bytes_per_pixel))
#define GET_REAL_START_ADDR_C(rect, clip) (GET_START_ADDR_C(rect, clip) + ((clip).l * (rect).bytes_per_pixel))
#define GET_REAL_END_ADDR_C(rect, clip) (GET_START_ADDR_C(rect, clip) + GET_RECT_SIZE_C(rect, clip) - (((rect).full_w - (clip).r) * (rect).bytes_per_pixel))

/* Microseconds elapsed between two struct timeval samples */
#define GET_USEC(before, after) (((after).tv_sec - (before).tv_sec) * 1000000 + ((after).tv_usec - (before).tv_usec))
+
/* Rotation / mirroring selector carried in g2d_flag.rotate_val. */
typedef enum {
	G2D_ROT_0 = 0,
	G2D_ROT_90,
	G2D_ROT_180,
	G2D_ROT_270,
	G2D_ROT_X_FLIP,
	G2D_ROT_Y_FLIP
} G2D_ROT_DEG;

/* Boundary values for g2d_flag.alpha_val; 0..255 blend, 256 = opaque. */
typedef enum {
	G2D_ALPHA_BLENDING_MIN = 0, // wholly transparent
	G2D_ALPHA_BLENDING_MAX = 255, // 255
	G2D_ALPHA_BLENDING_OPAQUE = 256, // opaque
} G2D_ALPHA_BLENDING_MODE;

/* Color-key application side. */
typedef enum {
	G2D_COLORKEY_NONE = 0,
	G2D_COLORKEY_SRC_ON,
	G2D_COLORKEY_DST_ON,
	G2D_COLORKEY_SRC_DST_ON,
}G2D_COLORKEY_MODE;

/* Blue-screen (transparent color) handling; see g2d_set_color_key(). */
typedef enum {
	G2D_BLUE_SCREEN_NONE = 0,
	G2D_BLUE_SCREEN_TRANSPARENT,
	G2D_BLUE_SCREEN_WITH_COLOR,
}G2D_BLUE_SCREEN_MODE;

/* Raster operations combining src, dst and the third operand. */
typedef enum {
	G2D_ROP_SRC = 0,
	G2D_ROP_DST,
	G2D_ROP_SRC_AND_DST,
	G2D_ROP_SRC_OR_DST,
	G2D_ROP_3RD_OPRND,
	G2D_ROP_SRC_AND_3RD_OPRND,
	G2D_ROP_SRC_OR_3RD_OPRND,
	G2D_ROP_SRC_XOR_3RD_OPRND,
	G2D_ROP_DST_OR_3RD,
}G2D_ROP_TYPE;

/* Third-operand source for the ROP unit; see g2d_set_pattern(). */
typedef enum {
	G2D_THIRD_OP_NONE = 0,
	G2D_THIRD_OP_PATTERN,
	G2D_THIRD_OP_FG,
	G2D_THIRD_OP_BG
}G2D_THIRD_OP_MODE;

typedef enum {
	G2D_BLACK = 0,
	G2D_RED,
	G2D_GREEN,
	G2D_BLUE,
	G2D_WHITE,
	G2D_YELLOW,
	G2D_CYAN,
	G2D_MAGENTA
}G2D_COLOR;

/* Color formats: bits [7:4] select channel order, bits [3:0] layout. */
typedef enum {
	G2D_RGB_565 = ((0<<4)|2),

	G2D_ABGR_8888 = ((2<<4)|1),
	G2D_BGRA_8888 = ((3<<4)|1),
	G2D_ARGB_8888 = ((0<<4)|1),
	G2D_RGBA_8888 = ((1<<4)|1),

	G2D_XBGR_8888 = ((2<<4)|0),
	G2D_BGRX_8888 = ((3<<4)|0),
	G2D_XRGB_8888 = ((0<<4)|0),
	G2D_RGBX_8888 = ((1<<4)|0),

	G2D_ABGR_1555 = ((2<<4)|4),
	G2D_BGRA_5551 = ((3<<4)|4),
	G2D_ARGB_1555 = ((0<<4)|4),
	G2D_RGBA_5551 = ((1<<4)|4),

	G2D_XBGR_1555 = ((2<<4)|3),
	G2D_BGRX_5551 = ((3<<4)|3),
	G2D_XRGB_1555 = ((0<<4)|3),
	G2D_RGBX_5551 = ((1<<4)|3),

	G2D_ABGR_4444 = ((2<<4)|6),
	G2D_BGRA_4444 = ((3<<4)|6),
	G2D_ARGB_4444 = ((0<<4)|6),
	G2D_RGBA_4444 = ((1<<4)|6),

	G2D_XBGR_4444 = ((2<<4)|5),
	G2D_BGRX_4444 = ((3<<4)|5),
	G2D_XRGB_4444 = ((0<<4)|5),
	G2D_RGBX_4444 = ((1<<4)|5),

	G2D_PACKED_BGR_888 = ((2<<4)|7),
	G2D_PACKED_RGB_888 = ((0<<4)|7),

	G2D_MAX_COLOR_SPACE
}G2D_COLOR_SPACE;

/* Porter-Duff compositing modes.  (The "PORTTERDUFF" spelling is part
 * of the existing API and is kept for compatibility.) */
typedef enum {
	G2D_Clear_Mode, //!< [0, 0]
	G2D_Src_Mode, //!< [Sa, Sc]
	G2D_Dst_Mode, //!< [Da, Dc]
	G2D_SrcOver_Mode, //!< [Sa + Da - Sa*Da, Rc = Sc + (1 - Sa)*Dc]
	G2D_DstOver_Mode, //!< [Sa + Da - Sa*Da, Rc = Dc + (1 - Da)*Sc]
	G2D_SrcIn_Mode, //!< [Sa * Da, Sc * Da]
	G2D_DstIn_Mode, //!< [Sa * Da, Sa * Dc]
	G2D_SrcOut_Mode, //!< [Sa * (1 - Da), Sc * (1 - Da)]
	G2D_DstOut_Mode, //!< [Da * (1 - Sa), Dc * (1 - Sa)]
	G2D_SrcATop_Mode, //!< [Da, Sc * Da + (1 - Sa) * Dc]
	G2D_DstATop_Mode, //!< [Sa, Sa * Dc + Sc * (1 - Da)]
	G2D_Xor_Mode, //!< [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]

	// these modes are defined in the SVG Compositing standard
	// http://www.w3.org/TR/2009/WD-SVGCompositing-20090430/
	G2D_Plus_Mode,
	G2D_Multiply_Mode,
	G2D_Screen_Mode,
	G2D_Overlay_Mode,
	G2D_Darken_Mode,
	G2D_Lighten_Mode,
	G2D_ColorDodge_Mode,
	G2D_ColorBurn_Mode,
	G2D_HardLight_Mode,
	G2D_SoftLight_Mode,
	G2D_Difference_Mode,
	G2D_Exclusion_Mode,

	kLastMode = G2D_Exclusion_Mode
}G2D_PORTTERDUFF_MODE;

/* Where buffer addresses originate (kernel reserved mem vs user vaddr). */
typedef enum {
	G2D_MEMORY_KERNEL,
	G2D_MEMORY_USER
}G2D_MEMORY_TYPE;
+
/* One image operand: a sub-rectangle within a larger frame plus its
 * pixel layout.  Sizes are in pixels; addr is the frame base. */
typedef struct {
	int x;	/* left edge of the rect within the frame */
	int y;	/* top edge of the rect within the frame */
	unsigned int w;	/* rect width */
	unsigned int h;	/* rect height */
	unsigned int full_w;	/* full frame width (line pitch in pixels) */
	unsigned int full_h;	/* full frame height */
	int color_format;	/* a G2D_COLOR_SPACE value */
	unsigned int bytes_per_pixel;
	unsigned char * addr;	/* frame base address */
} g2d_rect;

/* Clip window: top / bottom / left / right, destination coordinates. */
typedef struct {
	unsigned int t;
	unsigned int b;
	unsigned int l;
	unsigned int r;
} g2d_clip;

/* Per-blit option flags; field semantics follow the enums above. */
typedef struct {
	unsigned int rotate_val;	/* G2D_ROT_DEG */
	unsigned int alpha_val;	/* 0..256, see G2D_ALPHA_BLENDING_MODE */

	unsigned int blue_screen_mode; //true : enable, false : disable
	unsigned int color_key_val; //screen color value
	unsigned int color_switch_val; //one color

	unsigned int src_color; // when set one color on SRC

	unsigned int third_op_mode;	/* G2D_THIRD_OP_MODE */
	unsigned int rop_mode;	/* G2D_ROP_TYPE */
	unsigned int mask_mode;	/* TRUE enables the mask operation */
	unsigned int render_mode;	/* G2D_POLLING / G2D_CACHE_OP ... bits */
	unsigned int potterduff_mode;	/* G2D_PORTTERDUFF_MODE */
	unsigned int memory_type;	/* G2D_MEMORY_TYPE */
} g2d_flag;

/* Complete parameter set for one blit request. */
typedef struct {
	g2d_rect src_rect;
	g2d_rect dst_rect;
	g2d_clip clip;
	g2d_flag flag;
} g2d_params;

/* for reserved memory */
struct g2d_reserved_mem {
	/* buffer base */
	unsigned int base;
	/* buffer size */
	unsigned int size;
};


/* Buffer descriptor for the DMA cache clean/flush ioctls. */
struct g2d_dma_info {
	unsigned long addr;
	unsigned int size;
};

/* Board-specific clock configuration supplied as platform data. */
struct g2d_platdata {
	int hw_ver;
	const char *parent_clkname;
	const char *clkname;
	const char *gate_clkname;
	unsigned long clkrate;
};

/* Simple profiling helper: a counter plus start/current timestamps. */
struct g2d_timer {
	int cnt;
	struct timeval start_marker;
	struct timeval cur_marker;
};

/* Global driver state (one instance per G2D device). */
struct g2d_global {
	int irq_num;
	struct resource * mem;	/* SFR memory resource */
	void __iomem * base;	/* ioremapped SFR base */
	struct clk * clock;
	atomic_t clk_enable_flag;
	wait_queue_head_t waitq;
	atomic_t in_use;
	atomic_t num_of_object;
	struct mutex lock;	/* NOTE(review): presumably serializes blits - confirm */
	struct device * dev;
	atomic_t ready_to_run;
	int src_attribute;	/* G2D_PT_* cacheability of the source buffer */
	int dst_attribute;	/* G2D_PT_* cacheability of the destination buffer */

	struct g2d_reserved_mem reserved_mem; /* for reserved memory */
	atomic_t is_mmu_faulted;
	unsigned int faulted_addr;	/* address captured on sysmmu fault */
#ifdef CONFIG_HAS_EARLYSUSPEND
	struct early_suspend early_suspend;
#endif
	int irq_handled;
};
+
+
/****** debug message API *****/
/* Bit flags selecting which severities are compiled in. */
enum fimg2d_log {
	FIMG2D_LOG_DEBUG = 0x1000,
	FIMG2D_LOG_INFO = 0x0100,
	FIMG2D_LOG_WARN = 0x0010,
	FIMG2D_LOG_ERR = 0x0001,
};

/* debug macro */
/* Compile-time log mask: only warnings and errors are emitted. */
#define FIMG2D_LOG_DEFAULT (FIMG2D_LOG_WARN | FIMG2D_LOG_ERR)

#define FIMG2D_DEBUG(fmt, ...) \
	do { \
		if (FIMG2D_LOG_DEFAULT & FIMG2D_LOG_DEBUG) \
			printk(KERN_DEBUG "%s: " \
				fmt, __func__, ##__VA_ARGS__); \
	} while (0)

#define FIMG2D_INFO(fmt, ...) \
	do { \
		if (FIMG2D_LOG_DEFAULT & FIMG2D_LOG_INFO) \
			printk(KERN_INFO "%s: " \
				fmt, __func__, ##__VA_ARGS__); \
	} while (0)

#define FIMG2D_WARN(fmt, ...) \
	do { \
		if (FIMG2D_LOG_DEFAULT & FIMG2D_LOG_WARN) \
			printk(KERN_WARNING "%s: " \
				fmt, __func__, ##__VA_ARGS__); \
	} while (0)


#define FIMG2D_ERROR(fmt, ...) \
	do { \
		if (FIMG2D_LOG_DEFAULT & FIMG2D_LOG_ERR) \
			printk(KERN_ERR "%s: " \
				fmt, __func__, ##__VA_ARGS__); \
	} while (0)


/* Short-form aliases used throughout the driver. */
#define fimg2d_dbg(fmt, ...) FIMG2D_DEBUG(fmt, ##__VA_ARGS__)
#define fimg2d_info(fmt, ...) FIMG2D_INFO(fmt, ##__VA_ARGS__)
#define fimg2d_warn(fmt, ...) FIMG2D_WARN(fmt, ##__VA_ARGS__)
#define fimg2d_err(fmt, ...) FIMG2D_ERROR(fmt, ##__VA_ARGS__)
+
+
/**** function declaration ***************************/
/* fimg2d3x_regs.c: register programming and parameter validation */
int g2d_check_params(g2d_params *params);
void g2d_start_bitblt(struct g2d_global *g2d_dev, g2d_params *params);
void g2d_check_fifo_state_wait(struct g2d_global *g2d_dev);
u32 g2d_set_src_img(struct g2d_global *g2d_dev, g2d_rect * rect, g2d_flag * flag);
u32 g2d_set_dst_img(struct g2d_global *g2d_dev, g2d_rect * rect);
u32 g2d_set_pattern(struct g2d_global *g2d_dev, g2d_rect * rect, g2d_flag * flag);
u32 g2d_set_clip_win(struct g2d_global *g2d_dev, g2d_clip * rect);
u32 g2d_set_rotation(struct g2d_global *g2d_dev, g2d_flag * flag);
u32 g2d_set_color_key(struct g2d_global *g2d_dev, g2d_flag * flag);
u32 g2d_set_alpha(struct g2d_global *g2d_dev, g2d_flag * flag);
void g2d_set_bitblt_cmd(struct g2d_global *g2d_dev, g2d_rect * src_rect, g2d_rect * dst_rect, g2d_clip * clip, u32 blt_cmd);
void g2d_reset(struct g2d_global *g2d_dev);
void g2d_disable_int(struct g2d_global *g2d_dev);
void g2d_set_int_finish(struct g2d_global *g2d_dev);

/* fimg2d_cache */
void g2d_clip_for_src(g2d_rect *src_rect, g2d_rect *dst_rect, g2d_clip *clip, g2d_clip *src_clip);
void g2d_mem_inner_cache(g2d_params *params);
void g2d_mem_outer_cache(struct g2d_global *g2d_dev, g2d_params *params, int *need_dst_clean);
void g2d_mem_cache_oneshot(void *src_addr, void *dst_addr, unsigned long src_size, unsigned long dst_size);
u32 g2d_mem_cache_op(unsigned int cmd, void * addr, unsigned int size);
void g2d_mem_outer_cache_flush(void *start_addr, unsigned long size);
void g2d_mem_outer_cache_clean(const void *start_addr, unsigned long size);
void g2d_mem_outer_cache_inv(g2d_params *params);
u32 g2d_check_pagetable(void * vaddr, unsigned int size, unsigned long pgd);
void g2d_pagetable_clean(const void *start_addr, unsigned long size, unsigned long pgd);
int g2d_check_need_dst_cache_clean(g2d_params * params);

#ifdef CONFIG_HAS_EARLYSUSPEND
void g2d_early_suspend(struct early_suspend *h);
void g2d_late_resume(struct early_suspend *h);
#endif

/* fimg2d_core */
int g2d_clk_enable(struct g2d_global *g2d_dev);
int g2d_clk_disable(struct g2d_global *g2d_dev);
void g2d_sysmmu_on(struct g2d_global *g2d_dev);
void g2d_sysmmu_off(struct g2d_global *g2d_dev);
void g2d_sysmmu_set_pgd(u32 pgd);
void g2d_fail_debug(g2d_params *params);
int g2d_init_regs(struct g2d_global *g2d_dev, g2d_params *params);
int g2d_do_blit(struct g2d_global *g2d_dev, g2d_params *params);
int g2d_wait_for_finish(struct g2d_global *g2d_dev, g2d_params *params);
int g2d_init_mem(struct device *dev, unsigned int *base, unsigned int *size);
+#endif /*__SEC_FIMG2D_H_*/
diff --git a/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d3x_regs.c b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d3x_regs.c
new file mode 100644
index 0000000..33ce53e
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d3x_regs.c
@@ -0,0 +1,376 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.c
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file implements fimg2d register control functions.
+ */
+
+#include <mach/map.h>
+#include <asm/io.h>
+#include <mach/regs-fimg2d3x.h>
+
+#include "fimg2d3x_regs.h"
+#include "fimg2d.h"
+
+void get_rot_config(unsigned int rotate_value, u32 *rot, u32 *src_dir, u32 *dst_dir)
+{
+ switch(rotate_value) {
+ case G2D_ROT_90:
+ *rot = 1; /* rotation = 1, src_y_dir == dst_y_dir, src_x_dir == dst_x_dir */
+ *src_dir = 0;
+ *dst_dir = 0;
+ break;
+
+ case G2D_ROT_270:
+ *rot = 1; /* rotation = 1, src_y_dir != dst_y_dir, src_x_dir != dst_x_dir */
+ *src_dir = 0;
+ *dst_dir = 0x3;
+ break;
+
+ case G2D_ROT_180:
+ *rot = 0; /* rotation = 0, src_y_dir != dst_y_dir, src_x_dir != dst_x_dir */
+ *src_dir = 0;
+ *dst_dir = 0x3;
+ break;
+
+ case G2D_ROT_X_FLIP:
+ *rot = 0; /* rotation = 0, src_y_dir != dst_y_dir */
+ *src_dir = 0;
+ *dst_dir = 0x2;
+ break;
+
+ case G2D_ROT_Y_FLIP:
+ *rot = 0; /* rotation = 0, src_x_dir != dst_y_dir */
+ *src_dir = 0;
+ *dst_dir = 0x1;
+ break;
+
+ default :
+ *rot = 0; /* rotation = 0; */
+ *src_dir = 0;
+ *dst_dir = 0;
+ break;
+ }
+
+ return ;
+}
+
+int g2d_check_params(g2d_params *params)
+{
+ g2d_rect * src_rect = &params->src_rect;
+ g2d_rect * dst_rect = &params->dst_rect;
+ g2d_flag * flag = &params->flag;
+
+ /* source */
+ if (0 > src_rect->x || 0 > src_rect->y) {
+ return -1;
+ }
+
+ if (0 == src_rect->h || 0 == src_rect->w) {
+ return -1;
+ }
+
+ if (8000 < src_rect->x+src_rect->w || 8000 < src_rect->y+src_rect->h) {
+ return -1;
+ }
+
+ /* destination */
+ if (0 > dst_rect->x || 0 > dst_rect->y) {
+ return -1;
+ }
+
+ if (0 == dst_rect->h || 0 == dst_rect->w) {
+ return -1;
+ }
+
+ if (8000 < dst_rect->x+dst_rect->w || 8000 < dst_rect->y+dst_rect->h) {
+ return -1;
+ }
+
+ if (flag->alpha_val > G2D_ALPHA_BLENDING_OPAQUE) {
+ return -1;
+ }
+
+ return 0;
+}
+
/*
 * g2d_check_fifo_state_wait - spin until the engine reports idle.
 * NOTE(review): this busy-waits with no timeout and no cpu_relax();
 * a wedged engine will stall this CPU here - confirm acceptable.
 */
void g2d_check_fifo_state_wait(struct g2d_global *g2d_dev)
{
	/* 1 = The graphics engine finishes the execution of command. */
	/* 0 = in the middle of rendering process. */
	while(!(readl(g2d_dev->base + FIFO_STAT_REG) & 0x1));

	return;
}
+
+
/*
 * g2d_set_src_img - program the source image registers.
 *
 * For G2D_Clear_Mode the source is replaced by a constant foreground
 * color (hard-wired to 0 here rather than flag->src_color); otherwise
 * the base address, stride, color mode and rectangle corners are
 * written.  Returns bits to OR into the BitBLT command word
 * (currently always 0).
 */
u32 g2d_set_src_img(struct g2d_global *g2d_dev, g2d_rect * rect, g2d_flag * flag)
{
	u32 data = 0;
	u32 blt_cmd = 0;

	/* set source to one color */
	//if(rect == NULL)
	if (flag->potterduff_mode == G2D_Clear_Mode) {
		/* select source */
		writel(G2D_SRC_SELECT_R_USE_FG_COLOR, g2d_dev->base + SRC_SELECT_REG);

		/* foreground color */
		// writel(flag->src_color, g2d_dev->base + FG_COLOR_REG);
		writel(0, g2d_dev->base + FG_COLOR_REG);
	} else {
		/* select source */
		writel(G2D_SRC_SELECT_R_NORMAL, g2d_dev->base + SRC_SELECT_REG);

		/* set base address of source image */
		writel((u32)rect->addr, g2d_dev->base + SRC_BASE_ADDR_REG);

		/* set stride (bytes per line) */
		writel(rect->full_w * rect->bytes_per_pixel, g2d_dev->base + SRC_STRIDE_REG);

		/* set color mode */
		writel(rect->color_format, g2d_dev->base + SRC_COLOR_MODE_REG);

		/* set coordinate of source image: y in [31:16], x in [15:0] */
		data = (rect->y << 16) | (rect->x);
		writel(data, g2d_dev->base + SRC_LEFT_TOP_REG);

		data = ((rect->y + rect->h) << 16) | (rect->x + rect->w);
		writel(data, g2d_dev->base + SRC_RIGHT_BOTTOM_REG);

	}

	return blt_cmd;
}
+
/*
 * g2d_set_dst_img - program the destination image registers: base
 * address, stride, color mode and rectangle corners.  Returns bits to
 * OR into the BitBLT command word (currently always 0).
 */
u32 g2d_set_dst_img(struct g2d_global *g2d_dev, g2d_rect * rect)
{
	u32 data = 0;
	u32 blt_cmd = 0;

	/* select destination */
	writel(G2D_DST_SELECT_R_NORMAL, g2d_dev->base + DST_SELECT_REG);

	/* set base address of destination image */
	writel((u32)rect->addr, g2d_dev->base + DST_BASE_ADDR_REG);

	/* set stride (bytes per line) */
	writel(rect->full_w * rect->bytes_per_pixel, g2d_dev->base + DST_STRIDE_REG);

	/* set color mode */
	writel(rect->color_format, g2d_dev->base + DST_COLOR_MODE_REG);

	/* set coordinate of destination image: y in [31:16], x in [15:0] */
	data = (rect->y << 16) | (rect->x);
	writel(data, g2d_dev->base + DST_LEFT_TOP_REG);

	data = ((rect->y + rect->h) << 16) | (rect->x + rect->w);
	writel(data, g2d_dev->base + DST_RIGHT_BOTTOM_REG);

	return blt_cmd;
}
+
/*
 * g2d_set_rotation - program rotation and mirror-direction registers
 * from flag->rotate_val (see get_rot_config()).  Returns bits to OR
 * into the BitBLT command word (currently always 0).
 */
u32 g2d_set_rotation(struct g2d_global *g2d_dev, g2d_flag * flag)
{
	u32 blt_cmd = 0;
	u32 rot=0, src_dir=0, dst_dir=0;

	get_rot_config(flag->rotate_val, &rot, &src_dir, &dst_dir);

	writel(rot, g2d_dev->base + ROTATE_REG);
	writel(src_dir, g2d_dev->base + SRC_MSK_DIRECT_REG);
	writel(dst_dir, g2d_dev->base + DST_PAT_DIRECT_REG);

	return blt_cmd;
}
+
/*
 * g2d_set_clip_win - program the clip window corners.  Clipping itself
 * is enabled later by g2d_set_bitblt_cmd() when the clip differs from
 * the destination rect.  Returns bits to OR into the BitBLT command
 * word (currently always 0).
 */
u32 g2d_set_clip_win(struct g2d_global *g2d_dev, g2d_clip * clip)
{
	u32 blt_cmd = 0;

	//blt_cmd |= G2D_BLT_CMD_R_CW_ENABLE;
	writel((clip->t << 16) | (clip->l), g2d_dev->base + CW_LEFT_TOP_REG);
	writel((clip->b << 16) | (clip->r), g2d_dev->base + CW_RIGHT_BOTTOM_REG);

	return blt_cmd;
}
+
/*
 * g2d_set_color_key - program blue-screen / transparency registers and
 * return the matching transparent-mode bits for the BitBLT command
 * word.  Color keying proper is always disabled here.
 */
u32 g2d_set_color_key(struct g2d_global *g2d_dev, g2d_flag * flag)
{
	u32 blt_cmd = 0;

	/* Transparent Selection */
	switch(flag->blue_screen_mode) {
	case G2D_BLUE_SCREEN_TRANSPARENT :
		/* pixels matching color_key_val become transparent */
		writel(flag->color_key_val, g2d_dev->base + BS_COLOR_REG);

		blt_cmd |= G2D_BLT_CMD_R_TRANSPARENT_MODE_TRANS;
		break;

	case G2D_BLUE_SCREEN_WITH_COLOR :
		/* pixels matching color_key_val are replaced by color_switch_val */
		writel(flag->color_switch_val, g2d_dev->base + BG_COLOR_REG);
		writel(flag->color_key_val, g2d_dev->base + BS_COLOR_REG);

		blt_cmd |= G2D_BLT_CMD_R_TRANSPARENT_MODE_BLUESCR;
		break;

	case G2D_BLUE_SCREEN_NONE :
	default:
		blt_cmd |= G2D_BLT_CMD_R_TRANSPARENT_MODE_OPAQUE;
		break;
	}

	blt_cmd |= G2D_BLT_CMD_R_COLOR_KEY_DISABLE;

	return blt_cmd;
}
+
/*
 * g2d_set_pattern - program the third operand (pattern/FG/BG), the
 * ROP4 register and, when mask_mode is set, the mask registers.
 *
 * The ROP code is duplicated into both bytes of ROP4 (masked and
 * unmasked ROP are the same).  When no third operand is selected the
 * ROP is forced to plain SRC copy regardless of rop_mode.  Returns
 * mask-enable bits for the BitBLT command word.
 */
u32 g2d_set_pattern(struct g2d_global *g2d_dev, g2d_rect * rect, g2d_flag * flag)
{
	u32 data = 0;
	u32 blt_cmd = 0;

	/* Third Operand Selection */
	switch(flag->third_op_mode) {
	case G2D_THIRD_OP_PATTERN :
		/* set base address of pattern image */
		writel((u32)rect->addr, g2d_dev->base + PAT_BASE_ADDR_REG);

		/* set size of pattern image */
		data = ((rect->y + rect->h) << 16) | (rect->x + rect->w);
		writel(data, g2d_dev->base + PAT_SIZE_REG);

		/* set stride */
		writel(rect->full_w * rect->bytes_per_pixel, g2d_dev->base + PAT_STRIDE_REG);

		/* set color mode */
		writel(rect->color_format, g2d_dev->base + PAT_COLOR_MODE_REG);

		data = (rect->y << 16) | rect->x;
		writel(data, g2d_dev->base + PAT_OFFSET_REG);

		data = G2D_THIRD_OP_REG_PATTERN;
		break;
	case G2D_THIRD_OP_FG :
		data = G2D_THIRD_OP_REG_FG_COLOR;
		break;
	case G2D_THIRD_OP_BG :
		data = G2D_THIRD_OP_REG_BG_COLOR;
		break;
	case G2D_THIRD_OP_NONE :
	default:
		data = G2D_THIRD_OP_REG_NONE;
		break;
	}

	writel(data, g2d_dev->base + THIRD_OPERAND_REG);

	/* ROP4: same code in the masked (bits 15:8) and unmasked (7:0) bytes */
	if(flag->third_op_mode == G2D_THIRD_OP_NONE) {
		data = ((G2D_ROP_REG_SRC << 8) | G2D_ROP_REG_SRC);
	} else {
		switch(flag->rop_mode) {
		case G2D_ROP_DST:
			data = ((G2D_ROP_REG_DST << 8) | G2D_ROP_REG_DST);
			break;
		case G2D_ROP_SRC_AND_DST:
			data = ((G2D_ROP_REG_SRC_AND_DST << 8) | G2D_ROP_REG_SRC_AND_DST);
			break;
		case G2D_ROP_SRC_OR_DST:
			data = ((G2D_ROP_REG_SRC_OR_DST << 8) | G2D_ROP_REG_SRC_OR_DST);
			break;
		case G2D_ROP_3RD_OPRND:
			data = ((G2D_ROP_REG_3RD_OPRND << 8) | G2D_ROP_REG_3RD_OPRND);
			break;
		case G2D_ROP_SRC_AND_3RD_OPRND:
			data = ((G2D_ROP_REG_SRC_AND_3RD_OPRND << 8) | G2D_ROP_REG_SRC_AND_3RD_OPRND);
			break;
		case G2D_ROP_SRC_OR_3RD_OPRND:
			data = ((G2D_ROP_REG_SRC_OR_3RD_OPRND << 8) | G2D_ROP_REG_SRC_OR_3RD_OPRND);
			break;
		case G2D_ROP_SRC_XOR_3RD_OPRND:
			data = ((G2D_ROP_REG_SRC_XOR_3RD_OPRND << 8) | G2D_ROP_REG_SRC_XOR_3RD_OPRND);
			break;
		case G2D_ROP_DST_OR_3RD:
			data = ((G2D_ROP_REG_DST_OR_3RD_OPRND << 8) | G2D_ROP_REG_DST_OR_3RD_OPRND);
			break;
		case G2D_ROP_SRC:
		default:
			data = ((G2D_ROP_REG_SRC << 8) | G2D_ROP_REG_SRC);
			break;
		}
	}
	writel(data, g2d_dev->base + ROP4_REG);

	/* Mask Operation */
	if(flag->mask_mode == TRUE) {
		writel((u32)rect->addr, g2d_dev->base + MASK_BASE_ADDR_REG);
		writel(rect->full_w * rect->bytes_per_pixel, g2d_dev->base + MASK_STRIDE_REG);

		blt_cmd |= G2D_BLT_CMD_R_MASK_ENABLE;
	}

	return blt_cmd;
}
+
+u32 g2d_set_alpha(struct g2d_global *g2d_dev, g2d_flag * flag)
+{
+ u32 blt_cmd = 0;
+
+ /* Alpha Value */
+ if(flag->alpha_val <= G2D_ALPHA_VALUE_MAX) {
+ if ((flag->potterduff_mode == G2D_Clear_Mode) || (flag->potterduff_mode == G2D_Src_Mode))
+ blt_cmd |= G2D_BLT_CMD_R_ALPHA_BLEND_NONE;
+ else
+ blt_cmd |= G2D_BLT_CMD_R_ALPHA_BLEND_ALPHA_BLEND;
+ writel((flag->alpha_val & 0xff), g2d_dev->base + ALPHA_REG);
+ } else {
+ blt_cmd |= G2D_BLT_CMD_R_ALPHA_BLEND_NONE;
+ }
+
+ return blt_cmd;
+}
+
/*
 * g2d_set_bitblt_cmd - finalize and write the BitBLT command word:
 * enable stretching when source and destination sizes differ, enable
 * the clip window when the clip is narrower than the destination rect.
 */
void g2d_set_bitblt_cmd(struct g2d_global *g2d_dev, g2d_rect * src_rect, g2d_rect * dst_rect, g2d_clip * clip, u32 blt_cmd)
{
	if ((src_rect->w != dst_rect->w)
		|| (src_rect->h != dst_rect->h)) {
		blt_cmd |= G2D_BLT_CMD_R_STRETCH_ENABLE;
	}

	if ((clip->t != dst_rect->y) || (clip->b != dst_rect->y + dst_rect->h)
		|| (clip->l != dst_rect->x) || (clip->r != dst_rect->x + dst_rect->w)) {
		blt_cmd |= G2D_BLT_CMD_R_CW_ENABLE;
	}
	writel(blt_cmd, g2d_dev->base + BITBLT_COMMAND_REG);
}
+
/* Software-reset the G2D engine. */
void g2d_reset(struct g2d_global *g2d_dev)
{
	writel(G2D_SWRESET_R_RESET, g2d_dev->base + SOFT_RESET_REG);
}
+
/* Disable the command-finish interrupt. */
void g2d_disable_int(struct g2d_global *g2d_dev)
{
	writel(G2D_INTEN_R_CF_DISABLE, g2d_dev->base + INTEN_REG);
}
+
/* Acknowledge (clear) the pending command-finish interrupt. */
void g2d_set_int_finish(struct g2d_global *g2d_dev)
{
	writel(G2D_INTC_PEND_R_INTP_CMD_FIN, g2d_dev->base + INTC_PEND_REG);
}
+
/*
 * g2d_start_bitblt - kick off the programmed blit.  Enables the
 * completion interrupt unless the caller requested polling mode, sets
 * the engine cache control bits (0x7), then writes the start bit.
 */
void g2d_start_bitblt(struct g2d_global *g2d_dev, g2d_params *params)
{
	if (!(params->flag.render_mode & G2D_POLLING)) {
		writel(G2D_INTEN_R_CF_ENABLE, g2d_dev->base + INTEN_REG);
	}
	writel(0x7, g2d_dev->base + CACHECTL_REG);

	writel(G2D_BITBLT_R_START, g2d_dev->base + BITBLT_START_REG);
}
+
diff --git a/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d3x_regs.h b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d3x_regs.h
new file mode 100644
index 0000000..f67f636
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d3x_regs.h
@@ -0,0 +1,278 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.h
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.*/
+
+#ifndef __SEC_FIMG2D3X_REGS_H
+#define __SEC_FIMG2D3X_REGS_H
+
+//**********************************************************
+// Address Definition of SFR
+//**********************************************************
+#define SEC_G2DREG(x) ((x))
+
+//** General Register *****************
+#define CONRTOL_REG SEC_G2DREG(0x0000)
+#define SOFT_RESET_REG SEC_G2DREG(0x0000)
+#define INTEN_REG SEC_G2DREG(0x0004)
+#define INTC_PEND_REG SEC_G2DREG(0x000C)
+#define FIFO_STAT_REG SEC_G2DREG(0x0010)
+#define AXI_ID_MODE_REG SEC_G2DREG(0x0014)
+#define CACHECTL_REG SEC_G2DREG(0x0018)
+
+//** G2D Command *********************
+#define BITBLT_START_REG SEC_G2DREG(0x0100)
+#define BITBLT_COMMAND_REG SEC_G2DREG(0x0104)
+
+//** Rotation & Direction *************
+#define ROTATE_REG SEC_G2DREG(0x0200)
+#define SRC_MSK_DIRECT_REG SEC_G2DREG(0x0204)
+#define DST_PAT_DIRECT_REG SEC_G2DREG(0x0208)
+// for old vector
+#define SRC_DIRECT_REG SEC_G2DREG(0x0204)
+#define DST_DIRECT_REG SEC_G2DREG(0x0208)
+
+//** Source **************************
+#define SRC_SELECT_REG SEC_G2DREG(0x0300)
+#define SRC_BASE_ADDR_REG SEC_G2DREG(0x0304)
+#define SRC_STRIDE_REG SEC_G2DREG(0x0308)
+#define SRC_COLOR_MODE_REG SEC_G2DREG(0x030C)
+#define SRC_LEFT_TOP_REG SEC_G2DREG(0x0310)
+#define SRC_RIGHT_BOTTOM_REG SEC_G2DREG(0x0314)
+
+//** Destination ***********************
+#define DST_SELECT_REG SEC_G2DREG(0x0400)
+#define DST_BASE_ADDR_REG SEC_G2DREG(0x0404)
+#define DST_STRIDE_REG SEC_G2DREG(0x0408)
+#define DST_COLOR_MODE_REG SEC_G2DREG(0x040C)
+#define DST_LEFT_TOP_REG SEC_G2DREG(0x0410)
+#define DST_RIGHT_BOTTOM_REG SEC_G2DREG(0x0414)
+
+//** Pattern **************************
+#define PAT_BASE_ADDR_REG SEC_G2DREG(0x0500)
+#define PAT_SIZE_REG SEC_G2DREG(0x0504)
+#define PAT_COLOR_MODE_REG SEC_G2DREG(0x0508)
+#define PAT_OFFSET_REG SEC_G2DREG(0x050C)
+#define PAT_STRIDE_REG SEC_G2DREG(0x0510)
+
+//** Mask *****************************
+#define MASK_BASE_ADDR_REG SEC_G2DREG(0x0520)
+#define MASK_STRIDE_REG SEC_G2DREG(0x0524)
+
+//** Clipping Window *******************
+#define CW_LEFT_TOP_REG SEC_G2DREG(0x0600)
+#define CW_RIGHT_BOTTOM_REG SEC_G2DREG(0x0604)
+
+//** ROP4 & Blending *****************
+#define THIRD_OPERAND_REG SEC_G2DREG(0x0610)
+#define ROP4_REG SEC_G2DREG(0x0614)
+#define ALPHA_REG SEC_G2DREG(0x0618)
+
+//** Color ***************************
+#define FG_COLOR_REG SEC_G2DREG(0x0700)
+#define BG_COLOR_REG SEC_G2DREG(0x0704)
+#define BS_COLOR_REG SEC_G2DREG(0x0708)
+
+//** Color Key ***********************
+#define SRC_COLORKEY_CTRL_REG SEC_G2DREG(0x0710)
+#define SRC_COLORKEY_DR_MIN_REG SEC_G2DREG(0x0714)
+#define SRC_COLORKEY_DR_MAX_REG SEC_G2DREG(0x0718)
+#define DST_COLORKEY_CTRL_REG SEC_G2DREG(0x071C)
+#define DST_COLORKEY_DR_MIN_REG SEC_G2DREG(0x0720)
+#define DST_COLORKEY_DR_MAX_REG SEC_G2DREG(0x0724)
+
+//**********************************************************
+// Bit Definition part
+//**********************************************************
+
+/* software reset register (SOFT_RESET_REG : 0x0000) */
+#define G2D_SWRESET_R_RESET (1<<0)
+
+/* interrupt enable register (INTEN_REG : 0x0004)) */
+#define G2D_INTEN_R_INT_TYPE_EDGE (1<<1)
+#define G2D_INTEN_R_INT_TYPE_LEVEL (0<<1)
+#define G2D_INTEN_R_CF_ENABLE (1<<0)
+#define G2D_INTEN_R_CF_DISABLE (0<<0)
+
+/* interrupt pending register (INTC_PEND_REG : 0x000C) */
+#define G2D_INTC_PEND_R_INTP_CMD_FIN (1<<0)
+
+/* AXI ID mode register (AXI_ID_MODE_REG : 0x0014) */
+#define G2D_AXIID_MODE_R_MULTI_ID (1<<0)
+#define G2D_AXIID_MODE_R_SIGNLE_ID (0<<0)
+
+/* bitblit start register (BITBLT_START_REG : 0x0100) */
+#define G2D_BITBLT_R_START (1<<0)
+
+/* bitblt command register (BITBLT_COMMAND_REG : 0x0104) */
+#define G2D_BLT_CMD_R_COLOR_EXP_CORRECT (0<<24)
+#define G2D_BLT_CMD_R_COLOR_EXP_ZERO (1<<24)
+
+#define G2D_BLT_CMD_R_SRC_NON_PRE_BLEND_DISLABE (0<<22)
+#define G2D_BLT_CMD_R_SRC_NON_PRE_BLEND_CONSTANT_ALPHA (1<<22)
+#define G2D_BLT_CMD_R_SRC_NON_PRE_BLEND_PERPIXEL_ALPHA (2<<22)
+
+#define G2D_BLT_CMD_R_ALPHA_BLEND_NONE (0<<20)
+#define G2D_BLT_CMD_R_ALPHA_BLEND_ALPHA_BLEND (1<<20)
+#define G2D_BLT_CMD_R_ALPHA_BLEND_FADE (2<<20)
+// #define G2D_BLT_CMD_R_ALPHA_BLEND_PERPIXEL (3<<20)
+
+#define G2D_BLT_CMD_R_ALPHA_BLEND_FAD_OFFSET (8)
+
+#define G2D_BLT_CMD_R_COLOR_KEY_DISABLE (0<<16)
+#define G2D_BLT_CMD_R_COLOR_KEY_ENABLE_SRC (1<<16)
+#define G2D_BLT_CMD_R_COLOR_KEY_ENABLE_DST (2<<16)
+#define G2D_BLT_CMD_R_COLOP_KEY_ENABLE_SRC_DST (3<<16)
+
+#define G2D_BLT_CMD_R_TRANSPARENT_MODE_OPAQUE (0<<12)
+#define G2D_BLT_CMD_R_TRANSPARENT_MODE_TRANS (1<<12)
+#define G2D_BLT_CMD_R_TRANSPARENT_MODE_BLUESCR (2<<12)
+
+#define G2D_BLT_CMD_R_CW_ENABLE (1<<8)
+#define G2D_BLT_CMD_R_STRETCH_ENABLE (1<<4)
+#define G2D_BLT_CMD_R_MASK_ENABLE (1<<0)
+
+/* rotation register (ROTATE_REG : 0x02000) */
+#define G2D_ROT_CMD_R_0 (0<<0)
+#define G2D_ROT_CMD_R_90 (1<<0)
+
+/* source and mask direction register (SRC_MSK_DIRECT_REG : 0x0204) */
+/*
+ * NOTE(review): all four values below are identical (0<<8), the name
+ * G2D_SRC_MSK_DIR_R_SRC_Y_POSITIVE is defined twice, and there is no
+ * *_SRC_Y_NEGATIVE.  This looks like copy-paste typos — the NEGATIVE
+ * variants presumably need distinct bit values; confirm against the
+ * FIMG2D 3.x TRM before using these macros.
+ */
+#define G2D_SRC_MSK_DIR_R_MSK_Y_POSITIVE	(0<<8)
+#define G2D_SRC_MSK_DIR_R_MSK_Y_NEGATIVE	(0<<8)
+#define G2D_SRC_MSK_DIR_R_SRC_Y_POSITIVE	(0<<8)
+#define G2D_SRC_MSK_DIR_R_SRC_Y_POSITIVE	(0<<8)
+
+/* source image selection register (SRC_SELECT_REG : 0x0300) */
+#define G2D_SRC_SELECT_R_NORMAL (0<<0)
+#define G2D_SRC_SELECT_R_USE_FG_COLOR (1<<0)
+#define G2D_SRC_SELECT_R_USE_BG_COLOR (2<<0)
+
+/* source image color mode register (SRC_COLOR_MODE_REG : 0x030C) */
+
+
+/* destination image selection register (DST_SELECT_REG : 0x0400) */
+#define G2D_DST_SELECT_R_NORMAL (0<<0)
+#define G2D_DST_SELECT_R_USE_FG_COLOR (1<<0)
+#define G2D_DST_SELECT_R_USE_BG_COLOR (2<<0)
+
+#define G2D_CMD0_REG_M_X (1<<8)
+
+#define G2D_CMD0_REG_L (1<<1)
+#define G2D_CMD0_REG_P (1<<0)
+
+/* BitBLT */
+#define G2D_CMD1_REG_S (1<<1)
+#define G2D_CMD1_REG_N (1<<0)
+
+/* resource color mode */
+#define G2D_COLOR_MODE_REG_C3_32BPP (1<<3)
+#define G2D_COLOR_MODE_REG_C3_24BPP (1<<3)
+#define G2D_COLOR_MODE_REG_C2_18BPP (1<<2)
+#define G2D_COLOR_MODE_REG_C1_16BPP (1<<1)
+#define G2D_COLOR_MODE_REG_C0_15BPP (1<<0)
+
+#define G2D_COLOR_RGB_565 (0x0<<0)
+#define G2D_COLOR_RGBA_5551 (0x1<<0)
+#define G2D_COLOR_ARGB_1555 (0x2<<0)
+#define G2D_COLOR_RGBA_8888 (0x3<<0)
+#define G2D_COLOR_ARGB_8888 (0x4<<0)
+#define G2D_COLOR_XRGB_8888 (0x5<<0)
+#define G2D_COLOR_RGBX_8888 (0x6<<0)
+#define G2D_COLOR_YUV422_SP (0x1<<3)
+
+#define G2D_CHL_ORDER_XRGB (0<<4) // ARGB,XRGB
+#define G2D_CHL_ORDER_RGBX (1<<4) // RGBA,RGBX
+#define G2D_CHL_ORDER_XBGR (2<<4) // ABGR,XBGR
+#define G2D_CHL_ORDER_BGRX (3<<4) // BGRA,BGRX
+
+#define G2D_FMT_XRGB_8888 (0)
+#define G2D_FMT_ARGB_8888 (1)
+#define G2D_FMT_RGB_565 (2)
+#define G2D_FMT_XRGB_1555 (3)
+#define G2D_FMT_ARGB_1555 (4)
+#define G2D_FMT_XRGB_4444 (5)
+#define G2D_FMT_ARGB_4444 (6)
+#define G2D_FMT_PACKED_RGB_888 (7)
+
+/* rotation mode */
+#define G2D_ROTATRE_REG_FY (1<<5)
+#define G2D_ROTATRE_REG_FX (1<<4)
+#define G2D_ROTATRE_REG_R3_270 (1<<3)
+#define G2D_ROTATRE_REG_R2_180 (1<<2)
+#define G2D_ROTATRE_REG_R1_90 (1<<1)
+#define G2D_ROTATRE_REG_R0_0 (1<<0)
+
+/* Endian select */
+#define G2D_ENDIAN_DST_BIG_ENDIAN (1<<1)
+#define G2D_ENDIAN_DST_LITTLE_ENDIAN (0<<1)
+
+#define G2D_ENDIAN_SRC_BIG_ENDIAN (1<<0)
+#define G2D_ENDIAN_SRC_LITTLE_ENDIAN (0<<0)
+
+/* read buffer size */
+#define G2D_ENDIAN_READSIZE_READ_SIZE_1 (0<<0)
+#define G2D_ENDIAN_READSIZE_READ_SIZE_4 (1<<0)
+#define G2D_ENDIAN_READSIZE_READ_SIZE_8 (2<<0)
+#define G2D_ENDIAN_READSIZE_READ_SIZE_16 (3<<0)
+
+/* Third Operans Select */
+/*
+#define G2D_ROP_REG_OS_PATTERN (0<<13)
+#define G2D_ROP_REG_OS_FG_COLOR (1<<13)
+#define G2D_ROP_REG_OS_PATTERN_MASK_SELECT (0<<4)
+#define G2D_ROP_REG_OS_PATTERN_THIRD (0)
+*/
+#define G2D_THIRD_OP_REG_PATTERN ((0<<4) | (0))
+#define G2D_THIRD_OP_REG_FG_COLOR ((1<<4) | (1))
+#define G2D_THIRD_OP_REG_BG_COLOR ((2<<4) | (2))
+#define G2D_THIRD_OP_REG_NONE ((3<<4) | (3))
+
+/* Alpha Blending Mode */
+#define G2D_ROP_REG_ABM_NO_BLENDING (0<<10)
+#define G2D_ROP_REG_ABM_SRC_BITMAP (1<<10)
+#define G2D_ROP_REG_ABM_REGISTER (2<<10)
+#define G2D_ROP_REG_ABM_FADING (4<<10)
+
+/* Raster operation mode */
+#define G2D_ROP_REG_T_OPAQUE_MODE (0<<9)
+#define G2D_ROP_REG_T_TRANSP_MODE (1<<9)
+
+#define G2D_ROP_REG_B_BS_MODE_OFF (0<<8)
+#define G2D_ROP_REG_B_BS_MODE_ON (1<<8)
+
+/*
+#define G2D_ROP_REG_SRC_ONLY (0xf0)
+#define G2D_ROP_REG_3RD_OPRND_ONLY (0xaa)
+#define G2D_ROP_REG_DST_ONLY (0xcc)
+#define G2D_ROP_REG_SRC_OR_DST (0xfc)
+#define G2D_ROP_REG_SRC_OR_3RD_OPRND (0xfa)
+#define G2D_ROP_REG_SRC_AND_DST (0xc0) //(pat==1)? src:dst
+#define G2D_ROP_REG_SRC_AND_3RD_OPRND (0xa0)
+#define G2D_ROP_REG_SRC_XOR_3RD_OPRND (0x5a)
+#define G2D_ROP_REG_DST_OR_3RD_OPRND (0xee)
+*/
+#define G2D_ROP_REG_SRC (0xcc)
+#define G2D_ROP_REG_DST (0xaa)
+#define G2D_ROP_REG_SRC_AND_DST (0x88)
+#define G2D_ROP_REG_SRC_OR_DST (0xee)
+#define G2D_ROP_REG_3RD_OPRND (0xf0)
+#define G2D_ROP_REG_SRC_AND_3RD_OPRND (0xc0)
+#define G2D_ROP_REG_SRC_OR_3RD_OPRND (0xfc)
+#define G2D_ROP_REG_SRC_XOR_3RD_OPRND (0x3c)
+#define G2D_ROP_REG_DST_OR_3RD_OPRND (0xfa)
+
+
+/* stencil control */
+#define G2D_STENCIL_CNTL_REG_STENCIL_ON_ON (1<<31)
+#define G2D_STENCIL_CNTL_REG_STENCIL_ON_OFF (0<<31)
+
+#define G2D_STENCIL_CNTL_REG_STENCIL_INVERSE (1<<23)
+#define G2D_STENCIL_CNTL_REG_STENCIL_SWAP (1<<0)
+
+/*********************************************************************************/
+
+#endif /* __SEC_FIMG2D3X_REGS_H */
diff --git a/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_cache.c b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_cache.c
new file mode 100644
index 0000000..639b3f8
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_cache.c
@@ -0,0 +1,379 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d3x_cache.c
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file implements fimg2d cache control functions.
+ */
+
+#include <linux/kernel.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <asm/io.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+
+#include "fimg2d.h"
+
+#define L1_ALL_THRESHOLD_SIZE SZ_64K
+#define L2_ALL_THRESHOLD_SIZE SZ_1M
+
+#define L2_CACHE_SKIP_MARK 256*4
+
+/*
+ * g2d_pagetable_clean - write back the CPU page-table entries covering
+ * [start_addr, start_addr + size) so the G2D sysmmu reads up-to-date
+ * descriptors from memory.
+ * Walks the range in 1MB sections; for each section it cleans the L1
+ * descriptor word and the 1KB L2 table it points to, in both the inner
+ * D-cache (dmac_map_area) and the outer cache (outer_clean_range).
+ * @pgd: physical base address of the page directory being walked.
+ */
+void g2d_pagetable_clean(const void *start_addr, unsigned long size, unsigned long pgd)
+{
+	void *l1d_vir, *l1d_phy, *l2d_phy;
+	void *cur_addr, *end_addr;
+	/* round to whole 1MB sections, plus one trailing section */
+	size = ALIGN(size, SZ_1M);
+	cur_addr = (void *)((unsigned long)start_addr & ~(SZ_1M-1));
+	end_addr = cur_addr + size + SZ_1M;
+
+	/* physical address of the L1 descriptor for cur_addr */
+	l1d_phy = (void *)((pgd & 0xffffc000) | (((u32)(cur_addr) & 0xfff00000)>>18));
+
+	if (l1d_phy) {
+		l1d_vir = phys_to_virt((u32)l1d_phy);
+		/* 4 bytes of L1 descriptor per 1MB section */
+		dmac_map_area(l1d_vir, (size/SZ_1M)*4, DMA_TO_DEVICE);
+	}
+
+	while (cur_addr < end_addr) {
+		outer_clean_range((u32)l1d_phy, (u32)l1d_phy + 4);
+
+		if (l1d_phy) {
+			/* L2 table base comes from the L1 descriptor contents */
+			l2d_phy = (void *)((readl(phys_to_virt((u32)l1d_phy)) & 0xfffffc00) |
+					(((u32)cur_addr & 0x000ff000) >> 10));
+			if (l2d_phy)
+				dmac_map_area(phys_to_virt((u32)l2d_phy), SZ_1K, DMA_TO_DEVICE);
+			outer_clean_range((u32)l2d_phy, (u32)l2d_phy + SZ_1K);
+		}
+		cur_addr += SZ_1M;
+		l1d_phy = (void *)((pgd & 0xffffc000) | (((u32)(cur_addr) & 0xfff00000)>>18));
+	}
+}
+
+
+/*
+ * virt2phys - translate a virtual address via a software walk of
+ * current->mm's two-level page tables.
+ * Returns the physical address, or 0 when the L1 entry is not a
+ * coarse page-table descriptor (no small-page mapping).
+ */
+static unsigned long virt2phys(unsigned long addr)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long paddr;
+
+	/* kernel threads have no mm; fall back to init_mm */
+	if(!current->mm) {
+		current->mm = &init_mm;
+	}
+
+	pgd = pgd_offset(current->mm, addr);
+
+	/* bit0 set marks an ARM "coarse page table" L1 descriptor */
+	if ((pgd_val(*pgd) & 0x1) != 0x1) {
+		return 0;
+	}
+
+	pmd = pmd_offset(pgd, addr);
+	pte = pte_offset_map(pmd, addr);
+
+	paddr = (addr & 0xfff) | (pte_val(*pte) & 0xfffff000);
+
+	/* pte_offset_map() must be paired with pte_unmap(); the original
+	 * leaked the atomic kmap when CONFIG_HIGHPTE is enabled */
+	pte_unmap(pte);
+
+	return paddr;
+}
+
+/*
+ * g2d_check_pagetable - walk the software page tables for
+ * [vaddr, vaddr + size) and classify the mapping.
+ * Every page must have a valid L1 coarse-table descriptor and an L2
+ * small-page descriptor, otherwise G2D_PT_NOTVALID is returned.  On
+ * success, G2D_PT_CACHED / G2D_PT_UNCACHED is decided from the C bit
+ * (0x08) of the PTE checked on the final iteration.
+ * @pgd: physical base address of the page directory.
+ */
+u32 g2d_check_pagetable(void * vaddr, unsigned int size, unsigned long pgd)
+{
+	unsigned int level_one_phy, level_two_phy;
+	unsigned int level_one_value, level_two_value;
+
+	for (;;) {
+		level_one_phy = (pgd & 0xffffc000) | (((u32)vaddr & 0xfff00000)>>18);
+		/* phys_to_virt below lowmem start means no linear mapping */
+		if ((int)phys_to_virt(level_one_phy) < 0xc0000000) {
+			FIMG2D_ERROR("Level1 page table mapping missed, missed address = %p", phys_to_virt(level_one_phy));
+			return G2D_PT_NOTVALID;
+		}
+		level_one_value = readl(phys_to_virt(level_one_phy));
+
+		level_two_phy = (level_one_value & 0xfffffc00) | (((u32)vaddr & 0x000ff000) >> 10);
+		if ((int)phys_to_virt(level_two_phy) < 0xc0000000) {
+			FIMG2D_ERROR("Level2 page table mapping missed, missed address = %p", phys_to_virt(level_two_phy));
+			return G2D_PT_NOTVALID;
+		}
+		level_two_value = readl(phys_to_virt(level_two_phy));
+
+		/* L1 must be coarse table (0x1), L2 must be small page (0x3) */
+		if (((level_one_value & 0x3) != 0x1) || ((level_two_value & 0x3) != 0x3)) {
+			FIMG2D_DEBUG("Surface memory mapping fail [L1: 0x%x, L2: 0x%x]\n",
+				level_one_value, level_two_value);
+			return G2D_PT_NOTVALID;
+		}
+		if (size == 0) {
+			if ((level_two_value & 0x08) != 0x08)
+				return G2D_PT_UNCACHED;
+			return G2D_PT_CACHED;
+		}
+
+		/* step to the last byte of the range, then page by page */
+		if (size <= PAGE_SIZE) {
+			vaddr += (size-1);
+			size = 0;
+		} else {
+			vaddr += PAGE_SIZE;
+			size -= PAGE_SIZE;
+		}
+	}
+}
+
+/*
+ * g2d_clip_for_src - derive the clip window in source coordinates.
+ * For a 1:1 (non-stretch) blit the destination clip is translated into
+ * source space; for a stretch blit the whole source rect is used.
+ */
+void g2d_clip_for_src(g2d_rect *src_rect, g2d_rect *dst_rect, g2d_clip *clip, g2d_clip *src_clip)
+{
+	int same_size = (src_rect->w == dst_rect->w) && (src_rect->h == dst_rect->h);
+
+	if (same_size) {
+		/* shift the dst clip by the src/dst origin offset */
+		src_clip->t = src_rect->y + (clip->t - dst_rect->y);
+		src_clip->l = src_rect->x + (clip->l - dst_rect->x);
+		src_clip->b = src_clip->t + (clip->b - clip->t);
+		src_clip->r = src_clip->l + (clip->r - clip->l);
+		return;
+	}
+
+	/* stretch blit: use the full source rect */
+	src_clip->t = src_rect->y;
+	src_clip->l = src_rect->x;
+	src_clip->b = src_clip->t + src_rect->h;
+	src_clip->r = src_clip->l + src_rect->w;
+}
+
+/*
+ * g2d_mem_inner_cache - L1 (inner) cache maintenance before a blit:
+ * clean the clipped source pixels, clean+invalidate the clipped
+ * destination pixels.  When the combined size reaches
+ * L1_ALL_THRESHOLD_SIZE, flushing all CPU caches is cheaper than
+ * walking the ranges.
+ */
+void g2d_mem_inner_cache(g2d_params * params)
+{
+	void *src_addr, *dst_addr;
+	unsigned long src_size, dst_size;
+	g2d_clip clip_src;
+	g2d_clip_for_src(&params->src_rect, &params->dst_rect, &params->clip, &clip_src);
+
+	src_addr = (void *)GET_START_ADDR_C(params->src_rect, clip_src);
+	dst_addr = (void *)GET_START_ADDR_C(params->dst_rect, params->clip);
+	src_size = (unsigned long)GET_RECT_SIZE_C(params->src_rect, clip_src);
+	dst_size = (unsigned long)GET_RECT_SIZE_C(params->dst_rect, params->clip);
+
+	if((src_size + dst_size) < L1_ALL_THRESHOLD_SIZE) {
+		dmac_map_area(src_addr, src_size, DMA_TO_DEVICE);
+		dmac_flush_range(dst_addr, dst_addr + dst_size);
+	} else {
+		flush_all_cpu_caches();
+	}
+}
+
+/*
+ * g2d_mem_outer_cache - L2 (outer) cache maintenance before a blit:
+ * clean the clipped source range and, when *need_dst_clean, flush
+ * (clean+invalidate) the clipped destination range.  When the combined
+ * size reaches L2_ALL_THRESHOLD_SIZE the whole outer cache is flushed
+ * instead.  Rects with few spare stride bytes are maintained as one
+ * contiguous range; otherwise each scanline is translated with
+ * virt2phys() and maintained separately to avoid touching data that
+ * lies between lines.
+ */
+void g2d_mem_outer_cache(struct g2d_global *g2d_dev, g2d_params * params, int *need_dst_clean)
+{
+	unsigned long start_paddr, end_paddr;
+	unsigned long cur_addr, end_addr;
+	unsigned long width_bytes;
+	unsigned long stride;
+	unsigned long src_size, dst_size;
+
+#if 0
+	if (((GET_RECT_SIZE(params->src_rect) + GET_RECT_SIZE(params->dst_rect)) > L2_ALL_THRESHOLD_SIZE)
+		&& ((*need_dst_clean == true) || ( GET_RECT_SIZE(params->src_rect) > 384*640*4))) {
+		outer_flush_all();
+		*need_dst_clean = true;
+		return;
+	}
+#endif
+
+	g2d_clip clip_src;
+	g2d_clip_for_src(&params->src_rect, &params->dst_rect, &params->clip, &clip_src);
+
+	src_size = GET_RECT_SIZE_C(params->src_rect, clip_src);
+	dst_size = GET_RECT_SIZE_C(params->dst_rect, params->clip);
+
+	if ((src_size + dst_size) >= L2_ALL_THRESHOLD_SIZE) {
+		outer_flush_all();
+		*need_dst_clean = true;
+		return;
+	}
+
+	if((GET_SPARE_BYTES(params->src_rect) < L2_CACHE_SKIP_MARK)
+		|| ((params->src_rect.w * params->src_rect.bytes_per_pixel) >= PAGE_SIZE)) {
+		g2d_mem_outer_cache_clean((void *)GET_START_ADDR_C(params->src_rect, clip_src),
+				(unsigned int)GET_RECT_SIZE_C(params->src_rect, clip_src));
+	} else {
+		stride = GET_STRIDE(params->src_rect);
+		width_bytes = params->src_rect.w * params->src_rect.bytes_per_pixel;
+		cur_addr = (unsigned long)GET_REAL_START_ADDR_C(params->src_rect, clip_src);
+		end_addr = (unsigned long)GET_REAL_END_ADDR_C(params->src_rect, clip_src);
+
+		while (cur_addr <= end_addr) {
+			start_paddr = virt2phys((unsigned long)cur_addr);
+			end_paddr = virt2phys((unsigned long)cur_addr + width_bytes);
+
+			/* a line crossing a page boundary may be physically split */
+			if (((end_paddr - start_paddr) > 0) && ((end_paddr -start_paddr) < PAGE_SIZE)) {
+				outer_clean_range(start_paddr, end_paddr);
+			} else {
+				outer_clean_range(start_paddr, ((start_paddr + PAGE_SIZE) & PAGE_MASK) - 1);
+				outer_clean_range(end_paddr & PAGE_MASK, end_paddr);
+			}
+			cur_addr += stride;
+		}
+	}
+
+	if (*need_dst_clean) {
+		/* fix: width check must use the DST rect's bytes_per_pixel
+		 * (original used src_rect.bytes_per_pixel — copy/paste bug) */
+		if ((GET_SPARE_BYTES(params->dst_rect) < L2_CACHE_SKIP_MARK)
+			|| ((params->dst_rect.w * params->dst_rect.bytes_per_pixel) >= PAGE_SIZE)) {
+			g2d_mem_outer_cache_flush((void *)GET_START_ADDR_C(params->dst_rect, params->clip),
+					(unsigned int)GET_RECT_SIZE_C(params->dst_rect, params->clip));
+		} else {
+			stride = GET_STRIDE(params->dst_rect);
+			width_bytes = (params->clip.r - params->clip.l) * params->dst_rect.bytes_per_pixel;
+
+			cur_addr = (unsigned long)GET_REAL_START_ADDR_C(params->dst_rect, params->clip);
+			end_addr = (unsigned long)GET_REAL_END_ADDR_C(params->dst_rect, params->clip);
+
+			while (cur_addr <= end_addr) {
+				start_paddr = virt2phys((unsigned long)cur_addr);
+				end_paddr = virt2phys((unsigned long)cur_addr + width_bytes);
+
+				if (((end_paddr - start_paddr) > 0) && ((end_paddr -start_paddr) < PAGE_SIZE)) {
+					outer_flush_range(start_paddr, end_paddr);
+				} else {
+					outer_flush_range(start_paddr, ((start_paddr + PAGE_SIZE) & PAGE_MASK) - 1);
+					outer_flush_range(end_paddr & PAGE_MASK, end_paddr);
+				}
+				cur_addr += stride;
+			}
+		}
+	}
+}
+
+/*
+ * g2d_mem_cache_oneshot - combined inner+outer cache maintenance for a
+ * src/dst pair: clean the source, flush the destination.
+ * Small totals use ranged inner ops; large totals fall back to
+ * flush_all_cpu_caches() / outer_flush_all().  Outer maintenance walks
+ * page by page through virt2phys() and skips unmapped pages.
+ */
+void g2d_mem_cache_oneshot(void *src_addr, void *dst_addr, unsigned long src_size, unsigned long dst_size)
+{
+	unsigned long paddr;
+	void *cur_addr, *end_addr;
+	unsigned long full_size;
+
+	full_size = src_size + dst_size;
+
+	/* inner cache: clean src (ranged when small, global otherwise) */
+	if(full_size < L1_ALL_THRESHOLD_SIZE)
+		dmac_map_area(src_addr, src_size, DMA_TO_DEVICE);
+	else
+		flush_all_cpu_caches();
+
+	if(full_size > L2_ALL_THRESHOLD_SIZE) {
+		outer_flush_all();
+		return;
+	}
+
+	/* outer cache: clean src pages */
+	cur_addr = (void *)((unsigned long)src_addr & PAGE_MASK);
+	src_size = PAGE_ALIGN(src_size);
+	end_addr = cur_addr + src_size + PAGE_SIZE;
+
+	while (cur_addr < end_addr) {
+		paddr = virt2phys((unsigned long)cur_addr);
+		if (paddr) {
+			outer_clean_range(paddr, paddr + PAGE_SIZE);
+		}
+		cur_addr += PAGE_SIZE;
+	}
+
+	/* inner cache: flush dst when the global flush above was skipped */
+	if(full_size < L1_ALL_THRESHOLD_SIZE)
+		dmac_flush_range(dst_addr, dst_addr + dst_size);
+
+	/* outer cache: flush dst pages */
+	cur_addr = (void *)((unsigned long)dst_addr & PAGE_MASK);
+	dst_size = PAGE_ALIGN(dst_size);
+	end_addr = cur_addr + dst_size + PAGE_SIZE;
+
+	while (cur_addr < end_addr) {
+		paddr = virt2phys((unsigned long)cur_addr);
+		if (paddr) {
+			outer_flush_range(paddr, paddr + PAGE_SIZE);
+		}
+		cur_addr += PAGE_SIZE;
+	}
+}
+
+/*
+ * g2d_mem_cache_op - dispatch an explicit outer-cache maintenance
+ * request.  Returns true when the command was recognized, false
+ * otherwise.
+ */
+u32 g2d_mem_cache_op(unsigned int cmd, void *addr, unsigned int size)
+{
+	if (cmd == G2D_DMA_CACHE_CLEAN) {
+		g2d_mem_outer_cache_clean(addr, size);
+		return true;
+	}
+
+	if (cmd == G2D_DMA_CACHE_FLUSH) {
+		g2d_mem_outer_cache_flush(addr, size);
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * g2d_mem_outer_cache_flush - clean+invalidate the outer (L2) cache for
+ * a virtual range, page by page.  Each page is translated via
+ * virt2phys(); unmapped pages (paddr == 0) are skipped.
+ */
+void g2d_mem_outer_cache_flush(void *start_addr, unsigned long size)
+{
+	unsigned long paddr;
+	void *cur_addr, *end_addr;
+
+	cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
+	size = PAGE_ALIGN(size);
+	/* +PAGE_SIZE covers the tail when start_addr was not page aligned */
+	end_addr = cur_addr + size + PAGE_SIZE;
+
+	while (cur_addr < end_addr) {
+		paddr = virt2phys((unsigned long)cur_addr);
+		if (paddr) {
+			outer_flush_range(paddr, paddr + PAGE_SIZE);
+		}
+		cur_addr += PAGE_SIZE;
+	}
+}
+
+/*
+ * g2d_mem_outer_cache_clean - clean (write back) the outer (L2) cache
+ * for a virtual range, page by page.  Each page is translated via
+ * virt2phys(); unmapped pages (paddr == 0) are skipped.
+ */
+void g2d_mem_outer_cache_clean(const void *start_addr, unsigned long size)
+{
+	unsigned long paddr;
+	void *cur_addr, *end_addr;
+
+	cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
+	size = PAGE_ALIGN(size);
+	/* +PAGE_SIZE covers the tail when start_addr was not page aligned */
+	end_addr = cur_addr + size + PAGE_SIZE;
+
+	while (cur_addr < end_addr) {
+		paddr = virt2phys((unsigned long)cur_addr);
+		if (paddr) {
+			outer_clean_range(paddr, paddr + PAGE_SIZE);
+		}
+		cur_addr += PAGE_SIZE;
+	}
+}
+
+/*
+ * g2d_mem_outer_cache_inv - invalidate the clipped destination range in
+ * the outer cache (used after the engine wrote memory the CPU may still
+ * hold stale lines for).  The partial first and last pages are
+ * invalidated by sub-page ranges; whole pages in between are
+ * invalidated one page at a time through virt2phys().
+ */
+void g2d_mem_outer_cache_inv(g2d_params *params)
+{
+	unsigned long start_paddr, end_paddr;
+	unsigned long cur_addr, end_addr;
+	unsigned long stride;
+
+	stride = GET_STRIDE(params->dst_rect);
+	cur_addr = (unsigned long)GET_START_ADDR_C(params->dst_rect, params->clip);
+	end_addr = cur_addr + (unsigned long)GET_RECT_SIZE_C(params->dst_rect, params->clip);
+
+	/* partial first page: invalidate up to the page boundary */
+	start_paddr = virt2phys((unsigned long)cur_addr);
+	outer_inv_range(start_paddr, (start_paddr & PAGE_MASK) + (PAGE_SIZE - 1));
+	cur_addr = ((unsigned long)cur_addr & PAGE_MASK) + PAGE_SIZE;
+
+	while (cur_addr < end_addr) {
+		start_paddr = virt2phys((unsigned long)cur_addr);
+		/* partial last page: stop at end_addr */
+		if ((cur_addr + PAGE_SIZE) > end_addr) {
+			end_paddr = virt2phys((unsigned long)end_addr);
+			outer_inv_range(start_paddr, end_paddr);
+			break;
+		}
+
+		if (start_paddr) {
+			outer_inv_range(start_paddr, start_paddr + PAGE_SIZE);
+		}
+		cur_addr += PAGE_SIZE;
+	}
+}
+
+/*
+ * g2d_check_need_dst_cache_clean - decide whether the destination must
+ * be cleaned before the blit.  Returns false only for the fast path:
+ * an RGB565 opaque blit whose clipped width spans the full destination
+ * stride and whose byte range is 32-byte aligned — i.e. the engine
+ * overwrites whole cache lines, so no dirty-line writeback can corrupt
+ * the result.  Everything else returns true.
+ */
+int g2d_check_need_dst_cache_clean(g2d_params * params)
+{
+	unsigned long cur_addr, end_addr;
+	cur_addr = (unsigned long)GET_START_ADDR_C(params->dst_rect, params->clip);
+	end_addr = cur_addr + (unsigned long)GET_RECT_SIZE_C(params->dst_rect, params->clip);
+
+	if ((params->src_rect.color_format == G2D_RGB_565) &&
+		(params->flag.alpha_val == G2D_ALPHA_BLENDING_OPAQUE) &&
+		(params->dst_rect.full_w == (params->clip.r - params->clip.l)) &&
+		(cur_addr % 32 == 0) && (end_addr % 32 == 0)) {
+		return false;
+	}
+
+	return true;
+}
diff --git a/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_core.c b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_core.c
new file mode 100644
index 0000000..40508d5
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_core.c
@@ -0,0 +1,318 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d_core.c
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file implements fimg2d core functions.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <plat/s5p-sysmmu.h>
+#include <linux/sched.h>
+
+#if defined(CONFIG_S5P_MEM_CMA)
+#include <linux/cma.h>
+#elif defined(CONFIG_S5P_MEM_BOOTMEM)
+#include <mach/media.h>
+#include <plat/media.h>
+#endif
+
+#include "fimg2d.h"
+
+/*
+ * g2d_clk_enable - turn the G2D clock on exactly once.
+ * Returns 0 on the off->on transition, -1 if it was already enabled.
+ */
+int g2d_clk_enable(struct g2d_global *g2d_dev)
+{
+	if (atomic_read(&g2d_dev->clk_enable_flag))
+		return -1;
+
+	clk_enable(g2d_dev->clock);
+	atomic_set(&g2d_dev->clk_enable_flag, 1);
+	return 0;
+}
+
+/*
+ * g2d_clk_disable - turn the G2D clock off when it is enabled and the
+ * engine is idle.  Returns 0 on the on->off transition, -1 otherwise
+ * (already off, or still in use).
+ */
+int g2d_clk_disable(struct g2d_global *g2d_dev)
+{
+	if (!atomic_read(&g2d_dev->clk_enable_flag))
+		return -1;
+
+	if (atomic_read(&g2d_dev->in_use) != 0)
+		return -1;
+
+	clk_disable(g2d_dev->clock);
+	atomic_set(&g2d_dev->clk_enable_flag, 0);
+	return 0;
+}
+
+/*
+ * g2d_sysmmu_on - enable the G2D sysmmu with the kernel page directory
+ * as the initial translation table.  The clock is gated on only for
+ * the duration of the register access.
+ */
+void g2d_sysmmu_on(struct g2d_global *g2d_dev)
+{
+	g2d_clk_enable(g2d_dev);
+	s5p_sysmmu_enable(g2d_dev->dev,
+		(unsigned long)virt_to_phys((void *)init_mm.pgd));
+	g2d_clk_disable(g2d_dev);
+}
+
+/* Disable the G2D sysmmu; the clock is gated on just for the access. */
+void g2d_sysmmu_off(struct g2d_global *g2d_dev)
+{
+	g2d_clk_enable(g2d_dev);
+	s5p_sysmmu_disable(g2d_dev->dev);
+	g2d_clk_disable(g2d_dev);
+}
+
+/* Dump every blit parameter (src/dst rects, clip, flags) on failure. */
+void g2d_fail_debug(g2d_params *params)
+{
+	FIMG2D_ERROR("src : %d, %d, %d, %d / %d, %d / 0x%x, %d, 0x%x)\n",
+		params->src_rect.x,
+		params->src_rect.y,
+		params->src_rect.w,
+		params->src_rect.h,
+		params->src_rect.full_w,
+		params->src_rect.full_h,
+		params->src_rect.color_format,
+		params->src_rect.bytes_per_pixel,
+		(u32)params->src_rect.addr);
+	FIMG2D_ERROR("dst : %d, %d, %d, %d / %d, %d / 0x%x, %d, 0x%x)\n",
+		params->dst_rect.x,
+		params->dst_rect.y,
+		params->dst_rect.w,
+		params->dst_rect.h,
+		params->dst_rect.full_w,
+		params->dst_rect.full_h,
+		params->dst_rect.color_format,
+		params->dst_rect.bytes_per_pixel,
+		(u32)params->dst_rect.addr);
+	FIMG2D_ERROR("clip: %d, %d, %d, %d\n",
+		params->clip.t,
+		params->clip.b,
+		params->clip.l,
+		params->clip.r);
+	FIMG2D_ERROR("flag: %d, %d, %d, %d / %d, %d, %d, %d / %d, %d, %d, %d\n",
+		params->flag.rotate_val,
+		params->flag.alpha_val,
+		params->flag.blue_screen_mode,
+		params->flag.color_key_val,
+		params->flag.color_switch_val,
+		params->flag.src_color,
+		params->flag.third_op_mode,
+		params->flag.rop_mode,
+		params->flag.mask_mode,
+		params->flag.render_mode,
+		params->flag.potterduff_mode,
+		params->flag.memory_type);
+}
+
+/*
+ * g2d_init_regs - validate the request, reset the engine, and program
+ * every register for one blit.  Each g2d_set_* helper writes its own
+ * registers and returns the feature bits to accumulate into the BITBLT
+ * command word, which g2d_set_bitblt_cmd() finally writes.
+ * Returns 0 on success, -1 when g2d_check_params() rejects the request.
+ */
+int g2d_init_regs(struct g2d_global *g2d_dev, g2d_params *params)
+{
+	u32 blt_cmd = 0;
+
+	g2d_rect * src_rect = &params->src_rect;
+	g2d_rect * dst_rect = &params->dst_rect;
+	g2d_clip * clip = &params->clip;
+	g2d_flag * flag = &params->flag;
+
+	if (g2d_check_params(params) < 0)
+		return -1;
+
+	g2d_reset(g2d_dev);
+
+	/* source image */
+	blt_cmd |= g2d_set_src_img(g2d_dev, src_rect, flag);
+
+	/* destination image */
+	blt_cmd |= g2d_set_dst_img(g2d_dev, dst_rect);
+
+	/* rotation */
+	blt_cmd |= g2d_set_rotation(g2d_dev, flag);
+
+	/* clipping */
+	blt_cmd |= g2d_set_clip_win(g2d_dev, clip);
+
+	/* color key */
+	blt_cmd |= g2d_set_color_key(g2d_dev, flag);
+
+	/* pattern */
+	blt_cmd |= g2d_set_pattern(g2d_dev, src_rect, flag);
+
+	/* rop & alpha blending */
+	blt_cmd |= g2d_set_alpha(g2d_dev, flag);
+
+	/* command */
+	g2d_set_bitblt_cmd(g2d_dev, src_rect, dst_rect, clip, blt_cmd);
+
+	return 0;
+}
+
+/*
+ * g2d_check_overlap - report whether the source rect and the clipped
+ * destination rect share any bytes in memory.  Returns true on overlap
+ * (either endpoint of dst inside src, or src fully contained in dst),
+ * false otherwise.
+ */
+int g2d_check_overlap(g2d_rect src_rect, g2d_rect dst_rect, g2d_clip clip)
+{
+	unsigned int src_lo, src_hi;
+	unsigned int dst_lo, dst_hi;
+
+	src_lo = (unsigned int)GET_START_ADDR(src_rect);
+	src_hi = src_lo + (unsigned int)GET_RECT_SIZE(src_rect);
+	dst_lo = (unsigned int)GET_START_ADDR_C(dst_rect, clip);
+	dst_hi = dst_lo + (unsigned int)GET_RECT_SIZE_C(dst_rect, clip);
+
+	if ((dst_lo >= src_lo && dst_lo <= src_hi)
+		|| (dst_hi >= src_lo && dst_hi <= src_hi)
+		|| (src_lo >= dst_lo && src_hi <= dst_hi))
+		return true;
+
+	return false;
+}
+
+/*
+ * g2d_do_blit - prepare memory and MMU state and start one blit.
+ * Returns true when the blit was started, false on any validation
+ * error.  G2D_MEMORY_KERNEL addresses are physical and converted to
+ * kernel virtual; G2D_MEMORY_USER addresses are verified against the
+ * user page tables, the tables are cleaned for the sysmmu, and CPU
+ * caches are maintained when G2D_CACHE_OP is requested.
+ */
+int g2d_do_blit(struct g2d_global *g2d_dev, g2d_params *params)
+{
+	unsigned long pgd;
+	int need_dst_clean = true;
+
+	if ((params->src_rect.addr == NULL)
+		|| (params->dst_rect.addr == NULL)) {
+		FIMG2D_ERROR("error : addr Null\n");
+		return false;
+	}
+
+	if (params->flag.memory_type == G2D_MEMORY_KERNEL) {
+		params->src_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->src_rect.addr);
+		params->dst_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->dst_rect.addr);
+		pgd = (unsigned long)init_mm.pgd;
+	} else {
+		pgd = (unsigned long)current->mm->pgd;
+	}
+
+	if (params->flag.memory_type == G2D_MEMORY_USER)
+	{
+		g2d_clip clip_src;
+		g2d_clip_for_src(&params->src_rect, &params->dst_rect, &params->clip, &clip_src);
+
+		/* overlapping user src/dst is not supported by this path */
+		if (g2d_check_overlap(params->src_rect, params->dst_rect, params->clip))
+			return false;
+
+		/* +8: extra guard bytes past the src rect — presumably the
+		 * engine reads slightly beyond; TODO confirm against TRM */
+		g2d_dev->src_attribute =
+			g2d_check_pagetable((unsigned char *)GET_START_ADDR(params->src_rect),
+				(unsigned int)GET_RECT_SIZE(params->src_rect) + 8,
+				(u32)virt_to_phys((void *)pgd));
+		if (g2d_dev->src_attribute == G2D_PT_NOTVALID) {
+			FIMG2D_DEBUG("Src is not in valid pagetable\n");
+			return false;
+		}
+
+		g2d_dev->dst_attribute =
+			g2d_check_pagetable((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
+				(unsigned int)GET_RECT_SIZE_C(params->dst_rect, params->clip),
+				(u32)virt_to_phys((void *)pgd));
+		if (g2d_dev->dst_attribute == G2D_PT_NOTVALID) {
+			FIMG2D_DEBUG("Dst is not in valid pagetable\n");
+			return false;
+		}
+
+		/* make the page tables visible to the sysmmu */
+		g2d_pagetable_clean((unsigned char *)GET_START_ADDR(params->src_rect),
+			(u32)GET_RECT_SIZE(params->src_rect) + 8,
+			(u32)virt_to_phys((void *)pgd));
+		g2d_pagetable_clean((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
+			(u32)GET_RECT_SIZE_C(params->dst_rect, params->clip),
+			(u32)virt_to_phys((void *)pgd));
+
+		if (params->flag.render_mode & G2D_CACHE_OP) {
+			/*g2d_mem_cache_oneshot((void *)GET_START_ADDR(params->src_rect),
+					(void *)GET_START_ADDR(params->dst_rect),
+					(unsigned int)GET_REAL_SIZE(params->src_rect),
+					(unsigned int)GET_REAL_SIZE(params->dst_rect));*/
+			//	need_dst_clean = g2d_check_need_dst_cache_clean(params);
+			g2d_mem_inner_cache(params);
+			g2d_mem_outer_cache(g2d_dev, params, &need_dst_clean);
+		}
+	}
+
+	s5p_sysmmu_set_tablebase_pgd(g2d_dev->dev,
+		(u32)virt_to_phys((void *)pgd));
+
+	if(g2d_init_regs(g2d_dev, params) < 0) {
+		return false;
+	}
+
+	/* Do bitblit */
+	g2d_start_bitblt(g2d_dev, params);
+
+	/* if the dst was not cleaned, stale L2 lines must be invalidated */
+	if (!need_dst_clean)
+		g2d_mem_outer_cache_inv(params);
+
+	return true;
+}
+
+/*
+ * g2d_wait_for_finish - wait for the running blit to complete.
+ * In G2D_POLLING mode the FIFO state is polled; otherwise the caller
+ * sleeps on the IRQ wait queue with a G2D_TIMEOUT ms timeout.
+ * Returns true on clean completion; any sysmmu fault (before the wait,
+ * causing the timeout, or auto-recovered during it) or a plain timeout
+ * logs the parameters and returns false.
+ */
+int g2d_wait_for_finish(struct g2d_global *g2d_dev, g2d_params *params)
+{
+	/* fault may already have been raised before we start waiting */
+	if(atomic_read(&g2d_dev->is_mmu_faulted) == 1) {
+		FIMG2D_ERROR("error : sysmmu_faulted early\n");
+		FIMG2D_ERROR("faulted addr: 0x%x\n", g2d_dev->faulted_addr);
+		g2d_fail_debug(params);
+		atomic_set(&g2d_dev->is_mmu_faulted, 0);
+		return false;
+	}
+
+	if (params->flag.render_mode & G2D_POLLING) {
+		g2d_check_fifo_state_wait(g2d_dev);
+	} else {
+		if(wait_event_interruptible_timeout(g2d_dev->waitq,
+				g2d_dev->irq_handled == 1,
+				msecs_to_jiffies(G2D_TIMEOUT)) == 0) {
+			/* timed out: either a fault woke nothing or the HW hung */
+			if(atomic_read(&g2d_dev->is_mmu_faulted) == 1) {
+				FIMG2D_ERROR("error : sysmmu_faulted\n");
+				FIMG2D_ERROR("faulted addr: 0x%x\n", g2d_dev->faulted_addr);
+			} else {
+				g2d_reset(g2d_dev);
+				FIMG2D_ERROR("error : waiting for interrupt is timeout\n");
+			}
+			atomic_set(&g2d_dev->is_mmu_faulted, 0);
+			g2d_fail_debug(params);
+			return false;
+		} else if(atomic_read(&g2d_dev->is_mmu_faulted) == 1) {
+			FIMG2D_ERROR("error : sysmmu_faulted but auto recoveried\n");
+			FIMG2D_ERROR("faulted addr: 0x%x\n", g2d_dev->faulted_addr);
+			g2d_fail_debug(params);
+			atomic_set(&g2d_dev->is_mmu_faulted, 0);
+			return false;
+		}
+	}
+	return true;
+}
+
+/*
+ * g2d_init_mem - reserve the device memory region for FIMG2D.
+ * CMA builds: query the device's CMA region and allocate all of it;
+ * bootmem builds: look up the reserved media memory bank (note: *size
+ * is left untouched on that path — caller must preset it).
+ * Returns 0 on success, -1 on failure.
+ */
+int g2d_init_mem(struct device *dev, unsigned int *base, unsigned int *size)
+{
+#ifdef CONFIG_S5P_MEM_CMA
+	struct cma_info mem_info;
+	int err;
+	char cma_name[8];
+#endif
+
+#ifdef CONFIG_S5P_MEM_CMA
+	/* CMA */
+	snprintf(cma_name, sizeof(cma_name), "fimg2d");
+	err = cma_info(&mem_info, dev, 0);
+	/* fix: check the result BEFORE printing — the original read the
+	 * uninitialized mem_info in the debug print when cma_info failed */
+	if (err) {
+		FIMG2D_ERROR("%s: get cma info failed\n", __func__);
+		return -1;
+	}
+	FIMG2D_DEBUG("[cma_info] start_addr : 0x%x, end_addr : 0x%x, "
+		"total_size : 0x%x, free_size : 0x%x\n",
+		mem_info.lower_bound, mem_info.upper_bound,
+		mem_info.total_size, mem_info.free_size);
+	*size = mem_info.total_size;
+	*base = (dma_addr_t)cma_alloc
+		(dev, cma_name, (size_t)(*size), 0);
+
+	FIMG2D_DEBUG("size = 0x%x\n", *size);
+	FIMG2D_DEBUG("*base phys= 0x%x\n", *base);
+	FIMG2D_DEBUG("*base virt = 0x%x\n", (u32)phys_to_virt(*base));
+
+#else
+	*base = s5p_get_media_memory_bank(S5P_MDEV_FIMG2D, 0);
+#endif
+	return 0;
+}
+
diff --git a/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_dev.c b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_dev.c
new file mode 100644
index 0000000..5ccde4a
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_dev.c
@@ -0,0 +1,609 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d3x_dev.c
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file implements fimg2d driver.
+ */
+
+#include <linux/init.h>
+
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <asm/uaccess.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/semaphore.h>
+
+#include <asm/io.h>
+
+#include <mach/cpufreq.h>
+#include <plat/cpu.h>
+#include <plat/fimg2d.h>
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+#include <linux/pm_runtime.h>
+#endif
+
+#include "fimg2d.h"
+#include "fimg2d3x_regs.h"
+
+#include <linux/smp.h>
+
+struct g2d_global *g2d_dev;
+
+/*
+ * g2d_sysmmu_fault - sysmmu fault callback for the G2D block.
+ * Resets the hardware, latches the faulting address for later
+ * reporting, and wakes anyone blocked in g2d_wait_for_finish().
+ * Always returns 0.
+ */
+int g2d_sysmmu_fault(unsigned int faulted_addr, unsigned int pt_base)
+{
+	g2d_reset(g2d_dev);
+
+	atomic_set(&g2d_dev->is_mmu_faulted, 1);
+
+	g2d_dev->faulted_addr = faulted_addr;
+
+	wake_up_interruptible(&g2d_dev->waitq);
+
+	return 0;
+}
+
+
+/*
+ * g2d_irq - blit-completion interrupt handler.
+ * Acknowledges the interrupt in hardware, flags the completion for
+ * the waiter, wakes the waitqueue, and marks the engine idle.
+ */
+irqreturn_t g2d_irq(int irq, void *dev_id)
+{
+	g2d_set_int_finish(g2d_dev);
+
+	g2d_dev->irq_handled = 1;
+
+	wake_up_interruptible(&g2d_dev->waitq);
+
+	atomic_set(&g2d_dev->in_use, 0);
+
+	return IRQ_HANDLED;
+}
+
+
+/* g2d_open - count an opener; no per-fd state is allocated. */
+static int g2d_open(struct inode *inode, struct file *file)
+{
+	atomic_inc(&g2d_dev->num_of_object);
+
+	FIMG2D_DEBUG("Context Opened %d\n", atomic_read(&g2d_dev->num_of_object));
+
+	return 0;
+}
+
+
+/* g2d_release - drop the opener count taken in g2d_open(). */
+static int g2d_release(struct inode *inode, struct file *file)
+{
+	atomic_dec(&g2d_dev->num_of_object);
+
+	FIMG2D_DEBUG("Context Closed %d\n", atomic_read(&g2d_dev->num_of_object));
+
+	return 0;
+}
+
+/*
+ * g2d_mmap - intentionally a no-op: nothing is mapped, the call just
+ * succeeds.  User space obtains the buffer via G2D_GET_MEMORY instead.
+ */
+static int g2d_mmap(struct file* filp, struct vm_area_struct *vma)
+{
+	return 0;
+}
+
+
+/*
+ * g2d_ioctl - user-space entry point for all FIMG2D requests.
+ *
+ * G2D_GET_MEMORY / G2D_GET_MEMORY_SIZE copy the reserved buffer base
+ * and size out.  The DMA cache commands clean/flush a user-supplied
+ * range.  G2D_BLIT programs one blit and, unless the fd is O_NONBLOCK,
+ * waits for completion.
+ *
+ * NOTE(review): G2D_SYNC and G2D_RESET jump to g2d_ioctl_done, which
+ * calls g2d_clk_disable() and mutex_unlock() even though neither the
+ * clock nor the mutex was taken on those paths - confirm the
+ * lock/clock balance.
+ */
+static long g2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	g2d_params params;
+	int ret = -1;
+
+	struct g2d_dma_info dma_info;
+
+	switch(cmd) {
+	case G2D_GET_MEMORY :
+		/* report the physical base of the reserved blit buffer */
+		ret = copy_to_user((unsigned int *)arg,
+			&(g2d_dev->reserved_mem.base), sizeof(g2d_dev->reserved_mem.base));
+		if (ret) {
+			FIMG2D_ERROR("error : copy_to_user\n");
+			return -EINVAL;
+		}
+		return 0;
+
+	case G2D_GET_MEMORY_SIZE :
+		ret = copy_to_user((unsigned int *)arg,
+			&(g2d_dev->reserved_mem.size), sizeof(g2d_dev->reserved_mem.size));
+		if (ret) {
+			FIMG2D_ERROR("error : copy_to_user\n");
+			return -EINVAL;
+		}
+		return 0;
+
+	case G2D_DMA_CACHE_CLEAN :
+	case G2D_DMA_CACHE_FLUSH :
+		/* cache maintenance over a caller-provided address range */
+		mutex_lock(&g2d_dev->lock);
+		ret = copy_from_user(&dma_info, (struct g2d_dma_info *)arg, sizeof(dma_info));
+
+		if (ret) {
+			FIMG2D_ERROR("error : copy_from_user\n");
+			mutex_unlock(&g2d_dev->lock);
+			return -EINVAL;
+		}
+
+		if (dma_info.addr == 0) {
+			FIMG2D_ERROR("addr Null Error!!!\n");
+			mutex_unlock(&g2d_dev->lock);
+			return -EINVAL;
+		}
+
+		g2d_mem_cache_op(cmd, (void *)dma_info.addr, dma_info.size);
+		mutex_unlock(&g2d_dev->lock);
+		return 0;
+
+	case G2D_SYNC :
+		/* spin until the command FIFO drains (see review note above) */
+		g2d_check_fifo_state_wait(g2d_dev);
+		ret = 0;
+		goto g2d_ioctl_done;
+
+	case G2D_RESET :
+		g2d_reset(g2d_dev);
+		FIMG2D_ERROR("G2D TimeOut Error\n");
+		ret = 0;
+		goto g2d_ioctl_done;
+
+	case G2D_BLIT:
+		/* refuse new work while (early-)suspend is in progress */
+		if (atomic_read(&g2d_dev->ready_to_run) == 0)
+			goto g2d_ioctl_done2;
+
+		mutex_lock(&g2d_dev->lock);
+
+		g2d_clk_enable(g2d_dev);
+
+		if (copy_from_user(&params, (struct g2d_params *)arg, sizeof(g2d_params))) {
+			FIMG2D_ERROR("error : copy_from_user\n");
+			goto g2d_ioctl_done;
+		}
+
+		atomic_set(&g2d_dev->in_use, 1);
+		if (atomic_read(&g2d_dev->ready_to_run) == 0)
+			goto g2d_ioctl_done;
+
+		/* presumably fences the page allocator slow path away from the
+		 * user pages the HW will touch - TODO confirm semantics of
+		 * page_alloc_slow_rwsem */
+		if (params.flag.memory_type == G2D_MEMORY_USER)
+			down_write(&page_alloc_slow_rwsem);
+
+		g2d_dev->irq_handled = 0;
+		if (!g2d_do_blit(g2d_dev, &params)) {
+			g2d_dev->irq_handled = 1;
+			if (params.flag.memory_type == G2D_MEMORY_USER)
+				up_write(&page_alloc_slow_rwsem);
+			goto g2d_ioctl_done;
+		}
+
+		/* blocking fd: wait for IRQ/polling completion (or timeout) */
+		if(!(file->f_flags & O_NONBLOCK)) {
+			if (!g2d_wait_for_finish(g2d_dev, &params)) {
+				if (params.flag.memory_type == G2D_MEMORY_USER)
+					up_write(&page_alloc_slow_rwsem);
+				goto g2d_ioctl_done;
+			}
+		}
+
+		if (params.flag.memory_type == G2D_MEMORY_USER)
+			up_write(&page_alloc_slow_rwsem);
+		ret = 0;
+
+		break;
+	default :
+		goto g2d_ioctl_done2;
+		break;
+	}
+
+g2d_ioctl_done :
+
+	g2d_clk_disable(g2d_dev);
+
+	mutex_unlock(&g2d_dev->lock);
+
+	atomic_set(&g2d_dev->in_use, 0);
+
+g2d_ioctl_done2 :
+
+	return ret;
+}
+
+/*
+ * g2d_poll - report POLLOUT|POLLWRNORM once the engine is idle,
+ * otherwise sleep on the device waitqueue.
+ *
+ * NOTE(review): when idle this path calls g2d_clk_disable() and
+ * mutex_unlock() although g2d_ioctl() already releases both on every
+ * exit - confirm the intended pairing; an unpaired mutex_unlock()
+ * here would be a bug.
+ */
+static unsigned int g2d_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+
+	if (atomic_read(&g2d_dev->in_use) == 0) {
+		mask = POLLOUT | POLLWRNORM;
+		g2d_clk_disable(g2d_dev);
+
+		mutex_unlock(&g2d_dev->lock);
+
+	} else {
+		poll_wait(file, &g2d_dev->waitq, wait);
+
+		/* re-check after poll_wait: the IRQ may have finished the blit */
+		if(atomic_read(&g2d_dev->in_use) == 0) {
+			mask = POLLOUT | POLLWRNORM;
+			g2d_clk_disable(g2d_dev);
+
+			mutex_unlock(&g2d_dev->lock);
+		}
+	}
+
+	return mask;
+}
+
+static struct file_operations fimg2d_fops = {
+ .owner = THIS_MODULE,
+ .open = g2d_open,
+ .release = g2d_release,
+ .mmap = g2d_mmap,
+ .unlocked_ioctl = g2d_ioctl,
+ .poll = g2d_poll,
+};
+
+
+static struct miscdevice fimg2d_dev = {
+ .minor = G2D_MINOR,
+ .name = "fimg2d",
+ .fops = &fimg2d_fops,
+};
+
+/*
+ * g2d_probe - bring up the FIMG2D block.
+ *
+ * Maps the register bank, hooks the completion IRQ, parents
+ * sclk_fimg2d to mout_mpll at 267 MHz, reserves the DMA buffer,
+ * registers the misc device and enables the sysmmu.  On failure,
+ * resources acquired so far are released in reverse order through
+ * the error labels.
+ *
+ * NOTE(review): the err_misc_reg path does not release the buffer
+ * reserved by g2d_init_mem() - confirm whether the CMA allocation
+ * must be freed here.
+ */
+static int g2d_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+	struct clk *parent;
+	struct clk *sclk;
+
+	FIMG2D_DEBUG("start probe : name=%s num=%d res[0].start=0x%x res[1].start=0x%x\n",
+			pdev->name, pdev->num_resources,
+			pdev->resource[0].start, pdev->resource[1].start);
+
+	/* alloc g2d global */
+	g2d_dev = kzalloc(sizeof(*g2d_dev), GFP_KERNEL);
+	if (!g2d_dev) {
+		FIMG2D_ERROR( "not enough memory\n");
+		ret = -ENOENT;
+		goto probe_out;
+	}
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* to use the runtime PM helper functions */
+	pm_runtime_enable(&pdev->dev);
+	/* enable the power domain */
+	pm_runtime_get_sync(&pdev->dev);
+#endif
+
+	/* get the memory region */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if(res == NULL) {
+		FIMG2D_ERROR("failed to get memory region resouce\n");
+		ret = -ENOENT;
+		goto err_get_res;
+	}
+
+	/* request memory region */
+	g2d_dev->mem = request_mem_region(res->start,
+					res->end - res->start + 1,
+					pdev->name);
+	if(g2d_dev->mem == NULL) {
+		FIMG2D_ERROR("failed to reserve memory region\n");
+		ret = -ENOENT;
+		goto err_mem_req;
+	}
+
+	/* ioremap */
+	g2d_dev->base = ioremap(g2d_dev->mem->start,
+				g2d_dev->mem->end - res->start + 1);
+	if(g2d_dev->base == NULL) {
+		FIMG2D_ERROR("failed ioremap\n");
+		ret = -ENOENT;
+		goto err_mem_map;
+	}
+
+	/* get irq */
+	g2d_dev->irq_num = platform_get_irq(pdev, 0);
+	if(g2d_dev->irq_num <= 0) {
+		FIMG2D_ERROR("failed to get irq resouce\n");
+		ret = -ENOENT;
+		goto err_irq_req;
+	}
+
+	/* blocking I/O */
+	init_waitqueue_head(&g2d_dev->waitq);
+
+	/* request irq */
+	ret = request_irq(g2d_dev->irq_num, g2d_irq,
+			IRQF_DISABLED, pdev->name, NULL);
+	if (ret) {
+		FIMG2D_ERROR("request_irq(g2d) failed.\n");
+		ret = -ENOENT;
+		goto err_irq_req;
+	}
+
+	/* clock domain setting */
+	parent = clk_get(&pdev->dev, "mout_mpll");
+	if (IS_ERR(parent)) {
+		FIMG2D_ERROR("failed to get parent clock\n");
+		ret = -ENOENT;
+		goto err_clk_get1;
+	}
+
+	sclk = clk_get(&pdev->dev, "sclk_fimg2d");
+	if (IS_ERR(sclk)) {
+		FIMG2D_ERROR("failed to get sclk_g2d clock\n");
+		ret = -ENOENT;
+		goto err_clk_get2;
+	}
+
+	clk_set_parent(sclk, parent);
+	clk_set_rate(sclk, 267 * MHZ);	/* 266 Mhz */
+
+	/* clock for gating */
+	g2d_dev->clock = clk_get(&pdev->dev, "fimg2d");
+	if (IS_ERR(g2d_dev->clock)) {
+		FIMG2D_ERROR("failed to get clock clock\n");
+		ret = -ENOENT;
+		goto err_clk_get3;
+	}
+
+	/* reserve the DMA buffer handed out via G2D_GET_MEMORY */
+	ret = g2d_init_mem(&pdev->dev, &g2d_dev->reserved_mem.base, &g2d_dev->reserved_mem.size);
+
+	if (ret != 0) {
+		FIMG2D_ERROR("failed to init. fimg2d mem");
+		ret = -ENOMEM;
+		goto err_mem;
+	}
+
+	/* atomic init */
+	atomic_set(&g2d_dev->in_use, 0);
+	atomic_set(&g2d_dev->num_of_object, 0);
+	atomic_set(&g2d_dev->is_mmu_faulted, 0);
+	g2d_dev->faulted_addr = 0;
+
+	/* misc register */
+	ret = misc_register(&fimg2d_dev);
+	if (ret) {
+		FIMG2D_ERROR("cannot register miscdev on minor=%d (%d)\n",
+			G2D_MINOR, ret);
+		ret = -ENOMEM;
+		goto err_misc_reg;
+	}
+
+	mutex_init(&g2d_dev->lock);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	g2d_dev->early_suspend.suspend = g2d_early_suspend;
+	g2d_dev->early_suspend.resume = g2d_late_resume;
+	g2d_dev->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+	register_early_suspend(&g2d_dev->early_suspend);
+#endif
+
+	g2d_dev->dev = &pdev->dev;
+	atomic_set(&g2d_dev->ready_to_run, 1);
+
+	g2d_sysmmu_on(g2d_dev);
+
+	FIMG2D_DEBUG("g2d_probe ok!\n");
+
+	return 0;
+
+err_misc_reg:
+err_mem:
+	clk_put(g2d_dev->clock);
+	g2d_dev->clock = NULL;
+err_clk_get3:
+	clk_put(sclk);
+err_clk_get2:
+	clk_put(parent);
+err_clk_get1:
+	free_irq(g2d_dev->irq_num, NULL);
+err_irq_req:
+	iounmap(g2d_dev->base);
+err_mem_map:
+	release_resource(g2d_dev->mem);
+	kfree(g2d_dev->mem);
+err_mem_req:
+err_get_res:
+	kfree(g2d_dev);
+probe_out:
+	FIMG2D_ERROR("g2d: sec_g2d_probe fail!\n");
+	return ret;
+}
+
+
+/*
+ * g2d_remove - tear down everything g2d_probe() set up, in reverse:
+ * IRQ, register mapping, misc device, clock, mutex, early-suspend
+ * hook, the driver context, and finally the power domain reference.
+ */
+static int g2d_remove(struct platform_device *dev)
+{
+	FIMG2D_DEBUG("g2d_remove called !\n");
+
+	free_irq(g2d_dev->irq_num, NULL);
+
+	if (g2d_dev->mem != NULL) {
+		FIMG2D_INFO("releasing resource\n");
+		iounmap(g2d_dev->base);
+		release_resource(g2d_dev->mem);
+		kfree(g2d_dev->mem);
+	}
+
+	misc_deregister(&fimg2d_dev);
+
+	atomic_set(&g2d_dev->in_use, 0);
+	atomic_set(&g2d_dev->num_of_object, 0);
+
+	g2d_clk_disable(g2d_dev);
+
+	if (g2d_dev->clock) {
+		clk_put(g2d_dev->clock);
+		g2d_dev->clock = NULL;
+	}
+
+	mutex_destroy(&g2d_dev->lock);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	unregister_early_suspend(&g2d_dev->early_suspend);
+#endif
+
+	kfree(g2d_dev);
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* disable the power domain */
+	pm_runtime_put(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+#endif
+
+	FIMG2D_DEBUG("g2d_remove ok!\n");
+
+	return 0;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+/*
+ * g2d_early_suspend - block new blits, poll (2 ms sleeps, no timeout)
+ * until the engine is idle, then switch the sysmmu off and drop the
+ * power-domain reference.
+ */
+void g2d_early_suspend(struct early_suspend *h)
+{
+	atomic_set(&g2d_dev->ready_to_run, 0);
+
+	/* wait until G2D running is finished */
+	while(1) {
+		if (!atomic_read(&g2d_dev->in_use))
+			break;
+
+		msleep_interruptible(2);
+	}
+
+	g2d_sysmmu_off(g2d_dev);
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* disable the power domain */
+	pm_runtime_put(g2d_dev->dev);
+#endif
+}
+
+/*
+ * g2d_late_resume - mirror of g2d_early_suspend: re-acquire the power
+ * domain, re-enable the sysmmu and accept blits again.
+ */
+void g2d_late_resume(struct early_suspend *h)
+{
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* enable the power domain */
+	pm_runtime_get_sync(g2d_dev->dev);
+#endif
+
+	g2d_sysmmu_on(g2d_dev);
+
+	atomic_set(&g2d_dev->ready_to_run, 1);
+
+}
+#endif
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND)
+/*
+ * g2d_suspend - system-suspend path used when early-suspend is not
+ * built in; same sequence as g2d_early_suspend().
+ */
+static int g2d_suspend(struct platform_device *dev, pm_message_t state)
+{
+	atomic_set(&g2d_dev->ready_to_run, 0);
+
+	/* wait until G2D running is finished */
+	while(1) {
+		if (!atomic_read(&g2d_dev->in_use))
+			break;
+
+		msleep_interruptible(2);
+	}
+
+	g2d_sysmmu_off(g2d_dev);
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* disable the power domain */
+	pm_runtime_put(g2d_dev->dev);
+#endif
+
+	return 0;
+}
+/* g2d_resume - undo g2d_suspend(): power domain, sysmmu, run flag. */
+static int g2d_resume(struct platform_device *pdev)
+{
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* enable the power domain */
+	pm_runtime_get_sync(g2d_dev->dev);
+#endif
+
+	g2d_sysmmu_on(g2d_dev);
+
+	atomic_set(&g2d_dev->ready_to_run, 1);
+
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+/* Runtime-PM callbacks are stubs: the power domain core does the work. */
+static int g2d_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int g2d_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops g2d_pm_ops = {
+	.runtime_suspend	= g2d_runtime_suspend,
+	.runtime_resume		= g2d_runtime_resume,
+};
+#endif
+
+
+static struct platform_driver fimg2d_driver = {
+ .probe = g2d_probe,
+ .remove = g2d_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND)
+ .suspend = g2d_suspend,
+ .resume = g2d_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "s5p-fimg2d",
+#if defined(CONFIG_EXYNOS_DEV_PD)
+ .pm = &g2d_pm_ops,
+#endif
+ },
+};
+
+/*
+ * g2d_init - module entry: register the platform driver.
+ * Returns 0 on success, -1 on registration failure (callers only
+ * test for non-zero).
+ */
+int __init g2d_init(void)
+{
+	if(platform_driver_register(&fimg2d_driver)!=0) {
+		FIMG2D_ERROR("platform device register Failed \n");
+		return -1;
+	}
+
+	FIMG2D_DEBUG("ok!\n");
+
+	return 0;
+}
+
+/* g2d_exit - module exit: unregister the platform driver. */
+void g2d_exit(void)
+{
+	platform_driver_unregister(&fimg2d_driver);
+
+	FIMG2D_DEBUG("ok!\n");
+}
+
+module_init(g2d_init);
+module_exit(g2d_exit);
+
+MODULE_AUTHOR("");
+MODULE_DESCRIPTION("SEC G2D Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/fimg2d3x/Kconfig b/drivers/media/video/samsung/fimg2d3x/Kconfig
new file mode 100644
index 0000000..dccbc16
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x/Kconfig
@@ -0,0 +1,22 @@
+# drivers/media/video/samsung/fimg2d3x/Kconfig
+#
+# Copyright (c) 2010 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+#
+# Licensed under GPLv2
+
+config VIDEO_FIMG2D3X
+ bool "Samsung Graphics 2D Driver"
+ select VIDEO_FIMG2D
+ depends on VIDEO_SAMSUNG && CPU_EXYNOS4210
+ default n
+ ---help---
+ This is a graphics 2D (FIMG2D 3.x) driver for Samsung ARM based SoC.
+
+config VIDEO_FIMG2D3X_DEBUG
+ bool "Enables FIMG2D debug messages"
+ depends on VIDEO_FIMG2D3X
+ default n
+ ---help---
+ This enables FIMG2D driver debug messages.
+
diff --git a/drivers/media/video/samsung/fimg2d3x/Makefile b/drivers/media/video/samsung/fimg2d3x/Makefile
new file mode 100644
index 0000000..a24f530
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x/Makefile
@@ -0,0 +1,17 @@
+# drivers/media/video/samsung/fimg2d3x/Makefile
+#
+# Copyright (c) 2010 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+#
+# Licensed under GPLv2
+
+obj-y :=
+obj-m :=
+obj-n :=
+obj- :=
+
+obj-$(CONFIG_VIDEO_FIMG2D3X) += fimg2d_dev.o fimg2d_cache.o fimg2d3x_regs.o fimg2d_core.o
+
+ifeq ($(CONFIG_VIDEO_FIMG2D3X_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/media/video/samsung/fimg2d3x/fimg2d.h b/drivers/media/video/samsung/fimg2d3x/fimg2d.h
new file mode 100644
index 0000000..2c2c07b
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x/fimg2d.h
@@ -0,0 +1,397 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d_3x.h
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SEC_FIMG2D_H_
+#define __SEC_FIMG2D_H_
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+
+#define G2D_SFR_SIZE 0x1000
+
+#define TRUE (1)
+#define FALSE (0)
+
+#define G2D_MINOR 240
+
+#define G2D_IOCTL_MAGIC 'G'
+
+#define G2D_BLIT _IO(G2D_IOCTL_MAGIC,0)
+#define G2D_GET_VERSION _IO(G2D_IOCTL_MAGIC,1)
+#define G2D_GET_MEMORY _IOR(G2D_IOCTL_MAGIC,2, unsigned int)
+#define G2D_GET_MEMORY_SIZE _IOR(G2D_IOCTL_MAGIC,3, unsigned int)
+#define G2D_DMA_CACHE_CLEAN _IOWR(G2D_IOCTL_MAGIC,4, struct g2d_dma_info)
+#define G2D_DMA_CACHE_FLUSH _IOWR(G2D_IOCTL_MAGIC,5, struct g2d_dma_info)
+#define G2D_SYNC _IO(G2D_IOCTL_MAGIC,6)
+#define G2D_RESET _IO(G2D_IOCTL_MAGIC, 7)
+
+#define G2D_TIMEOUT (1000)
+
+#define G2D_MAX_WIDTH (2048)
+#define G2D_MAX_HEIGHT (2048)
+
+#define G2D_ALPHA_VALUE_MAX (255)
+
+#define G2D_POLLING (1<<0)
+#define G2D_INTERRUPT (0<<0)
+#define G2D_CACHE_OP (1<<1)
+#define G2D_NONE_INVALIDATE (0<<1)
+#define G2D_HYBRID_MODE (1<<2)
+
+#define G2D_PT_NOTVALID (0)
+#define G2D_PT_CACHED (1)
+#define G2D_PT_UNCACHED (2)
+
+#define GET_FRAME_SIZE(rect) ((rect.full_w) * (rect.full_h) * (rect.bytes_per_pixel))
+#define GET_RECT_SIZE(rect) ((rect.full_w) * (rect.h) * (rect.bytes_per_pixel))
+#define GET_REAL_SIZE(rect) ((rect.full_w) * (rect.h) * (rect.bytes_per_pixel))
+#define GET_STRIDE(rect) ((rect.full_w) * (rect.bytes_per_pixel))
+#define GET_SPARE_BYTES(rect) ((rect.full_w - rect.w) * rect.bytes_per_pixel)
+#define GET_START_ADDR(rect) (rect.addr + ((rect.y * rect.full_w) * rect.bytes_per_pixel))
+#define GET_REAL_START_ADDR(rect) GET_START_ADDR(rect) + (rect.x * rect.bytes_per_pixel)
+#define GET_REAL_END_ADDR(rect) GET_START_ADDR(rect) + GET_RECT_SIZE(rect) - ((rect.full_w - (rect.x + rect.w)) * rect.bytes_per_pixel)
+
+#define GET_RECT_SIZE_C(rect, clip) ((rect.full_w) * (clip.b - clip.t) * (rect.bytes_per_pixel))
+#define GET_START_ADDR_C(rect, clip) (rect.addr + ((clip.t * rect.full_w) * rect.bytes_per_pixel))
+#define GET_REAL_START_ADDR_C(rect, clip) GET_START_ADDR_C(rect, clip) + (clip.l * rect.bytes_per_pixel)
+#define GET_REAL_END_ADDR_C(rect, clip) GET_START_ADDR_C(rect, clip) + GET_RECT_SIZE_C(rect, clip) - ((rect.full_w - clip.r) * rect.bytes_per_pixel)
+
+#define GET_USEC(before, after) ((after.tv_sec - before.tv_sec) * 1000000 + (after.tv_usec - before.tv_usec))
+
+typedef enum {
+ G2D_ROT_0 = 0,
+ G2D_ROT_90,
+ G2D_ROT_180,
+ G2D_ROT_270,
+ G2D_ROT_X_FLIP,
+ G2D_ROT_Y_FLIP
+} G2D_ROT_DEG;
+
+typedef enum {
+ G2D_ALPHA_BLENDING_MIN = 0, // wholly transparent
+ G2D_ALPHA_BLENDING_MAX = 255, // 255
+ G2D_ALPHA_BLENDING_OPAQUE = 256, // opaque
+} G2D_ALPHA_BLENDING_MODE;
+
+typedef enum {
+ G2D_COLORKEY_NONE = 0,
+ G2D_COLORKEY_SRC_ON,
+ G2D_COLORKEY_DST_ON,
+ G2D_COLORKEY_SRC_DST_ON,
+}G2D_COLORKEY_MODE;
+
+typedef enum {
+ G2D_BLUE_SCREEN_NONE = 0,
+ G2D_BLUE_SCREEN_TRANSPARENT,
+ G2D_BLUE_SCREEN_WITH_COLOR,
+}G2D_BLUE_SCREEN_MODE;
+
+typedef enum {
+ G2D_ROP_SRC = 0,
+ G2D_ROP_DST,
+ G2D_ROP_SRC_AND_DST,
+ G2D_ROP_SRC_OR_DST,
+ G2D_ROP_3RD_OPRND,
+ G2D_ROP_SRC_AND_3RD_OPRND,
+ G2D_ROP_SRC_OR_3RD_OPRND,
+ G2D_ROP_SRC_XOR_3RD_OPRND,
+ G2D_ROP_DST_OR_3RD,
+}G2D_ROP_TYPE;
+
+typedef enum {
+ G2D_THIRD_OP_NONE = 0,
+ G2D_THIRD_OP_PATTERN,
+ G2D_THIRD_OP_FG,
+ G2D_THIRD_OP_BG
+}G2D_THIRD_OP_MODE;
+
+typedef enum {
+ G2D_BLACK = 0,
+ G2D_RED,
+ G2D_GREEN,
+ G2D_BLUE,
+ G2D_WHITE,
+ G2D_YELLOW,
+ G2D_CYAN,
+ G2D_MAGENTA
+}G2D_COLOR;
+
+typedef enum {
+ G2D_RGB_565 = ((0<<4)|2),
+
+ G2D_ABGR_8888 = ((2<<4)|1),
+ G2D_BGRA_8888 = ((3<<4)|1),
+ G2D_ARGB_8888 = ((0<<4)|1),
+ G2D_RGBA_8888 = ((1<<4)|1),
+
+ G2D_XBGR_8888 = ((2<<4)|0),
+ G2D_BGRX_8888 = ((3<<4)|0),
+ G2D_XRGB_8888 = ((0<<4)|0),
+ G2D_RGBX_8888 = ((1<<4)|0),
+
+ G2D_ABGR_1555 = ((2<<4)|4),
+ G2D_BGRA_5551 = ((3<<4)|4),
+ G2D_ARGB_1555 = ((0<<4)|4),
+ G2D_RGBA_5551 = ((1<<4)|4),
+
+ G2D_XBGR_1555 = ((2<<4)|3),
+ G2D_BGRX_5551 = ((3<<4)|3),
+ G2D_XRGB_1555 = ((0<<4)|3),
+ G2D_RGBX_5551 = ((1<<4)|3),
+
+ G2D_ABGR_4444 = ((2<<4)|6),
+ G2D_BGRA_4444 = ((3<<4)|6),
+ G2D_ARGB_4444 = ((0<<4)|6),
+ G2D_RGBA_4444 = ((1<<4)|6),
+
+ G2D_XBGR_4444 = ((2<<4)|5),
+ G2D_BGRX_4444 = ((3<<4)|5),
+ G2D_XRGB_4444 = ((0<<4)|5),
+ G2D_RGBX_4444 = ((1<<4)|5),
+
+ G2D_PACKED_BGR_888 = ((2<<4)|7),
+ G2D_PACKED_RGB_888 = ((0<<4)|7),
+
+ G2D_MAX_COLOR_SPACE
+}G2D_COLOR_SPACE;
+
+typedef enum {
+ G2D_Clear_Mode, //!< [0, 0]
+ G2D_Src_Mode, //!< [Sa, Sc]
+ G2D_Dst_Mode, //!< [Da, Dc]
+ G2D_SrcOver_Mode, //!< [Sa + Da - Sa*Da, Rc = Sc + (1 - Sa)*Dc]
+ G2D_DstOver_Mode, //!< [Sa + Da - Sa*Da, Rc = Dc + (1 - Da)*Sc]
+ G2D_SrcIn_Mode, //!< [Sa * Da, Sc * Da]
+ G2D_DstIn_Mode, //!< [Sa * Da, Sa * Dc]
+ G2D_SrcOut_Mode, //!< [Sa * (1 - Da), Sc * (1 - Da)]
+ G2D_DstOut_Mode, //!< [Da * (1 - Sa), Dc * (1 - Sa)]
+ G2D_SrcATop_Mode, //!< [Da, Sc * Da + (1 - Sa) * Dc]
+ G2D_DstATop_Mode, //!< [Sa, Sa * Dc + Sc * (1 - Da)]
+ G2D_Xor_Mode, //!< [Sa + Da - 2 * Sa * Da, Sc * (1 - Da) + (1 - Sa) * Dc]
+
+ // these modes are defined in the SVG Compositing standard
+ // http://www.w3.org/TR/2009/WD-SVGCompositing-20090430/
+ G2D_Plus_Mode,
+ G2D_Multiply_Mode,
+ G2D_Screen_Mode,
+ G2D_Overlay_Mode,
+ G2D_Darken_Mode,
+ G2D_Lighten_Mode,
+ G2D_ColorDodge_Mode,
+ G2D_ColorBurn_Mode,
+ G2D_HardLight_Mode,
+ G2D_SoftLight_Mode,
+ G2D_Difference_Mode,
+ G2D_Exclusion_Mode,
+
+ kLastMode = G2D_Exclusion_Mode
+}G2D_PORTTERDUFF_MODE;
+
+typedef enum {
+ G2D_MEMORY_KERNEL,
+ G2D_MEMORY_USER
+}G2D_MEMORY_TYPE;
+
+/*
+ * g2d_rect - one image operand: (x,y,w,h) window inside a surface of
+ * full_w x full_h pixels starting at addr (user or kernel address,
+ * per g2d_flag.memory_type).
+ */
+typedef struct {
+	int	x;
+	int	y;
+	unsigned int	w;
+	unsigned int	h;
+	unsigned int	full_w;
+	unsigned int	full_h;
+	int	color_format;
+	unsigned int	bytes_per_pixel;
+	unsigned char	* addr;
+} g2d_rect;
+
+/* g2d_clip - destination clip window: top/bottom/left/right in pixels. */
+typedef struct {
+	unsigned int	t;
+	unsigned int	b;
+	unsigned int	l;
+	unsigned int	r;
+} g2d_clip;
+
+/* g2d_flag - per-blit options (rotation, alpha, ROP, render mode...). */
+typedef struct {
+	unsigned int	rotate_val;
+	unsigned int	alpha_val;
+
+	unsigned int	blue_screen_mode;	//true : enable, false : disable
+	unsigned int	color_key_val;		//screen color value
+	unsigned int	color_switch_val;	//one color
+
+	unsigned int	src_color;			// when set one color on SRC
+
+	unsigned int	third_op_mode;
+	unsigned int	rop_mode;
+	unsigned int	mask_mode;
+	unsigned int	render_mode;
+	unsigned int	potterduff_mode;
+	unsigned int	memory_type;
+} g2d_flag;
+
+/* g2d_params - one complete blit request as passed through G2D_BLIT. */
+typedef struct {
+	g2d_rect	src_rect;
+	g2d_rect	dst_rect;
+	g2d_clip	clip;
+	g2d_flag	flag;
+} g2d_params;
+
+/* for reserved memory */
+struct g2d_reserved_mem {
+	/* buffer base */
+	unsigned int	base;
+	/* buffer size */
+	unsigned int	size;
+};
+
+
+/* g2d_dma_info - address range for the DMA cache-maintenance ioctls. */
+struct g2d_dma_info {
+	unsigned long addr;
+	unsigned int size;
+};
+
+/* g2d_platdata - board-supplied clock naming/rate configuration. */
+struct g2d_platdata {
+	int hw_ver;
+	const char *parent_clkname;
+	const char *clkname;
+	const char *gate_clkname;
+	unsigned long clkrate;
+};
+
+/* g2d_timer - simple timeval pair for profiling blit durations. */
+struct g2d_timer {
+	int cnt;
+	struct timeval start_marker;
+	struct timeval cur_marker;
+};
+
+/*
+ * g2d_global - the single driver context (allocated in g2d_probe).
+ * Holds the register mapping, IRQ number, gating clock, the waitqueue
+ * blits sleep on, state flags (in_use, ready_to_run, irq_handled,
+ * is_mmu_faulted) and the reserved DMA buffer exported to user space.
+ */
+struct g2d_global {
+	int		irq_num;
+	struct resource	* mem;
+	void   __iomem	* base;
+	struct clk	* clock;
+	atomic_t	clk_enable_flag;
+	wait_queue_head_t	waitq;
+	atomic_t	in_use;
+	atomic_t	num_of_object;
+	struct mutex	lock;
+	struct device	* dev;
+	atomic_t	ready_to_run;
+	int		src_attribute;
+	int		dst_attribute;
+
+	struct g2d_reserved_mem	reserved_mem;	/* for reserved memory */
+	atomic_t	is_mmu_faulted;
+	unsigned int	faulted_addr;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	struct early_suspend	early_suspend;
+#endif
+	int	irq_handled;
+};
+
+
+/****** debug message API *****/
+enum fimg2d_log {
+ FIMG2D_LOG_DEBUG = 0x1000,
+ FIMG2D_LOG_INFO = 0x0100,
+ FIMG2D_LOG_WARN = 0x0010,
+ FIMG2D_LOG_ERR = 0x0001,
+};
+
+/* debug macro */
+#define FIMG2D_LOG_DEFAULT (FIMG2D_LOG_WARN | FIMG2D_LOG_ERR)
+
+#define FIMG2D_DEBUG(fmt, ...) \
+ do { \
+ if (FIMG2D_LOG_DEFAULT & FIMG2D_LOG_DEBUG) \
+ printk(KERN_DEBUG "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define FIMG2D_INFO(fmt, ...) \
+ do { \
+ if (FIMG2D_LOG_DEFAULT & FIMG2D_LOG_INFO) \
+ printk(KERN_INFO "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define FIMG2D_WARN(fmt, ...) \
+ do { \
+ if (FIMG2D_LOG_DEFAULT & FIMG2D_LOG_WARN) \
+ printk(KERN_WARNING "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+
+#define FIMG2D_ERROR(fmt, ...) \
+ do { \
+ if (FIMG2D_LOG_DEFAULT & FIMG2D_LOG_ERR) \
+ printk(KERN_ERR "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+
+#define fimg2d_dbg(fmt, ...) FIMG2D_DEBUG(fmt, ##__VA_ARGS__)
+#define fimg2d_info(fmt, ...) FIMG2D_INFO(fmt, ##__VA_ARGS__)
+#define fimg2d_warn(fmt, ...) FIMG2D_WARN(fmt, ##__VA_ARGS__)
+#define fimg2d_err(fmt, ...) FIMG2D_ERROR(fmt, ##__VA_ARGS__)
+
+
+/**** function declearation***************************/
+int g2d_check_params(g2d_params *params);
+void g2d_start_bitblt(struct g2d_global *g2d_dev, g2d_params *params);
+void g2d_check_fifo_state_wait(struct g2d_global *g2d_dev);
+u32 g2d_set_src_img(struct g2d_global *g2d_dev, g2d_rect * rect, g2d_flag * flag);
+u32 g2d_set_dst_img(struct g2d_global *g2d_dev, g2d_rect * rect);
+u32 g2d_set_pattern(struct g2d_global *g2d_dev, g2d_rect * rect, g2d_flag * flag);
+u32 g2d_set_clip_win(struct g2d_global *g2d_dev, g2d_clip * rect);
+u32 g2d_set_rotation(struct g2d_global *g2d_dev, g2d_flag * flag);
+u32 g2d_set_color_key(struct g2d_global *g2d_dev, g2d_flag * flag);
+u32 g2d_set_alpha(struct g2d_global *g2d_dev, g2d_flag * flag);
+void g2d_set_bitblt_cmd(struct g2d_global *g2d_dev, g2d_rect * src_rect, g2d_rect * dst_rect, g2d_clip * clip, u32 blt_cmd);
+void g2d_reset(struct g2d_global *g2d_dev);
+void g2d_disable_int(struct g2d_global *g2d_dev);
+void g2d_set_int_finish(struct g2d_global *g2d_dev);
+
+/* fimg2d_cache */
+void g2d_clip_for_src(g2d_rect *src_rect, g2d_rect *dst_rect, g2d_clip *clip, g2d_clip *src_clip);
+void g2d_mem_inner_cache(g2d_params *params);
+void g2d_mem_outer_cache(struct g2d_global *g2d_dev, g2d_params *params, int *need_dst_clean);
+void g2d_mem_cache_oneshot(void *src_addr, void *dst_addr, unsigned long src_size, unsigned long dst_size);
+u32 g2d_mem_cache_op(unsigned int cmd, void * addr, unsigned int size);
+void g2d_mem_outer_cache_flush(void *start_addr, unsigned long size);
+void g2d_mem_outer_cache_clean(const void *start_addr, unsigned long size);
+void g2d_mem_outer_cache_inv(g2d_params *params);
+u32 g2d_check_pagetable(void * vaddr, unsigned int size, unsigned long pgd);
+void g2d_pagetable_clean(const void *start_addr, unsigned long size, unsigned long pgd);
+int g2d_check_need_dst_cache_clean(g2d_params * params);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void g2d_early_suspend(struct early_suspend *h);
+void g2d_late_resume(struct early_suspend *h);
+#endif
+
+/* fimg2d_core */
+int g2d_clk_enable(struct g2d_global *g2d_dev);
+int g2d_clk_disable(struct g2d_global *g2d_dev);
+void g2d_sysmmu_on(struct g2d_global *g2d_dev);
+void g2d_sysmmu_off(struct g2d_global *g2d_dev);
+void g2d_sysmmu_set_pgd(u32 pgd);
+void g2d_fail_debug(g2d_params *params);
+int g2d_init_regs(struct g2d_global *g2d_dev, g2d_params *params);
+int g2d_do_blit(struct g2d_global *g2d_dev, g2d_params *params);
+int g2d_wait_for_finish(struct g2d_global *g2d_dev, g2d_params *params);
+int g2d_init_mem(struct device *dev, unsigned int *base, unsigned int *size);
+
+#endif /*__SEC_FIMG2D_H_*/
diff --git a/drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.c b/drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.c
new file mode 100644
index 0000000..33ce53e
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.c
@@ -0,0 +1,376 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.c
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file implements fimg2d register control functions.
+ */
+
+#include <mach/map.h>
+#include <asm/io.h>
+#include <mach/regs-fimg2d3x.h>
+
+#include "fimg2d3x_regs.h"
+#include "fimg2d.h"
+
+/*
+ * get_rot_config - translate a G2D_ROT_* value into the three register
+ * fields the hardware needs: @rot (90-degree rotation on/off) and the
+ * X/Y direction bits for source (@src_dir) and destination (@dst_dir).
+ * Unknown values fall back to no rotation, no flip.
+ */
+void get_rot_config(unsigned int rotate_value, u32 *rot, u32 *src_dir, u32 *dst_dir)
+{
+	switch(rotate_value) {
+	case G2D_ROT_90:
+		*rot = 1;	/* rotation = 1, src_y_dir == dst_y_dir, src_x_dir == dst_x_dir */
+		*src_dir = 0;
+		*dst_dir = 0;
+		break;
+
+	case G2D_ROT_270:
+		*rot = 1;	/* rotation = 1, src_y_dir != dst_y_dir, src_x_dir != dst_x_dir */
+		*src_dir = 0;
+		*dst_dir = 0x3;
+		break;
+
+	case G2D_ROT_180:
+		*rot = 0;	/* rotation = 0, src_y_dir != dst_y_dir, src_x_dir != dst_x_dir */
+		*src_dir = 0;
+		*dst_dir = 0x3;
+		break;
+
+	case G2D_ROT_X_FLIP:
+		*rot = 0;	/* rotation = 0, src_y_dir != dst_y_dir */
+		*src_dir = 0;
+		*dst_dir = 0x2;
+		break;
+
+	case G2D_ROT_Y_FLIP:
+		*rot = 0;	/* rotation = 0, src_x_dir != dst_y_dir */
+		*src_dir = 0;
+		*dst_dir = 0x1;
+		break;
+
+	default :
+		*rot = 0;	/* rotation = 0; */
+		*src_dir = 0;
+		*dst_dir = 0;
+		break;
+	}
+
+	return ;
+}
+
+/*
+ * g2d_check_params - sanity-check a blit request before programming
+ * the hardware: non-negative origins, non-zero extents, extents under
+ * 8000, and alpha <= G2D_ALPHA_BLENDING_OPAQUE.
+ * Returns 0 when valid, -1 otherwise.
+ * NOTE(review): the 8000 bound disagrees with G2D_MAX_WIDTH/HEIGHT
+ * (2048) in fimg2d.h - confirm which limit the hardware enforces.
+ */
+int g2d_check_params(g2d_params *params)
+{
+	g2d_rect * src_rect = &params->src_rect;
+	g2d_rect * dst_rect = &params->dst_rect;
+	g2d_flag * flag = &params->flag;
+
+	/* source */
+	if (0 > src_rect->x || 0 > src_rect->y) {
+		return -1;
+	}
+
+	if (0 == src_rect->h || 0 == src_rect->w) {
+		return -1;
+	}
+
+	if (8000 < src_rect->x+src_rect->w || 8000 < src_rect->y+src_rect->h) {
+		return -1;
+	}
+
+	/* destination */
+	if (0 > dst_rect->x || 0 > dst_rect->y) {
+		return -1;
+	}
+
+	if (0 == dst_rect->h || 0 == dst_rect->w) {
+		return -1;
+	}
+
+	if (8000 < dst_rect->x+dst_rect->w || 8000 < dst_rect->y+dst_rect->h) {
+		return -1;
+	}
+
+	if (flag->alpha_val > G2D_ALPHA_BLENDING_OPAQUE) {
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * g2d_check_fifo_state_wait - busy-spin until FIFO_STAT_REG bit 0
+ * reports the engine finished its command stream.
+ * NOTE(review): unbounded spin with no timeout or cpu_relax(); a
+ * wedged engine hangs the caller here.
+ */
+void g2d_check_fifo_state_wait(struct g2d_global *g2d_dev)
+{
+	/* 1 = The graphics engine finishes the execution of command. */
+	/* 0 = in the middle of rendering process. */
+	while(!(readl(g2d_dev->base + FIFO_STAT_REG) & 0x1));
+
+	return;
+}
+
+
+/*
+ * g2d_set_src_img - program the source operand registers.
+ * In Porter-Duff Clear mode the source is replaced by a constant
+ * foreground color of 0; otherwise base address, stride, color mode
+ * and the source window corners are written from @rect.
+ * Returns the (currently unmodified) blit command bits.
+ */
+u32 g2d_set_src_img(struct g2d_global *g2d_dev, g2d_rect * rect, g2d_flag * flag)
+{
+	u32 data    = 0;
+	u32 blt_cmd = 0;
+
+	/* set source to one color */
+	//if(rect == NULL)
+	if (flag->potterduff_mode == G2D_Clear_Mode) {
+		/* select source */
+		writel(G2D_SRC_SELECT_R_USE_FG_COLOR, g2d_dev->base + SRC_SELECT_REG);
+
+		/* foreground color */
+		// writel(flag->src_color, g2d_dev->base + FG_COLOR_REG);
+		writel(0, g2d_dev->base + FG_COLOR_REG);
+	} else {
+		/* select source */
+		writel(G2D_SRC_SELECT_R_NORMAL, g2d_dev->base + SRC_SELECT_REG);
+
+		/* set base address of source image */
+		writel((u32)rect->addr, g2d_dev->base + SRC_BASE_ADDR_REG);
+
+		/* set stride */
+		writel(rect->full_w * rect->bytes_per_pixel, g2d_dev->base + SRC_STRIDE_REG);
+
+		/* set color mode */
+		writel(rect->color_format, g2d_dev->base + SRC_COLOR_MODE_REG);
+
+		/* set coordinate of source image */
+		data = (rect->y << 16) | (rect->x);
+		writel(data, g2d_dev->base + SRC_LEFT_TOP_REG);
+
+		data = ((rect->y + rect->h) << 16) | (rect->x + rect->w);
+		writel(data, g2d_dev->base + SRC_RIGHT_BOTTOM_REG);
+
+	}
+
+	return blt_cmd;
+}
+
+/*
+ * g2d_set_dst_img - program the destination operand registers:
+ * base address, stride, color mode and the destination window
+ * corners from @rect.  Returns the (unmodified) blit command bits.
+ */
+u32 g2d_set_dst_img(struct g2d_global *g2d_dev, g2d_rect * rect)
+{
+	u32 data    = 0;
+	u32 blt_cmd = 0;
+
+	/* select destination */
+	writel(G2D_DST_SELECT_R_NORMAL, g2d_dev->base + DST_SELECT_REG);
+
+	/* set base address of destination image */
+	writel((u32)rect->addr, g2d_dev->base + DST_BASE_ADDR_REG);
+
+	/* set stride */
+	writel(rect->full_w * rect->bytes_per_pixel, g2d_dev->base + DST_STRIDE_REG);
+
+	/* set color mode */
+	writel(rect->color_format, g2d_dev->base + DST_COLOR_MODE_REG);
+
+	/* set coordinate of destination image */
+	data = (rect->y << 16) | (rect->x);
+	writel(data, g2d_dev->base + DST_LEFT_TOP_REG);
+
+	data = ((rect->y + rect->h) << 16) | (rect->x + rect->w);
+	writel(data, g2d_dev->base + DST_RIGHT_BOTTOM_REG);
+
+	return blt_cmd;
+}
+
+/*
+ * g2d_set_rotation - write the rotation and src/dst direction
+ * registers derived from flag->rotate_val via get_rot_config().
+ * Returns the (unmodified) blit command bits.
+ */
+u32 g2d_set_rotation(struct g2d_global *g2d_dev, g2d_flag * flag)
+{
+	u32 blt_cmd = 0;
+	u32 rot=0, src_dir=0, dst_dir=0;
+
+	get_rot_config(flag->rotate_val, &rot, &src_dir, &dst_dir);
+
+	writel(rot, g2d_dev->base + ROTATE_REG);
+	writel(src_dir, g2d_dev->base + SRC_MSK_DIRECT_REG);
+	writel(dst_dir, g2d_dev->base + DST_PAT_DIRECT_REG);
+
+	return blt_cmd;
+}
+
+/*
+ * g2d_set_clip_win - program the clip-window corner registers from
+ * @clip.  The clip-enable command bit is left out (see the commented
+ * line), so only the window coordinates are written here.
+ */
+u32 g2d_set_clip_win(struct g2d_global *g2d_dev, g2d_clip * clip)
+{
+	u32 blt_cmd = 0;
+
+	//blt_cmd |= G2D_BLT_CMD_R_CW_ENABLE;
+	writel((clip->t << 16) | (clip->l), g2d_dev->base + CW_LEFT_TOP_REG);
+	writel((clip->b << 16) | (clip->r), g2d_dev->base + CW_RIGHT_BOTTOM_REG);
+
+	return blt_cmd;
+}
+
+/*
+ * g2d_set_color_key - configure blue-screen/transparency handling
+ * and return the matching G2D_BLT_CMD_R_TRANSPARENT_MODE_* command
+ * bits; color-keying itself is always left disabled.
+ */
+u32 g2d_set_color_key(struct g2d_global *g2d_dev, g2d_flag * flag)
+{
+	u32 blt_cmd = 0;
+
+	/* Transparent Selection */
+	switch(flag->blue_screen_mode) {
+	case G2D_BLUE_SCREEN_TRANSPARENT :
+		writel(flag->color_key_val, g2d_dev->base + BS_COLOR_REG);
+
+		blt_cmd |= G2D_BLT_CMD_R_TRANSPARENT_MODE_TRANS;
+		break;
+
+	case G2D_BLUE_SCREEN_WITH_COLOR :
+		writel(flag->color_switch_val, g2d_dev->base + BG_COLOR_REG);
+		writel(flag->color_key_val, g2d_dev->base + BS_COLOR_REG);
+
+		blt_cmd |= G2D_BLT_CMD_R_TRANSPARENT_MODE_BLUESCR;
+		break;
+
+	case G2D_BLUE_SCREEN_NONE :
+	default:
+		blt_cmd |= G2D_BLT_CMD_R_TRANSPARENT_MODE_OPAQUE;
+		break;
+	}
+
+	blt_cmd |= G2D_BLT_CMD_R_COLOR_KEY_DISABLE;
+
+	return blt_cmd;
+}
+
+/*
+ * g2d_set_pattern - program third-operand, ROP4 and mask registers.
+ * @rect: the pattern rectangle (callers pass the source rect; see
+ *        g2d_init_regs()).
+ * Selects the third operand (pattern / FG / BG / none), derives the ROP4
+ * value from flag->rop_mode (same ROP in both unmasked and masked nibbles),
+ * and optionally enables masking.
+ * Returns the MASK_ENABLE bit for BITBLT_COMMAND_REG when masking is on.
+ */
+u32 g2d_set_pattern(struct g2d_global *g2d_dev, g2d_rect * rect, g2d_flag * flag)
+{
+ u32 data = 0;
+ u32 blt_cmd = 0;
+
+ /* Third Operand Selection */
+ switch(flag->third_op_mode) {
+ case G2D_THIRD_OP_PATTERN :
+ /* set base address of pattern image */
+ writel((u32)rect->addr, g2d_dev->base + PAT_BASE_ADDR_REG);
+
+ /* set size of pattern image */
+ data = ((rect->y + rect->h) << 16) | (rect->x + rect->w);
+ writel(data, g2d_dev->base + PAT_SIZE_REG);
+
+ /* set stride */
+ writel(rect->full_w * rect->bytes_per_pixel, g2d_dev->base + PAT_STRIDE_REG);
+
+ /* set color mode */
+ writel(rect->color_format, g2d_dev->base + PAT_COLOR_MODE_REG);
+
+ data = (rect->y << 16) | rect->x;
+ writel(data, g2d_dev->base + PAT_OFFSET_REG);
+
+ data = G2D_THIRD_OP_REG_PATTERN;
+ break;
+ case G2D_THIRD_OP_FG :
+ data = G2D_THIRD_OP_REG_FG_COLOR;
+ break;
+ case G2D_THIRD_OP_BG :
+ data = G2D_THIRD_OP_REG_BG_COLOR;
+ break;
+ case G2D_THIRD_OP_NONE :
+ default:
+ data = G2D_THIRD_OP_REG_NONE;
+ break;
+ }
+
+ writel(data, g2d_dev->base + THIRD_OPERAND_REG);
+
+ /* ROP4: low byte = unmasked ROP, high byte = masked ROP (set equal) */
+ if(flag->third_op_mode == G2D_THIRD_OP_NONE) {
+ data = ((G2D_ROP_REG_SRC << 8) | G2D_ROP_REG_SRC);
+ } else {
+ switch(flag->rop_mode) {
+ case G2D_ROP_DST:
+ data = ((G2D_ROP_REG_DST << 8) | G2D_ROP_REG_DST);
+ break;
+ case G2D_ROP_SRC_AND_DST:
+ data = ((G2D_ROP_REG_SRC_AND_DST << 8) | G2D_ROP_REG_SRC_AND_DST);
+ break;
+ case G2D_ROP_SRC_OR_DST:
+ data = ((G2D_ROP_REG_SRC_OR_DST << 8) | G2D_ROP_REG_SRC_OR_DST);
+ break;
+ case G2D_ROP_3RD_OPRND:
+ data = ((G2D_ROP_REG_3RD_OPRND << 8) | G2D_ROP_REG_3RD_OPRND);
+ break;
+ case G2D_ROP_SRC_AND_3RD_OPRND:
+ data = ((G2D_ROP_REG_SRC_AND_3RD_OPRND << 8) | G2D_ROP_REG_SRC_AND_3RD_OPRND);
+ break;
+ case G2D_ROP_SRC_OR_3RD_OPRND:
+ data = ((G2D_ROP_REG_SRC_OR_3RD_OPRND << 8) | G2D_ROP_REG_SRC_OR_3RD_OPRND);
+ break;
+ case G2D_ROP_SRC_XOR_3RD_OPRND:
+ data = ((G2D_ROP_REG_SRC_XOR_3RD_OPRND << 8) | G2D_ROP_REG_SRC_XOR_3RD_OPRND);
+ break;
+ case G2D_ROP_DST_OR_3RD:
+ data = ((G2D_ROP_REG_DST_OR_3RD_OPRND << 8) | G2D_ROP_REG_DST_OR_3RD_OPRND);
+ break;
+ case G2D_ROP_SRC:
+ default:
+ data = ((G2D_ROP_REG_SRC << 8) | G2D_ROP_REG_SRC);
+ break;
+ }
+ }
+ writel(data, g2d_dev->base + ROP4_REG);
+
+ /* Mask Operation */
+ /* NOTE(review): the mask registers reuse the pattern @rect address and
+  * stride -- confirm callers really intend mask == pattern surface. */
+ if(flag->mask_mode == TRUE) {
+ writel((u32)rect->addr, g2d_dev->base + MASK_BASE_ADDR_REG);
+ writel(rect->full_w * rect->bytes_per_pixel, g2d_dev->base + MASK_STRIDE_REG);
+
+ blt_cmd |= G2D_BLT_CMD_R_MASK_ENABLE;
+ }
+
+ return blt_cmd;
+}
+
+/*
+ * g2d_set_alpha - configure constant-alpha blending.
+ * If alpha_val is within range, blending is enabled (except for the
+ * Porter-Duff Clear/Src modes which are copy-only) and the 8-bit alpha
+ * value is written to ALPHA_REG; otherwise blending is disabled.
+ * Returns the ALPHA_BLEND bits for BITBLT_COMMAND_REG.
+ */
+u32 g2d_set_alpha(struct g2d_global *g2d_dev, g2d_flag * flag)
+{
+ u32 blt_cmd = 0;
+
+ /* Alpha Value */
+ if(flag->alpha_val <= G2D_ALPHA_VALUE_MAX) {
+ if ((flag->potterduff_mode == G2D_Clear_Mode) || (flag->potterduff_mode == G2D_Src_Mode))
+ blt_cmd |= G2D_BLT_CMD_R_ALPHA_BLEND_NONE;
+ else
+ blt_cmd |= G2D_BLT_CMD_R_ALPHA_BLEND_ALPHA_BLEND;
+ writel((flag->alpha_val & 0xff), g2d_dev->base + ALPHA_REG);
+ } else {
+ blt_cmd |= G2D_BLT_CMD_R_ALPHA_BLEND_NONE;
+ }
+
+ return blt_cmd;
+}
+
+/*
+ * g2d_set_bitblt_cmd - finalize and write BITBLT_COMMAND_REG.
+ * Adds STRETCH_ENABLE when src and dst sizes differ, and CW_ENABLE when
+ * the clip window is narrower than the destination rectangle, then writes
+ * the accumulated @blt_cmd collected from the g2d_set_* helpers.
+ */
+void g2d_set_bitblt_cmd(struct g2d_global *g2d_dev, g2d_rect * src_rect, g2d_rect * dst_rect, g2d_clip * clip, u32 blt_cmd)
+{
+ if ((src_rect->w != dst_rect->w)
+ || (src_rect->h != dst_rect->h)) {
+ blt_cmd |= G2D_BLT_CMD_R_STRETCH_ENABLE;
+ }
+
+ if ((clip->t != dst_rect->y) || (clip->b != dst_rect->y + dst_rect->h)
+ || (clip->l != dst_rect->x) || (clip->r != dst_rect->x + dst_rect->w)) {
+ blt_cmd |= G2D_BLT_CMD_R_CW_ENABLE;
+ }
+ writel(blt_cmd, g2d_dev->base + BITBLT_COMMAND_REG);
+}
+
+/* g2d_reset - trigger a software reset of the G2D block via SOFT_RESET_REG. */
+void g2d_reset(struct g2d_global *g2d_dev)
+{
+ writel(G2D_SWRESET_R_RESET, g2d_dev->base + SOFT_RESET_REG);
+}
+
+/* g2d_disable_int - mask the command-finish interrupt (INTEN_REG := 0). */
+void g2d_disable_int(struct g2d_global *g2d_dev)
+{
+ writel(G2D_INTEN_R_CF_DISABLE, g2d_dev->base + INTEN_REG);
+}
+
+/* g2d_set_int_finish - acknowledge the command-finish interrupt (write-1-to-clear). */
+void g2d_set_int_finish(struct g2d_global *g2d_dev)
+{
+ writel(G2D_INTC_PEND_R_INTP_CMD_FIN, g2d_dev->base + INTC_PEND_REG);
+}
+
+/*
+ * g2d_start_bitblt - kick off the programmed bitblt.
+ * Enables the finish interrupt unless the caller polls (G2D_POLLING),
+ * writes 0x7 to CACHECTL_REG (appears to enable the engine's internal
+ * cache/buffer bits -- confirm against the FIMG2D SFR manual), then sets
+ * the start bit.
+ */
+void g2d_start_bitblt(struct g2d_global *g2d_dev, g2d_params *params)
+{
+ if (!(params->flag.render_mode & G2D_POLLING)) {
+ writel(G2D_INTEN_R_CF_ENABLE, g2d_dev->base + INTEN_REG);
+ }
+ writel(0x7, g2d_dev->base + CACHECTL_REG);
+
+ writel(G2D_BITBLT_R_START, g2d_dev->base + BITBLT_START_REG);
+}
+
diff --git a/drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.h b/drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.h
new file mode 100644
index 0000000..f67f636
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.h
@@ -0,0 +1,278 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d3x_regs.h
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.*/
+
+#ifndef __SEC_FIMG2D3X_REGS_H
+#define __SEC_FIMG2D3X_REGS_H
+
+//**********************************************************
+// Address Definition of SFR
+//**********************************************************
+#define SEC_G2DREG(x) ((x))
+
+//** General Register *****************
+/* "CONRTOL" is a historical misspelling of CONTROL; the name is kept so
+ * existing users keep compiling, with a correctly spelled alias below.
+ * Note it shares offset 0x0000 with SOFT_RESET_REG. */
+#define CONRTOL_REG SEC_G2DREG(0x0000)
+#define CONTROL_REG CONRTOL_REG
+#define SOFT_RESET_REG SEC_G2DREG(0x0000)
+#define INTEN_REG SEC_G2DREG(0x0004)
+#define INTC_PEND_REG SEC_G2DREG(0x000C)
+#define FIFO_STAT_REG SEC_G2DREG(0x0010)
+#define AXI_ID_MODE_REG SEC_G2DREG(0x0014)
+#define CACHECTL_REG SEC_G2DREG(0x0018)
+
+//** G2D Command *********************
+#define BITBLT_START_REG SEC_G2DREG(0x0100)
+#define BITBLT_COMMAND_REG SEC_G2DREG(0x0104)
+
+//** Rotation & Direction *************
+#define ROTATE_REG SEC_G2DREG(0x0200)
+#define SRC_MSK_DIRECT_REG SEC_G2DREG(0x0204)
+#define DST_PAT_DIRECT_REG SEC_G2DREG(0x0208)
+// for old vector
+#define SRC_DIRECT_REG SEC_G2DREG(0x0204)
+#define DST_DIRECT_REG SEC_G2DREG(0x0208)
+
+//** Source **************************
+#define SRC_SELECT_REG SEC_G2DREG(0x0300)
+#define SRC_BASE_ADDR_REG SEC_G2DREG(0x0304)
+#define SRC_STRIDE_REG SEC_G2DREG(0x0308)
+#define SRC_COLOR_MODE_REG SEC_G2DREG(0x030C)
+#define SRC_LEFT_TOP_REG SEC_G2DREG(0x0310)
+#define SRC_RIGHT_BOTTOM_REG SEC_G2DREG(0x0314)
+
+//** Destination ***********************
+#define DST_SELECT_REG SEC_G2DREG(0x0400)
+#define DST_BASE_ADDR_REG SEC_G2DREG(0x0404)
+#define DST_STRIDE_REG SEC_G2DREG(0x0408)
+#define DST_COLOR_MODE_REG SEC_G2DREG(0x040C)
+#define DST_LEFT_TOP_REG SEC_G2DREG(0x0410)
+#define DST_RIGHT_BOTTOM_REG SEC_G2DREG(0x0414)
+
+//** Pattern **************************
+#define PAT_BASE_ADDR_REG SEC_G2DREG(0x0500)
+#define PAT_SIZE_REG SEC_G2DREG(0x0504)
+#define PAT_COLOR_MODE_REG SEC_G2DREG(0x0508)
+#define PAT_OFFSET_REG SEC_G2DREG(0x050C)
+#define PAT_STRIDE_REG SEC_G2DREG(0x0510)
+
+//** Mask *****************************
+#define MASK_BASE_ADDR_REG SEC_G2DREG(0x0520)
+#define MASK_STRIDE_REG SEC_G2DREG(0x0524)
+
+//** Clipping Window *******************
+#define CW_LEFT_TOP_REG SEC_G2DREG(0x0600)
+#define CW_RIGHT_BOTTOM_REG SEC_G2DREG(0x0604)
+
+//** ROP4 & Blending *****************
+#define THIRD_OPERAND_REG SEC_G2DREG(0x0610)
+#define ROP4_REG SEC_G2DREG(0x0614)
+#define ALPHA_REG SEC_G2DREG(0x0618)
+
+//** Color ***************************
+#define FG_COLOR_REG SEC_G2DREG(0x0700)
+#define BG_COLOR_REG SEC_G2DREG(0x0704)
+#define BS_COLOR_REG SEC_G2DREG(0x0708)
+
+//** Color Key ***********************
+#define SRC_COLORKEY_CTRL_REG SEC_G2DREG(0x0710)
+#define SRC_COLORKEY_DR_MIN_REG SEC_G2DREG(0x0714)
+#define SRC_COLORKEY_DR_MAX_REG SEC_G2DREG(0x0718)
+#define DST_COLORKEY_CTRL_REG SEC_G2DREG(0x071C)
+#define DST_COLORKEY_DR_MIN_REG SEC_G2DREG(0x0720)
+#define DST_COLORKEY_DR_MAX_REG SEC_G2DREG(0x0724)
+
+//**********************************************************
+// Bit Definition part
+//**********************************************************
+
+/* software reset register (SOFT_RESET_REG : 0x0000) */
+#define G2D_SWRESET_R_RESET (1<<0)
+
+/* interrupt enable register (INTEN_REG : 0x0004)) */
+#define G2D_INTEN_R_INT_TYPE_EDGE (1<<1)
+#define G2D_INTEN_R_INT_TYPE_LEVEL (0<<1)
+#define G2D_INTEN_R_CF_ENABLE (1<<0)
+#define G2D_INTEN_R_CF_DISABLE (0<<0)
+
+/* interrupt pending register (INTC_PEND_REG : 0x000C) */
+#define G2D_INTC_PEND_R_INTP_CMD_FIN (1<<0)
+
+/* AXI ID mode register (AXI_ID_MODE_REG : 0x0014) */
+#define G2D_AXIID_MODE_R_MULTI_ID (1<<0)
+#define G2D_AXIID_MODE_R_SIGNLE_ID (0<<0)
+
+/* bitblit start register (BITBLT_START_REG : 0x0100) */
+#define G2D_BITBLT_R_START (1<<0)
+
+/* bitblt command register (BITBLT_COMMAND_REG : 0x0104) */
+#define G2D_BLT_CMD_R_COLOR_EXP_CORRECT (0<<24)
+#define G2D_BLT_CMD_R_COLOR_EXP_ZERO (1<<24)
+
+#define G2D_BLT_CMD_R_SRC_NON_PRE_BLEND_DISLABE (0<<22)
+#define G2D_BLT_CMD_R_SRC_NON_PRE_BLEND_CONSTANT_ALPHA (1<<22)
+#define G2D_BLT_CMD_R_SRC_NON_PRE_BLEND_PERPIXEL_ALPHA (2<<22)
+
+#define G2D_BLT_CMD_R_ALPHA_BLEND_NONE (0<<20)
+#define G2D_BLT_CMD_R_ALPHA_BLEND_ALPHA_BLEND (1<<20)
+#define G2D_BLT_CMD_R_ALPHA_BLEND_FADE (2<<20)
+// #define G2D_BLT_CMD_R_ALPHA_BLEND_PERPIXEL (3<<20)
+
+#define G2D_BLT_CMD_R_ALPHA_BLEND_FAD_OFFSET (8)
+
+#define G2D_BLT_CMD_R_COLOR_KEY_DISABLE (0<<16)
+#define G2D_BLT_CMD_R_COLOR_KEY_ENABLE_SRC (1<<16)
+#define G2D_BLT_CMD_R_COLOR_KEY_ENABLE_DST (2<<16)
+#define G2D_BLT_CMD_R_COLOP_KEY_ENABLE_SRC_DST (3<<16)
+
+#define G2D_BLT_CMD_R_TRANSPARENT_MODE_OPAQUE (0<<12)
+#define G2D_BLT_CMD_R_TRANSPARENT_MODE_TRANS (1<<12)
+#define G2D_BLT_CMD_R_TRANSPARENT_MODE_BLUESCR (2<<12)
+
+#define G2D_BLT_CMD_R_CW_ENABLE (1<<8)
+#define G2D_BLT_CMD_R_STRETCH_ENABLE (1<<4)
+#define G2D_BLT_CMD_R_MASK_ENABLE (1<<0)
+
+/* rotation register (ROTATE_REG : 0x02000) */
+#define G2D_ROT_CMD_R_0 (0<<0)
+#define G2D_ROT_CMD_R_90 (1<<0)
+
+/* source and mask direction register (SRC_MSK_DIRECT_REG : 0x0204) */
+/* NOTE(review): copy-paste defect -- all four macros below expand to
+ * (0<<8), the NEGATIVE variant has the same value as POSITIVE, and
+ * G2D_SRC_MSK_DIR_R_SRC_Y_POSITIVE is defined twice while no
+ * SRC_Y_NEGATIVE exists. None are used in this driver; verify the real
+ * bit values against the FIMG2D 3.x SFR manual before relying on them. */
+#define G2D_SRC_MSK_DIR_R_MSK_Y_POSITIVE (0<<8)
+#define G2D_SRC_MSK_DIR_R_MSK_Y_NEGATIVE (0<<8)
+#define G2D_SRC_MSK_DIR_R_SRC_Y_POSITIVE (0<<8)
+#define G2D_SRC_MSK_DIR_R_SRC_Y_POSITIVE (0<<8)
+
+/* source image selection register (SRC_SELECT_REG : 0x0300) */
+#define G2D_SRC_SELECT_R_NORMAL (0<<0)
+#define G2D_SRC_SELECT_R_USE_FG_COLOR (1<<0)
+#define G2D_SRC_SELECT_R_USE_BG_COLOR (2<<0)
+
+/* source image color mode register (SRC_COLOR_MODE_REG : 0x030C) */
+
+
+/* destination image selection register (DST_SELECT_REG : 0x0400) */
+#define G2D_DST_SELECT_R_NORMAL (0<<0)
+#define G2D_DST_SELECT_R_USE_FG_COLOR (1<<0)
+#define G2D_DST_SELECT_R_USE_BG_COLOR (2<<0)
+
+#define G2D_CMD0_REG_M_X (1<<8)
+
+#define G2D_CMD0_REG_L (1<<1)
+#define G2D_CMD0_REG_P (1<<0)
+
+/* BitBLT */
+#define G2D_CMD1_REG_S (1<<1)
+#define G2D_CMD1_REG_N (1<<0)
+
+/* resource color mode */
+#define G2D_COLOR_MODE_REG_C3_32BPP (1<<3)
+#define G2D_COLOR_MODE_REG_C3_24BPP (1<<3)
+#define G2D_COLOR_MODE_REG_C2_18BPP (1<<2)
+#define G2D_COLOR_MODE_REG_C1_16BPP (1<<1)
+#define G2D_COLOR_MODE_REG_C0_15BPP (1<<0)
+
+#define G2D_COLOR_RGB_565 (0x0<<0)
+#define G2D_COLOR_RGBA_5551 (0x1<<0)
+#define G2D_COLOR_ARGB_1555 (0x2<<0)
+#define G2D_COLOR_RGBA_8888 (0x3<<0)
+#define G2D_COLOR_ARGB_8888 (0x4<<0)
+#define G2D_COLOR_XRGB_8888 (0x5<<0)
+#define G2D_COLOR_RGBX_8888 (0x6<<0)
+#define G2D_COLOR_YUV422_SP (0x1<<3)
+
+#define G2D_CHL_ORDER_XRGB (0<<4) // ARGB,XRGB
+#define G2D_CHL_ORDER_RGBX (1<<4) // RGBA,RGBX
+#define G2D_CHL_ORDER_XBGR (2<<4) // ABGR,XBGR
+#define G2D_CHL_ORDER_BGRX (3<<4) // BGRA,BGRX
+
+#define G2D_FMT_XRGB_8888 (0)
+#define G2D_FMT_ARGB_8888 (1)
+#define G2D_FMT_RGB_565 (2)
+#define G2D_FMT_XRGB_1555 (3)
+#define G2D_FMT_ARGB_1555 (4)
+#define G2D_FMT_XRGB_4444 (5)
+#define G2D_FMT_ARGB_4444 (6)
+#define G2D_FMT_PACKED_RGB_888 (7)
+
+/* rotation mode */
+#define G2D_ROTATRE_REG_FY (1<<5)
+#define G2D_ROTATRE_REG_FX (1<<4)
+#define G2D_ROTATRE_REG_R3_270 (1<<3)
+#define G2D_ROTATRE_REG_R2_180 (1<<2)
+#define G2D_ROTATRE_REG_R1_90 (1<<1)
+#define G2D_ROTATRE_REG_R0_0 (1<<0)
+
+/* Endian select */
+#define G2D_ENDIAN_DST_BIG_ENDIAN (1<<1)
+#define G2D_ENDIAN_DST_LITTLE_ENDIAN (0<<1)
+
+#define G2D_ENDIAN_SRC_BIG_ENDIAN (1<<0)
+#define G2D_ENDIAN_SRC_LITTLE_ENDIAN (0<<0)
+
+/* read buffer size */
+#define G2D_ENDIAN_READSIZE_READ_SIZE_1 (0<<0)
+#define G2D_ENDIAN_READSIZE_READ_SIZE_4 (1<<0)
+#define G2D_ENDIAN_READSIZE_READ_SIZE_8 (2<<0)
+#define G2D_ENDIAN_READSIZE_READ_SIZE_16 (3<<0)
+
+/* Third Operans Select */
+/*
+#define G2D_ROP_REG_OS_PATTERN (0<<13)
+#define G2D_ROP_REG_OS_FG_COLOR (1<<13)
+#define G2D_ROP_REG_OS_PATTERN_MASK_SELECT (0<<4)
+#define G2D_ROP_REG_OS_PATTERN_THIRD (0)
+*/
+#define G2D_THIRD_OP_REG_PATTERN ((0<<4) | (0))
+#define G2D_THIRD_OP_REG_FG_COLOR ((1<<4) | (1))
+#define G2D_THIRD_OP_REG_BG_COLOR ((2<<4) | (2))
+#define G2D_THIRD_OP_REG_NONE ((3<<4) | (3))
+
+/* Alpha Blending Mode */
+#define G2D_ROP_REG_ABM_NO_BLENDING (0<<10)
+#define G2D_ROP_REG_ABM_SRC_BITMAP (1<<10)
+#define G2D_ROP_REG_ABM_REGISTER (2<<10)
+#define G2D_ROP_REG_ABM_FADING (4<<10)
+
+/* Raster operation mode */
+#define G2D_ROP_REG_T_OPAQUE_MODE (0<<9)
+#define G2D_ROP_REG_T_TRANSP_MODE (1<<9)
+
+#define G2D_ROP_REG_B_BS_MODE_OFF (0<<8)
+#define G2D_ROP_REG_B_BS_MODE_ON (1<<8)
+
+/*
+#define G2D_ROP_REG_SRC_ONLY (0xf0)
+#define G2D_ROP_REG_3RD_OPRND_ONLY (0xaa)
+#define G2D_ROP_REG_DST_ONLY (0xcc)
+#define G2D_ROP_REG_SRC_OR_DST (0xfc)
+#define G2D_ROP_REG_SRC_OR_3RD_OPRND (0xfa)
+#define G2D_ROP_REG_SRC_AND_DST (0xc0) //(pat==1)? src:dst
+#define G2D_ROP_REG_SRC_AND_3RD_OPRND (0xa0)
+#define G2D_ROP_REG_SRC_XOR_3RD_OPRND (0x5a)
+#define G2D_ROP_REG_DST_OR_3RD_OPRND (0xee)
+*/
+#define G2D_ROP_REG_SRC (0xcc)
+#define G2D_ROP_REG_DST (0xaa)
+#define G2D_ROP_REG_SRC_AND_DST (0x88)
+#define G2D_ROP_REG_SRC_OR_DST (0xee)
+#define G2D_ROP_REG_3RD_OPRND (0xf0)
+#define G2D_ROP_REG_SRC_AND_3RD_OPRND (0xc0)
+#define G2D_ROP_REG_SRC_OR_3RD_OPRND (0xfc)
+#define G2D_ROP_REG_SRC_XOR_3RD_OPRND (0x3c)
+#define G2D_ROP_REG_DST_OR_3RD_OPRND (0xfa)
+
+
+/* stencil control */
+#define G2D_STENCIL_CNTL_REG_STENCIL_ON_ON (1<<31)
+#define G2D_STENCIL_CNTL_REG_STENCIL_ON_OFF (0<<31)
+
+#define G2D_STENCIL_CNTL_REG_STENCIL_INVERSE (1<<23)
+#define G2D_STENCIL_CNTL_REG_STENCIL_SWAP (1<<0)
+
+/*********************************************************************************/
+
+#endif /* __SEC_FIMG2D3X_REGS_H */
diff --git a/drivers/media/video/samsung/fimg2d3x/fimg2d_cache.c b/drivers/media/video/samsung/fimg2d3x/fimg2d_cache.c
new file mode 100644
index 0000000..639b3f8
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x/fimg2d_cache.c
@@ -0,0 +1,379 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d3x_cache.c
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file implements fimg2d cache control functions.
+ */
+
+#include <linux/kernel.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <asm/io.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+
+#include "fimg2d.h"
+
+#define L1_ALL_THRESHOLD_SIZE SZ_64K
+#define L2_ALL_THRESHOLD_SIZE SZ_1M
+
+#define L2_CACHE_SKIP_MARK 256*4
+
+/*
+ * g2d_pagetable_clean - clean the CPU page-table entries covering
+ * [start_addr, start_addr + size) out to memory so the G2D sysmmu sees
+ * up-to-date translations.
+ * Walks the ARM short-descriptor tables 1MB section at a time: cleans the
+ * L1 descriptor (4 bytes) and the 1KB L2 table it points to, in both the
+ * inner (dmac_map_area) and outer (outer_clean_range) caches.
+ * @pgd is the physical address of the level-1 table.
+ * NOTE(review): end_addr = cur + size + SZ_1M deliberately walks one extra
+ * section past the aligned size -- presumably to cover an unaligned tail;
+ * confirm this over-clean is intended.
+ */
+void g2d_pagetable_clean(const void *start_addr, unsigned long size, unsigned long pgd)
+{
+ void *l1d_vir, *l1d_phy, *l2d_phy;
+ void *cur_addr, *end_addr;
+ size = ALIGN(size, SZ_1M);
+ cur_addr = (void *)((unsigned long)start_addr & ~(SZ_1M-1));
+ end_addr = cur_addr + size + SZ_1M;
+
+ /* L1 descriptor address: table base | (VA[31:20] * 4) */
+ l1d_phy = (void *)((pgd & 0xffffc000) | (((u32)(cur_addr) & 0xfff00000)>>18));
+
+ if (l1d_phy) {
+ l1d_vir = phys_to_virt((u32)l1d_phy);
+ dmac_map_area(l1d_vir, (size/SZ_1M)*4, DMA_TO_DEVICE);
+ }
+
+ while (cur_addr < end_addr) {
+ outer_clean_range((u32)l1d_phy, (u32)l1d_phy + 4);
+
+ if (l1d_phy) {
+ /* L2 table address from the L1 descriptor, plus VA[19:12] * 4 */
+ l2d_phy = (void *)((readl(phys_to_virt((u32)l1d_phy)) & 0xfffffc00) |
+ (((u32)cur_addr & 0x000ff000) >> 10));
+ if (l2d_phy)
+ dmac_map_area(phys_to_virt((u32)l2d_phy), SZ_1K, DMA_TO_DEVICE);
+ outer_clean_range((u32)l2d_phy, (u32)l2d_phy + SZ_1K);
+ }
+ cur_addr += SZ_1M;
+ l1d_phy = (void *)((pgd & 0xffffc000) | (((u32)(cur_addr) & 0xfff00000)>>18));
+ }
+}
+
+
+/*
+ * virt2phys - translate a user/kernel virtual address to physical by
+ * walking the current task's page tables.
+ * Returns 0 when the L1 descriptor is not a valid page table.
+ * NOTE(review): patching current->mm for mm-less kernel threads is
+ * fragile (init_mm has no user mappings) -- confirm which callers hit
+ * this path; behavior kept as-is.
+ */
+static unsigned long virt2phys(unsigned long addr)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long paddr;
+
+ if(!current->mm) {
+ current->mm = &init_mm;
+ }
+
+ pgd = pgd_offset(current->mm, addr);
+
+ /* L1 descriptor bits[1:0] must be 01 (coarse page table) */
+ if ((pgd_val(*pgd) & 0x1) != 0x1) {
+ return 0;
+ }
+
+ pmd = pmd_offset(pgd, addr);
+ pte = pte_offset_map(pmd, addr);
+
+ paddr = (addr & 0xfff) | (pte_val(*pte) & 0xfffff000);
+
+ /* pte_offset_map() takes a kmap_atomic slot on highmem configs; it
+  * must be paired with pte_unmap() or the slot leaks (bug in the
+  * original, which returned without unmapping). */
+ pte_unmap(pte);
+
+ return paddr;
+}
+
+/*
+ * g2d_check_pagetable - verify every page of [vaddr, vaddr+size) is mapped
+ * in the page table rooted at physical address @pgd.
+ * Returns G2D_PT_NOTVALID when any L1/L2 descriptor is missing or of the
+ * wrong type (L1 must be a coarse table, 0b01; L2 a small page, 0b11).
+ * Otherwise returns G2D_PT_CACHED / G2D_PT_UNCACHED from the C bit
+ * (bit 3) of the LAST page's L2 descriptor.
+ */
+u32 g2d_check_pagetable(void * vaddr, unsigned int size, unsigned long pgd)
+{
+ unsigned int level_one_phy, level_two_phy;
+ unsigned int level_one_value, level_two_value;
+
+ for (;;) {
+ level_one_phy = (pgd & 0xffffc000) | (((u32)vaddr & 0xfff00000)>>18);
+ /* phys_to_virt result below lowmem start => not RAM-backed */
+ if ((int)phys_to_virt(level_one_phy) < 0xc0000000) {
+ FIMG2D_ERROR("Level1 page table mapping missed, missed address = %p", phys_to_virt(level_one_phy));
+ return G2D_PT_NOTVALID;
+ }
+ level_one_value = readl(phys_to_virt(level_one_phy));
+
+ level_two_phy = (level_one_value & 0xfffffc00) | (((u32)vaddr & 0x000ff000) >> 10);
+ if ((int)phys_to_virt(level_two_phy) < 0xc0000000) {
+ FIMG2D_ERROR("Level2 page table mapping missed, missed address = %p", phys_to_virt(level_two_phy));
+ return G2D_PT_NOTVALID;
+ }
+ level_two_value = readl(phys_to_virt(level_two_phy));
+
+ if (((level_one_value & 0x3) != 0x1) || ((level_two_value & 0x3) != 0x3)) {
+ FIMG2D_DEBUG("Surface memory mapping fail [L1: 0x%x, L2: 0x%x]\n",
+ level_one_value, level_two_value);
+ return G2D_PT_NOTVALID;
+ }
+ if (size == 0) {
+ /* C bit of the small-page descriptor => cacheability */
+ if ((level_two_value & 0x08) != 0x08)
+ return G2D_PT_UNCACHED;
+ return G2D_PT_CACHED;
+ }
+
+ /* advance: final iteration probes the last byte (size-1) */
+ if (size <= PAGE_SIZE) {
+ vaddr += (size-1);
+ size = 0;
+ } else {
+ vaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+ }
+}
+
+/*
+ * g2d_clip_for_src - derive the source-side clip window from the
+ * destination clip. For a 1:1 (unstretched) blit the dst clip is
+ * translated into source coordinates; when scaling, the whole source
+ * rectangle is used instead.
+ */
+void g2d_clip_for_src(g2d_rect *src_rect, g2d_rect *dst_rect, g2d_clip *clip, g2d_clip *src_clip)
+{
+ int stretched = (src_rect->w != dst_rect->w) || (src_rect->h != dst_rect->h);
+
+ if (stretched) {
+ src_clip->t = src_rect->y;
+ src_clip->l = src_rect->x;
+ src_clip->b = src_rect->y + src_rect->h;
+ src_clip->r = src_rect->x + src_rect->w;
+ } else {
+ src_clip->t = src_rect->y + (clip->t - dst_rect->y);
+ src_clip->l = src_rect->x + (clip->l - dst_rect->x);
+ src_clip->b = src_clip->t + (clip->b - clip->t);
+ src_clip->r = src_clip->l + (clip->r - clip->l);
+ }
+}
+
+/*
+ * g2d_mem_inner_cache - L1 (inner) cache maintenance before a blit.
+ * Cleans the clipped source and flushes the clipped destination when the
+ * combined working set is small; otherwise flushing all CPU caches is
+ * cheaper than walking the ranges.
+ */
+void g2d_mem_inner_cache(g2d_params * params)
+{
+ void *src_addr, *dst_addr;
+ unsigned long src_size, dst_size;
+ g2d_clip clip_src;
+ g2d_clip_for_src(&params->src_rect, &params->dst_rect, &params->clip, &clip_src);
+
+ src_addr = (void *)GET_START_ADDR_C(params->src_rect, clip_src);
+ dst_addr = (void *)GET_START_ADDR_C(params->dst_rect, params->clip);
+ src_size = (unsigned long)GET_RECT_SIZE_C(params->src_rect, clip_src);
+ dst_size = (unsigned long)GET_RECT_SIZE_C(params->dst_rect, params->clip);
+
+ if((src_size + dst_size) < L1_ALL_THRESHOLD_SIZE) {
+ dmac_map_area(src_addr, src_size, DMA_TO_DEVICE);
+ dmac_flush_range(dst_addr, dst_addr + dst_size);
+ } else {
+ flush_all_cpu_caches();
+ }
+}
+
+/*
+ * g2d_mem_outer_cache - L2 (outer) cache maintenance before a blit.
+ * Cleans the clipped source and flushes the clipped destination. For a
+ * large combined working set, outer_flush_all() is used. For sparse
+ * surfaces (large per-row gap) the maintenance is done row by row,
+ * translating each row to physical and splitting ranges that cross a
+ * page boundary. *need_dst_clean may be false on entry to skip the
+ * destination (invalidated later instead); it is forced true on the
+ * flush-all path.
+ */
+void g2d_mem_outer_cache(struct g2d_global *g2d_dev, g2d_params * params, int *need_dst_clean)
+{
+ unsigned long start_paddr, end_paddr;
+ unsigned long cur_addr, end_addr;
+ unsigned long width_bytes;
+ unsigned long stride;
+ unsigned long src_size, dst_size;
+ g2d_clip clip_src;
+
+#if 0
+ if (((GET_RECT_SIZE(params->src_rect) + GET_RECT_SIZE(params->dst_rect)) > L2_ALL_THRESHOLD_SIZE)
+ && ((*need_dst_clean == true) || ( GET_RECT_SIZE(params->src_rect) > 384*640*4))) {
+ outer_flush_all();
+ *need_dst_clean = true;
+ return;
+ }
+#endif
+
+ g2d_clip_for_src(&params->src_rect, &params->dst_rect, &params->clip, &clip_src);
+
+ src_size = GET_RECT_SIZE_C(params->src_rect, clip_src);
+ dst_size = GET_RECT_SIZE_C(params->dst_rect, params->clip);
+
+ if ((src_size + dst_size) >= L2_ALL_THRESHOLD_SIZE) {
+ outer_flush_all();
+ *need_dst_clean = true;
+ return;
+ }
+
+ /* source: contiguous clean when rows are dense, else row-by-row */
+ if((GET_SPARE_BYTES(params->src_rect) < L2_CACHE_SKIP_MARK)
+ || ((params->src_rect.w * params->src_rect.bytes_per_pixel) >= PAGE_SIZE)) {
+ g2d_mem_outer_cache_clean((void *)GET_START_ADDR_C(params->src_rect, clip_src),
+ (unsigned int)GET_RECT_SIZE_C(params->src_rect, clip_src));
+ } else {
+ stride = GET_STRIDE(params->src_rect);
+ width_bytes = params->src_rect.w * params->src_rect.bytes_per_pixel;
+ cur_addr = (unsigned long)GET_REAL_START_ADDR_C(params->src_rect, clip_src);
+ end_addr = (unsigned long)GET_REAL_END_ADDR_C(params->src_rect, clip_src);
+
+ while (cur_addr <= end_addr) {
+ start_paddr = virt2phys((unsigned long)cur_addr);
+ end_paddr = virt2phys((unsigned long)cur_addr + width_bytes);
+
+ /* rows crossing a page boundary may be physically
+  * discontiguous: clean each page part separately */
+ if (((end_paddr - start_paddr) > 0) && ((end_paddr -start_paddr) < PAGE_SIZE)) {
+ outer_clean_range(start_paddr, end_paddr);
+ } else {
+ outer_clean_range(start_paddr, ((start_paddr + PAGE_SIZE) & PAGE_MASK) - 1);
+ outer_clean_range(end_paddr & PAGE_MASK, end_paddr);
+ }
+ cur_addr += stride;
+ }
+ }
+
+ if (*need_dst_clean) {
+ /* fix: original used src_rect.bytes_per_pixel for the dst width
+  * check (copy-paste from the source branch above) */
+ if ((GET_SPARE_BYTES(params->dst_rect) < L2_CACHE_SKIP_MARK)
+ || ((params->dst_rect.w * params->dst_rect.bytes_per_pixel) >= PAGE_SIZE)) {
+ g2d_mem_outer_cache_flush((void *)GET_START_ADDR_C(params->dst_rect, params->clip),
+ (unsigned int)GET_RECT_SIZE_C(params->dst_rect, params->clip));
+ } else {
+ stride = GET_STRIDE(params->dst_rect);
+ width_bytes = (params->clip.r - params->clip.l) * params->dst_rect.bytes_per_pixel;
+
+ cur_addr = (unsigned long)GET_REAL_START_ADDR_C(params->dst_rect, params->clip);
+ end_addr = (unsigned long)GET_REAL_END_ADDR_C(params->dst_rect, params->clip);
+
+ while (cur_addr <= end_addr) {
+ start_paddr = virt2phys((unsigned long)cur_addr);
+ end_paddr = virt2phys((unsigned long)cur_addr + width_bytes);
+
+ if (((end_paddr - start_paddr) > 0) && ((end_paddr -start_paddr) < PAGE_SIZE)) {
+ outer_flush_range(start_paddr, end_paddr);
+ } else {
+ outer_flush_range(start_paddr, ((start_paddr + PAGE_SIZE) & PAGE_MASK) - 1);
+ outer_flush_range(end_paddr & PAGE_MASK, end_paddr);
+ }
+ cur_addr += stride;
+ }
+ }
+ }
+}
+
+/*
+ * g2d_mem_cache_oneshot - combined inner+outer maintenance for one
+ * src/dst pair: clean src (inner then outer, page by page), flush dst.
+ * Falls back to flush_all_cpu_caches() / outer_flush_all() above the
+ * L1/L2 thresholds. Pages whose translation fails (virt2phys == 0) are
+ * skipped.
+ */
+void g2d_mem_cache_oneshot(void *src_addr, void *dst_addr, unsigned long src_size, unsigned long dst_size)
+{
+ unsigned long paddr;
+ void *cur_addr, *end_addr;
+ unsigned long full_size;
+
+ full_size = src_size + dst_size;
+
+ if(full_size < L1_ALL_THRESHOLD_SIZE)
+ dmac_map_area(src_addr, src_size, DMA_TO_DEVICE);
+ else
+ flush_all_cpu_caches();
+
+ if(full_size > L2_ALL_THRESHOLD_SIZE) {
+ outer_flush_all();
+ return;
+ }
+
+ /* outer-clean the source, one PAGE at a time (plus one trailing page
+  * to cover an unaligned tail) */
+ cur_addr = (void *)((unsigned long)src_addr & PAGE_MASK);
+ src_size = PAGE_ALIGN(src_size);
+ end_addr = cur_addr + src_size + PAGE_SIZE;
+
+ while (cur_addr < end_addr) {
+ paddr = virt2phys((unsigned long)cur_addr);
+ if (paddr) {
+ outer_clean_range(paddr, paddr + PAGE_SIZE);
+ }
+ cur_addr += PAGE_SIZE;
+ }
+
+ if(full_size < L1_ALL_THRESHOLD_SIZE)
+ dmac_flush_range(dst_addr, dst_addr + dst_size);
+
+ cur_addr = (void *)((unsigned long)dst_addr & PAGE_MASK);
+ dst_size = PAGE_ALIGN(dst_size);
+ end_addr = cur_addr + dst_size + PAGE_SIZE;
+
+ while (cur_addr < end_addr) {
+ paddr = virt2phys((unsigned long)cur_addr);
+ if (paddr) {
+ outer_flush_range(paddr, paddr + PAGE_SIZE);
+ }
+ cur_addr += PAGE_SIZE;
+ }
+}
+
+/*
+ * g2d_mem_cache_op - dispatch an explicit cache-maintenance request.
+ * Returns true on success, false for an unknown @cmd.
+ */
+u32 g2d_mem_cache_op(unsigned int cmd, void *addr, unsigned int size)
+{
+ if (cmd == G2D_DMA_CACHE_CLEAN) {
+ g2d_mem_outer_cache_clean(addr, size);
+ return true;
+ }
+
+ if (cmd == G2D_DMA_CACHE_FLUSH) {
+ g2d_mem_outer_cache_flush(addr, size);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * g2d_mem_outer_cache_flush - outer-flush every page covering
+ * [start_addr, start_addr + size), plus one trailing page for an
+ * unaligned tail. Pages whose translation fails are skipped.
+ */
+void g2d_mem_outer_cache_flush(void *start_addr, unsigned long size)
+{
+ void *page;
+ void *limit;
+
+ page = (void *)((unsigned long)start_addr & PAGE_MASK);
+ limit = page + PAGE_ALIGN(size) + PAGE_SIZE;
+
+ for (; page < limit; page += PAGE_SIZE) {
+ unsigned long paddr = virt2phys((unsigned long)page);
+
+ if (paddr)
+ outer_flush_range(paddr, paddr + PAGE_SIZE);
+ }
+}
+
+/*
+ * g2d_mem_outer_cache_clean - outer-clean every page covering
+ * [start_addr, start_addr + size), plus one trailing page for an
+ * unaligned tail. Pages whose translation fails are skipped.
+ */
+void g2d_mem_outer_cache_clean(const void *start_addr, unsigned long size)
+{
+ void *page;
+ void *limit;
+
+ page = (void *)((unsigned long)start_addr & PAGE_MASK);
+ limit = page + PAGE_ALIGN(size) + PAGE_SIZE;
+
+ for (; page < limit; page += PAGE_SIZE) {
+ unsigned long paddr = virt2phys((unsigned long)page);
+
+ if (paddr)
+ outer_clean_range(paddr, paddr + PAGE_SIZE);
+ }
+}
+
+/*
+ * g2d_mem_outer_cache_inv - outer-invalidate the clipped destination
+ * after the engine has written it. The partial first and last pages are
+ * handled separately; full middle pages are invalidated whole. Pages with
+ * failed translation are skipped.
+ */
+void g2d_mem_outer_cache_inv(g2d_params *params)
+{
+ unsigned long start_paddr, end_paddr;
+ unsigned long cur_addr, end_addr;
+ unsigned long stride;
+
+ stride = GET_STRIDE(params->dst_rect);
+ cur_addr = (unsigned long)GET_START_ADDR_C(params->dst_rect, params->clip);
+ end_addr = cur_addr + (unsigned long)GET_RECT_SIZE_C(params->dst_rect, params->clip);
+
+ /* partial first page: from start up to the end of its page.
+  * NOTE(review): the end is computed from the PHYSICAL address masked to
+  * its page -- relies on virt/phys page offsets matching (true for
+  * page-aligned mappings); confirm. */
+ start_paddr = virt2phys((unsigned long)cur_addr);
+ outer_inv_range(start_paddr, (start_paddr & PAGE_MASK) + (PAGE_SIZE - 1));
+ cur_addr = ((unsigned long)cur_addr & PAGE_MASK) + PAGE_SIZE;
+
+ while (cur_addr < end_addr) {
+ start_paddr = virt2phys((unsigned long)cur_addr);
+ /* partial last page */
+ if ((cur_addr + PAGE_SIZE) > end_addr) {
+ end_paddr = virt2phys((unsigned long)end_addr);
+ outer_inv_range(start_paddr, end_paddr);
+ break;
+ }
+
+ if (start_paddr) {
+ outer_inv_range(start_paddr, start_paddr + PAGE_SIZE);
+ }
+ cur_addr += PAGE_SIZE;
+ }
+}
+
+/*
+ * g2d_check_need_dst_cache_clean - decide whether the destination needs
+ * an outer-cache clean before the blit. It can be skipped (return false)
+ * only for an opaque RGB565 full-width blit whose clipped destination is
+ * 32-byte aligned at both ends (whole-cache-line writes).
+ */
+int g2d_check_need_dst_cache_clean(g2d_params * params)
+{
+ unsigned long start, end;
+ int skippable;
+
+ start = (unsigned long)GET_START_ADDR_C(params->dst_rect, params->clip);
+ end = start + (unsigned long)GET_RECT_SIZE_C(params->dst_rect, params->clip);
+
+ skippable = (params->src_rect.color_format == G2D_RGB_565) &&
+ (params->flag.alpha_val == G2D_ALPHA_BLENDING_OPAQUE) &&
+ (params->dst_rect.full_w == (params->clip.r - params->clip.l)) &&
+ (start % 32 == 0) && (end % 32 == 0);
+
+ return skippable ? false : true;
+}
diff --git a/drivers/media/video/samsung/fimg2d3x/fimg2d_core.c b/drivers/media/video/samsung/fimg2d3x/fimg2d_core.c
new file mode 100644
index 0000000..5b1fe07
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x/fimg2d_core.c
@@ -0,0 +1,314 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d_core.c
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file implements fimg2d core functions.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <plat/sysmmu.h>
+#include <linux/sched.h>
+
+#if defined(CONFIG_S5P_MEM_CMA)
+#include <linux/cma.h>
+#elif defined(CONFIG_S5P_MEM_BOOTMEM)
+#include <mach/media.h>
+#include <plat/media.h>
+#endif
+
+#include "fimg2d.h"
+
+/*
+ * g2d_clk_enable - gate the G2D clock on.
+ * Returns 0 when the clock was enabled by this call, -1 if it was
+ * already on (flag tracked in clk_enable_flag).
+ */
+int g2d_clk_enable(struct g2d_global *g2d_dev)
+{
+ if (atomic_read(&g2d_dev->clk_enable_flag))
+ return -1;
+
+ clk_enable(g2d_dev->clock);
+ atomic_set(&g2d_dev->clk_enable_flag, 1);
+ return 0;
+}
+
+/*
+ * g2d_clk_disable - gate the G2D clock off.
+ * Only disables when the clock is currently on AND the device is idle
+ * (in_use == 0). Returns 0 on success, -1 otherwise.
+ */
+int g2d_clk_disable(struct g2d_global *g2d_dev)
+{
+ if (!atomic_read(&g2d_dev->clk_enable_flag))
+ return -1;
+
+ if (atomic_read(&g2d_dev->in_use) != 0)
+ return -1;
+
+ clk_disable(g2d_dev->clock);
+ atomic_set(&g2d_dev->clk_enable_flag, 0);
+ return 0;
+}
+
+/* g2d_sysmmu_on - enable the G2D system MMU with the kernel (init_mm)
+ * page table as the initial translation root. */
+void g2d_sysmmu_on(struct g2d_global *g2d_dev)
+{
+ exynos_sysmmu_enable(g2d_dev->dev,
+ (unsigned long)virt_to_phys((void *)init_mm.pgd));
+}
+
+/* g2d_sysmmu_off - disable the G2D system MMU. */
+void g2d_sysmmu_off(struct g2d_global *g2d_dev)
+{
+ exynos_sysmmu_disable(g2d_dev->dev);
+}
+
+/*
+ * g2d_fail_debug - dump the full parameter set of a failed blit
+ * (src/dst rectangles, clip window and flags) to the error log.
+ */
+void g2d_fail_debug(g2d_params *params)
+{
+ FIMG2D_ERROR("src : %d, %d, %d, %d / %d, %d / 0x%x, %d, 0x%x)\n",
+ params->src_rect.x,
+ params->src_rect.y,
+ params->src_rect.w,
+ params->src_rect.h,
+ params->src_rect.full_w,
+ params->src_rect.full_h,
+ params->src_rect.color_format,
+ params->src_rect.bytes_per_pixel,
+ (u32)params->src_rect.addr);
+ FIMG2D_ERROR("dst : %d, %d, %d, %d / %d, %d / 0x%x, %d, 0x%x)\n",
+ params->dst_rect.x,
+ params->dst_rect.y,
+ params->dst_rect.w,
+ params->dst_rect.h,
+ params->dst_rect.full_w,
+ params->dst_rect.full_h,
+ params->dst_rect.color_format,
+ params->dst_rect.bytes_per_pixel,
+ (u32)params->dst_rect.addr);
+ FIMG2D_ERROR("clip: %d, %d, %d, %d\n",
+ params->clip.t,
+ params->clip.b,
+ params->clip.l,
+ params->clip.r);
+ FIMG2D_ERROR("flag: %d, %d, %d, %d / %d, %d, %d, %d / %d, %d, %d, %d\n",
+ params->flag.rotate_val,
+ params->flag.alpha_val,
+ params->flag.blue_screen_mode,
+ params->flag.color_key_val,
+ params->flag.color_switch_val,
+ params->flag.src_color,
+ params->flag.third_op_mode,
+ params->flag.rop_mode,
+ params->flag.mask_mode,
+ params->flag.render_mode,
+ params->flag.potterduff_mode,
+ params->flag.memory_type);
+}
+
+/*
+ * g2d_init_regs - validate params, reset the engine, and program all
+ * blit registers in order (src, dst, rotation, clip, color key, pattern,
+ * alpha), accumulating BITBLT_COMMAND bits that g2d_set_bitblt_cmd()
+ * finally writes. Returns 0 on success, -1 if g2d_check_params() rejects
+ * the request. Note the pattern helper is fed src_rect.
+ */
+int g2d_init_regs(struct g2d_global *g2d_dev, g2d_params *params)
+{
+ u32 blt_cmd = 0;
+
+ g2d_rect * src_rect = &params->src_rect;
+ g2d_rect * dst_rect = &params->dst_rect;
+ g2d_clip * clip = &params->clip;
+ g2d_flag * flag = &params->flag;
+
+ if (g2d_check_params(params) < 0)
+ return -1;
+
+ g2d_reset(g2d_dev);
+
+ /* source image */
+ blt_cmd |= g2d_set_src_img(g2d_dev, src_rect, flag);
+
+ /* destination image */
+ blt_cmd |= g2d_set_dst_img(g2d_dev, dst_rect);
+
+ /* rotation */
+ blt_cmd |= g2d_set_rotation(g2d_dev, flag);
+
+ /* clipping */
+ blt_cmd |= g2d_set_clip_win(g2d_dev, clip);
+
+ /* color key */
+ blt_cmd |= g2d_set_color_key(g2d_dev, flag);
+
+ /* pattern */
+ blt_cmd |= g2d_set_pattern(g2d_dev, src_rect, flag);
+
+ /* rop & alpha blending */
+ blt_cmd |= g2d_set_alpha(g2d_dev, flag);
+
+ /* command */
+ g2d_set_bitblt_cmd(g2d_dev, src_rect, dst_rect, clip, blt_cmd);
+
+ return 0;
+}
+
+/*
+ * g2d_check_overlap - report whether the source buffer and the clipped
+ * destination buffer overlap in the virtual address space.
+ * Returns true on any overlap (dst starts in src, dst ends in src, or
+ * src is fully contained in dst), false otherwise.
+ */
+int g2d_check_overlap(g2d_rect src_rect, g2d_rect dst_rect, g2d_clip clip)
+{
+ unsigned int src_start, src_end;
+ unsigned int dst_start, dst_end;
+
+ src_start = (unsigned int)GET_START_ADDR(src_rect);
+ src_end = src_start + (unsigned int)GET_RECT_SIZE(src_rect);
+ dst_start = (unsigned int)GET_START_ADDR_C(dst_rect, clip);
+ dst_end = dst_start + (unsigned int)GET_RECT_SIZE_C(dst_rect, clip);
+
+ if ((dst_start >= src_start && dst_start <= src_end) ||
+     (dst_end >= src_start && dst_end <= src_end) ||
+     (src_start >= dst_start && src_end <= dst_end))
+ return true;
+
+ return false;
+}
+
+/*
+ * g2d_do_blit - validate, prepare caches/sysmmu, program and start a blit.
+ * For kernel memory the physical addresses are converted to kernel
+ * virtuals and init_mm's pgd is used; for user memory the caller's mm is
+ * used and the src/dst page tables are validated and cleaned first.
+ * Returns true when the blit was started, false on any validation error.
+ * Completion is waited for separately in g2d_wait_for_finish().
+ */
+int g2d_do_blit(struct g2d_global *g2d_dev, g2d_params *params)
+{
+ unsigned long pgd;
+ int need_dst_clean = true;
+
+ if ((params->src_rect.addr == NULL)
+ || (params->dst_rect.addr == NULL)) {
+ FIMG2D_ERROR("error : addr Null\n");
+ return false;
+ }
+
+ if (params->flag.memory_type == G2D_MEMORY_KERNEL) {
+ params->src_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->src_rect.addr);
+ params->dst_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->dst_rect.addr);
+ pgd = (unsigned long)init_mm.pgd;
+ } else {
+ pgd = (unsigned long)current->mm->pgd;
+ }
+
+ if (params->flag.memory_type == G2D_MEMORY_USER)
+ {
+ /* NOTE(review): clip_src is computed but never used below --
+  * looks like dead code left from a refactor. */
+ g2d_clip clip_src;
+ g2d_clip_for_src(&params->src_rect, &params->dst_rect, &params->clip, &clip_src);
+
+ /* the engine cannot blit in place */
+ if (g2d_check_overlap(params->src_rect, params->dst_rect, params->clip))
+ return false;
+
+ g2d_dev->src_attribute =
+ g2d_check_pagetable((unsigned char *)GET_START_ADDR(params->src_rect),
+ (unsigned int)GET_RECT_SIZE(params->src_rect) + 8,
+ (u32)virt_to_phys((void *)pgd));
+ if (g2d_dev->src_attribute == G2D_PT_NOTVALID) {
+ FIMG2D_DEBUG("Src is not in valid pagetable\n");
+ return false;
+ }
+
+ g2d_dev->dst_attribute =
+ g2d_check_pagetable((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
+ (unsigned int)GET_RECT_SIZE_C(params->dst_rect, params->clip),
+ (u32)virt_to_phys((void *)pgd));
+ if (g2d_dev->dst_attribute == G2D_PT_NOTVALID) {
+ FIMG2D_DEBUG("Dst is not in valid pagetable\n");
+ return false;
+ }
+
+ /* make the CPU page tables visible to the sysmmu */
+ g2d_pagetable_clean((unsigned char *)GET_START_ADDR(params->src_rect),
+ (u32)GET_RECT_SIZE(params->src_rect) + 8,
+ (u32)virt_to_phys((void *)pgd));
+ g2d_pagetable_clean((unsigned char *)GET_START_ADDR_C(params->dst_rect, params->clip),
+ (u32)GET_RECT_SIZE_C(params->dst_rect, params->clip),
+ (u32)virt_to_phys((void *)pgd));
+
+ if (params->flag.render_mode & G2D_CACHE_OP) {
+ /*g2d_mem_cache_oneshot((void *)GET_START_ADDR(params->src_rect),
+ (void *)GET_START_ADDR(params->dst_rect),
+ (unsigned int)GET_REAL_SIZE(params->src_rect),
+ (unsigned int)GET_REAL_SIZE(params->dst_rect));*/
+ // need_dst_clean = g2d_check_need_dst_cache_clean(params);
+ g2d_mem_inner_cache(params);
+ g2d_mem_outer_cache(g2d_dev, params, &need_dst_clean);
+ }
+ }
+
+ /* repoint the sysmmu at the chosen page table */
+ exynos_sysmmu_disable(g2d_dev->dev);
+ exynos_sysmmu_enable(g2d_dev->dev, (u32)virt_to_phys((void *)pgd));
+
+ if(g2d_init_regs(g2d_dev, params) < 0) {
+ return false;
+ }
+
+ /* Do bitblit */
+ g2d_start_bitblt(g2d_dev, params);
+
+ /* dst clean was skipped: invalidate so the CPU rereads fresh data */
+ if (!need_dst_clean)
+ g2d_mem_outer_cache_inv(params);
+
+ return true;
+}
+
+/*
+ * g2d_wait_for_finish - wait for a started blit to complete.
+ * Polls the FIFO in G2D_POLLING mode, otherwise sleeps on waitq until
+ * the IRQ handler sets irq_handled or G2D_TIMEOUT expires. Any sysmmu
+ * fault (before, during, or after the wait) dumps the parameters and
+ * returns false; a timeout also resets the engine.
+ */
+int g2d_wait_for_finish(struct g2d_global *g2d_dev, g2d_params *params)
+{
+ if(atomic_read(&g2d_dev->is_mmu_faulted) == 1) {
+ FIMG2D_ERROR("error : sysmmu_faulted early\n");
+ FIMG2D_ERROR("faulted addr: 0x%x\n", g2d_dev->faulted_addr);
+ g2d_fail_debug(params);
+ atomic_set(&g2d_dev->is_mmu_faulted, 0);
+ return false;
+ }
+
+ if (params->flag.render_mode & G2D_POLLING) {
+ g2d_check_fifo_state_wait(g2d_dev);
+ } else {
+ if(wait_event_interruptible_timeout(g2d_dev->waitq,
+ g2d_dev->irq_handled == 1,
+ msecs_to_jiffies(G2D_TIMEOUT)) == 0) {
+ if(atomic_read(&g2d_dev->is_mmu_faulted) == 1) {
+ FIMG2D_ERROR("error : sysmmu_faulted\n");
+ FIMG2D_ERROR("faulted addr: 0x%x\n", g2d_dev->faulted_addr);
+ } else {
+ g2d_reset(g2d_dev);
+ FIMG2D_ERROR("error : waiting for interrupt is timeout\n");
+ }
+ atomic_set(&g2d_dev->is_mmu_faulted, 0);
+ g2d_fail_debug(params);
+ return false;
+ } else if(atomic_read(&g2d_dev->is_mmu_faulted) == 1) {
+ FIMG2D_ERROR("error : sysmmu_faulted but auto recoveried\n");
+ FIMG2D_ERROR("faulted addr: 0x%x\n", g2d_dev->faulted_addr);
+ g2d_fail_debug(params);
+ atomic_set(&g2d_dev->is_mmu_faulted, 0);
+ return false;
+ }
+ }
+ return true;
+}
+
+/*
+ * g2d_init_mem - reserve the device memory region at probe time.
+ * CMA build: queries the region, returns its size in *size and a
+ * physical base in *base. Bootmem build: takes the static media bank.
+ * Returns 0 on success, -1 when cma_info() fails.
+ * NOTE(review): the bootmem path leaves *size untouched -- confirm the
+ * caller initializes it.
+ */
+int g2d_init_mem(struct device *dev, unsigned int *base, unsigned int *size)
+{
+#ifdef CONFIG_S5P_MEM_CMA
+ struct cma_info mem_info;
+ int err;
+ char cma_name[8];
+#endif
+
+#ifdef CONFIG_S5P_MEM_CMA
+ /* CMA */
+ /* snprintf instead of sprintf: bound the write to the 8-byte buffer */
+ snprintf(cma_name, sizeof(cma_name), "fimg2d");
+ err = cma_info(&mem_info, dev, 0);
+ FIMG2D_DEBUG("[cma_info] start_addr : 0x%x, end_addr : 0x%x, "
+ "total_size : 0x%x, free_size : 0x%x\n",
+ mem_info.lower_bound, mem_info.upper_bound,
+ mem_info.total_size, mem_info.free_size);
+ if (err) {
+ FIMG2D_ERROR("%s: get cma info failed\n", __func__);
+ return -1;
+ }
+ *size = mem_info.total_size;
+ *base = (dma_addr_t)cma_alloc
+ (dev, cma_name, (size_t)(*size), 0);
+
+ FIMG2D_DEBUG("size = 0x%x\n", *size);
+ FIMG2D_DEBUG("*base phys= 0x%x\n", *base);
+ FIMG2D_DEBUG("*base virt = 0x%x\n", (u32)phys_to_virt(*base));
+
+#else
+ *base = s5p_get_media_memory_bank(S5P_MDEV_FIMG2D, 0);
+#endif
+ return 0;
+}
+
diff --git a/drivers/media/video/samsung/fimg2d3x/fimg2d_dev.c b/drivers/media/video/samsung/fimg2d3x/fimg2d_dev.c
new file mode 100644
index 0000000..5ccde4a
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d3x/fimg2d_dev.c
@@ -0,0 +1,609 @@
+/* drivers/media/video/samsung/fimg2d3x/fimg2d3x_dev.c
+ *
+ * Copyright 2010 Samsung Electronics Co, Ltd. All Rights Reserved.
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file implements fimg2d driver.
+ */
+
+#include <linux/init.h>
+
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <asm/uaccess.h>
+#include <linux/errno.h>
+#include <asm/uaccess.h>
+#include <linux/miscdevice.h>
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/semaphore.h>
+
+#include <asm/io.h>
+
+#include <mach/cpufreq.h>
+#include <plat/cpu.h>
+#include <plat/fimg2d.h>
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+#include <linux/pm_runtime.h>
+#endif
+
+#include "fimg2d.h"
+#include "fimg2d3x_regs.h"
+
+#include <linux/smp.h>
+
+struct g2d_global *g2d_dev;
+
+/*
+ * g2d_sysmmu_fault - system-MMU page-fault callback for the G2D block.
+ * @faulted_addr: device virtual address that caused the fault
+ * @pt_base:      page-table base of the faulting context (unused here)
+ *
+ * Resets the hardware, records the fault so the waiter in the blit path
+ * can report it (is_mmu_faulted / faulted_addr), and wakes any sleeper
+ * on the driver wait queue. Always returns 0.
+ */
+int g2d_sysmmu_fault(unsigned int faulted_addr, unsigned int pt_base)
+{
+	g2d_reset(g2d_dev);
+
+	atomic_set(&g2d_dev->is_mmu_faulted, 1);
+
+	g2d_dev->faulted_addr = faulted_addr;
+
+	wake_up_interruptible(&g2d_dev->waitq);
+
+	return 0;
+}
+
+
+/*
+ * g2d_irq - blit-done interrupt handler.
+ *
+ * Acknowledges the interrupt in hardware, marks the command as handled
+ * (irq_handled is what g2d_wait_for_finish() sleeps on), wakes the
+ * waiter and clears the in_use flag consumed by g2d_poll().
+ */
+irqreturn_t g2d_irq(int irq, void *dev_id)
+{
+	g2d_set_int_finish(g2d_dev);
+
+	g2d_dev->irq_handled = 1;
+
+	wake_up_interruptible(&g2d_dev->waitq);
+
+	atomic_set(&g2d_dev->in_use, 0);
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * g2d_open - char-device open; only bumps the open-context counter.
+ * No per-file state is allocated, so every opener shares g2d_dev.
+ */
+static int g2d_open(struct inode *inode, struct file *file)
+{
+	atomic_inc(&g2d_dev->num_of_object);
+
+	FIMG2D_DEBUG("Context Opened %d\n", atomic_read(&g2d_dev->num_of_object));
+
+	return 0;
+}
+
+
+/*
+ * g2d_release - char-device close; mirror of g2d_open, drops the
+ * open-context counter. Nothing else is torn down here.
+ */
+static int g2d_release(struct inode *inode, struct file *file)
+{
+	atomic_dec(&g2d_dev->num_of_object);
+
+	FIMG2D_DEBUG("Context Closed %d\n", atomic_read(&g2d_dev->num_of_object));
+
+	return 0;
+}
+
+/*
+ * g2d_mmap - mmap stub: succeeds without mapping anything.
+ * Userspace obtains the reserved-memory base/size via the
+ * G2D_GET_MEMORY* ioctls instead of mapping through this hook.
+ */
+static int g2d_mmap(struct file* filp, struct vm_area_struct *vma)
+{
+	return 0;
+}
+
+
+/*
+ * g2d_ioctl - main userspace entry point for the G2D driver.
+ *
+ * Commands:
+ *   G2D_GET_MEMORY / G2D_GET_MEMORY_SIZE - copy reserved-mem base/size out.
+ *   G2D_DMA_CACHE_CLEAN / FLUSH          - cache maintenance on a user range.
+ *   G2D_SYNC                             - wait for the engine FIFO to drain.
+ *   G2D_RESET                            - hard-reset the engine.
+ *   G2D_BLIT                             - submit one blit and (unless the fd
+ *                                          is O_NONBLOCK) wait for completion.
+ *
+ * Returns 0 on success, -EINVAL on copy failures, otherwise -1 is
+ * propagated for internal failures (blit submit/finish errors).
+ */
+static long g2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	g2d_params params;
+	int ret = -1;
+
+	struct g2d_dma_info dma_info;
+
+	switch(cmd) {
+	case G2D_GET_MEMORY :
+		ret = copy_to_user((unsigned int *)arg,
+			&(g2d_dev->reserved_mem.base), sizeof(g2d_dev->reserved_mem.base));
+		if (ret) {
+			FIMG2D_ERROR("error : copy_to_user\n");
+			return -EINVAL;
+		}
+		return 0;
+
+	case G2D_GET_MEMORY_SIZE :
+		ret = copy_to_user((unsigned int *)arg,
+			&(g2d_dev->reserved_mem.size), sizeof(g2d_dev->reserved_mem.size));
+		if (ret) {
+			FIMG2D_ERROR("error : copy_to_user\n");
+			return -EINVAL;
+		}
+		return 0;
+
+	case G2D_DMA_CACHE_CLEAN :
+	case G2D_DMA_CACHE_FLUSH :
+		mutex_lock(&g2d_dev->lock);
+		ret = copy_from_user(&dma_info, (struct g2d_dma_info *)arg, sizeof(dma_info));
+
+		if (ret) {
+			FIMG2D_ERROR("error : copy_from_user\n");
+			mutex_unlock(&g2d_dev->lock);
+			return -EINVAL;
+		}
+
+		if (dma_info.addr == 0) {
+			FIMG2D_ERROR("addr Null Error!!!\n");
+			mutex_unlock(&g2d_dev->lock);
+			return -EINVAL;
+		}
+
+		g2d_mem_cache_op(cmd, (void *)dma_info.addr, dma_info.size);
+		mutex_unlock(&g2d_dev->lock);
+		return 0;
+
+	/*
+	 * NOTE(review): G2D_SYNC and G2D_RESET jump to g2d_ioctl_done,
+	 * which calls g2d_clk_disable() and mutex_unlock() — but neither
+	 * the clock nor the mutex was taken on these paths. Unlocking an
+	 * unheld mutex is undefined; confirm and rework these two cases.
+	 */
+	case G2D_SYNC :
+		g2d_check_fifo_state_wait(g2d_dev);
+		ret = 0;
+		goto g2d_ioctl_done;
+
+	case G2D_RESET :
+		g2d_reset(g2d_dev);
+		FIMG2D_ERROR("G2D TimeOut Error\n");
+		ret = 0;
+		goto g2d_ioctl_done;
+
+	case G2D_BLIT:
+		/* Refuse new work while (early-)suspend is in progress. */
+		if (atomic_read(&g2d_dev->ready_to_run) == 0)
+			goto g2d_ioctl_done2;
+
+		mutex_lock(&g2d_dev->lock);
+
+		g2d_clk_enable(g2d_dev);
+
+		/*
+		 * NOTE(review): on copy_from_user failure ret is still -1
+		 * (not -EFAULT/-EINVAL) when we unwind via g2d_ioctl_done.
+		 */
+		if (copy_from_user(&params, (struct g2d_params *)arg, sizeof(g2d_params))) {
+			FIMG2D_ERROR("error : copy_from_user\n");
+			goto g2d_ioctl_done;
+		}
+
+		atomic_set(&g2d_dev->in_use, 1);
+		/* Re-check after marking busy: suspend may have raced in. */
+		if (atomic_read(&g2d_dev->ready_to_run) == 0)
+			goto g2d_ioctl_done;
+
+		/*
+		 * For user-memory blits hold page_alloc_slow_rwsem for
+		 * the whole operation so the pinned user pages are not
+		 * reclaimed while the hardware reads them.
+		 */
+		if (params.flag.memory_type == G2D_MEMORY_USER)
+			down_write(&page_alloc_slow_rwsem);
+
+		g2d_dev->irq_handled = 0;
+		if (!g2d_do_blit(g2d_dev, &params)) {
+			g2d_dev->irq_handled = 1;
+			if (params.flag.memory_type == G2D_MEMORY_USER)
+				up_write(&page_alloc_slow_rwsem);
+			goto g2d_ioctl_done;
+		}
+
+		/* Blocking fd: sleep until the blit-done irq (or timeout). */
+		if(!(file->f_flags & O_NONBLOCK)) {
+			if (!g2d_wait_for_finish(g2d_dev, &params)) {
+				if (params.flag.memory_type == G2D_MEMORY_USER)
+					up_write(&page_alloc_slow_rwsem);
+				goto g2d_ioctl_done;
+			}
+		}
+
+		if (params.flag.memory_type == G2D_MEMORY_USER)
+			up_write(&page_alloc_slow_rwsem);
+		ret = 0;
+
+		break;
+	default :
+		goto g2d_ioctl_done2;
+		break;
+	}
+
+g2d_ioctl_done :
+
+	g2d_clk_disable(g2d_dev);
+
+	mutex_unlock(&g2d_dev->lock);
+
+	atomic_set(&g2d_dev->in_use, 0);
+
+g2d_ioctl_done2 :
+
+	return ret;
+}
+
+/*
+ * g2d_poll - report the device writable once the engine is idle.
+ *
+ * POLLOUT|POLLWRNORM is returned when in_use drops to 0 (set by the
+ * blit-done irq); otherwise the caller is parked on g2d_dev->waitq.
+ *
+ * NOTE(review): this handler calls g2d_clk_disable() and
+ * mutex_unlock(&g2d_dev->lock) on the idle path, apparently to release
+ * state left held by an O_NONBLOCK G2D_BLIT. Unlocking a mutex from a
+ * task that does not hold it is undefined behavior — confirm the
+ * intended non-blocking handshake and restructure if possible.
+ */
+static unsigned int g2d_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+
+	if (atomic_read(&g2d_dev->in_use) == 0) {
+		mask = POLLOUT | POLLWRNORM;
+		g2d_clk_disable(g2d_dev);
+
+		mutex_unlock(&g2d_dev->lock);
+
+	} else {
+		poll_wait(file, &g2d_dev->waitq, wait);
+
+		if(atomic_read(&g2d_dev->in_use) == 0) {
+			mask = POLLOUT | POLLWRNORM;
+			g2d_clk_disable(g2d_dev);
+
+			mutex_unlock(&g2d_dev->lock);
+		}
+	}
+
+	return mask;
+}
+
+/* File operations exposed through the /dev/fimg2d misc device. */
+static struct file_operations fimg2d_fops = {
+	.owner 		= THIS_MODULE,
+	.open 		= g2d_open,
+	.release 	= g2d_release,
+	.mmap 		= g2d_mmap,
+	.unlocked_ioctl	= g2d_ioctl,
+	.poll		= g2d_poll,
+};
+
+
+/* Misc character device: /dev/fimg2d with a fixed minor (G2D_MINOR). */
+static struct miscdevice fimg2d_dev = {
+	.minor		= G2D_MINOR,
+	.name		= "fimg2d",
+	.fops		= &fimg2d_fops,
+};
+
+/*
+ * g2d_probe - platform-device probe: bring up the whole driver.
+ *
+ * Order of operations: allocate the global state, enable runtime PM,
+ * map the register window, hook the irq, set up the clock tree
+ * (sclk_fimg2d parented to mout_mpll at 267 MHz), reserve working
+ * memory, register the misc device, optionally register early-suspend
+ * hooks, then enable the sysmmu and mark the engine ready_to_run.
+ *
+ * Returns 0 on success or a negative errno; the error ladder at the
+ * bottom unwinds in reverse acquisition order.
+ */
+static int g2d_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+	struct clk *parent;
+	struct clk *sclk;
+
+	FIMG2D_DEBUG("start probe : name=%s num=%d res[0].start=0x%x res[1].start=0x%x\n",
+			pdev->name, pdev->num_resources,
+			pdev->resource[0].start, pdev->resource[1].start);
+
+	/* alloc g2d global */
+	g2d_dev = kzalloc(sizeof(*g2d_dev), GFP_KERNEL);
+	if (!g2d_dev) {
+		FIMG2D_ERROR( "not enough memory\n");
+		ret = -ENOENT;
+		goto probe_out;
+	}
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* to use the runtime PM helper functions */
+	pm_runtime_enable(&pdev->dev);
+	/* enable the power domain */
+	pm_runtime_get_sync(&pdev->dev);
+#endif
+
+	/* get the memory region */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if(res == NULL) {
+		FIMG2D_ERROR("failed to get memory region resouce\n");
+		ret = -ENOENT;
+		goto err_get_res;
+	}
+
+	/* request momory region */
+	g2d_dev->mem = request_mem_region(res->start,
+					res->end - res->start + 1,
+					pdev->name);
+	if(g2d_dev->mem == NULL) {
+		FIMG2D_ERROR("failed to reserve memory region\n");
+		ret = -ENOENT;
+		goto err_mem_req;
+	}
+
+	/* ioremap */
+	g2d_dev->base = ioremap(g2d_dev->mem->start,
+				g2d_dev->mem->end - res->start + 1);
+	if(g2d_dev->base == NULL) {
+		FIMG2D_ERROR("failed ioremap\n");
+		ret = -ENOENT;
+		goto err_mem_map;
+	}
+
+	/* get irq */
+	g2d_dev->irq_num = platform_get_irq(pdev, 0);
+	if(g2d_dev->irq_num <= 0) {
+		FIMG2D_ERROR("failed to get irq resouce\n");
+		ret = -ENOENT;
+		goto err_irq_req;
+	}
+
+	/* blocking I/O */
+	init_waitqueue_head(&g2d_dev->waitq);
+
+	/*
+	 * request irq
+	 * NOTE(review): IRQF_DISABLED is deprecated and a no-op on
+	 * modern kernels — confirm it can simply be dropped.
+	 */
+	ret = request_irq(g2d_dev->irq_num, g2d_irq,
+			IRQF_DISABLED, pdev->name, NULL);
+	if (ret) {
+		FIMG2D_ERROR("request_irq(g2d) failed.\n");
+		ret = -ENOENT;
+		goto err_irq_req;
+	}
+
+	/* clock domain setting*/
+	parent = clk_get(&pdev->dev, "mout_mpll");
+	if (IS_ERR(parent)) {
+		FIMG2D_ERROR("failed to get parent clock\n");
+		ret = -ENOENT;
+		goto err_clk_get1;
+	}
+
+	sclk = clk_get(&pdev->dev, "sclk_fimg2d");
+	if (IS_ERR(sclk)) {
+		FIMG2D_ERROR("failed to get sclk_g2d clock\n");
+		ret = -ENOENT;
+		goto err_clk_get2;
+	}
+
+	clk_set_parent(sclk, parent);
+	clk_set_rate(sclk, 267 * MHZ);	/* 266 Mhz */
+
+	/* clock for gating */
+	g2d_dev->clock = clk_get(&pdev->dev, "fimg2d");
+	if (IS_ERR(g2d_dev->clock)) {
+		FIMG2D_ERROR("failed to get clock clock\n");
+		ret = -ENOENT;
+		goto err_clk_get3;
+	}
+
+	ret = g2d_init_mem(&pdev->dev, &g2d_dev->reserved_mem.base, &g2d_dev->reserved_mem.size);
+
+	if (ret != 0) {
+		FIMG2D_ERROR("failed to init. fimg2d mem");
+		ret = -ENOMEM;
+		goto err_mem;
+	}
+
+	/* atomic init */
+	atomic_set(&g2d_dev->in_use, 0);
+	atomic_set(&g2d_dev->num_of_object, 0);
+	atomic_set(&g2d_dev->is_mmu_faulted, 0);
+	g2d_dev->faulted_addr = 0;
+
+	/*
+	 * misc register
+	 * NOTE(review): on failure here the memory reserved by
+	 * g2d_init_mem() is never released — confirm whether the CMA
+	 * allocation should be freed on this path.
+	 */
+	ret = misc_register(&fimg2d_dev);
+	if (ret) {
+		FIMG2D_ERROR("cannot register miscdev on minor=%d (%d)\n",
+				G2D_MINOR, ret);
+		ret = -ENOMEM;
+		goto err_misc_reg;
+	}
+
+	mutex_init(&g2d_dev->lock);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	g2d_dev->early_suspend.suspend = g2d_early_suspend;
+	g2d_dev->early_suspend.resume = g2d_late_resume;
+	g2d_dev->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
+	register_early_suspend(&g2d_dev->early_suspend);
+#endif
+
+	g2d_dev->dev = &pdev->dev;
+	atomic_set(&g2d_dev->ready_to_run, 1);
+
+	g2d_sysmmu_on(g2d_dev);
+
+	FIMG2D_DEBUG("g2d_probe ok!\n");
+
+	return 0;
+
+/* Unwind in reverse order of acquisition. */
+err_misc_reg:
+err_mem:
+	clk_put(g2d_dev->clock);
+	g2d_dev->clock = NULL;
+err_clk_get3:
+	clk_put(sclk);
+err_clk_get2:
+	clk_put(parent);
+err_clk_get1:
+	free_irq(g2d_dev->irq_num, NULL);
+err_irq_req:
+	iounmap(g2d_dev->base);
+err_mem_map:
+	release_resource(g2d_dev->mem);
+	kfree(g2d_dev->mem);
+err_mem_req:
+err_get_res:
+	kfree(g2d_dev);
+probe_out:
+	FIMG2D_ERROR("g2d: sec_g2d_probe fail!\n");
+	return ret;
+}
+
+
+/*
+ * g2d_remove - platform-device remove: tear down everything probe set up.
+ * Releases irq, register mapping and mem region, deregisters the misc
+ * device, drops the gating clock, destroys the mutex, unhooks
+ * early-suspend, frees the global state and finally releases the power
+ * domain via runtime PM.
+ */
+static int g2d_remove(struct platform_device *dev)
+{
+	FIMG2D_DEBUG("g2d_remove called !\n");
+
+	/* dev_id must match the NULL passed to request_irq() in probe */
+	free_irq(g2d_dev->irq_num, NULL);
+
+	if (g2d_dev->mem != NULL) {
+		FIMG2D_INFO("releasing resource\n");
+		iounmap(g2d_dev->base);
+		release_resource(g2d_dev->mem);
+		kfree(g2d_dev->mem);
+	}
+
+	misc_deregister(&fimg2d_dev);
+
+	atomic_set(&g2d_dev->in_use, 0);
+	atomic_set(&g2d_dev->num_of_object, 0);
+
+	g2d_clk_disable(g2d_dev);
+
+	if (g2d_dev->clock) {
+		clk_put(g2d_dev->clock);
+		g2d_dev->clock = NULL;
+	}
+
+	mutex_destroy(&g2d_dev->lock);
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+	unregister_early_suspend(&g2d_dev->early_suspend);
+#endif
+
+	kfree(g2d_dev);
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* disable the power domain */
+	pm_runtime_put(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+#endif
+
+	FIMG2D_DEBUG("g2d_remove ok!\n");
+
+	return 0;
+}
+
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+/*
+ * g2d_early_suspend - early-suspend hook (screen-off path).
+ *
+ * Blocks new blits (ready_to_run=0), busy-waits in 2 ms steps until
+ * the current blit drains (in_use==0), then powers down the sysmmu
+ * and drops the power-domain reference.
+ */
+void g2d_early_suspend(struct early_suspend *h)
+{
+	atomic_set(&g2d_dev->ready_to_run, 0);
+
+	/* wait until G2D running is finished */
+	while(1) {
+		if (!atomic_read(&g2d_dev->in_use))
+			break;
+
+		msleep_interruptible(2);
+	}
+
+	g2d_sysmmu_off(g2d_dev);
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* disable the power domain */
+	pm_runtime_put(g2d_dev->dev);
+#endif
+}
+
+/*
+ * g2d_late_resume - early-suspend resume hook (screen-on path).
+ * Re-acquires the power domain, re-enables the sysmmu and re-opens
+ * the engine for blits (ready_to_run=1). Mirrors g2d_early_suspend.
+ */
+void g2d_late_resume(struct early_suspend *h)
+{
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* enable the power domain */
+	pm_runtime_get_sync(g2d_dev->dev);
+#endif
+
+	g2d_sysmmu_on(g2d_dev);
+
+	atomic_set(&g2d_dev->ready_to_run, 1);
+
+}
+
+#if !defined(CONFIG_HAS_EARLYSUSPEND)
+/*
+ * g2d_suspend - legacy platform suspend, used only when early-suspend
+ * is not compiled in. Same sequence as g2d_early_suspend: gate new
+ * work, drain the running blit, power down sysmmu and power domain.
+ */
+static int g2d_suspend(struct platform_device *dev, pm_message_t state)
+{
+	atomic_set(&g2d_dev->ready_to_run, 0);
+
+	/* wait until G2D running is finished */
+	while(1) {
+		if (!atomic_read(&g2d_dev->in_use))
+			break;
+
+		msleep_interruptible(2);
+	}
+
+	g2d_sysmmu_off(g2d_dev);
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* disable the power domain */
+	pm_runtime_put(g2d_dev->dev);
+#endif
+
+	return 0;
+}
+/*
+ * g2d_resume - legacy platform resume counterpart of g2d_suspend:
+ * power domain up, sysmmu on, accept blits again.
+ */
+static int g2d_resume(struct platform_device *pdev)
+{
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+	/* enable the power domain */
+	pm_runtime_get_sync(g2d_dev->dev);
+#endif
+
+	g2d_sysmmu_on(g2d_dev);
+
+	atomic_set(&g2d_dev->ready_to_run, 1);
+
+	return 0;
+}
+
+#if defined(CONFIG_EXYNOS_DEV_PD)
+/*
+ * Runtime-PM callbacks: intentionally empty — the power domain itself
+ * is switched by the PD framework; the driver only takes/puts
+ * references in (early-)suspend/resume and probe/remove.
+ */
+static int g2d_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int g2d_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops g2d_pm_ops = {
+	.runtime_suspend = g2d_runtime_suspend,
+	.runtime_resume = g2d_runtime_resume,
+};
+#endif
+
+
+/*
+ * Platform driver binding to "s5p-fimg2d". Legacy suspend/resume
+ * hooks are used only when early-suspend is unavailable; runtime-PM
+ * ops are wired in only with power-domain support.
+ */
+static struct platform_driver fimg2d_driver = {
+	.probe		= g2d_probe,
+	.remove		= g2d_remove,
+#if !defined(CONFIG_HAS_EARLYSUSPEND)
+	.suspend	= g2d_suspend,
+	.resume		= g2d_resume,
+#endif
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "s5p-fimg2d",
+#if defined(CONFIG_EXYNOS_DEV_PD)
+		.pm	= &g2d_pm_ops,
+#endif
+	},
+};
+
+/*
+ * g2d_init - module init: register the platform driver.
+ * Returns 0 on success, -1 if registration fails.
+ */
+int __init g2d_init(void)
+{
+	if(platform_driver_register(&fimg2d_driver)!=0) {
+		FIMG2D_ERROR("platform device register Failed \n");
+		return -1;
+	}
+
+	FIMG2D_DEBUG("ok!\n");
+
+	return 0;
+}
+
+/* g2d_exit - module exit: unregister the platform driver. */
+void g2d_exit(void)
+{
+	platform_driver_unregister(&fimg2d_driver);
+
+	FIMG2D_DEBUG("ok!\n");
+}
+
+module_init(g2d_init);
+module_exit(g2d_exit);
+
+MODULE_AUTHOR("");
+MODULE_DESCRIPTION("SEC G2D Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/Kconfig b/drivers/media/video/samsung/fimg2d4x-exynos4/Kconfig
new file mode 100644
index 0000000..51294d7
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/Kconfig
@@ -0,0 +1,23 @@
+# drivers/media/video/samsung/fimg2d4x/Kconfig
+#
+# Copyright (c) 2011 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+#
+# Licensed under GPLv2
+
+config VIDEO_FIMG2D4X
+ bool "Samsung Graphics 2D Driver"
+ select VIDEO_FIMG2D
+ depends on VIDEO_SAMSUNG && (CPU_EXYNOS4212 || CPU_EXYNOS4412 || CPU_EXYNOS5250)
+ default n
+ ---help---
+ This is a graphics 2D (FIMG2D 4.x) driver for Samsung ARM based SoC.
+
+config VIDEO_FIMG2D4X_DEBUG
+ bool "Enables FIMG2D debug messages"
+ select VIDEO_FIMG2D_DEBUG
+ depends on VIDEO_FIMG2D4X
+ default n
+ ---help---
+ This enables FIMG2D driver debug messages.
+
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/Makefile b/drivers/media/video/samsung/fimg2d4x-exynos4/Makefile
new file mode 100644
index 0000000..40b93a9
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/Makefile
@@ -0,0 +1,18 @@
+# drivers/media/video/samsung/fimg2d4x/Makefile
+#
+# Copyright (c) 2011 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+#
+# Licensed under GPLv2
+
+obj-y :=
+obj-m :=
+obj-n :=
+obj- :=
+
+obj-$(CONFIG_VIDEO_FIMG2D) += fimg2d_drv.o fimg2d_ctx.o fimg2d_cache.o fimg2d_clk.o fimg2d_helper.o
+obj-$(CONFIG_VIDEO_FIMG2D4X) += fimg2d4x_blt.o fimg2d4x_hw.o
+
+ifeq ($(CONFIG_VIDEO_FIMG2D_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d.h b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d.h
new file mode 100644
index 0000000..97fc4cb
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d.h
@@ -0,0 +1,509 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D_H
+#define __FIMG2D_H __FILE__
+
+#ifdef __KERNEL__
+
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#define FIMG2D_MINOR (240)
+#define to_fimg2d_plat(d) (to_platform_device(d)->dev.platform_data)
+
+#ifdef CONFIG_VIDEO_FIMG2D_DEBUG
+#define fimg2d_debug(fmt, arg...) printk(KERN_INFO "[%s] " fmt, __func__, ## arg)
+#else
+#define fimg2d_debug(fmt, arg...) do { } while (0)
+#endif
+
+#endif /* __KERNEL__ */
+
+/* ioctl commands */
+#define FIMG2D_IOCTL_MAGIC 'F'
+#define FIMG2D_BITBLT_BLIT _IOWR(FIMG2D_IOCTL_MAGIC, 0, struct fimg2d_blit)
+#define FIMG2D_BITBLT_SYNC _IOW(FIMG2D_IOCTL_MAGIC, 1, int)
+#define FIMG2D_BITBLT_VERSION _IOR(FIMG2D_IOCTL_MAGIC, 2, struct fimg2d_version)
+
+/**
+ * @hw: hardware (FIMG2D IP) version
+ * @sw: driver software version
+ * Returned to userspace by FIMG2D_BITBLT_VERSION.
+ */
+struct fimg2d_version {
+	unsigned int hw;
+	unsigned int sw;
+};
+
+/**
+ * @BLIT_SYNC: sync mode, to wait for blit done irq
+ * @BLIT_ASYNC: async mode, not to wait for blit done irq
+ *
+ */
+enum blit_sync {
+ BLIT_SYNC,
+ BLIT_ASYNC,
+};
+
+/**
+ * @ADDR_PHYS: physical address
+ * @ADDR_USER: user virtual address (physically Non-contiguous)
+ * @ADDR_USER_CONTIG: user virtual address (physically Contiguous)
+ * @ADDR_DEVICE: specific device virtual address
+ */
+enum addr_space {
+ ADDR_NONE,
+ ADDR_PHYS,
+ ADDR_KERN,
+ ADDR_USER,
+ ADDR_USER_CONTIG,
+ ADDR_DEVICE,
+};
+
+/**
+ * Pixel order complies with little-endian style
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum pixel_order {
+ AX_RGB = 0,
+ RGB_AX,
+ AX_BGR,
+ BGR_AX,
+ ARGB_ORDER_END,
+
+ P1_CRY1CBY0,
+ P1_CBY1CRY0,
+ P1_Y1CRY0CB,
+ P1_Y1CBY0CR,
+ P1_ORDER_END,
+
+ P2_CRCB,
+ P2_CBCR,
+ P2_ORDER_END,
+};
+
+/**
+ * DO NOT CHANGE THIS ORDER
+ */
+enum color_format {
+ CF_XRGB_8888 = 0,
+ CF_ARGB_8888,
+ CF_RGB_565,
+ CF_XRGB_1555,
+ CF_ARGB_1555,
+ CF_XRGB_4444,
+ CF_ARGB_4444,
+ CF_RGB_888,
+ CF_YCBCR_444,
+ CF_YCBCR_422,
+ CF_YCBCR_420,
+ CF_A8,
+ CF_L8,
+ SRC_DST_FORMAT_END,
+
+ CF_MSK_1BIT,
+ CF_MSK_4BIT,
+ CF_MSK_8BIT,
+ CF_MSK_16BIT_565,
+ CF_MSK_16BIT_1555,
+ CF_MSK_16BIT_4444,
+ CF_MSK_32BIT_8888,
+ MSK_FORMAT_END,
+};
+
+enum rotation {
+ ORIGIN,
+ ROT_90, /* clockwise */
+ ROT_180,
+ ROT_270,
+ XFLIP, /* x-axis flip */
+ YFLIP, /* y-axis flip */
+};
+
+/**
+ * @NO_REPEAT: no effect
+ * @REPEAT_NORMAL: repeat horizontally and vertically
+ * @REPEAT_PAD: pad with pad color
+ * @REPEAT_REFLECT: reflect horizontally and vertically
+ * @REPEAT_CLAMP: pad with edge color of original image
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum repeat {
+ NO_REPEAT = 0,
+ REPEAT_NORMAL, /* default setting */
+ REPEAT_PAD,
+ REPEAT_REFLECT, REPEAT_MIRROR = REPEAT_REFLECT,
+ REPEAT_CLAMP,
+};
+
+enum scaling {
+ NO_SCALING,
+ SCALING_NEAREST,
+ SCALING_BILINEAR,
+};
+
+/**
+ * @SCALING_PIXELS: ratio in pixels
+ * @SCALING_RATIO: ratio in fixed point 16
+ */
+enum scaling_factor {
+ SCALING_PIXELS,
+ SCALING_RATIO,
+};
+
+/**
+ * premultiplied alpha
+ */
+enum premultiplied {
+ PREMULTIPLIED,
+ NON_PREMULTIPLIED,
+};
+
+/**
+ * @TRANSP: discard bluescreen color
+ * @BLUSCR: replace bluescreen color with background color
+ */
+enum bluescreen {
+ OPAQUE,
+ TRANSP,
+ BLUSCR,
+};
+
+/**
+ * DO NOT CHANGE THIS ORDER
+ */
+enum blit_op {
+ BLIT_OP_SOLID_FILL = 0,
+
+ BLIT_OP_CLR,
+ BLIT_OP_SRC, BLIT_OP_SRC_COPY = BLIT_OP_SRC,
+ BLIT_OP_DST,
+ BLIT_OP_SRC_OVER,
+ BLIT_OP_DST_OVER, BLIT_OP_OVER_REV = BLIT_OP_DST_OVER,
+ BLIT_OP_SRC_IN,
+ BLIT_OP_DST_IN, BLIT_OP_IN_REV = BLIT_OP_DST_IN,
+ BLIT_OP_SRC_OUT,
+ BLIT_OP_DST_OUT, BLIT_OP_OUT_REV = BLIT_OP_DST_OUT,
+ BLIT_OP_SRC_ATOP,
+ BLIT_OP_DST_ATOP, BLIT_OP_ATOP_REV = BLIT_OP_DST_ATOP,
+ BLIT_OP_XOR,
+
+ BLIT_OP_ADD,
+ BLIT_OP_MULTIPLY,
+ BLIT_OP_SCREEN,
+ BLIT_OP_DARKEN,
+ BLIT_OP_LIGHTEN,
+
+ BLIT_OP_DISJ_SRC_OVER,
+ BLIT_OP_DISJ_DST_OVER, BLIT_OP_SATURATE = BLIT_OP_DISJ_DST_OVER,
+ BLIT_OP_DISJ_SRC_IN,
+ BLIT_OP_DISJ_DST_IN, BLIT_OP_DISJ_IN_REV = BLIT_OP_DISJ_DST_IN,
+ BLIT_OP_DISJ_SRC_OUT,
+ BLIT_OP_DISJ_DST_OUT, BLIT_OP_DISJ_OUT_REV = BLIT_OP_DISJ_DST_OUT,
+ BLIT_OP_DISJ_SRC_ATOP,
+ BLIT_OP_DISJ_DST_ATOP, BLIT_OP_DISJ_ATOP_REV = BLIT_OP_DISJ_DST_ATOP,
+ BLIT_OP_DISJ_XOR,
+
+ BLIT_OP_CONJ_SRC_OVER,
+ BLIT_OP_CONJ_DST_OVER, BLIT_OP_CONJ_OVER_REV = BLIT_OP_CONJ_DST_OVER,
+ BLIT_OP_CONJ_SRC_IN,
+ BLIT_OP_CONJ_DST_IN, BLIT_OP_CONJ_IN_REV = BLIT_OP_CONJ_DST_IN,
+ BLIT_OP_CONJ_SRC_OUT,
+ BLIT_OP_CONJ_DST_OUT, BLIT_OP_CONJ_OUT_REV = BLIT_OP_CONJ_DST_OUT,
+ BLIT_OP_CONJ_SRC_ATOP,
+ BLIT_OP_CONJ_DST_ATOP, BLIT_OP_CONJ_ATOP_REV = BLIT_OP_CONJ_DST_ATOP,
+ BLIT_OP_CONJ_XOR,
+
+ /* user select coefficient manually */
+ BLIT_OP_USER_COEFF,
+
+ BLIT_OP_USER_SRC_GA,
+
+ /* Add new operation type here */
+
+ /* end of blit operation */
+ BLIT_OP_END,
+};
+#define MAX_FIMG2D_BLIT_OP (int)BLIT_OP_END
+
+#ifdef __KERNEL__
+
+/**
+ * @TMP: temporary buffer for 2-step blit at a single command
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum image_object {
+ IMAGE_SRC = 0,
+ IMAGE_MSK,
+ IMAGE_TMP,
+ IMAGE_DST,
+ IMAGE_END,
+};
+#define MAX_IMAGES IMAGE_END
+#define ISRC IMAGE_SRC
+#define IMSK IMAGE_MSK
+#define ITMP IMAGE_TMP
+#define IDST IMAGE_DST
+#define image_table(u) \
+ { \
+ (u)->src, \
+ (u)->msk, \
+ (u)->tmp, \
+ (u)->dst \
+ }
+
+/**
+ * @size: dma size of image
+ * @cached: cached dma size of image
+ */
+struct fimg2d_dma {
+ unsigned long addr;
+ size_t size;
+ size_t cached;
+};
+
+#endif /* __KERNEL__ */
+
+/**
+ * @start: start address or unique id of image
+ */
+struct fimg2d_addr {
+ enum addr_space type;
+ unsigned long start;
+};
+
+struct fimg2d_rect {
+ int x1;
+ int y1;
+ int x2; /* x1 + width */
+ int y2; /* y1 + height */
+};
+
+/**
+ * pixels can be different from src, dst or clip rect
+ */
+struct fimg2d_scale {
+ enum scaling mode;
+
+ /* ratio in pixels */
+ int src_w, src_h;
+ int dst_w, dst_h;
+};
+
+struct fimg2d_clip {
+ bool enable;
+ int x1;
+ int y1;
+ int x2; /* x1 + width */
+ int y2; /* y1 + height */
+};
+
+struct fimg2d_repeat {
+ enum repeat mode;
+ unsigned long pad_color;
+};
+
+/**
+ * @bg_color: bg_color is valid only if bluescreen mode is BLUSCR.
+ */
+struct fimg2d_bluscr {
+ enum bluescreen mode;
+ unsigned long bs_color;
+ unsigned long bg_color;
+};
+
+/**
+ * @plane2: address info for CbCr in YCbCr 2plane mode
+ * @rect: crop/clip rect
+ * @need_cacheopr: true if cache coherency is required
+ */
+struct fimg2d_image {
+ int width;
+ int height;
+ int stride;
+ enum pixel_order order;
+ enum color_format fmt;
+ struct fimg2d_addr addr;
+ struct fimg2d_addr plane2;
+ struct fimg2d_rect rect;
+ bool need_cacheopr;
+};
+
+/**
+ * @solid_color:
+ * src color instead of src image
+ * color format and order must be ARGB8888(A is MSB).
+ * @g_alpha: global(constant) alpha. 0xff is opaque, 0 is transparnet
+ * @dither: dithering
+ * @rotate: rotation degree in clockwise
+ * @premult: alpha premultiplied mode for read & write
+ * @scaling: common scaling info for src and mask image.
+ * @repeat: repeat type (tile mode)
+ * @bluscr: blue screen and transparent mode
+ * @clipping: clipping rect within dst rect
+ */
+struct fimg2d_param {
+ unsigned long solid_color;
+ unsigned char g_alpha;
+ bool dither;
+ enum rotation rotate;
+ enum premultiplied premult;
+ struct fimg2d_scale scaling;
+ struct fimg2d_repeat repeat;
+ struct fimg2d_bluscr bluscr;
+ struct fimg2d_clip clipping;
+};
+
+/**
+ * @op: blit operation mode
+ * @src: set when using src image
+ * @msk: set when using mask image
+ * @tmp: set when using 2-step blit at a single command
+ * @dst: dst must not be null
+ * * tmp image must be the same to dst except memory address
+ * @seq_no: user debugging info.
+ * for example, user can set sequence number or pid.
+ */
+struct fimg2d_blit {
+ enum blit_op op;
+ struct fimg2d_param param;
+ struct fimg2d_image *src;
+ struct fimg2d_image *msk;
+ struct fimg2d_image *tmp;
+ struct fimg2d_image *dst;
+ enum blit_sync sync;
+ unsigned int seq_no;
+};
+
+#ifdef __KERNEL__
+
+/**
+ * Enables definition to estimate performance.
+ * These debug codes includes printk, so perf
+ * data are unreliable under multi instance environment
+ */
+#undef PERF_PROFILE
+#define PERF_TIMEVAL
+
+enum perf_desc {
+ PERF_INNERCACHE,
+ PERF_OUTERCACHE,
+ PERF_BLIT,
+ PERF_END
+};
+#define MAX_PERF_DESCS PERF_END
+
+struct fimg2d_perf {
+ int valid;
+#ifdef PERF_TIMEVAL
+ struct timeval start;
+ struct timeval end;
+#else
+ unsigned long long start;
+ unsigned long long end;
+#endif
+};
+
+/**
+ * @pgd: base address of arm mmu pagetable
+ * @ncmd: request count in blit command queue
+ * @wait_q: conext wait queue head
+*/
+struct fimg2d_context {
+ struct mm_struct *mm;
+ atomic_t ncmd;
+ wait_queue_head_t wait_q;
+ struct fimg2d_perf perf[MAX_PERF_DESCS];
+};
+
+/**
+ * @op: blit operation mode
+ * @sync: sync/async blit mode (currently support sync mode only)
+ * @image: array of image object.
+ * [0] is for src image
+ * [1] is for mask image
+ * [2] is for temporary buffer
+ * set when using 2-step blit at a single command
+ * [3] is for dst, dst must not be null
+ * * tmp image must be the same to dst except memory address
+ * @seq_no: user debugging info.
+ * for example, user can set sequence number or pid.
+ * @dma_all: total dma size of src, msk, dst
+ * @dma: array of dma info for each src, msk, tmp and dst
+ * @ctx: context is created when user open fimg2d device.
+ * @node: list head of blit command queue
+ */
+struct fimg2d_bltcmd {
+ enum blit_op op;
+ enum blit_sync sync;
+ unsigned int seq_no;
+ size_t dma_all;
+ struct fimg2d_param param;
+ struct fimg2d_image image[MAX_IMAGES];
+ struct fimg2d_dma dma[MAX_IMAGES];
+ struct fimg2d_context *ctx;
+ struct list_head node;
+};
+
+/**
+ * @suspended: in suspend mode
+ * @clkon: power status for runtime pm
+ * @mem: resource platform device
+ * @regs: base address of hardware
+ * @dev: pointer to device struct
+ * @err: true if hardware is timed out while blitting
+ * @irq: irq number
+ * @nctx: context count
+ * @busy: 1 if hardware is running
+ * @bltlock: spinlock for blit
+ * @wait_q: blit wait queue head
+ * @cmd_q: blit command queue
+ * @workqueue: workqueue_struct for kfimg2dd
+*/
+struct fimg2d_control {
+ atomic_t suspended;
+ atomic_t clkon;
+ struct clk *clock;
+ struct device *dev;
+ struct device *bus_dev;
+ struct resource *mem;
+ void __iomem *regs;
+
+ bool err;
+ int irq;
+ atomic_t nctx;
+ atomic_t busy;
+ atomic_t active;
+ spinlock_t bltlock;
+ wait_queue_head_t wait_q;
+ struct list_head cmd_q;
+ struct workqueue_struct *work_q;
+
+ void (*blit)(struct fimg2d_control *info);
+ int (*configure)(struct fimg2d_control *info,
+ struct fimg2d_bltcmd *cmd);
+ void (*run)(struct fimg2d_control *info);
+ void (*stop)(struct fimg2d_control *info);
+ void (*dump)(struct fimg2d_control *info);
+ void (*finalize)(struct fimg2d_control *info);
+};
+
+int fimg2d_register_ops(struct fimg2d_control *info);
+
+#endif /* __KERNEL__ */
+
+#endif /* __FIMG2D_H__ */
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x.h b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x.h
new file mode 100644
index 0000000..9165d6f
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x.h
@@ -0,0 +1,225 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d4x.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D4X_H
+#define __FIMG2D4X_H __FILE__
+
+#include "fimg2d4x_regs.h"
+
+/**
+ * @IMG_MEMORY: read from external memory
+ * @IMG_FGCOLOR: read from foreground color
+ * @IMG_BGCOLOR: read from background color
+ */
+enum image_sel {
+ IMG_MEMORY,
+ IMG_FGCOLOR,
+ IMG_BGCOLOR,
+};
+
+/**
+ * @FORWARD_ADDRESSING: read data in forward direction
+ * @REVERSE_ADDRESSING: read data in reverse direction
+ */
+enum addressing {
+ FORWARD_ADDRESSING,
+ REVERSE_ADDRESSING,
+};
+
+/**
+ * The other addressing modes can cause data corruption,
+ * if src and dst are overlapped.
+ */
+enum dir_addressing {
+ UP_FORWARD,
+ DOWN_REVERSE,
+ LEFT_FORWARD,
+ RIGHT_REVERSE,
+ VALID_ADDRESSING_END,
+};
+
+/**
+ * DO NOT CHANGE THIS ORDER
+ */
+enum max_burst_len {
+ MAX_BURST_2 = 0,
+ MAX_BURST_4,
+ MAX_BURST_8, /* initial value */
+ MAX_BURST_16,
+};
+
+#define DEFAULT_MAX_BURST_LEN MAX_BURST_8
+
+/**
+ * mask operation type for 16-bpp, 32-bpp mask image
+ * @MSK_ALPHA: use mask alpha for src argb
+ * @MSK_ARGB: use mask argb for src argb
+ * @MSK_MIXED: use mask alpha for src alpha and mask rgb for src rgb
+ */
+enum mask_opr {
+ MSK_ALPHA, /* initial value */
+ MSK_ARGB,
+ MSK_MIXED,
+};
+
+#define DEFAULT_MSK_OPR MSK_ALPHA
+
+/**
+ * @ALPHA_PERPIXEL: perpixel alpha
+ * @ALPHA_PERPIXEL_SUM_GLOBAL: perpixel + global
+ * @ALPHA_PERPIXEL_MUL_GLOBAL: perpixel x global
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum alpha_opr {
+ ALPHA_PERPIXEL = 0, /* initial value */
+ ALPHA_PERPIXEL_SUM_GLOBAL,
+ ALPHA_PERPIXEL_MUL_GLOBAL,
+};
+
+#define DEFAULT_ALPHA_OPR ALPHA_PERPIXEL
+
+/**
+ * sampling policy at boundary for bilinear scaling
+ * @FOLLOW_REPEAT_MODE: sampling 1 or 2 pixels within bounds
+ * @IGNORE_REPEAT_MODE: sampling 4 pixels according to repeat mode
+ */
+enum boundary_sampling_policy {
+ FOLLOW_REPEAT_MODE,
+ IGNORE_REPEAT_MODE,
+};
+
+#define DEFAULT_BOUNDARY_SAMPLING FOLLOW_REPEAT_MODE
+
+/**
+ * @COEFF_ONE: 1
+ * @COEFF_ZERO: 0
+ * @COEFF_SA: src alpha
+ * @COEFF_SC: src color
+ * @COEFF_DA: dst alpha
+ * @COEFF_DC: dst color
+ * @COEFF_GA: global(constant) alpha
+ * @COEFF_GC: global(constant) color
+ * @COEFF_DISJ_S:
+ * @COEFF_DISJ_D:
+ * @COEFF_CONJ_S:
+ * @COEFF_CONJ_D:
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum fimg2d_coeff {
+ COEFF_ONE = 0,
+ COEFF_ZERO,
+ COEFF_SA,
+ COEFF_SC,
+ COEFF_DA,
+ COEFF_DC,
+ COEFF_GA,
+ COEFF_GC,
+ COEFF_DISJ_S,
+ COEFF_DISJ_D,
+ COEFF_CONJ_S,
+ COEFF_CONJ_D,
+};
+
+/**
+ * @PREMULT_ROUND_0: (A*B) >> 8
+ * @PREMULT_ROUND_1: (A+1)*B) >> 8
+ * @PREMULT_ROUND_2: (A+(A>>7))* B) >> 8
+ * @PREMULT_ROUND_3: TMP= A*8 + 0x80, (TMP + (TMP >> 8)) >> 8
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum premult_round {
+ PREMULT_ROUND_0 = 0,
+ PREMULT_ROUND_1,
+ PREMULT_ROUND_2,
+ PREMULT_ROUND_3, /* initial value */
+};
+
+#define DEFAULT_PREMULT_ROUND_MODE PREMULT_ROUND_3
+
+/**
+ * @BLEND_ROUND_0: (A+1)*B) >> 8
+ * @BLEND_ROUND_1: (A+(A>>7))* B) >> 8
+ * @BLEND_ROUND_2: TMP= A*8 + 0x80, (TMP + (TMP >> 8)) >> 8
+ * @BLEND_ROUND_3: TMP= (A*B + C*D + 0x80), (TMP + (TMP >> 8)) >> 8
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum blend_round {
+ BLEND_ROUND_0 = 0,
+ BLEND_ROUND_1,
+ BLEND_ROUND_2,
+ BLEND_ROUND_3, /* initial value */
+};
+
+#define DEFAULT_BLEND_ROUND_MODE BLEND_ROUND_3
+
+struct fimg2d_blend_coeff {
+ bool s_coeff_inv;
+ enum fimg2d_coeff s_coeff;
+ bool d_coeff_inv;
+ enum fimg2d_coeff d_coeff;
+};
+
+void fimg2d4x_reset(struct fimg2d_control *info);
+void fimg2d4x_enable_irq(struct fimg2d_control *info);
+void fimg2d4x_disable_irq(struct fimg2d_control *info);
+void fimg2d4x_clear_irq(struct fimg2d_control *info);
+int fimg2d4x_is_blit_done(struct fimg2d_control *info);
+int fimg2d4x_blit_done_status(struct fimg2d_control *info);
+void fimg2d4x_start_blit(struct fimg2d_control *info);
+void fimg2d4x_set_max_burst_length(struct fimg2d_control *info,
+ enum max_burst_len len);
+void fimg2d4x_set_src_type(struct fimg2d_control *info, enum image_sel type);
+void fimg2d4x_set_src_image(struct fimg2d_control *info,
+ struct fimg2d_image *s);
+void fimg2d4x_set_src_rect(struct fimg2d_control *info, struct fimg2d_rect *r);
+void fimg2d4x_set_dst_type(struct fimg2d_control *info, enum image_sel type);
+void fimg2d4x_set_dst_image(struct fimg2d_control *info,
+ struct fimg2d_image *d);
+void fimg2d4x_set_dst_rect(struct fimg2d_control *info, struct fimg2d_rect *r);
+void fimg2d4x_enable_msk(struct fimg2d_control *info);
+void fimg2d4x_set_msk_image(struct fimg2d_control *info,
+ struct fimg2d_image *m);
+void fimg2d4x_set_msk_rect(struct fimg2d_control *info, struct fimg2d_rect *r);
+void fimg2d4x_set_color_fill(struct fimg2d_control *info, unsigned long color);
+void fimg2d4x_set_premultiplied(struct fimg2d_control *info);
+void fimg2d4x_src_premultiply(struct fimg2d_control *info);
+void fimg2d4x_dst_premultiply(struct fimg2d_control *info);
+void fimg2d4x_dst_depremultiply(struct fimg2d_control *info);
+void fimg2d4x_enable_transparent(struct fimg2d_control *info);
+void fimg2d4x_set_bluescreen(struct fimg2d_control *info,
+ struct fimg2d_bluscr *bluscr);
+void fimg2d4x_enable_clipping(struct fimg2d_control *info,
+ struct fimg2d_clip *clp);
+void fimg2d4x_enable_dithering(struct fimg2d_control *info);
+void fimg2d4x_set_src_scaling(struct fimg2d_control *info,
+ struct fimg2d_scale *scl,
+ struct fimg2d_repeat *rep);
+void fimg2d4x_set_msk_scaling(struct fimg2d_control *info,
+ struct fimg2d_scale *scl,
+ struct fimg2d_repeat *rep);
+void fimg2d4x_set_src_repeat(struct fimg2d_control *info,
+ struct fimg2d_repeat *rep);
+void fimg2d4x_set_msk_repeat(struct fimg2d_control *info,
+ struct fimg2d_repeat *rep);
+void fimg2d4x_set_rotation(struct fimg2d_control *info, enum rotation rot);
+void fimg2d4x_set_fgcolor(struct fimg2d_control *info, unsigned long fg);
+void fimg2d4x_set_bgcolor(struct fimg2d_control *info, unsigned long bg);
+void fimg2d4x_enable_alpha(struct fimg2d_control *info, unsigned char g_alpha);
+void fimg2d4x_set_alpha_composite(struct fimg2d_control *info,
+ enum blit_op op, unsigned char g_alpha);
+void fimg2d4x_dump_regs(struct fimg2d_control *info);
+
+#endif /* __FIMG2D4X_H__ */
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_blt.c b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_blt.c
new file mode 100644
index 0000000..9a4f8ad
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_blt.c
@@ -0,0 +1,330 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d4x_blt.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <plat/s5p-sysmmu.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <plat/devs.h>
+#include <linux/pm_runtime.h>
+#endif
+#include "fimg2d.h"
+#include "fimg2d_clk.h"
+#include "fimg2d4x.h"
+#include "fimg2d_ctx.h"
+#include "fimg2d_cache.h"
+#include "fimg2d_helper.h"
+
+#define BLIT_TIMEOUT msecs_to_jiffies(500)
+
/**
 * fimg2d4x_blit_wait - sleep until the running blit completes
 * @info: driver control block
 * @cmd:  command being executed (dumped on timeout for diagnosis)
 *
 * Waits on info->wait_q until the irq path clears info->busy (see
 * fimg2d4x_stop) or BLIT_TIMEOUT expires.  On timeout the command is
 * dumped; if the FIFO status also says the blit never finished, the
 * device is marked faulty via info->err so the main loop stops
 * issuing further commands.
 */
static inline void fimg2d4x_blit_wait(struct fimg2d_control *info, struct fimg2d_bltcmd *cmd)
{
	if (!wait_event_timeout(info->wait_q, !atomic_read(&info->busy), BLIT_TIMEOUT)) {
		printk(KERN_ERR "[%s] blit wait timeout\n", __func__);
		fimg2d_dump_command(cmd);

		if (!fimg2d4x_blit_done_status(info))
			info->err = true; /* device error */
	}
}
+
/* Per-command preparation hook invoked just before starting the blit
 * (cache maintenance etc.); currently not implemented. */
static void fimg2d4x_pre_bitblt(struct fimg2d_control *info, struct fimg2d_bltcmd *cmd)
{
	/* TODO */
}
+
/**
 * fimg2d4x_bitblt - main blitter loop: drain and execute queued commands
 * @info: driver control block
 *
 * Holds the gate clock (and the power domain, under runtime PM) while
 * the queue drains.  Each command is configured, run, and waited on in
 * turn.  Destinations that are not physically addressed get the owning
 * process' page table loaded into the sysmmu around the blit.
 *
 * Locking: queue manipulation and the active/ncmd counters are
 * protected by info->bltlock; the hardware wait itself runs unlocked.
 */
void fimg2d4x_bitblt(struct fimg2d_control *info)
{
	struct fimg2d_context *ctx;
	struct fimg2d_bltcmd *cmd;
	unsigned long *pgd;
	int ret;

	fimg2d_debug("enter blitter\n");

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_get_sync(info->dev);
	fimg2d_debug("pm_runtime_get_sync\n");
#endif
	fimg2d_clk_on(info);

	while (1) {
		cmd = fimg2d_get_first_command(info);
		if (!cmd) {
			/* queue empty: mark blitter idle and leave the loop */
			spin_lock(&info->bltlock);
			atomic_set(&info->active, 0);
			spin_unlock(&info->bltlock);
			break;
		}

		ctx = cmd->ctx;
		if (info->err) {
			/* device previously marked faulty: retire without running */
			printk(KERN_ERR "[%s] device error\n", __func__);
			goto blitend;
		}

		atomic_set(&info->busy, 1);

		ret = info->configure(info, cmd);
		if (ret)
			goto blitend; /* nop fast-path (e.g. BLIT_OP_DST) */

		if (cmd->image[IDST].addr.type != ADDR_PHYS) {
			/* virtual addressing: translate through the owner's mm */
			pgd = (unsigned long *)ctx->mm->pgd;
			s5p_sysmmu_enable(info->dev, (unsigned long)virt_to_phys(pgd));
			fimg2d_debug("sysmmu enable: pgd %p ctx %p seq_no(%u)\n",
					pgd, ctx, cmd->seq_no);
		}

		fimg2d4x_pre_bitblt(info, cmd);

#ifdef PERF_PROFILE
		perf_start(cmd->ctx, PERF_BLIT);
#endif
		/* start blit */
		info->run(info);
		fimg2d4x_blit_wait(info, cmd);

#ifdef PERF_PROFILE
		perf_end(cmd->ctx, PERF_BLIT);
#endif
		if (cmd->image[IDST].addr.type != ADDR_PHYS) {
			s5p_sysmmu_disable(info->dev);
			fimg2d_debug("sysmmu disable\n");
		}
blitend:
		spin_lock(&info->bltlock);
		fimg2d_dequeue(&cmd->node);
		kfree(cmd);
		atomic_dec(&ctx->ncmd);

		/* wake up the context once all of its commands have retired */
		if (!atomic_read(&ctx->ncmd))
			wake_up(&ctx->wait_q);
		spin_unlock(&info->bltlock);
	}

	fimg2d_clk_off(info);
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put_sync(info->dev);
	fimg2d_debug("pm_runtime_put_sync\n");
#endif

	fimg2d_debug("exit blitter\n");
}
+
+static int fast_op(struct fimg2d_bltcmd *cmd)
+{
+ int sa, da, ga;
+ int fop = cmd->op;
+ struct fimg2d_image *src, *msk, *dst;
+ struct fimg2d_param *p = &cmd->param;
+
+ src = &cmd->image[ISRC];
+ msk = &cmd->image[IMSK];
+ dst = &cmd->image[IDST];
+
+ if (msk->addr.type)
+ return fop;
+
+ ga = p->g_alpha;
+ da = is_opaque(dst->fmt) ? 0xff : 0;
+
+ if (!src->addr.type)
+ sa = (p->solid_color >> 24) & 0xff;
+ else
+ sa = is_opaque(src->fmt) ? 0xff : 0;
+
+ switch (cmd->op) {
+ case BLIT_OP_SRC_OVER:
+ /* Sc + (1-Sa)*Dc = Sc */
+ if (sa == 0xff && ga == 0xff)
+ fop = BLIT_OP_SRC;
+ break;
+ case BLIT_OP_DST_OVER:
+ /* (1-Da)*Sc + Dc = Dc */
+ if (da == 0xff)
+ fop = BLIT_OP_DST; /* nop */
+ break;
+ case BLIT_OP_SRC_IN:
+ /* Da*Sc = Sc */
+ if (da == 0xff)
+ fop = BLIT_OP_SRC;
+ break;
+ case BLIT_OP_DST_IN:
+ /* Sa*Dc = Dc */
+ if (sa == 0xff && ga == 0xff)
+ fop = BLIT_OP_DST; /* nop */
+ break;
+ case BLIT_OP_SRC_OUT:
+ /* (1-Da)*Sc = 0 */
+ if (da == 0xff)
+ fop = BLIT_OP_CLR;
+ break;
+ case BLIT_OP_DST_OUT:
+ /* (1-Sa)*Dc = 0 */
+ if (sa == 0xff && ga == 0xff)
+ fop = BLIT_OP_CLR;
+ break;
+ case BLIT_OP_SRC_ATOP:
+ /* Da*Sc + (1-Sa)*Dc = Sc */
+ if (sa == 0xff && da == 0xff && ga == 0xff)
+ fop = BLIT_OP_SRC;
+ break;
+ case BLIT_OP_DST_ATOP:
+ /* (1-Da)*Sc + Sa*Dc = Dc */
+ if (sa == 0xff && da == 0xff && ga == 0xff)
+ fop = BLIT_OP_DST; /* nop */
+ break;
+ default:
+ break;
+ }
+
+ if (fop == BLIT_OP_SRC && !src->addr.type && ga == 0xff)
+ fop = BLIT_OP_SOLID_FILL;
+
+ return fop;
+}
+
/**
 * fimg2d4x_configure - program the hardware for a single blit command
 * @info: driver control block
 * @cmd:  command to translate into register writes
 *
 * Resets the block, selects src/dst input channels based on the
 * fast-path reduction of the blend op, then programs image layouts,
 * rects, repeat/scaling, mask, clipping, bluescreen, rotation and
 * dithering as requested.
 *
 * Returns 0 on success, -1 when the command reduced to a no-op
 * (BLIT_OP_DST) and nothing needs to run.
 */
static int fimg2d4x_configure(struct fimg2d_control *info,
		struct fimg2d_bltcmd *cmd)
{
	int op;
	enum image_sel srcsel, dstsel;
	struct fimg2d_param *p = &cmd->param;
	struct fimg2d_image *src, *msk, *dst;

	fimg2d_debug("ctx %p seq_no(%u)\n", cmd->ctx, cmd->seq_no);

	src = &cmd->image[ISRC];
	msk = &cmd->image[IMSK];
	dst = &cmd->image[IDST];

	/* TODO: batch blit */
	fimg2d4x_reset(info);

	/* src and dst select */
	srcsel = dstsel = IMG_MEMORY;

	op = fast_op(cmd);

	switch (op) {
	case BLIT_OP_SOLID_FILL:
		srcsel = dstsel = IMG_FGCOLOR;
		fimg2d4x_set_fgcolor(info, p->solid_color);
		break;
	case BLIT_OP_CLR:
		srcsel = dstsel = IMG_FGCOLOR;
		fimg2d4x_set_color_fill(info, 0);
		break;
	case BLIT_OP_DST:
		return -1; /* nop */
	default:
		/* memory-less source reads the constant foreground color */
		if (!src->addr.type) {
			srcsel = IMG_FGCOLOR;
			fimg2d4x_set_fgcolor(info, p->solid_color);
		}

		if (op == BLIT_OP_SRC)
			dstsel = IMG_FGCOLOR;

		fimg2d4x_enable_alpha(info, p->g_alpha);
		fimg2d4x_set_alpha_composite(info, op, p->g_alpha);
		if (p->premult == NON_PREMULTIPLIED)
			fimg2d4x_set_premultiplied(info);
		break;
	}

	fimg2d4x_set_src_type(info, srcsel);
	fimg2d4x_set_dst_type(info, dstsel);

	/* src */
	if (src->addr.type) {
		fimg2d4x_set_src_image(info, src);
		fimg2d4x_set_src_rect(info, &src->rect);
		fimg2d4x_set_src_repeat(info, &p->repeat);
		if (p->scaling.mode)
			fimg2d4x_set_src_scaling(info, &p->scaling, &p->repeat);
	}

	/* msk */
	if (msk->addr.type) {
		fimg2d4x_enable_msk(info);
		fimg2d4x_set_msk_image(info, msk);
		fimg2d4x_set_msk_rect(info, &msk->rect);
		fimg2d4x_set_msk_repeat(info, &p->repeat);
		if (p->scaling.mode)
			fimg2d4x_set_msk_scaling(info, &p->scaling, &p->repeat);
	}

	/* dst */
	if (dst->addr.type) {
		fimg2d4x_set_dst_image(info, dst);
		fimg2d4x_set_dst_rect(info, &dst->rect);
		if (p->clipping.enable)
			fimg2d4x_enable_clipping(info, &p->clipping);
	}

	/* bluescreen */
	if (p->bluscr.mode)
		fimg2d4x_set_bluescreen(info, &p->bluscr);

	/* rotation */
	if (p->rotate)
		fimg2d4x_set_rotation(info, p->rotate);

	/* dithering */
	if (p->dither)
		fimg2d4x_enable_dithering(info);

	return 0;
}
+
/* Kick off the configured blit: unmask and clear the completion irq,
 * then write the start bit. */
static void fimg2d4x_run(struct fimg2d_control *info)
{
	fimg2d_debug("start blit\n");
	fimg2d4x_enable_irq(info);
	fimg2d4x_clear_irq(info);
	fimg2d4x_start_blit(info);
}

/* Irq-path completion handler: if the hardware reports the blit done,
 * quiesce the irq, clear busy and wake the waiter in
 * fimg2d4x_blit_wait(). */
static void fimg2d4x_stop(struct fimg2d_control *info)
{
	if (fimg2d4x_is_blit_done(info)) {
		fimg2d_debug("blit done\n");
		fimg2d4x_disable_irq(info);
		fimg2d4x_clear_irq(info);
		atomic_set(&info->busy, 0);
		wake_up(&info->wait_q);
	}
}

/* Diagnostic register dump, delegated to the hw layer. */
static void fimg2d4x_dump(struct fimg2d_control *info)
{
	fimg2d4x_dump_regs(info);
}
+
+int fimg2d_register_ops(struct fimg2d_control *info)
+{
+ info->blit = fimg2d4x_bitblt;
+ info->configure = fimg2d4x_configure;
+ info->run = fimg2d4x_run;
+ info->dump = fimg2d4x_dump;
+ info->stop = fimg2d4x_stop;
+
+ return 0;
+}
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_hw.c b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_hw.c
new file mode 100644
index 0000000..8135ecd
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_hw.c
@@ -0,0 +1,839 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d4x_hw.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/io.h>
+#include <linux/sched.h>
+
+#include "fimg2d.h"
+#include "fimg2d4x.h"
+#include "fimg2d_clk.h"
+
+#define wr(d, a) writel((d), info->regs + (a))
+#define rd(a) readl(info->regs + (a))
+
+#undef SOFT_RESET_ENABLED
+#undef FIMG2D_RESET_WA
+
+static const int a8_rgbcolor = (int)0x0;
+static const int msk_oprmode = (int)MSK_ARGB;
+static const int premult_round_mode = (int)PREMULT_ROUND_1; /* (A+1)*B) >> 8 */
+static const int blend_round_mode = (int)BLEND_ROUND_0; /* (A+1)*B) >> 8 */
+
/**
 * fimg2d4x_reset - bring the block to a known state before configuration
 * @info: driver control block
 *
 * Uses a full soft reset when SOFT_RESET_ENABLED is defined (optionally
 * saving/restoring the clock around it as a hardware workaround),
 * otherwise just clears the SFRs.  Afterwards the WinCE blend option is
 * turned off and repeat defaults to reflect (mirror).
 */
void fimg2d4x_reset(struct fimg2d_control *info)
{
#ifdef SOFT_RESET_ENABLED
#ifdef FIMG2D_RESET_WA
	fimg2d_clk_save(info);
#endif
	wr(FIMG2D_SOFT_RESET, FIMG2D_SOFT_RESET_REG);
#ifdef FIMG2D_RESET_WA
	fimg2d_clk_restore(info);
#endif
#else
	wr(FIMG2D_SFR_CLEAR, FIMG2D_SOFT_RESET_REG);
#endif
	/* turn off wince option */
	wr(0x0, FIMG2D_BLEND_FUNCTION_REG);

	/* set default repeat mode to reflect(mirror) */
	wr(FIMG2D_SRC_REPEAT_REFLECT, FIMG2D_SRC_REPEAT_MODE_REG);
	wr(FIMG2D_MSK_REPEAT_REFLECT, FIMG2D_MSK_REPEAT_MODE_REG);
}
+
/* Unmask the blit-complete interrupt. */
void fimg2d4x_enable_irq(struct fimg2d_control *info)
{
	wr(FIMG2D_BLIT_INT_ENABLE, FIMG2D_INTEN_REG);
}

/* Mask all interrupts from the block. */
void fimg2d4x_disable_irq(struct fimg2d_control *info)
{
	wr(0, FIMG2D_INTEN_REG);
}

/* Acknowledge (write-to-clear) the pending blit-complete interrupt. */
void fimg2d4x_clear_irq(struct fimg2d_control *info)
{
	wr(FIMG2D_BLIT_INT_FLAG, FIMG2D_INTC_PEND_REG);
}

/* Non-zero when the blit-complete interrupt is pending. */
int fimg2d4x_is_blit_done(struct fimg2d_control *info)
{
	return rd(FIMG2D_INTC_PEND_REG) & FIMG2D_BLIT_INT_FLAG;
}

/* Poll the FIFO status for blit completion (used after a timeout).
 * The register is read twice on purpose; presumably a hardware
 * synchronization quirk -- TODO confirm against the datasheet. */
int fimg2d4x_blit_done_status(struct fimg2d_control *info)
{
	volatile unsigned long sts;

	/* read twice */
	sts = rd(FIMG2D_FIFO_STAT_REG);
	sts = rd(FIMG2D_FIFO_STAT_REG);

	return (int)(sts & FIMG2D_BLIT_FINISHED);
}

/* Write the start bit; the blit begins immediately. */
void fimg2d4x_start_blit(struct fimg2d_control *info)
{
	wr(FIMG2D_START_BITBLT, FIMG2D_BITBLT_START_REG);
}
+
+void fimg2d4x_set_max_burst_length(struct fimg2d_control *info, enum max_burst_len len)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_AXI_MODE_REG);
+
+ cfg &= ~FIMG2D_MAX_BURST_LEN_MASK;
+ cfg |= len << FIMG2D_MAX_BURST_LEN_SHIFT;
+}
+
+void fimg2d4x_set_src_type(struct fimg2d_control *info, enum image_sel type)
+{
+ unsigned long cfg;
+
+ if (type == IMG_MEMORY)
+ cfg = FIMG2D_IMAGE_TYPE_MEMORY;
+ else if (type == IMG_FGCOLOR)
+ cfg = FIMG2D_IMAGE_TYPE_FGCOLOR;
+ else
+ cfg = FIMG2D_IMAGE_TYPE_BGCOLOR;
+
+ wr(cfg, FIMG2D_SRC_SELECT_REG);
+}
+
/**
 * fimg2d4x_set_src_image - program source base address, stride and format
 * @info: driver control block
 * @s:    source image descriptor
 *
 * The channel-order field distinguishes three families: ARGB, 1-plane
 * YCbCr, and 2-plane YCbCr (which also needs the chroma plane base
 * address).  For A8 the replacement RGB value is taken from the
 * file-scope a8_rgbcolor constant.
 */
void fimg2d4x_set_src_image(struct fimg2d_control *info, struct fimg2d_image *s)
{
	unsigned long cfg;

	wr(FIMG2D_ADDR(s->addr.start), FIMG2D_SRC_BASE_ADDR_REG);
	wr(FIMG2D_STRIDE(s->stride), FIMG2D_SRC_STRIDE_REG);

	if (s->order < ARGB_ORDER_END) {	/* argb */
		cfg = s->order << FIMG2D_RGB_ORDER_SHIFT;
		if (s->fmt == CF_A8)
			wr(a8_rgbcolor, FIMG2D_SRC_A8_RGB_EXT_REG);
	} else if (s->order < P1_ORDER_END) {	/* YCbC1 1plane */
		cfg = (s->order - P1_CRY1CBY0) << FIMG2D_YCBCR_ORDER_SHIFT;
	} else {	/* YCbCr 2plane */
		cfg = (s->order - P2_CRCB) << FIMG2D_YCBCR_ORDER_SHIFT;
		cfg |= FIMG2D_YCBCR_2PLANE;

		wr(FIMG2D_ADDR(s->plane2.start),
				FIMG2D_SRC_PLANE2_BASE_ADDR_REG);
	}

	cfg |= s->fmt << FIMG2D_COLOR_FORMAT_SHIFT;

	wr(cfg, FIMG2D_SRC_COLOR_MODE_REG);
}

/* Program the source rectangle (left-top inclusive, right-bottom). */
void fimg2d4x_set_src_rect(struct fimg2d_control *info, struct fimg2d_rect *r)
{
	wr(FIMG2D_OFFSET(r->x1, r->y1), FIMG2D_SRC_LEFT_TOP_REG);
	wr(FIMG2D_OFFSET(r->x2, r->y2), FIMG2D_SRC_RIGHT_BOTTOM_REG);
}
+
+void fimg2d4x_set_dst_type(struct fimg2d_control *info, enum image_sel type)
+{
+ unsigned long cfg;
+
+ if (type == IMG_MEMORY)
+ cfg = FIMG2D_IMAGE_TYPE_MEMORY;
+ else if (type == IMG_FGCOLOR)
+ cfg = FIMG2D_IMAGE_TYPE_FGCOLOR;
+ else
+ cfg = FIMG2D_IMAGE_TYPE_BGCOLOR;
+
+ wr(cfg, FIMG2D_DST_SELECT_REG);
+}
+
/**
 * fimg2d4x_set_dst_image - program destination base address, stride and format
 * @info: driver control block
 * @d:    destination image descriptor (base address, stride, color
 *        format, channel order)
 *
 * Mirrors fimg2d4x_set_src_image(): ARGB, 1-plane YCbCr and 2-plane
 * YCbCr (chroma plane base address) families are handled.
 */
void fimg2d4x_set_dst_image(struct fimg2d_control *info, struct fimg2d_image *d)
{
	unsigned long cfg;

	wr(FIMG2D_ADDR(d->addr.start), FIMG2D_DST_BASE_ADDR_REG);
	wr(FIMG2D_STRIDE(d->stride), FIMG2D_DST_STRIDE_REG);

	if (d->order < ARGB_ORDER_END) {
		cfg = d->order << FIMG2D_RGB_ORDER_SHIFT;
		if (d->fmt == CF_A8)
			wr(a8_rgbcolor, FIMG2D_DST_A8_RGB_EXT_REG);
	} else if (d->order < P1_ORDER_END) {
		cfg = (d->order - P1_CRY1CBY0) << FIMG2D_YCBCR_ORDER_SHIFT;
	} else {
		cfg = (d->order - P2_CRCB) << FIMG2D_YCBCR_ORDER_SHIFT;
		cfg |= FIMG2D_YCBCR_2PLANE;

		wr(FIMG2D_ADDR(d->plane2.start),
				FIMG2D_DST_PLANE2_BASE_ADDR_REG);
	}

	cfg |= d->fmt << FIMG2D_COLOR_FORMAT_SHIFT;

	wr(cfg, FIMG2D_DST_COLOR_MODE_REG);
}

/* Program the destination rectangle (left-top inclusive, right-bottom). */
void fimg2d4x_set_dst_rect(struct fimg2d_control *info, struct fimg2d_rect *r)
{
	wr(FIMG2D_OFFSET(r->x1, r->y1), FIMG2D_DST_LEFT_TOP_REG);
	wr(FIMG2D_OFFSET(r->x2, r->y2), FIMG2D_DST_RIGHT_BOTTOM_REG);
}
+
/* Turn on normal masking in the bitblt command register (read-modify-write). */
void fimg2d4x_enable_msk(struct fimg2d_control *info)
{
	unsigned long cfg;

	cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
	cfg |= FIMG2D_ENABLE_NORMAL_MSK;

	wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
}

/**
 * fimg2d4x_set_msk_image - program mask base address, stride, order and format
 * @info: driver control block
 * @m:    mask image descriptor
 *
 * The mask operating type (alpha / ARGB / mixed) applies only to
 * 16- and 32-bit masks and is taken from the file-scope msk_oprmode
 * constant.
 */
void fimg2d4x_set_msk_image(struct fimg2d_control *info, struct fimg2d_image *m)
{
	unsigned long cfg;

	wr(FIMG2D_ADDR(m->addr.start), FIMG2D_MSK_BASE_ADDR_REG);
	wr(FIMG2D_STRIDE(m->stride), FIMG2D_MSK_STRIDE_REG);

	cfg = m->order << FIMG2D_MSK_ORDER_SHIFT;
	cfg |= (m->fmt - CF_MSK_1BIT) << FIMG2D_MSK_FORMAT_SHIFT;

	/* 16, 32bit mask only */
	if (m->fmt >= CF_MSK_16BIT_565) {
		if (msk_oprmode == MSK_ALPHA)
			cfg |= FIMG2D_MSK_TYPE_ALPHA;
		else if (msk_oprmode == MSK_ARGB)
			cfg |= FIMG2D_MSK_TYPE_ARGB;
		else
			cfg |= FIMG2D_MSK_TYPE_MIXED;
	}

	wr(cfg, FIMG2D_MSK_MODE_REG);
}

/* Program the mask rectangle (left-top inclusive, right-bottom). */
void fimg2d4x_set_msk_rect(struct fimg2d_control *info, struct fimg2d_rect *r)
{
	wr(FIMG2D_OFFSET(r->x1, r->y1), FIMG2D_MSK_LEFT_TOP_REG);
	wr(FIMG2D_OFFSET(r->x2, r->y2), FIMG2D_MSK_RIGHT_BOTTOM_REG);
}
+
/**
 * fimg2d4x_set_color_fill - configure a pure solid-color fill
 * @info:  driver control block
 * @color: fill value, in the destination's color format
 *
 * If solid color fill is enabled, every other blit command bit is
 * ignored by the hardware.  The color format of the solid color is
 * considered to be the same as the destination color format; channel
 * order is A-R-G-B or Y-Cb-Cr.  Overwrites (not RMWs) the bitblt
 * command register.
 */
void fimg2d4x_set_color_fill(struct fimg2d_control *info, unsigned long color)
{
	wr(FIMG2D_SOLID_FILL, FIMG2D_BITBLT_COMMAND_REG);

	/* sf color */
	wr(color, FIMG2D_SF_COLOR_REG);

	/* set 16 burst for performance */
	fimg2d4x_set_max_burst_length(info, MAX_BURST_16);
}
+
/**
 * Alpha premultiplication controls.  All four helpers read-modify-write
 * the bitblt command register:
 *  - fimg2d4x_set_premultiplied: alpha-multiply src, dst and pattern on
 *    read (pre-bitblt) and alpha-demultiply dst on write (post-bitblt)
 *  - fimg2d4x_src_premultiply: multiply only the source read
 *  - fimg2d4x_dst_premultiply: multiply only the destination read
 *  - fimg2d4x_dst_depremultiply: demultiply only the destination write
 */
void fimg2d4x_set_premultiplied(struct fimg2d_control *info)
{
	unsigned long cfg;

	cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
	cfg |= FIMG2D_PREMULT_ALL;

	wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
}

void fimg2d4x_src_premultiply(struct fimg2d_control *info)
{
	unsigned long cfg;

	cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
	cfg |= FIMG2D_SRC_PREMULT;

	wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
}

void fimg2d4x_dst_premultiply(struct fimg2d_control *info)
{
	unsigned long cfg;

	cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
	cfg |= FIMG2D_DST_RD_PREMULT;

	wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
}

void fimg2d4x_dst_depremultiply(struct fimg2d_control *info)
{
	unsigned long cfg;

	cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
	cfg |= FIMG2D_DST_WR_DEPREMULT;

	wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
}
+
/**
 * fimg2d4x_set_bluescreen - configure transparent / bluescreen keying
 * @info:   driver control block
 * @bluscr: mode plus key (bs) color and replacement (bg) color
 *
 * TRANSP drops pixels matching the bs color; BLUSCR replaces them with
 * the bg color.  Opaque mode is the hardware default, so nothing is
 * written in that case.  The color registers are only written for
 * non-zero values; zero is assumed to be "unset" here -- a literal key
 * color of 0 therefore cannot be programmed (NOTE(review): confirm this
 * is intended).
 */
void fimg2d4x_set_bluescreen(struct fimg2d_control *info,
		struct fimg2d_bluscr *bluscr)
{
	unsigned long cfg;

	cfg = rd(FIMG2D_BITBLT_COMMAND_REG);

	if (bluscr->mode == TRANSP)
		cfg |= FIMG2D_TRANSP_MODE;
	else if (bluscr->mode == BLUSCR)
		cfg |= FIMG2D_BLUSCR_MODE;
	else	/* opaque: initial value */
		return;

	wr(cfg, FIMG2D_BITBLT_COMMAND_REG);

	/* bs color */
	if (bluscr->bs_color)
		wr(bluscr->bs_color, FIMG2D_BS_COLOR_REG);

	/* bg color */
	if (bluscr->mode == BLUSCR && bluscr->bg_color)
		wr(bluscr->bg_color, FIMG2D_BG_COLOR_REG);
}
+
/**
 * fimg2d4x_enable_clipping - enable the destination clipping window
 * @info: driver control block
 * @clp:  clipping rectangle (destination coordinates)
 */
void fimg2d4x_enable_clipping(struct fimg2d_control *info,
		struct fimg2d_clip *clp)
{
	unsigned long cfg;

	cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
	cfg |= FIMG2D_ENABLE_CW;

	wr(cfg, FIMG2D_BITBLT_COMMAND_REG);

	wr(FIMG2D_OFFSET(clp->x1, clp->y1), FIMG2D_CW_LT_REG);
	wr(FIMG2D_OFFSET(clp->x2, clp->y2), FIMG2D_CW_RB_REG);
}

/* Enable output dithering (read-modify-write of the command register). */
void fimg2d4x_enable_dithering(struct fimg2d_control *info)
{
	unsigned long cfg;

	cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
	cfg |= FIMG2D_ENABLE_DITHER;

	wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
}
+
#define MAX_PRECISION		16
#define DEFAULT_SCALE_RATIO	0x10000

/**
 * scale_factor_to_fixed16 - convert the ratio n/d to 16.16 fixed point
 * @n: numerator
 * @d: denominator
 *
 * Returns DEFAULT_SCALE_RATIO (1.0) when @d is zero.  The fractional
 * part is produced by binary long division, one bit per iteration,
 * msb-first, up to MAX_PRECISION bits.
 */
static inline unsigned long scale_factor_to_fixed16(int n, int d)
{
	unsigned int result;	/* 32-bit accumulator */
	int bit;

	if (!d)
		return DEFAULT_SCALE_RATIO;

	/* integer part goes in the top 16 bits */
	result = (n / d) << 16;
	n %= d;

	/* long division: emit fractional bits until the remainder is exact */
	for (bit = MAX_PRECISION - 1; bit >= 0 && n; bit--) {
		n <<= 1;
		if (n / d)
			result |= 1u << bit;
		n %= d;
	}

	return result;
}
+
/**
 * fimg2d4x_set_src_scaling - program the source scaler
 * @info: driver control block
 * @scl:  source/destination sizes and scaling algorithm
 * @rep:  repeat mode (affects boundary handling for bilinear)
 *
 * The hardware takes the INVERSE scaling factor as 16.16 fixed point
 * (src is the numerator): scale-up src(1,1)->dst(2,2) programs 0.5,
 * scale-down src(2,2)->dst(1,1) programs 2.0.  A 1:1 ratio on both
 * axes leaves the registers at their reset defaults.
 */
void fimg2d4x_set_src_scaling(struct fimg2d_control *info,
		struct fimg2d_scale *scl,
		struct fimg2d_repeat *rep)
{
	unsigned long wcfg, hcfg;
	unsigned long mode;

	/*
	 * scaling ratio in pixels
	 * e.g scale-up: src(1,1)-->dst(2,2), src factor: 0.5 (0x000080000)
	 *     scale-down: src(2,2)-->dst(1,1), src factor: 2.0 (0x000200000)
	 */

	/* inversed scaling factor: src is numerator */
	wcfg = scale_factor_to_fixed16(scl->src_w, scl->dst_w);
	hcfg = scale_factor_to_fixed16(scl->src_h, scl->dst_h);

	if (wcfg == DEFAULT_SCALE_RATIO && hcfg == DEFAULT_SCALE_RATIO)
		return;

	wr(wcfg, FIMG2D_SRC_XSCALE_REG);
	wr(hcfg, FIMG2D_SRC_YSCALE_REG);

	/* scaling algorithm */
	if (scl->mode == SCALING_NEAREST)
		mode = FIMG2D_SCALE_MODE_NEAREST;
	else {
		/* 0x3: ignore repeat mode at boundary (undocumented value) */
		if (rep->mode == REPEAT_PAD || rep->mode == REPEAT_CLAMP)
			mode = 0x3;	/* hidden */
		else
			mode = FIMG2D_SCALE_MODE_BILINEAR;
	}

	wr(mode, FIMG2D_SRC_SCALE_CTRL_REG);
}

/**
 * fimg2d4x_set_msk_scaling - program the mask scaler
 * @info: driver control block
 * @scl:  source/destination sizes and scaling algorithm
 * @rep:  repeat mode (affects boundary handling for bilinear)
 *
 * Identical logic to fimg2d4x_set_src_scaling(), targeting the mask
 * scale registers.
 */
void fimg2d4x_set_msk_scaling(struct fimg2d_control *info,
		struct fimg2d_scale *scl,
		struct fimg2d_repeat *rep)
{
	unsigned long wcfg, hcfg;
	unsigned long mode;

	/*
	 * scaling ratio in pixels
	 * e.g scale-up: src(1,1)-->dst(2,2), msk factor: 0.5 (0x000080000)
	 *     scale-down: src(2,2)-->dst(1,1), msk factor: 2.0 (0x000200000)
	 */

	/* inversed scaling factor: src is numerator */
	wcfg = scale_factor_to_fixed16(scl->src_w, scl->dst_w);
	hcfg = scale_factor_to_fixed16(scl->src_h, scl->dst_h);

	if (wcfg == DEFAULT_SCALE_RATIO && hcfg == DEFAULT_SCALE_RATIO)
		return;

	wr(wcfg, FIMG2D_MSK_XSCALE_REG);
	wr(hcfg, FIMG2D_MSK_YSCALE_REG);

	/* scaling algorithm */
	if (scl->mode == SCALING_NEAREST)
		mode = FIMG2D_SCALE_MODE_NEAREST;
	else {
		/* 0x3: ignore repeat mode at boundary (undocumented value) */
		if (rep->mode == REPEAT_PAD || rep->mode == REPEAT_CLAMP)
			mode = 0x3;	/* hidden */
		else
			mode = FIMG2D_SCALE_MODE_BILINEAR;
	}

	wr(mode, FIMG2D_MSK_SCALE_CTRL_REG);
}

/* Program the source repeat mode; the pad color is only meaningful for
 * REPEAT_PAD.  NO_REPEAT leaves the reset default in place. */
void fimg2d4x_set_src_repeat(struct fimg2d_control *info,
		struct fimg2d_repeat *rep)
{
	unsigned long cfg;

	if (rep->mode == NO_REPEAT)
		return;

	cfg = (rep->mode - REPEAT_NORMAL) << FIMG2D_SRC_REPEAT_SHIFT;

	wr(cfg, FIMG2D_SRC_REPEAT_MODE_REG);

	/* src pad color */
	if (rep->mode == REPEAT_PAD)
		wr(rep->pad_color, FIMG2D_SRC_PAD_VALUE_REG);
}

/* Program the mask repeat mode; mirrors fimg2d4x_set_src_repeat(). */
void fimg2d4x_set_msk_repeat(struct fimg2d_control *info,
		struct fimg2d_repeat *rep)
{
	unsigned long cfg;

	if (rep->mode == NO_REPEAT)
		return;

	cfg = (rep->mode - REPEAT_NORMAL) << FIMG2D_MSK_REPEAT_SHIFT;

	wr(cfg, FIMG2D_MSK_REPEAT_MODE_REG);

	/* mask pad color */
	if (rep->mode == REPEAT_PAD)
		wr(rep->pad_color, FIMG2D_MSK_PAD_VALUE_REG);
}
+
/**
 * fimg2d4x_set_rotation - express rotation/flip via addressing + 90-rotate
 * @info: driver control block
 * @rot:  requested transform
 *
 * FIMG2D 4.1 only rotates counter-clockwise by 90 degrees, so:
 *   ROT_90  = rotate -90 and reverse both axes (-270 total)
 *   ROT_180 = reverse both axes
 *   ROT_270 = rotate -90
 *   X/YFLIP = reverse one axis
 * Register updates are read-modify-write and only performed when a
 * change from the reset default is needed.
 */
void fimg2d4x_set_rotation(struct fimg2d_control *info, enum rotation rot)
{
	int rev_rot90;	/* counter clockwise, 4.1 specific */
	unsigned long cfg;
	enum addressing dirx, diry;

	rev_rot90 = 0;
	dirx = diry = FORWARD_ADDRESSING;

	switch (rot) {
	case ROT_90:	/* -270 degree */
		rev_rot90 = 1;	/* fall through: also reverses both axes */
	case ROT_180:
		dirx = REVERSE_ADDRESSING;
		diry = REVERSE_ADDRESSING;
		break;
	case ROT_270:	/* -90 degree */
		rev_rot90 = 1;
		break;
	case XFLIP:
		diry = REVERSE_ADDRESSING;
		break;
	case YFLIP:
		dirx = REVERSE_ADDRESSING;
		break;
	case ORIGIN:
	default:
		break;
	}

	/* destination direction */
	if (dirx == REVERSE_ADDRESSING || diry == REVERSE_ADDRESSING) {
		cfg = rd(FIMG2D_DST_PAT_DIRECT_REG);

		if (dirx == REVERSE_ADDRESSING)
			cfg |= FIMG2D_DST_X_DIR_NEGATIVE;

		if (diry == REVERSE_ADDRESSING)
			cfg |= FIMG2D_DST_Y_DIR_NEGATIVE;

		wr(cfg, FIMG2D_DST_PAT_DIRECT_REG);
	}

	/* rotation -90 */
	if (rev_rot90) {
		cfg = rd(FIMG2D_ROTATE_REG);
		cfg |= FIMG2D_SRC_ROTATE_90;
		cfg |= FIMG2D_MSK_ROTATE_90;

		wr(cfg, FIMG2D_ROTATE_REG);
	}
}
+
/* Program the constant foreground color (used when a channel selects
 * IMG_FGCOLOR). */
void fimg2d4x_set_fgcolor(struct fimg2d_control *info, unsigned long fg)
{
	wr(fg, FIMG2D_FG_COLOR_REG);
}

/* Program the constant background color (used when a channel selects
 * IMG_BGCOLOR). */
void fimg2d4x_set_bgcolor(struct fimg2d_control *info, unsigned long bg)
{
	wr(bg, FIMG2D_BG_COLOR_REG);
}
+
+void fimg2d4x_enable_alpha(struct fimg2d_control *info, unsigned char g_alpha)
+{
+ unsigned long cfg;
+
+ /* enable alpha */
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+ cfg |= FIMG2D_ALPHA_BLEND_MODE;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+
+ /*
+ * global(constant) alpha
+ * ex. if global alpha is 0x80, must set 0x80808080
+ */
+ cfg = g_alpha;
+ cfg |= g_alpha << 8;
+ cfg |= g_alpha << 16;
+ cfg |= g_alpha << 24;
+ wr(cfg, FIMG2D_ALPHA_REG);
+}
+
+/**
+ * Four channels of the image are computed with:
+ * R = [ coeff(S)*Sc + coeff(D)*Dc ]
+ * where
+ * Rc is result color or alpha
+ * Sc is source color or alpha
+ * Dc is destination color or alpha
+ *
+ * Caution: supposed that Sc and Dc are perpixel-alpha-premultiplied value
+ *
+ * MODE: Formula
+ * ----------------------------------------------------------------------------
+ * FILL:
+ * CLEAR: R = 0
+ * SRC: R = Sc
+ * DST: R = Dc
+ * SRC_OVER: R = Sc + (1-Sa)*Dc
+ * DST_OVER: R = (1-Da)*Sc + Dc
+ * SRC_IN: R = Da*Sc
+ * DST_IN: R = Sa*Dc
+ * SRC_OUT: R = (1-Da)*Sc
+ * DST_OUT: R = (1-Sa)*Dc
+ * SRC_ATOP: R = Da*Sc + (1-Sa)*Dc
+ * DST_ATOP: R = (1-Da)*Sc + Sa*Dc
+ * XOR: R = (1-Da)*Sc + (1-Sa)*Dc
+ * ADD: R = Sc + Dc
+ * MULTIPLY: R = Sc*Dc
+ * SCREEN: R = Sc + (1-Sc)*Dc
+ * DARKEN: R = (Da*Sc<Sa*Dc)? Sc+(1-Sa)*Dc : (1-Da)*Sc+Dc
+ * LIGHTEN: R = (Da*Sc>Sa*Dc)? Sc+(1-Sa)*Dc : (1-Da)*Sc+Dc
+ * DISJ_SRC_OVER: R = Sc + (min(1,(1-Sa)/Da))*Dc
+ * DISJ_DST_OVER: R = (min(1,(1-Da)/Sa))*Sc + Dc
+ * DISJ_SRC_IN: R = (max(1-(1-Da)/Sa,0))*Sc
+ * DISJ_DST_IN: R = (max(1-(1-Sa)/Da,0))*Dc
+ * DISJ_SRC_OUT: R = (min(1,(1-Da)/Sa))*Sc
+ * DISJ_DST_OUT: R = (min(1,(1-Sa)/Da))*Dc
+ * DISJ_SRC_ATOP: R = (max(1-(1-Da)/Sa,0))*Sc + (min(1,(1-Sa)/Da))*Dc
+ * DISJ_DST_ATOP: R = (min(1,(1-Da)/Sa))*Sc + (max(1-(1-Sa)/Da,0))*Dc
+ * DISJ_XOR: R = (min(1,(1-Da)/Sa))*Sc + (min(1,(1-Sa)/Da))*Dc
+ * CONJ_SRC_OVER: R = Sc + (max(1-Sa/Da,0))*Dc
+ * CONJ_DST_OVER: R = (max(1-Da/Sa,0))*Sc + Dc
+ * CONJ_SRC_IN: R = (min(1,Da/Sa))*Sc
+ * CONJ_DST_IN: R = (min(1,Sa/Da))*Dc
+ * CONJ_SRC_OUT: R = (max(1-Da/Sa,0)*Sc
+ * CONJ_DST_OUT: R = (max(1-Sa/Da,0))*Dc
+ * CONJ_SRC_ATOP: R = (min(1,Da/Sa))*Sc + (max(1-Sa/Da,0))*Dc
+ * CONJ_DST_ATOP: R = (max(1-Da/Sa,0))*Sc + (min(1,Sa/Da))*Dc
+ * CONJ_XOR: R = (max(1-Da/Sa,0))*Sc + (max(1-Sa/Da,0))*Dc
+ */
/* Per-op blend coefficients: { src_inv, src_coeff, dst_inv, dst_coeff }.
 * Indexed by enum blit_op; see the formula table above.  Rows marked
 * NOTE(review) do not match the documented formula -- possibly hardware
 * workarounds, but verify against the G2D datasheet before relying on
 * them. */
static struct fimg2d_blend_coeff const coeff_table[MAX_FIMG2D_BLIT_OP] = {
	{ 0, 0, 0, 0 },		/* FILL */
	{ 0, COEFF_ZERO,	0, COEFF_ZERO },	/* CLEAR */
	{ 0, COEFF_ONE,		0, COEFF_ZERO },	/* SRC */
	{ 0, COEFF_ZERO,	0, COEFF_ONE },		/* DST */
	{ 0, COEFF_ONE,		1, COEFF_SA },		/* SRC_OVER */
	{ 1, COEFF_DA,		0, COEFF_ONE },		/* DST_OVER */
	{ 0, COEFF_DA,		0, COEFF_ZERO },	/* SRC_IN */
	{ 0, COEFF_ZERO,	0, COEFF_SA },		/* DST_IN */
	{ 1, COEFF_DA,		0, COEFF_ZERO },	/* SRC_OUT */
	{ 0, COEFF_ZERO,	1, COEFF_SA },		/* DST_OUT */
	{ 0, COEFF_DA,		1, COEFF_SA },		/* SRC_ATOP */
	{ 1, COEFF_DA,		0, COEFF_SA },		/* DST_ATOP */
	{ 1, COEFF_DA,		1, COEFF_SA },		/* XOR */
	{ 0, COEFF_ONE,		0, COEFF_ONE },		/* ADD */
	{ 0, COEFF_DC,		0, COEFF_ZERO },	/* MULTIPLY */
	{ 0, COEFF_ONE,		1, COEFF_SC },		/* SCREEN */
	{ 0, 0, 0, 0 },		/* DARKEN */
	{ 0, 0, 0, 0 },		/* LIGHTEN */
	{ 0, COEFF_ONE,		0, COEFF_DISJ_S },	/* DISJ_SRC_OVER */
	{ 0, COEFF_DISJ_D,	0, COEFF_ONE },		/* DISJ_DST_OVER */
	{ 1, COEFF_DISJ_D,	0, COEFF_ZERO },	/* DISJ_SRC_IN */
	{ 0, COEFF_ZERO,	1, COEFF_DISJ_S },	/* DISJ_DST_IN */
	{ 0, COEFF_DISJ_D,	0, COEFF_ONE },		/* DISJ_SRC_OUT -- NOTE(review): formula expects dst coeff ZERO, not ONE */
	{ 0, COEFF_ZERO,	0, COEFF_DISJ_S },	/* DISJ_DST_OUT */
	{ 1, COEFF_DISJ_D,	0, COEFF_DISJ_S },	/* DISJ_SRC_ATOP */
	{ 0, COEFF_DISJ_D,	1, COEFF_DISJ_S },	/* DISJ_DST_ATOP */
	{ 0, COEFF_DISJ_D,	0, COEFF_DISJ_S },	/* DISJ_XOR */
	{ 0, COEFF_ONE,		1, COEFF_DISJ_S },	/* CONJ_SRC_OVER -- NOTE(review): formula expects CONJ_S */
	{ 1, COEFF_DISJ_D,	0, COEFF_ONE },		/* CONJ_DST_OVER -- NOTE(review): formula expects CONJ_D */
	{ 0, COEFF_CONJ_D,	0, COEFF_ONE },		/* CONJ_SRC_IN -- NOTE(review): formula expects dst coeff ZERO */
	{ 0, COEFF_ZERO,	0, COEFF_CONJ_S },	/* CONJ_DST_IN */
	{ 1, COEFF_CONJ_D,	0, COEFF_ZERO },	/* CONJ_SRC_OUT */
	{ 0, COEFF_ZERO,	1, COEFF_CONJ_S },	/* CONJ_DST_OUT */
	{ 0, COEFF_CONJ_D,	1, COEFF_CONJ_S },	/* CONJ_SRC_ATOP */
	{ 1, COEFF_CONJ_D,	0, COEFF_CONJ_D },	/* CONJ_DST_ATOP -- NOTE(review): formula expects dst coeff CONJ_S */
	{ 1, COEFF_CONJ_D,	1, COEFF_CONJ_S },	/* CONJ_XOR */
	{ 0, 0, 0, 0 },		/* USER */
	{ 1, COEFF_GA,		1, COEFF_ZERO },	/* USER_SRC_GA */
};
+
+/*
+ * coefficient table with global (constant) alpha
+ * replace COEFF_ONE with COEFF_GA
+ *
+ * MODE: Formula with Global Alpha (Ga is multiplied to both Sc and Sa)
+ * ----------------------------------------------------------------------------
+ * FILL:
+ * CLEAR: R = 0
+ * SRC: R = Ga*Sc
+ * DST: R = Dc
+ * SRC_OVER: R = Ga*Sc + (1-Sa*Ga)*Dc
+ * DST_OVER: R = (1-Da)*Ga*Sc + Dc --> (W/A) 1st:Ga*Sc, 2nd:DST_OVER
+ * SRC_IN: R = Da*Ga*Sc
+ * DST_IN: R = Sa*Ga*Dc
+ * SRC_OUT: R = (1-Da)*Ga*Sc --> (W/A) 1st: Ga*Sc, 2nd:SRC_OUT
+ * DST_OUT: R = (1-Sa*Ga)*Dc
+ * SRC_ATOP: R = Da*Ga*Sc + (1-Sa*Ga)*Dc
+ * DST_ATOP: R = (1-Da)*Ga*Sc + Sa*Ga*Dc --> (W/A) 1st: Ga*Sc, 2nd:DST_ATOP
+ * XOR: R = (1-Da)*Ga*Sc + (1-Sa*Ga)*Dc --> (W/A) 1st: Ga*Sc, 2nd:XOR
+ * ADD: R = Ga*Sc + Dc
+ * MULTIPLY: R = Ga*Sc*Dc --> (W/A) 1st: Ga*Sc, 2nd: MULTIPLY
+ * SCREEN: R = Ga*Sc + (1-Ga*Sc)*Dc --> (W/A) 1st: Ga*Sc, 2nd: SCREEN
+ * DARKEN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * LIGHTEN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_SRC_OVER: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_DST_OVER: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_SRC_IN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_DST_IN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_SRC_OUT: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_DST_OUT: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_SRC_ATOP: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_DST_ATOP: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_XOR: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_SRC_OVER: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_DST_OVER: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_SRC_IN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_DST_IN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_SRC_OUT: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_DST_OUT: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_SRC_ATOP: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_DST_ATOP: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_XOR: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ */
/* Global-alpha variant of coeff_table: COEFF_ONE on the source side is
 * replaced with COEFF_GA; ops marked "use W/A" rely on a two-pass
 * workaround (1st pass Ga*Sc, 2nd pass the plain op).  Rows marked
 * NOTE(review) inherit the same formula mismatches as coeff_table. */
static struct fimg2d_blend_coeff const ga_coeff_table[MAX_FIMG2D_BLIT_OP] = {
	{ 0, 0, 0, 0 },		/* FILL */
	{ 0, COEFF_ZERO,	0, COEFF_ZERO },	/* CLEAR */
	{ 0, COEFF_GA,		0, COEFF_ZERO },	/* SRC */
	{ 0, COEFF_ZERO,	0, COEFF_ONE },		/* DST */
	{ 0, COEFF_GA,		1, COEFF_SA },		/* SRC_OVER */
	{ 1, COEFF_DA,		0, COEFF_ONE },		/* DST_OVER (use W/A) */
	{ 0, COEFF_DA,		0, COEFF_ZERO },	/* SRC_IN */
	{ 0, COEFF_ZERO,	0, COEFF_SA },		/* DST_IN */
	{ 1, COEFF_DA,		0, COEFF_ZERO },	/* SRC_OUT (use W/A) */
	{ 0, COEFF_ZERO,	1, COEFF_SA },		/* DST_OUT */
	{ 0, COEFF_DA,		1, COEFF_SA },		/* SRC_ATOP */
	{ 1, COEFF_DA,		0, COEFF_SA },		/* DST_ATOP (use W/A) */
	{ 1, COEFF_DA,		1, COEFF_SA },		/* XOR (use W/A) */
	{ 0, COEFF_GA,		0, COEFF_ONE },		/* ADD */
	{ 0, COEFF_DC,		0, COEFF_ZERO },	/* MULTIPLY (use W/A) */
	{ 0, COEFF_ONE,		1, COEFF_SC },		/* SCREEN (use W/A) */
	{ 0, 0, 0, 0 },		/* DARKEN (use W/A) */
	{ 0, 0, 0, 0 },		/* LIGHTEN (use W/A) */
	{ 0, COEFF_ONE,		0, COEFF_DISJ_S },	/* DISJ_SRC_OVER (use W/A) */
	{ 0, COEFF_DISJ_D,	0, COEFF_ONE },		/* DISJ_DST_OVER (use W/A) */
	{ 1, COEFF_DISJ_D,	0, COEFF_ZERO },	/* DISJ_SRC_IN (use W/A) */
	{ 0, COEFF_ZERO,	1, COEFF_DISJ_S },	/* DISJ_DST_IN (use W/A) */
	{ 0, COEFF_DISJ_D,	0, COEFF_ONE },		/* DISJ_SRC_OUT (use W/A) -- NOTE(review): formula expects dst coeff ZERO */
	{ 0, COEFF_ZERO,	0, COEFF_DISJ_S },	/* DISJ_DST_OUT (use W/A) */
	{ 1, COEFF_DISJ_D,	0, COEFF_DISJ_S },	/* DISJ_SRC_ATOP (use W/A) */
	{ 0, COEFF_DISJ_D,	1, COEFF_DISJ_S },	/* DISJ_DST_ATOP (use W/A) */
	{ 0, COEFF_DISJ_D,	0, COEFF_DISJ_S },	/* DISJ_XOR (use W/A) */
	{ 0, COEFF_ONE,		1, COEFF_DISJ_S },	/* CONJ_SRC_OVER (use W/A) -- NOTE(review): formula expects CONJ_S */
	{ 1, COEFF_DISJ_D,	0, COEFF_ONE },		/* CONJ_DST_OVER (use W/A) -- NOTE(review): formula expects CONJ_D */
	{ 0, COEFF_CONJ_D,	0, COEFF_ONE },		/* CONJ_SRC_IN (use W/A) -- NOTE(review): formula expects dst coeff ZERO */
	{ 0, COEFF_ZERO,	0, COEFF_CONJ_S },	/* CONJ_DST_IN (use W/A) */
	{ 1, COEFF_CONJ_D,	0, COEFF_ZERO },	/* CONJ_SRC_OUT (use W/A) */
	{ 0, COEFF_ZERO,	1, COEFF_CONJ_S },	/* CONJ_DST_OUT (use W/A) */
	{ 0, COEFF_CONJ_D,	1, COEFF_CONJ_S },	/* CONJ_SRC_ATOP (use W/A) */
	{ 1, COEFF_CONJ_D,	0, COEFF_CONJ_D },	/* CONJ_DST_ATOP (use W/A) -- NOTE(review): formula expects dst coeff CONJ_S */
	{ 1, COEFF_CONJ_D,	1, COEFF_CONJ_S },	/* CONJ_XOR (use W/A) */
	{ 0, 0, 0, 0 },		/* USER */
	{ 1, COEFF_GA,		1, COEFF_ZERO },	/* USER_SRC_GA */
};
+
/**
 * fimg2d4x_set_alpha_composite - program the blend function and round modes
 * @info:    driver control block
 * @op:      (fast-path reduced) blit operation
 * @g_alpha: global alpha; < 0xff selects the global-alpha table and
 *           per-pixel-times-global alpha mode
 *
 * Solid fill / clear need no blend function.  DARKEN, LIGHTEN and the
 * user-coefficient op are handled by dedicated mode bits; all other
 * ops look up src/dst coefficients in coeff_table/ga_coeff_table.
 * Finally the premult/blend rounding modes are refreshed from the
 * file-scope constants (depremult rounding is not used).
 */
void fimg2d4x_set_alpha_composite(struct fimg2d_control *info,
		enum blit_op op, unsigned char g_alpha)
{
	int alphamode;
	unsigned long cfg = 0;
	struct fimg2d_blend_coeff const *tbl;

	switch (op) {
	case BLIT_OP_SOLID_FILL:
	case BLIT_OP_CLR:
		/* nop */
		return;
	case BLIT_OP_DARKEN:
		cfg |= FIMG2D_DARKEN;
		break;
	case BLIT_OP_LIGHTEN:
		cfg |= FIMG2D_LIGHTEN;
		break;
	case BLIT_OP_USER_COEFF:
		/* TODO */
		return;
	default:
		if (g_alpha < 0xff) {	/* with global alpha */
			tbl = &ga_coeff_table[op];
			alphamode = ALPHA_PERPIXEL_MUL_GLOBAL;
		} else {
			tbl = &coeff_table[op];
			alphamode = ALPHA_PERPIXEL;
		}

		/* src coefficient */
		cfg |= tbl->s_coeff << FIMG2D_SRC_COEFF_SHIFT;

		cfg |= alphamode << FIMG2D_SRC_COEFF_SA_SHIFT;
		cfg |= alphamode << FIMG2D_SRC_COEFF_DA_SHIFT;

		if (tbl->s_coeff_inv)
			cfg |= FIMG2D_INV_SRC_COEFF;

		/* dst coefficient */
		cfg |= tbl->d_coeff << FIMG2D_DST_COEFF_SHIFT;

		cfg |= alphamode << FIMG2D_DST_COEFF_DA_SHIFT;
		cfg |= alphamode << FIMG2D_DST_COEFF_SA_SHIFT;

		if (tbl->d_coeff_inv)
			cfg |= FIMG2D_INV_DST_COEFF;

		break;
	}

	wr(cfg, FIMG2D_BLEND_FUNCTION_REG);

	/* round mode: depremult round mode is not used */
	cfg = rd(FIMG2D_ROUND_MODE_REG);

	/* premult */
	cfg &= ~FIMG2D_PREMULT_ROUND_MASK;
	cfg |= premult_round_mode << FIMG2D_PREMULT_ROUND_SHIFT;

	/* blend */
	cfg &= ~FIMG2D_BLEND_ROUND_MASK;
	cfg |= blend_round_mode << FIMG2D_BLEND_ROUND_SHIFT;

	wr(cfg, FIMG2D_ROUND_MODE_REG);
}
+
/**
 * fimg2d4x_dump_regs - print the interesting SFR ranges for debugging
 * @info: driver control block
 *
 * Walks a {start, end} offset table (terminated by an end of 0) and
 * prints four registers per line.  Ranges are end-exclusive at 16-byte
 * granularity.
 */
void fimg2d4x_dump_regs(struct fimg2d_control *info)
{
	int i, offset;
	unsigned long table[][2] = {
		/* start, end */
		{0x0000, 0x0030},	/* general */
		{0x0080, 0x00a0},	/* host dma */
		{0x0100, 0x0110},	/* commands */
		{0x0200, 0x0210},	/* rotation & direction */
		{0x0300, 0x0340},	/* source */
		{0x0400, 0x0420},	/* dest */
		{0x0500, 0x0550},	/* pattern & mask */
		{0x0600, 0x0710},	/* clip, rop, alpha and color */
		{0x0, 0x0}		/* sentinel */
	};

	for (i = 0; table[i][1] != 0x0; i++) {
		offset = table[i][0];
		do {
			printk(KERN_INFO "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n", offset,
				rd(offset),
				rd(offset+0x4),
				rd(offset+0x8),
				rd(offset+0xc));
			offset += 0x10;
		} while (offset < table[i][1]);
	}
}
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_regs.h b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_regs.h
new file mode 100644
index 0000000..91c7ac8
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d4x_regs.h
@@ -0,0 +1,460 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d4x_regs.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register Definitions for Samsung Graphics 2D Hardware
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D4X_REGS_H
+/* Fix broken include guard: the original defined __FIMG2D4x_REGS_H
+ * (lowercase 'x'), which never matches the #ifndef check above, so the
+ * guard was inert and the header was not protected against double
+ * inclusion. */
+#define __FIMG2D4X_REGS_H __FILE__
+
+/* Macros */
+#define FIMG2D_ADDR(v) ((v) << 0)
+#define FIMG2D_STRIDE(v) (((v) & (0xffff)) << 0)
+#define FIMG2D_OFFSET(x, y) ((((y) & 0x1fff) << 16) | (((x) & 0x1fff) << 0))
+#define FIMG2D_SIZE(w, h) ((((h) & 0x1fff) << 16) | (((w) & 0x1fff) << 0))
+#define FIMG2D_COLOR(v) ((v) << 0)
+
+/* Registers */
+#define FIMG2D_SOFT_RESET_REG (0x000)
+#define FIMG2D_INTEN_REG (0x004)
+#define FIMG2D_INTC_PEND_REG (0x00c)
+#define FIMG2D_FIFO_STAT_REG (0x010)
+#define FIMG2D_AXI_MODE_REG (0x01c)
+#define FIMG2D_DMA_SFR_BASE_ADDR_REG (0x080)
+#define FIMG2D_DMA_COMMAND_REG (0x084)
+#define FIMG2D_DMA_EXE_LIST_NUM_REG (0x088)
+#define FIMG2D_DMA_STATUS_REG (0x08c)
+#define FIMG2D_DMA_HOLD_CMD_REG (0x090)
+#define FIMG2D_BITBLT_START_REG (0x100)
+#define FIMG2D_BITBLT_COMMAND_REG (0x104)
+#define FIMG2D_BLEND_FUNCTION_REG (0x108)
+#define FIMG2D_ROUND_MODE_REG (0x10c)
+#define FIMG2D_ROTATE_REG (0x200)
+#define FIMG2D_SRC_MSK_DIRECT_REG (0x204)
+#define FIMG2D_DST_PAT_DIRECT_REG (0x208)
+#define FIMG2D_SRC_SELECT_REG (0x300)
+#define FIMG2D_SRC_BASE_ADDR_REG (0x304)
+#define FIMG2D_SRC_STRIDE_REG (0x308)
+#define FIMG2D_SRC_COLOR_MODE_REG (0x30c)
+#define FIMG2D_SRC_LEFT_TOP_REG (0x310)
+#define FIMG2D_SRC_RIGHT_BOTTOM_REG (0x314)
+#define FIMG2D_SRC_PLANE2_BASE_ADDR_REG (0x318)
+#define FIMG2D_SRC_REPEAT_MODE_REG (0x31c)
+#define FIMG2D_SRC_PAD_VALUE_REG (0x320)
+#define FIMG2D_SRC_A8_RGB_EXT_REG (0x324)
+#define FIMG2D_SRC_SCALE_CTRL_REG (0x328)
+#define FIMG2D_SRC_XSCALE_REG (0x32c)
+#define FIMG2D_SRC_YSCALE_REG (0x330)
+#define FIMG2D_DST_SELECT_REG (0x400)
+#define FIMG2D_DST_BASE_ADDR_REG (0x404)
+#define FIMG2D_DST_STRIDE_REG (0x408)
+#define FIMG2D_DST_COLOR_MODE_REG (0x40c)
+#define FIMG2D_DST_LEFT_TOP_REG (0x410)
+#define FIMG2D_DST_RIGHT_BOTTOM_REG (0x414)
+#define FIMG2D_DST_PLANE2_BASE_ADDR_REG (0x418)
+#define FIMG2D_DST_A8_RGB_EXT_REG (0x41c)
+#define FIMG2D_PAT_BASE_ADDR_REG (0x500)
+#define FIMG2D_PAT_SIZE_REG (0x504)
+#define FIMG2D_PAT_COLOR_MODE_REG (0x508)
+#define FIMG2D_PAT_OFFSET_REG (0x50c)
+#define FIMG2D_PAT_STRIDE_REG (0x510)
+#define FIMG2D_MSK_BASE_ADDR_REG (0x520)
+#define FIMG2D_MSK_STRIDE_REG (0x524)
+#define FIMG2D_MSK_LEFT_TOP_REG (0x528)
+#define FIMG2D_MSK_RIGHT_BOTTOM_REG (0x52c)
+#define FIMG2D_MSK_MODE_REG (0x530)
+#define FIMG2D_MSK_REPEAT_MODE_REG (0x534)
+#define FIMG2D_MSK_PAD_VALUE_REG (0x538)
+#define FIMG2D_MSK_SCALE_CTRL_REG (0x53c)
+#define FIMG2D_MSK_XSCALE_REG (0x540)
+#define FIMG2D_MSK_YSCALE_REG (0x544)
+#define FIMG2D_CW_LT_REG (0x600)
+#define FIMG2D_CW_RB_REG (0x604)
+#define FIMG2D_THIRD_OPERAND_REG (0x610)
+#define FIMG2D_ROP4_REG (0x614)
+#define FIMG2D_ALPHA_REG (0x618)
+#define FIMG2D_FG_COLOR_REG (0x700)
+#define FIMG2D_BG_COLOR_REG (0x704)
+#define FIMG2D_BS_COLOR_REG (0x708)
+#define FIMG2D_SF_COLOR_REG (0x70c)
+#define FIMG2D_SRC_COLORKEY_CTRL_REG (0x710)
+#define FIMG2D_SRC_COLORKEY_DR_MIN_REG (0x714)
+#define FIMG2D_SRC_COLORKEY_DR_MAX_REG (0x718)
+#define FIMG2D_DST_COLORKEY_CTRL_REG (0x71c)
+#define FIMG2D_DST_COLORKEY_DR_MIN_REG (0x720)
+#define FIMG2D_DST_COLORKEY_DR_MAX_REG (0x724)
+#define FIMG2D_YCBCR_SRC_COLORKEY_CTRL_REG (0x728)
+#define FIMG2D_YCBCR_SRC_COLORKEY_DR_MIN_REG (0x72c)
+#define FIMG2D_YCBCR_SRC_COLORKEY_DR_MAX_REG (0x730)
+#define FIMG2D_YCBCR_DST_COLORKEY_CTRL_REG (0x734)
+#define FIMG2D_YCBCR_DST_COLORKEY_DR_MIN_REG (0x738)
+#define FIMG2D_YCBCR_DST_COLORKEY_DR_MAX_REG (0x73c)
+#define FIMG2D_GAMMA_TABLE0_0_REG (0x800)
+#define FIMG2D_GAMMA_TABLE0_1_REG (0x804)
+#define FIMG2D_GAMMA_TABLE0_2_REG (0x808)
+#define FIMG2D_GAMMA_TABLE0_3_REG (0x80c)
+#define FIMG2D_GAMMA_TABLE0_4_REG (0x810)
+#define FIMG2D_GAMMA_TABLE0_5_REG (0x814)
+#define FIMG2D_GAMMA_TABLE0_6_REG (0x818)
+#define FIMG2D_GAMMA_TABLE0_7_REG (0x81c)
+#define FIMG2D_GAMMA_TABLE0_8_REG (0x820)
+#define FIMG2D_GAMMA_TABLE0_9_REG (0x824)
+#define FIMG2D_GAMMA_TABLE0_10_REG (0x828)
+#define FIMG2D_GAMMA_TABLE0_11_REG (0x82c)
+#define FIMG2D_GAMMA_TABLE0_12_REG (0x830)
+#define FIMG2D_GAMMA_TABLE0_13_REG (0x834)
+#define FIMG2D_GAMMA_TABLE0_14_REG (0x838)
+#define FIMG2D_GAMMA_TABLE0_15_REG (0x83c)
+#define FIMG2D_GAMMA_TABLE1_0_REG (0x840)
+#define FIMG2D_GAMMA_TABLE1_1_REG (0x844)
+#define FIMG2D_GAMMA_TABLE1_2_REG (0x848)
+#define FIMG2D_GAMMA_TABLE1_3_REG (0x84c)
+#define FIMG2D_GAMMA_TABLE1_4_REG (0x850)
+#define FIMG2D_GAMMA_TABLE1_5_REG (0x854)
+#define FIMG2D_GAMMA_TABLE1_6_REG (0x858)
+#define FIMG2D_GAMMA_TABLE1_7_REG (0x85c)
+#define FIMG2D_GAMMA_TABLE1_8_REG (0x860)
+#define FIMG2D_GAMMA_TABLE1_9_REG (0x864)
+#define FIMG2D_GAMMA_TABLE1_10_REG (0x868)
+#define FIMG2D_GAMMA_TABLE1_11_REG (0x86c)
+#define FIMG2D_GAMMA_TABLE1_12_REG (0x870)
+#define FIMG2D_GAMMA_TABLE1_13_REG (0x874)
+#define FIMG2D_GAMMA_TABLE1_14_REG (0x878)
+#define FIMG2D_GAMMA_TABLE1_15_REG (0x87c)
+#define FIMG2D_GAMMA_REF_COLOR_REG (0x880)
+
+/* Bit Definitions */
+
+/* FIMG2D_SOFT_RESET_REG */
+#define FIMG2D_SFR_CLEAR (1 << 1)
+#define FIMG2D_SOFT_RESET (1 << 0)
+
+/* FIMG2D_INTEN_REG */
+#define FIMG2D_INT_TYPE_EDGE (1 << 4)
+#define FIMG2D_INT_TYPE_LEVEL (0 << 4)
+
+#define FIMG2D_ACF_INT_ENABLE (1 << 3)
+#define FIMG2D_UCF_INT_ENABLE (1 << 2)
+#define FIMG2D_GCF_INT_ENABLE (1 << 1)
+#define FIMG2D_BLIT_INT_ENABLE (1 << 0)
+
+/* FIMG2D_INTC_PEND_REG */
+#define FIMG2D_ACMD_INT_FLAG (1 << 3)
+#define FIMG2D_UCMD_INT_FLAG (1 << 2)
+#define FIMG2D_GCMD_INT_FLAG (1 << 1)
+#define FIMG2D_BLIT_INT_FLAG (1 << 0)
+
+/* FIMG2D_FIFO_STAT_REG */
+#define FIMG2D_BLIT_FINISHED (1 << 0)
+
+/* FIMG2D_AXI_MODE_REG */
+#define FIMG2D_MAX_BURST_LEN_2 (0 << 24)
+#define FIMG2D_MAX_BURST_LEN_4 (1 << 24)
+#define FIMG2D_MAX_BURST_LEN_8 (2 << 24)
+#define FIMG2D_MAX_BURST_LEN_16 (3 << 24)
+#define FIMG2D_MAX_BURST_LEN_MASK (3 << 24)
+#define FIMG2D_MAX_BURST_LEN_SHIFT (24)
+
+#define FIMG2D_AXI_AWUSERS_SHIFT (16)
+#define FIMG2D_AXI_ARUSERS_SHIFT (8)
+#define FIMG2D_AXI_AWCACHE_SHIFT (4)
+#define FIMG2D_AXI_ARCACHE_SHIFT (0)
+
+/* FIMG2D_DMA_SFR_BASE_ADDR_REG */
+
+/* FIMG2D_DMA_COMMAND_REG */
+#define FIMG2D_BATCH_BLIT_HALT (1 << 2)
+#define FIMG2D_BATCH_BLIT_CONT (1 << 1)
+#define FIMG2D_BATCH_BLIT_START (1 << 0)
+
+/* FIMG2D_DMA_EXE_LIST_NUM_REG */
+#define FIMG2D_BATCH_BLIT_EXELIST_NUM_MASK (0xff)
+#define FIMG2D_BATCH_BLIT_EXELIST_NUM_SHIFT (0)
+
+/* FIMG2D_DMA_STATUS_REG */
+#define FIMG2D_BATCH_BLIT_DONELIST_CNT_MASK (0xff)
+#define FIMG2D_BATCH_BLIT_DONELIST_CNT_SHIFT (17)
+
+#define FIMG2D_BATCH_BLIT_DONEBLIT_CNT_MASK (0xffff)
+#define FIMG2D_BATCH_BLIT_DONEBLIT_CNT_SHIFT (1)
+
+#define FIMG2D_BATCH_BLIT_DONE_MASK (1)
+#define FIMG2D_BATCH_BLIT_DONE_SHIFT (0)
+
+/* FIMG2D_DMA_HOLD_CMD_REG */
+#define FIMG2D_BATCH_BLIT_USER_HOLD (1 << 2)
+#define FIMG2D_BATCH_BLIT_LIST_HOLD (1 << 1)
+#define FIMG2D_BATCH_BLIT_BLIT_HOLD (1 << 0)
+
+/* FIMG2D_BITBLT_START_REG */
+#define FIMG2D_START_N_HOLD (1 << 1)
+#define FIMG2D_START_BITBLT (1 << 0)
+
+/* FIMG2D_BITBLT_COMMAND_REG */
+#define FIMG2D_SOLID_FILL (1 << 28)
+
+#define FIMG2D_DST_WR_DEPREMULT (1 << 27)
+#define FIMG2D_DST_RD_PREMULT (1 << 26)
+#define FIMG2D_PAT_PREMULT (1 << 25)
+#define FIMG2D_SRC_PREMULT (1 << 24)
+#define FIMG2D_PREMULT_ALL (0xf << 24)
+
+#define FIMG2D_ALPHA_BLEND_MODE (1 << 20)
+
+#define FIMG2D_COLORKEY_SRC_RGBA (1 << 16)
+#define FIMG2D_COLORKEY_DST_RGBA (2 << 16)
+#define FIMG2D_COLORKEY_SRC_YCBCR (4 << 16)
+#define FIMG2D_COLORKEY_DST_YCBCR (8 << 16)
+
+#define FIMG2D_OPAQUE_MODE (0 << 12)
+#define FIMG2D_TRANSP_MODE (1 << 12)
+#define FIMG2D_BLUSCR_MODE (2 << 12)
+
+#define FIMG2D_ENABLE_CW (1 << 8)
+#define FIMG2D_ENABLE_DITHER (1 << 3)
+
+#define FIMG2D_ENABLE_SRC_ALPHA (0 << 2)
+#define FIMG2D_ENABLE_ROP_ALPHA (1 << 2)
+
+#define FIMG2D_ENABLE_ROP4_MSK (1 << 0)
+#define FIMG2D_ENABLE_NORMAL_MSK (2 << 0)
+
+/* FIMG2D_BLEND_FUNCTION_REG */
+#define FIMG2D_WINCE_SRC_OVER (1 << 22)
+#define FIMG2D_DARKEN (1 << 21)
+#define FIMG2D_LIGHTEN (1 << 20)
+#define FIMG2D_INV_DST_COEFF (1 << 18)
+#define FIMG2D_INV_SRC_COEFF (1 << 16)
+
+#define FIMG2D_DST_COEFF_DA_SHIFT (14)
+#define FIMG2D_DST_COEFF_SA_SHIFT (12)
+#define FIMG2D_SRC_COEFF_DA_SHIFT (6)
+#define FIMG2D_SRC_COEFF_SA_SHIFT (4)
+
+#define FIMG2D_DST_COEFF_SHIFT (8)
+#define FIMG2D_SRC_COEFF_SHIFT (0)
+
+/* FIMG2D_ROUND_MODE_REG */
+#define FIMG2D_PREMULT_ROUND_MASK (3 << 4)
+#define FIMG2D_PREMULT_ROUND_SHIFT (4)
+
+#define FIMG2D_BLEND_ROUND_MASK (3 << 0)
+#define FIMG2D_BLEND_ROUND_SHIFT (0)
+
+/* FIMG2D_ROTATE_REG */
+#define FIMG2D_MSK_ROTATE_90 (1 << 8)
+#define FIMG2D_PAT_ROTATE_90 (1 << 4)
+#define FIMG2D_SRC_ROTATE_90 (1 << 0)
+
+/* FIMG2D_SRC_MSK_DIRECT_REG */
+#define FIMG2D_MSK_Y_DIR_NEGATIVE (1 << 5)
+#define FIMG2D_MSK_X_DIR_NEGATIVE (1 << 4)
+
+#define FIMG2D_SRC_Y_DIR_NEGATIVE (1 << 1)
+#define FIMG2D_SRC_X_DIR_NEGATIVE (1 << 0)
+
+/* FIMG2D_DST_PAT_DIRECT_REG */
+#define FIMG2D_PAT_Y_DIR_NEGATIVE (1 << 5)
+#define FIMG2D_PAT_X_DIR_NEGATIVE (1 << 4)
+
+#define FIMG2D_DST_Y_DIR_NEGATIVE (1 << 1)
+#define FIMG2D_DST_X_DIR_NEGATIVE (1 << 0)
+
+/* FIMG2D_SRC_SELECT_REG & FIMG2D_DST_SELECT_REG */
+#define FIMG2D_IMAGE_TYPE_MEMORY (0 << 0)
+#define FIMG2D_IMAGE_TYPE_FGCOLOR (1 << 0)
+#define FIMG2D_IMAGE_TYPE_BGCOLOR (2 << 0)
+
+/* FIMG2D_SRC_BASE_ADDR_REG */
+/* FIMG2D_DST_BASE_ADDR_REG */
+/* FIMG2D_PAT_BASE_ADDR_REG */
+/* FIMG2D_MSK_BASE_ADDR_REG */
+
+/* FIMG2D_SRC_STRIDE_REG */
+/* FIMG2D_DST_STRIDE_REG */
+/* FIMG2D_PAT_STRIDE_REG */
+/* FIMG2D_MSK_STRIDE_REG */
+
+/* FIMG2D_SRC_COLOR_MODE_REG & FIMG2D_DST_COLOR_MODE_REG */
+#define FIMG2D_YCBCR_NARROW (0 << 17)
+#define FIMG2D_YCBCR_WIDE (1 << 17)
+
+#define FIMG2D_CSC_601 (0 << 16)
+#define FIMG2D_CSC_709 (1 << 16)
+
+#define FIMG2D_YCBCR_ORDER_P1_CRY1CBY0 (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P1_CBY1CRY0 (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P1_Y1CRY0CB (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P1_Y1CBY0CR (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P2_CRCB (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P2_CBCR (1 << 12)
+#define FIMG2D_YCBCR_ORDER_SHIFT (12)
+
+#define FIMG2D_YCBCR_1PLANE (0 << 8)
+#define FIMG2D_YCBCR_2PLANE (1 << 8)
+
+#define FIMG2D_RGB_ORDER_AXRGB (0 << 4)
+#define FIMG2D_RGB_ORDER_RGBAX (1 << 4)
+#define FIMG2D_RGB_ORDER_AXBGR (2 << 4)
+#define FIMG2D_RGB_ORDER_BGRAX (3 << 4)
+#define FIMG2D_RGB_ORDER_SHIFT (4)
+
+#define FIMG2D_COLOR_FORMAT_XRGB_8888 (0 << 0)
+#define FIMG2D_COLOR_FORMAT_ARGB_8888 (1 << 0)
+#define FIMG2D_COLOR_FORMAT_RGB_565 (2 << 0)
+#define FIMG2D_COLOR_FORMAT_XRGB_1555 (3 << 0)
+#define FIMG2D_COLOR_FORMAT_ARGB_1555 (4 << 0)
+#define FIMG2D_COLOR_FORMAT_XRGB_4444 (5 << 0)
+#define FIMG2D_COLOR_FORMAT_ARGB_4444 (6 << 0)
+#define FIMG2D_COLOR_FORMAT_PACKED_RGB_888 (7 << 0)
+#define FIMG2D_COLOR_FORMAT_YCBCR_444 (8 << 0)
+#define FIMG2D_COLOR_FORMAT_YCBCR_422 (9 << 0)
+#define FIMG2D_COLOR_FORMAT_YCBCR_420 (10 << 0)
+#define FIMG2D_COLOR_FORMAT_A8 (11 << 0)
+#define FIMG2D_COLOR_FORMAT_L8 (12 << 0)
+#define FIMG2D_COLOR_FORMAT_SHIFT (0)
+
+/* FIMG2D_PAT_COLOR_MODE_REG */
+#define FIMG2D_PAT_ORDER_AXRGB (0 << 4)
+#define FIMG2D_PAT_ORDER_RGBAX (1 << 4)
+#define FIMG2D_PAT_ORDER_AXBGR (2 << 4)
+#define FIMG2D_PAT_ORDER_BGRAX (3 << 4)
+
+#define FIMG2D_PAT_FORMAT_XRGB_8888 (0 << 0)
+#define FIMG2D_PAT_FORMAT_ARGB_8888 (1 << 0)
+#define FIMG2D_PAT_FORMAT_RGB_565 (2 << 0)
+#define FIMG2D_PAT_FORMAT_XRGB_1555 (3 << 0)
+#define FIMG2D_PAT_FORMAT_ARGB_1555 (4 << 0)
+#define FIMG2D_PAT_FORMAT_XRGB_4444 (5 << 0)
+#define FIMG2D_PAT_FORMAT_ARGB_4444 (6 << 0)
+#define FIMG2D_PAT_FORMAT_PACKED_RGB_888 (7 << 0)
+
+/* FIMG2D_SRC_LEFT_TOP_REG & FIMG2D_SRC_RIGHT_BOTTOM_REG */
+/* FIMG2D_DST_LEFT_TOP_REG & FIMG2D_DST_RIGHT_BOTTOM_REG */
+/* FIMG2D_MSK_LEFT_TOP_REG & FIMG2D_MSK_RIGHT_BOTTOM_REG */
+#define FIMG2D_COORD_LT_Y_SHIFT (16)
+#define FIMG2D_COORD_LT_X_SHIFT (0)
+#define FIMG2D_COORD_RB_Y_SHIFT (16)
+#define FIMG2D_COORD_RB_X_SHIFT (0)
+#define FIMG2D_COORD_MAX_HEIGHT (8000)
+#define FIMG2D_COORD_MAX_WIDTH (8000)
+
+/* FIMG2D_SRC_PLANE2_BASE_ADDR_REG */
+/* FIMG2D_DST_PLANE2_BASE_ADDR_REG */
+
+/* FIMG2D_SRC_REPEAT_MODE_REG */
+#define FIMG2D_SRC_REPEAT_NORMAL (0 << 0)
+#define FIMG2D_SRC_REPEAT_PAD (1 << 0)
+#define FIMG2D_SRC_REPEAT_REFLECT (2 << 0)
+#define FIMG2D_SRC_REPEAT_CLAMP (3 << 0)
+#define FIMG2D_SRC_REPEAT_NONE (4 << 0)
+#define FIMG2D_SRC_REPEAT_SHIFT (0)
+
+/* FIMG2D_MSK_REPEAT_MODE_REG */
+#define FIMG2D_MSK_REPEAT_NORMAL (0 << 0)
+#define FIMG2D_MSK_REPEAT_PAD (1 << 0)
+#define FIMG2D_MSK_REPEAT_REFLECT (2 << 0)
+#define FIMG2D_MSK_REPEAT_CLAMP (3 << 0)
+#define FIMG2D_MSK_REPEAT_SHIFT (0)
+
+/* FIMG2D_SRC_PAD_VALUE_REG */
+/* FIMG2D_MSK_PAD_VALUE_REG */
+
+/* FIMG2D_SRC_A8_RGB_EXT_REG */
+/* FIMG2D_DST_A8_RGB_EXT_REG */
+
+/* FIMG2D_SRC_SCALE_CTRL_REG & FIMG2D_MSK_SCALE_CTRL_REG */
+#define FIMG2D_SCALE_MODE_NEAREST (1 << 0)
+#define FIMG2D_SCALE_MODE_BILINEAR (2 << 0)
+
+/* FIMG2D_SRC_XSCALE_REG & FIMG2D_SRC_YSCALE_REG */
+/* FIMG2D_MSK_XSCALE_REG & FIMG2D_MSK_YSCALE_REG */
+#define FIMG2D_SCALE_FACTOR_INTG_SHIFT (16)
+#define FIMG2D_SCALE_FACTOR_FRAC_SHIFT (0)
+
+/* FIMG2D_PAT_SIZE_REG */
+#define FIMG2D_PAT_HEIGHT_SHIFT (16)
+#define FIMG2D_PAT_WIDTH_SHIFT (0)
+#define FIMG2D_MAX_PAT_HEIGHT (8000)
+#define FIMG2D_MAX_PAT_WIDTH (8000)
+
+/* FIMG2D_PAT_OFFSET_REG */
+#define FIMG2D_PAT_Y_OFFSET_SHIFT (16)
+#define FIMG2D_PAT_X_OFFSET_SHIFT (0)
+#define FIMG2D_MAX_PAT_Y_OFFSET (7999)
+#define FIMG2D_MAX_PAT_X_OFFSET (7999)
+
+/* FIMG2D_MSK_MODE_REG */
+#define FIMG2D_MSK_TYPE_ALPHA (0 << 8)
+#define FIMG2D_MSK_TYPE_ARGB (1 << 8)
+#define FIMG2D_MSK_TYPE_MIXED (2 << 8)
+
+#define FIMG2D_MSK_ORDER_AXRGB (0 << 4)
+#define FIMG2D_MSK_ORDER_RGBAX (1 << 4)
+#define FIMG2D_MSK_ORDER_AXBGR (2 << 4)
+#define FIMG2D_MSK_ORDER_BGRAX (3 << 4)
+#define FIMG2D_MSK_ORDER_SHIFT (4)
+
+#define FIMG2D_1BIT_MSK (0 << 0)
+#define FIMG2D_4BIT_MSK (1 << 0)
+#define FIMG2D_8BIT_MSK (2 << 0)
+#define FIMG2D_16BIT_MSK_565 (3 << 0)
+#define FIMG2D_16BIT_MSK_1555 (4 << 0)
+#define FIMG2D_16BIT_MSK_4444 (5 << 0)
+#define FIMG2D_32BIT_MSK_8888 (6 << 0)
+#define FIMG2D_4BIT_MSK_WINCE_AA_FONT (7 << 0)
+#define FIMG2D_MSK_FORMAT_SHIFT (0)
+
+/* FIMG2D_CW_LT_REG */
+#define FIMG2D_CW_COORD_LT_Y_SHIFT (16)
+#define FIMG2D_CW_COORD_LT_X_SHIFT (0)
+#define FIMG2D_CW_COORD_RB_Y_SHIFT (16)
+#define FIMG2D_CW_COORD_RB_X_SHIFT (0)
+
+/* FIMG2D_THIRD_OPERAND_REG */
+#define FIMG2D_OPR3_MSKSEL_PAT (0 << 4)
+#define FIMG2D_OPR3_MSKSEL_FGCOLOR (1 << 4)
+#define FIMG2D_OPR3_MSKSEL_BGCOLOR (2 << 4)
+#define FIMG2D_OPR3_UNMSKSEL_PAT (0 << 0)
+#define FIMG2D_OPR3_UNMSKSEL_FGCOLOR (1 << 0)
+#define FIMG2D_OPR3_UNMSKSEL_BGCOLOR (2 << 0)
+
+/* FIMG2D_ROP4_REG */
+#define FIMG2D_MASKED_ROP3_SHIFT (8)
+#define FIMG2D_UNMASKED_ROP3_SHIFT (0)
+
+/* FIMG2D_ALPHA_REG */
+#define FIMG2D_GCOLOR_RGB_MASK (0xffffff)
+#define FIMG2D_GCOLOR_SHIFT (8)
+
+#define FIMG2D_GALPHA_MASK (0xff)
+#define FIMG2D_GALPHA_SHIFT (0)
+
+/* FIMG2D_FG_COLOR_REG */
+/* FIMG2D_BG_COLOR_REG */
+/* FIMG2D_BS_COLOR_REG */
+/* FIMG2D_SF_COLOR_REG */
+
+/* FIMG2D_SRC_COLORKEY_CTRL_REG */
+/* FIMG2D_SRC_COLORKEY_DR_MIN_REG */
+/* FIMG2D_SRC_COLORKEY_DR_MAX_REG */
+
+/* FIMG2D_DST_COLORKEY_CTRL_REG */
+/* FIMG2D_DST_COLORKEY_DR_MIN_REG */
+/* FIMG2D_DST_COLORKEY_DR_MAX_REG */
+
+/* FIMG2D_YCBCR_SRC_COLORKEY_CTRL_REG */
+/* FIMG2D_YCBCR_SRC_COLORKEY_DR_MIN_REG */
+/* FIMG2D_YCBCR_SRC_COLORKEY_DR_MAX_REG */
+
+/* FIMG2D_YCBCR_DST_COLORKEY_CTRL_REG */
+/* FIMG2D_YCBCR_DST_COLORKEY_DR_MIN_REG */
+/* FIMG2D_YCBCR_DST_COLORKEY_DR_MAX_REG */
+
+#endif /* __FIMG2D4X_REGS_H */
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_cache.c b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_cache.c
new file mode 100644
index 0000000..43489e4
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_cache.c
@@ -0,0 +1,168 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+#include "fimg2d.h"
+#include "fimg2d_cache.h"
+
+#define LV1_SHIFT 20
+#define LV1_PT_SIZE SZ_1M
+#define LV2_PT_SIZE SZ_1K
+#define LV2_BASE_MASK 0x3ff
+#define LV2_PT_MASK 0xff000
+#define LV2_SHIFT 12
+#define LV1_DESC_MASK 0x3
+#define LV2_DESC_MASK 0x2
+
+/*
+ * virt2phys - translate a user virtual address through mm's page tables
+ *
+ * Walks the two-level page table by hand.  The masks match the ARM
+ * short-descriptor format: an LV1 entry of type 0x1 is a coarse page
+ * table, and an LV2 entry with bit 1 set is a small page — TODO confirm
+ * on non-classic-MMU configurations.
+ *
+ * Returns the physical address, or 0 when either level is not a valid
+ * descriptor (i.e. the address would fault).
+ */
+static inline unsigned long virt2phys(struct mm_struct *mm, unsigned long vaddr)
+{
+	unsigned long *pgd;
+	unsigned long *lv1d, *lv2d;
+
+	pgd = (unsigned long *)mm->pgd;
+
+	/* level-1 index: top bits [31:20] of the virtual address */
+	lv1d = pgd + (vaddr >> LV1_SHIFT);
+
+	if ((*lv1d & LV1_DESC_MASK) != 0x1) {
+		fimg2d_debug("invalid LV1 descriptor, "
+				"pgd %p lv1d 0x%lx vaddr 0x%lx\n",
+				pgd, *lv1d, vaddr);
+		return 0;
+	}
+
+	/* level-2 table base is *lv1d with the low 10 bits cleared;
+	 * index with vaddr bits [19:12] */
+	lv2d = (unsigned long *)phys_to_virt(*lv1d & ~LV2_BASE_MASK) +
+		((vaddr & LV2_PT_MASK) >> LV2_SHIFT);
+
+	if ((*lv2d & LV2_DESC_MASK) != 0x2) {
+		fimg2d_debug("invalid LV2 descriptor, "
+				"pgd %p lv2d 0x%lx vaddr 0x%lx\n",
+				pgd, *lv2d, vaddr);
+		return 0;
+	}
+
+	/* page frame from the LV2 entry plus the in-page offset */
+	return (*lv2d & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
+}
+
+#ifdef CONFIG_OUTER_CACHE
+/*
+ * fimg2d_dma_sync_outer - outer (L2) cache maintenance over a user range
+ *
+ * Walks [vaddr, vaddr + size) page by page, translating each piece to a
+ * physical address and cleaning (CACHE_CLEAN) or cleaning+invalidating
+ * (CACHE_FLUSH) it in the outer cache.  Pages that fail translation are
+ * skipped.  Any other cache operation is a no-op.
+ */
+void fimg2d_dma_sync_outer(struct mm_struct *mm, unsigned long vaddr,
+				size_t size, enum cache_opr opr)
+{
+	unsigned long pos, limit, pgend, pa;
+	int chunk;
+
+	if (opr != CACHE_CLEAN && opr != CACHE_FLUSH)
+		return;
+
+	pos = vaddr;
+	limit = vaddr + size;
+
+	while (pos < limit) {
+		/* advance at most to the next page boundary */
+		pgend = (pos + PAGE_SIZE) & PAGE_MASK;
+		if (pgend > limit)
+			pgend = limit;
+		chunk = pgend - pos;
+
+		pa = virt2phys(mm, pos);
+		if (pa) {
+			if (opr == CACHE_CLEAN)
+				outer_clean_range(pa, pa + chunk);
+			else
+				outer_flush_range(pa, pa + chunk);
+		}
+		pos += chunk;
+	}
+}
+
+/*
+ * fimg2d_clean_outer_pagetable - clean the outer-cache lines holding the
+ * page-table entries that map [vaddr, vaddr + size)
+ *
+ * First cleans the span of LV1 descriptors, then each referenced LV2
+ * table (LV2_PT_SIZE bytes each).  NOTE(review): *lv1 is used as a
+ * coarse-table base without a descriptor type check; callers appear to
+ * validate the range via fimg2d_check_pagetable first — confirm.
+ */
+void fimg2d_clean_outer_pagetable(struct mm_struct *mm, unsigned long vaddr,
+					size_t size)
+{
+	unsigned long *pgd;
+	unsigned long *lv1, *lv1end;
+	unsigned long lv2pa;
+
+	pgd = (unsigned long *)mm->pgd;
+
+	lv1 = pgd + (vaddr >> LV1_SHIFT);
+	/* round the end up so a partially covered 1MB section is included */
+	lv1end = pgd + ((vaddr + size + LV1_PT_SIZE-1) >> LV1_SHIFT);
+
+	/* clean level1 page table */
+	outer_clean_range(virt_to_phys(lv1), virt_to_phys(lv1end));
+
+	do {
+		lv2pa = *lv1 & ~LV2_BASE_MASK;	/* lv2 pt base */
+		/* clean level2 page table */
+		outer_clean_range(lv2pa, lv2pa + LV2_PT_SIZE);
+		lv1++;
+	} while (lv1 != lv1end);
+}
+#endif /* CONFIG_OUTER_CACHE */
+
+/*
+ * fimg2d_check_pagetable - verify that every page in [vaddr, vaddr+size)
+ * is mapped by valid LV1/LV2 descriptors
+ *
+ * Returns PT_NORMAL when the whole (page-aligned) range is backed by
+ * coarse-table LV1 entries and small-page LV2 entries; PT_FAULT on the
+ * first invalid descriptor.
+ */
+enum pt_status fimg2d_check_pagetable(struct mm_struct *mm, unsigned long vaddr,
+					size_t size)
+{
+	unsigned long *pgd;
+	unsigned long *lv1d, *lv2d;
+
+	pgd = (unsigned long *)mm->pgd;
+
+	/* widen the range to whole pages before walking */
+	size += offset_in_page(vaddr);
+	size = PAGE_ALIGN(size);
+
+	while ((long)size > 0) {
+		lv1d = pgd + (vaddr >> LV1_SHIFT);
+
+		/*
+		 * check level 1 descriptor
+		 * lv1 desc[1:0] = 00 --> fault
+		 * lv1 desc[1:0] = 01 --> page table
+		 * lv1 desc[1:0] = 10 --> section or supersection
+		 * lv1 desc[1:0] = 11 --> reserved
+		 */
+		if ((*lv1d & LV1_DESC_MASK) != 0x1) {
+			fimg2d_debug("invalid LV1 descriptor, "
+					"pgd %p lv1d 0x%lx vaddr 0x%lx\n",
+					pgd, *lv1d, vaddr);
+			return PT_FAULT;
+		}
+
+		lv2d = (unsigned long *)phys_to_virt(*lv1d & ~LV2_BASE_MASK) +
+				((vaddr & LV2_PT_MASK) >> LV2_SHIFT);
+
+		/*
+		 * check level 2 descriptor
+		 * lv2 desc[1:0] = 00 --> fault
+		 * lv2 desc[1:0] = 01 --> 64k page
+		 * lv2 desc[1:0] = 1x --> 4k page
+		 */
+		if ((*lv2d & LV2_DESC_MASK) != 0x2) {
+			fimg2d_debug("invalid LV2 descriptor, "
+					"pgd %p lv2d 0x%lx vaddr 0x%lx\n",
+					pgd, *lv2d, vaddr);
+			return PT_FAULT;
+		}
+
+		vaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	return PT_NORMAL;
+}
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_cache.h b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_cache.h
new file mode 100644
index 0000000..f337ea5
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_cache.h
@@ -0,0 +1,96 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <plat/cpu.h>
+#include "fimg2d.h"
+
+#define L1_CACHE_SIZE SZ_64K
+#define L2_CACHE_SIZE SZ_1M
+#define LINE_FLUSH_THRESHOLD SZ_1K
+
+/**
+ * cache_opr - [kernel] cache operation mode
+ * @CACHE_INVAL: do cache invalidate
+ * @CACHE_CLEAN: do cache clean for src and msk image
+ * @CACHE_FLUSH: do cache clean and invalidate for dst image
+ * @CACHE_FLUSH_INNER_ALL: clean and invalidate for innercache
+ * @CACHE_FLUSH_ALL: clean and invalidate for whole caches
+ */
+enum cache_opr {
+ CACHE_INVAL,
+ CACHE_CLEAN,
+ CACHE_FLUSH,
+ CACHE_FLUSH_INNER_ALL,
+ CACHE_FLUSH_ALL
+};
+
+/**
+ * @PT_NORMAL: pagetable exists
+ * @PT_FAULT: invalid pagetable
+ */
+enum pt_status {
+ PT_NORMAL,
+ PT_FAULT,
+};
+
+static inline bool is_inner_flushall(size_t size)
+{
+ if (soc_is_exynos5250())
+ return (size >= SZ_1M * 25) ? true : false;
+ else
+ return (size >= L1_CACHE_SIZE) ? true : false;
+}
+
+static inline bool is_outer_flushall(size_t size)
+{
+ return (size >= L2_CACHE_SIZE) ? true : false;
+}
+
+static inline bool is_inner_flushrange(size_t hole)
+{
+ if (!soc_is_exynos5250())
+ return true;
+ else {
+ if (hole < LINE_FLUSH_THRESHOLD)
+ return true;
+ else
+ return false; /* line-by-line flush */
+ }
+}
+
+static inline bool is_outer_flushrange(size_t hole)
+{
+ if (hole < LINE_FLUSH_THRESHOLD)
+ return true;
+ else
+ return false; /* line-by-line flush */
+}
+
+/* Inner (CPU) cache maintenance for [addr, addr + size):
+ * DMA_TO_DEVICE cleans via dmac_map_area, DMA_BIDIRECTIONAL does a full
+ * flush; any other direction is a no-op in this helper. */
+static inline void fimg2d_dma_sync_inner(unsigned long addr, size_t size, int dir)
+{
+	if (dir == DMA_TO_DEVICE)
+		dmac_map_area((void *)addr, size, dir);
+	else if (dir == DMA_BIDIRECTIONAL)
+		dmac_flush_range((void *)addr, (void *)(addr + size));
+}
+
+/* Undo the DMA_TO_DEVICE mapping done by fimg2d_dma_sync_inner();
+ * other directions need no teardown here. */
+static inline void fimg2d_dma_unsync_inner(unsigned long addr, size_t size, int dir)
+{
+	if (dir == DMA_TO_DEVICE)
+		dmac_unmap_area((void *)addr, size, dir);
+}
+
+void fimg2d_clean_outer_pagetable(struct mm_struct *mm, unsigned long addr, size_t size);
+void fimg2d_dma_sync_outer(struct mm_struct *mm, unsigned long addr, size_t size, enum cache_opr opr);
+enum pt_status fimg2d_check_pagetable(struct mm_struct *mm, unsigned long addr, size_t size);
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_clk.c b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_clk.c
new file mode 100644
index 0000000..24a80ae
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_clk.c
@@ -0,0 +1,170 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/clk.h>
+#include <linux/atomic.h>
+#include <linux/sched.h>
+#include <plat/cpu.h>
+#include <plat/fimg2d.h>
+#include "fimg2d.h"
+#include "fimg2d_clk.h"
+
+/*
+ * fimg2d_clk_on - gate the G2D clock on and mark it enabled
+ *
+ * bltlock keeps the gate state and the clkon flag consistent with
+ * concurrent blit processing.
+ */
+void fimg2d_clk_on(struct fimg2d_control *info)
+{
+	spin_lock(&info->bltlock);
+	clk_enable(info->clock);
+	atomic_set(&info->clkon, 1);
+	spin_unlock(&info->bltlock);
+
+	fimg2d_debug("clock enable\n");
+}
+
+/*
+ * fimg2d_clk_off - mark the clock disabled, then gate it off
+ *
+ * The clkon flag is cleared before clk_disable() so observers under
+ * bltlock never see the clock flagged on while the gate is closed.
+ */
+void fimg2d_clk_off(struct fimg2d_control *info)
+{
+	spin_lock(&info->bltlock);
+	atomic_set(&info->clkon, 0);
+	clk_disable(info->clock);
+	spin_unlock(&info->bltlock);
+
+	fimg2d_debug("clock disable\n");
+}
+
+/*
+ * fimg2d_clk_save - drop the G2D source clock to a power-save rate
+ * (EXYNOS4212/4412 only)
+ *
+ * Fixes vs. original: clk_get() may sleep, so it is now done outside
+ * the spinlock; its result is error-checked; and the reference it takes
+ * is released with clk_put() (the original leaked one ref per call).
+ */
+void fimg2d_clk_save(struct fimg2d_control *info)
+{
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		struct fimg2d_platdata *pdata;
+		struct clk *sclk;
+
+		pdata = to_fimg2d_plat(info->dev);
+
+		sclk = clk_get(info->dev, pdata->clkname);
+		if (IS_ERR(sclk)) {
+			printk(KERN_ERR "FIMG2D failed to get sclk\n");
+			return;
+		}
+
+		spin_lock(&info->bltlock);
+		clk_set_rate(sclk, 50*MHZ);	/* 800MHz/16=50MHz */
+		spin_unlock(&info->bltlock);
+
+		fimg2d_debug("%s clkrate=%lu\n", pdata->clkname, clk_get_rate(sclk));
+		clk_put(sclk);
+	}
+}
+
+/*
+ * fimg2d_clk_restore - restore the G2D source clock to its working rate
+ * (2 * pclk_acp) on EXYNOS4212/4412
+ *
+ * Fixes vs. original: clk_get() may sleep, so lookups are done outside
+ * the spinlock; results are error-checked; and both references are
+ * released with clk_put() (the original leaked two refs per call).
+ */
+void fimg2d_clk_restore(struct fimg2d_control *info)
+{
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		struct fimg2d_platdata *pdata;
+		struct clk *sclk, *pclk;
+
+		pdata = to_fimg2d_plat(info->dev);
+
+		sclk = clk_get(info->dev, pdata->clkname);
+		pclk = clk_get(NULL, "pclk_acp");
+		if (IS_ERR(sclk) || IS_ERR(pclk)) {
+			printk(KERN_ERR "FIMG2D failed to get clk\n");
+			goto out;
+		}
+
+		spin_lock(&info->bltlock);
+		clk_set_rate(sclk, clk_get_rate(pclk) * 2);
+		spin_unlock(&info->bltlock);
+
+		fimg2d_debug("%s(%lu) pclk_acp(%lu)\n", pdata->clkname,
+				clk_get_rate(sclk), clk_get_rate(pclk));
+out:
+		if (!IS_ERR(sclk))
+			clk_put(sclk);
+		if (!IS_ERR(pclk))
+			clk_put(pclk);
+	}
+}
+
+/*
+ * fimg2d_clk_dump - log the current G2D-related clock rates
+ *
+ * Fixes vs. original: every clk_get() reference is now balanced with
+ * clk_put() (the original leaked two refs per call), and the platform
+ * data lookup was moved into the only branch that uses it.
+ */
+void fimg2d_clk_dump(struct fimg2d_control *info)
+{
+	struct clk *sclk, *pclk, *aclk;
+
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		struct fimg2d_platdata *pdata = to_fimg2d_plat(info->dev);
+
+		sclk = clk_get(info->dev, pdata->clkname);
+		pclk = clk_get(NULL, "pclk_acp");
+
+		printk(KERN_INFO "%s(%lu) pclk_acp(%lu)\n",
+				pdata->clkname,
+				clk_get_rate(sclk), clk_get_rate(pclk));
+
+		clk_put(sclk);
+		clk_put(pclk);
+	} else {
+		aclk = clk_get(NULL, "aclk_acp");
+		pclk = clk_get(NULL, "pclk_acp");
+
+		printk(KERN_INFO "aclk_acp(%lu) pclk_acp(%lu)\n",
+				clk_get_rate(aclk), clk_get_rate(pclk));
+
+		clk_put(aclk);
+		clk_put(pclk);
+	}
+}
+
+/*
+ * fimg2d_clk_setup - resolve and configure the G2D clocks at probe time
+ *
+ * On EXYNOS4212/4412: looks up the source clock and its parent from
+ * platform data, reparents it, and sets the requested rate.  On all
+ * SoCs: acquires the gate clock and stores it in info->clock.
+ *
+ * Returns 0 on success or -ENOENT when any clock lookup fails; the
+ * error paths release whatever was acquired so far.
+ * NOTE(review): on success, the sclk/parent references taken here are
+ * kept (not clk_put) — presumably held for the driver's lifetime and
+ * meant to be dropped in fimg2d_clk_release(); confirm the balance.
+ */
+int fimg2d_clk_setup(struct fimg2d_control *info)
+{
+	struct fimg2d_platdata *pdata;
+	struct clk *parent, *sclk;
+	int ret = 0;
+
+	sclk = parent = NULL;
+	pdata = to_fimg2d_plat(info->dev);
+
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		/* clock for setting parent and rate */
+		parent = clk_get(info->dev, pdata->parent_clkname);
+		if (IS_ERR(parent)) {
+			printk(KERN_ERR "FIMG2D failed to get parent clk\n");
+			ret = -ENOENT;
+			goto err_clk1;
+		}
+		fimg2d_debug("parent clk: %s\n", pdata->parent_clkname);
+
+		sclk = clk_get(info->dev, pdata->clkname);
+		if (IS_ERR(sclk)) {
+			printk(KERN_ERR "FIMG2D failed to get sclk\n");
+			ret = -ENOENT;
+			goto err_clk2;
+		}
+		fimg2d_debug("sclk: %s\n", pdata->clkname);
+
+		/* reparent failure is logged but not fatal */
+		if (clk_set_parent(sclk, parent))
+			printk(KERN_ERR "FIMG2D failed to set parent\n");
+
+		clk_set_rate(sclk, pdata->clkrate);
+		fimg2d_debug("clkrate: %ld parent clkrate: %ld\n",
+				clk_get_rate(sclk), clk_get_rate(parent));
+	} else {
+		fimg2d_debug("aclk_acp(%lu) pclk_acp(%lu)\n",
+				clk_get_rate(clk_get(NULL, "aclk_acp")),
+				clk_get_rate(clk_get(NULL, "pclk_acp")));
+	}
+
+	/* clock for gating */
+	info->clock = clk_get(info->dev, pdata->gate_clkname);
+	if (IS_ERR(info->clock)) {
+		printk(KERN_ERR "FIMG2D failed to get gate clk\n");
+		ret = -ENOENT;
+		goto err_clk3;
+	}
+	fimg2d_debug("gate clk: %s\n", pdata->gate_clkname);
+	return ret;
+
+err_clk3:
+	if (sclk)
+		clk_put(sclk);
+
+err_clk2:
+	if (parent)
+		clk_put(parent);
+
+err_clk1:
+	return ret;
+}
+
+/*
+ * fimg2d_clk_release - release the clocks acquired in fimg2d_clk_setup()
+ *
+ * NOTE(review): clk_put(clk_get(...)) is refcount-net-zero (the get
+ * takes a reference that the put immediately drops), so the references
+ * held since fimg2d_clk_setup() do not appear to be released here —
+ * verify against the clk framework's refcounting.
+ */
+void fimg2d_clk_release(struct fimg2d_control *info)
+{
+	clk_put(info->clock);
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		struct fimg2d_platdata *pdata;
+		pdata = to_fimg2d_plat(info->dev);
+		clk_put(clk_get(info->dev, pdata->clkname));
+		clk_put(clk_get(info->dev, pdata->parent_clkname));
+	}
+}
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_clk.h b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_clk.h
new file mode 100644
index 0000000..c3fbf67
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_clk.h
@@ -0,0 +1,26 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D_CLK_H__
+#define __FIMG2D_CLK_H__
+
+#include "fimg2d.h"
+
+/* acquire/configure clocks at probe; returns 0 or -ENOENT */
+int fimg2d_clk_setup(struct fimg2d_control *info);
+/* release clocks acquired by fimg2d_clk_setup() */
+void fimg2d_clk_release(struct fimg2d_control *info);
+/* gate the G2D clock on/off and track state under bltlock */
+void fimg2d_clk_on(struct fimg2d_control *info);
+void fimg2d_clk_off(struct fimg2d_control *info);
+/* lower the source clock rate for power save / restore working rate */
+void fimg2d_clk_save(struct fimg2d_control *info);
+void fimg2d_clk_restore(struct fimg2d_control *info);
+/* log current clock rates */
+void fimg2d_clk_dump(struct fimg2d_control *info);
+
+#endif /* __FIMG2D_CLK_H__ */
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.c b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.c
new file mode 100644
index 0000000..26ea56b
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.c
@@ -0,0 +1,368 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <plat/fimg2d.h>
+#include "fimg2d.h"
+#include "fimg2d_ctx.h"
+#include "fimg2d_cache.h"
+#include "fimg2d_helper.h"
+
+static int fimg2d_check_params(struct fimg2d_bltcmd *cmd)
+{
+ int w, h, i;
+ struct fimg2d_param *p = &cmd->param;
+ struct fimg2d_image *img;
+ struct fimg2d_scale *scl;
+ struct fimg2d_clip *clp;
+ struct fimg2d_rect *r;
+
+ /* dst is mandatory */
+ if (!cmd->image[IDST].addr.type)
+ return -1;
+
+ /* DST op makes no effect */
+ if (cmd->op < 0 || cmd->op == BLIT_OP_DST || cmd->op >= BLIT_OP_END)
+ return -1;
+
+ for (i = 0; i < MAX_IMAGES; i++) {
+ img = &cmd->image[i];
+ if (!img->addr.type)
+ continue;
+
+ w = img->width;
+ h = img->height;
+ r = &img->rect;
+
+ /* 8000: max width & height */
+ if (w > 8000 || h > 8000)
+ return -1;
+
+ if (r->x1 < 0 || r->y1 < 0 ||
+ r->x1 >= w || r->y1 >= h ||
+ r->x1 >= r->x2 || r->y1 >= r->y2)
+ return -1;
+ }
+
+ clp = &p->clipping;
+ if (clp->enable) {
+ img = &cmd->image[IDST];
+
+ w = img->width;
+ h = img->height;
+ r = &img->rect;
+
+ if (clp->x1 < 0 || clp->y1 < 0 ||
+ clp->x1 >= w || clp->y1 >= h ||
+ clp->x1 >= clp->x2 || clp->y1 >= clp->y2 ||
+ clp->x1 >= r->x2 || clp->x2 <= r->x1 ||
+ clp->y1 >= r->y2 || clp->y2 <= r->y1)
+ return -1;
+ }
+
+ scl = &p->scaling;
+ if (scl->mode) {
+ if (!scl->src_w || !scl->src_h || !scl->dst_w || !scl->dst_h)
+ return -1;
+ }
+
+ return 0;
+}
+
+static void fimg2d_fixup_params(struct fimg2d_bltcmd *cmd)
+{
+ struct fimg2d_param *p = &cmd->param;
+ struct fimg2d_image *img;
+ struct fimg2d_scale *scl;
+ struct fimg2d_clip *clp;
+ struct fimg2d_rect *r;
+ int i;
+
+ clp = &p->clipping;
+ scl = &p->scaling;
+
+ /* fix dst/clip rect */
+ for (i = 0; i < MAX_IMAGES; i++) {
+ img = &cmd->image[i];
+ if (!img->addr.type)
+ continue;
+
+ r = &img->rect;
+
+ if (i == IMAGE_DST && clp->enable) {
+ if (clp->x2 > img->width)
+ clp->x2 = img->width;
+ if (clp->y2 > img->height)
+ clp->y2 = img->height;
+ } else {
+ if (r->x2 > img->width)
+ r->x2 = img->width;
+ if (r->y2 > img->height)
+ r->y2 = img->height;
+ }
+ }
+
+	/* avoid divided-by-zero */
+ if (scl->mode &&
+ (scl->src_w == scl->dst_w && scl->src_h == scl->dst_h))
+ scl->mode = NO_SCALING;
+}
+
+static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
+{
+ struct mm_struct *mm = cmd->ctx->mm;
+ struct fimg2d_param *p = &cmd->param;
+ struct fimg2d_image *img;
+ struct fimg2d_clip *clp;
+ struct fimg2d_rect *r;
+ struct fimg2d_dma *c;
+ enum pt_status pt;
+ int clip_x, clip_w, clip_h, y, dir, i;
+ unsigned long clip_start;
+
+ clp = &p->clipping;
+
+ for (i = 0; i < MAX_IMAGES; i++) {
+ img = &cmd->image[i];
+ c = &cmd->dma[i];
+ r = &img->rect;
+
+ if (!img->addr.type)
+ continue;
+
+		/* calculate horizontally clipped region */
+ if (i == IMAGE_DST && clp->enable) {
+ c->addr = img->addr.start + (img->stride * clp->y1);
+ c->size = img->stride * (clp->y2 - clp->y1);
+ } else {
+ c->addr = img->addr.start + (img->stride * r->y1);
+ c->size = img->stride * (r->y2 - r->y1);
+ }
+
+ /* check pagetable */
+ if (img->addr.type == ADDR_USER) {
+ pt = fimg2d_check_pagetable(mm, c->addr, c->size);
+ if (pt == PT_FAULT)
+ return -1;
+ }
+
+ if (img->need_cacheopr && i != IMAGE_TMP) {
+ c->cached = c->size;
+ cmd->dma_all += c->cached;
+ }
+ }
+
+#ifdef PERF_PROFILE
+ perf_start(cmd->ctx, PERF_INNERCACHE);
+#endif
+
+ if (is_inner_flushall(cmd->dma_all))
+ flush_all_cpu_caches();
+ else {
+ for (i = 0; i < MAX_IMAGES; i++) {
+ img = &cmd->image[i];
+ c = &cmd->dma[i];
+ r = &img->rect;
+
+ if (!img->addr.type || !c->cached)
+ continue;
+
+ if (i == IMAGE_DST)
+ dir = DMA_BIDIRECTIONAL;
+ else
+ dir = DMA_TO_DEVICE;
+
+ if (i == IDST && clp->enable) {
+ clip_w = width2bytes(clp->x2 - clp->x1,
+ img->fmt);
+ clip_x = pixel2offset(clp->x1, img->fmt);
+ clip_h = clp->y2 - clp->y1;
+ } else {
+ clip_w = width2bytes(r->x2 - r->x1, img->fmt);
+ clip_x = pixel2offset(r->x1, img->fmt);
+ clip_h = r->y2 - r->y1;
+ }
+
+ if (is_inner_flushrange(img->stride - clip_w))
+ fimg2d_dma_sync_inner(c->addr, c->cached, dir);
+ else {
+ for (y = 0; y < clip_h; y++) {
+ clip_start = c->addr +
+ (img->stride * y) + clip_x;
+ fimg2d_dma_sync_inner(clip_start,
+ clip_w, dir);
+ }
+ }
+ }
+ }
+#ifdef PERF_PROFILE
+ perf_end(cmd->ctx, PERF_INNERCACHE);
+#endif
+
+#ifdef CONFIG_OUTER_CACHE
+#ifdef PERF_PROFILE
+ perf_start(cmd->ctx, PERF_OUTERCACHE);
+#endif
+ if (is_outer_flushall(cmd->dma_all))
+ outer_flush_all();
+ else {
+ for (i = 0; i < MAX_IMAGES; i++) {
+ img = &cmd->image[i];
+ c = &cmd->dma[i];
+ r = &img->rect;
+
+ if (!img->addr.type)
+ continue;
+
+ /* clean pagetable */
+ if (img->addr.type == ADDR_USER)
+ fimg2d_clean_outer_pagetable(mm, c->addr, c->size);
+
+ if (!c->cached)
+ continue;
+
+ if (i == IMAGE_DST)
+ dir = CACHE_FLUSH;
+ else
+ dir = CACHE_CLEAN;
+
+ if (i == IDST && clp->enable) {
+ clip_w = width2bytes(clp->x2 - clp->x1,
+ img->fmt);
+ clip_x = pixel2offset(clp->x1, img->fmt);
+ clip_h = clp->y2 - clp->y1;
+ } else {
+ clip_w = width2bytes(r->x2 - r->x1, img->fmt);
+ clip_x = pixel2offset(r->x1, img->fmt);
+ clip_h = r->y2 - r->y1;
+ }
+
+ if (is_outer_flushrange(img->stride - clip_w))
+ fimg2d_dma_sync_outer(mm, c->addr,
+ c->cached, dir);
+ else {
+ for (y = 0; y < clip_h; y++) {
+ clip_start = c->addr +
+ (img->stride * y) + clip_x;
+ fimg2d_dma_sync_outer(mm, clip_start,
+ clip_w, dir);
+ }
+ }
+ }
+ }
+#ifdef PERF_PROFILE
+ perf_end(cmd->ctx, PERF_OUTERCACHE);
+#endif
+#endif
+
+ return 0;
+}
+
+int fimg2d_add_command(struct fimg2d_control *info, struct fimg2d_context *ctx,
+ struct fimg2d_blit *blit, enum addr_space type)
+{
+ int i, ret;
+ struct fimg2d_image *buf[MAX_IMAGES] = image_table(blit);
+ struct fimg2d_bltcmd *cmd;
+
+ if ((blit->dst) && (type == ADDR_USER))
+ up_write(&page_alloc_slow_rwsem);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+
+ if (!cmd) {
+ if ((blit->dst) && (type == ADDR_USER))
+ if (!down_write_trylock(&page_alloc_slow_rwsem))
+ return -EAGAIN;
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MAX_IMAGES; i++) {
+ if (!buf[i])
+ continue;
+
+ if (copy_from_user(&cmd->image[i], buf[i],
+ sizeof(struct fimg2d_image))) {
+ if ((blit->dst) && (type == ADDR_USER))
+ if (!down_write_trylock(&page_alloc_slow_rwsem))
+ return -EAGAIN;
+ ret = -EFAULT;
+ goto err_user;
+ }
+ }
+
+ if ((blit->dst) && (type == ADDR_USER))
+ if (!down_write_trylock(&page_alloc_slow_rwsem))
+ return -EAGAIN;
+
+ cmd->ctx = ctx;
+ cmd->op = blit->op;
+ cmd->sync = blit->sync;
+ cmd->seq_no = blit->seq_no;
+ memcpy(&cmd->param, &blit->param, sizeof(cmd->param));
+
+#ifdef CONFIG_VIDEO_FIMG2D_DEBUG
+ fimg2d_dump_command(cmd);
+#endif
+
+ if (fimg2d_check_params(cmd)) {
+ printk(KERN_ERR "[%s] invalid params\n", __func__);
+ fimg2d_dump_command(cmd);
+ ret = -EINVAL;
+ goto err_user;
+ }
+
+ fimg2d_fixup_params(cmd);
+
+ if (fimg2d_check_dma_sync(cmd)) {
+ ret = -EFAULT;
+ goto err_user;
+ }
+
+ /* add command node and increase ncmd */
+ spin_lock(&info->bltlock);
+ if (atomic_read(&info->suspended)) {
+ fimg2d_debug("fimg2d suspended, do sw fallback\n");
+ spin_unlock(&info->bltlock);
+ ret = -EFAULT;
+ goto err_user;
+ }
+ atomic_inc(&ctx->ncmd);
+ fimg2d_enqueue(&cmd->node, &info->cmd_q);
+ fimg2d_debug("ctx %p pgd %p ncmd(%d) seq_no(%u)\n",
+ cmd->ctx, (unsigned long *)cmd->ctx->mm->pgd,
+ atomic_read(&ctx->ncmd), cmd->seq_no);
+ spin_unlock(&info->bltlock);
+
+ return 0;
+
+err_user:
+ kfree(cmd);
+ return ret;
+}
+
+void fimg2d_add_context(struct fimg2d_control *info, struct fimg2d_context *ctx)
+{
+ atomic_set(&ctx->ncmd, 0);
+ init_waitqueue_head(&ctx->wait_q);
+
+ atomic_inc(&info->nctx);
+ fimg2d_debug("ctx %p nctx(%d)\n", ctx, atomic_read(&info->nctx));
+}
+
+void fimg2d_del_context(struct fimg2d_control *info, struct fimg2d_context *ctx)
+{
+ atomic_dec(&info->nctx);
+ fimg2d_debug("ctx %p nctx(%d)\n", ctx, atomic_read(&info->nctx));
+}
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.h b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.h
new file mode 100644
index 0000000..995303f
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.h
@@ -0,0 +1,42 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include "fimg2d.h"
+#include "fimg2d_helper.h"
+
+static inline void fimg2d_enqueue(struct list_head *node, struct list_head *q)
+{
+ list_add_tail(node, q);
+}
+
+static inline void fimg2d_dequeue(struct list_head *node)
+{
+ list_del(node);
+}
+
+static inline int fimg2d_queue_is_empty(struct list_head *q)
+{
+ return list_empty(q);
+}
+
+static inline struct fimg2d_bltcmd *fimg2d_get_first_command(struct fimg2d_control *info)
+{
+ if (list_empty(&info->cmd_q))
+ return NULL;
+ else
+ return list_first_entry(&info->cmd_q, struct fimg2d_bltcmd, node);
+}
+
+void fimg2d_add_context(struct fimg2d_control *info, struct fimg2d_context *ctx);
+void fimg2d_del_context(struct fimg2d_control *info, struct fimg2d_context *ctx);
+int fimg2d_add_command(struct fimg2d_control *info, struct fimg2d_context *ctx,
+ struct fimg2d_blit *blit, enum addr_space type);
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_drv.c b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_drv.c
new file mode 100644
index 0000000..6ae4d6e
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_drv.c
@@ -0,0 +1,515 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_drv.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <asm/cacheflush.h>
+#include <plat/cpu.h>
+#include <plat/fimg2d.h>
+#include <plat/s5p-sysmmu.h>
+#include <mach/dev.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include "fimg2d.h"
+#include "fimg2d_clk.h"
+#include "fimg2d_ctx.h"
+#include "fimg2d_helper.h"
+
+#define CTX_TIMEOUT msecs_to_jiffies(1000)
+
+static struct fimg2d_control *info;
+
+static void fimg2d_worker(struct work_struct *work)
+{
+ fimg2d_debug("start kernel thread\n");
+ info->blit(info);
+}
+
+static DECLARE_WORK(fimg2d_work, fimg2d_worker);
+
+/**
+ * @irq: irq number
+ * @dev_id: pointer to private data
+ */
+static irqreturn_t fimg2d_irq(int irq, void *dev_id)
+{
+ fimg2d_debug("irq\n");
+ info->stop(info);
+
+ return IRQ_HANDLED;
+}
+
+static int fimg2d_sysmmu_fault_handler(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
+ unsigned long pgtable_base, unsigned long fault_addr)
+{
+ struct fimg2d_bltcmd *cmd;
+
+ if (itype == SYSMMU_PAGEFAULT) {
+ printk(KERN_ERR "[%s] sysmmu page fault(0x%lx), pgd(0x%lx)\n",
+ __func__, fault_addr, pgtable_base);
+ } else {
+ printk(KERN_ERR "[%s] sysmmu interrupt "
+ "type(%d) pgd(0x%lx) addr(0x%lx)\n",
+ __func__, itype, pgtable_base, fault_addr);
+ }
+
+ cmd = fimg2d_get_first_command(info);
+ if (!cmd) {
+ printk(KERN_ERR "[%s] null command\n", __func__);
+ goto next;
+ }
+
+ if (cmd->ctx->mm->pgd != phys_to_virt(pgtable_base)) {
+ printk(KERN_ERR "[%s] pgtable base is different from current command\n",
+ __func__);
+ goto next;
+ }
+
+ fimg2d_dump_command(cmd);
+
+next:
+ fimg2d_clk_dump(info);
+ info->dump(info);
+
+ BUG();
+ return 0;
+}
+
+static void fimg2d_context_wait(struct fimg2d_context *ctx)
+{
+ while (atomic_read(&ctx->ncmd)) {
+ if (!wait_event_timeout(ctx->wait_q, !atomic_read(&ctx->ncmd), CTX_TIMEOUT)) {
+ atomic_set(&info->active, 1);
+ queue_work(info->work_q, &fimg2d_work);
+ printk(KERN_ERR "[%s] ctx %p cmd wait timeout\n", __func__, ctx);
+ if (info->err)
+ break;
+ }
+ }
+}
+
+static void fimg2d_request_bitblt(struct fimg2d_context *ctx)
+{
+ spin_lock(&info->bltlock);
+ if (!atomic_read(&info->active)) {
+ atomic_set(&info->active, 1);
+ fimg2d_debug("dispatch ctx %p to kernel thread\n", ctx);
+ queue_work(info->work_q, &fimg2d_work);
+ }
+ spin_unlock(&info->bltlock);
+ fimg2d_context_wait(ctx);
+}
+
+static int fimg2d_open(struct inode *inode, struct file *file)
+{
+ struct fimg2d_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ printk(KERN_ERR "[%s] not enough memory for ctx\n", __func__);
+ return -ENOMEM;
+ }
+ file->private_data = (void *)ctx;
+
+ ctx->mm = current->mm;
+ fimg2d_debug("ctx %p current pgd %p init_mm pgd %p\n",
+ ctx, (unsigned long *)ctx->mm->pgd,
+ (unsigned long *)init_mm.pgd);
+
+ fimg2d_add_context(info, ctx);
+ return 0;
+}
+
+static int fimg2d_release(struct inode *inode, struct file *file)
+{
+ struct fimg2d_context *ctx = file->private_data;
+
+ fimg2d_debug("ctx %p\n", ctx);
+ while (1) {
+ if (!atomic_read(&ctx->ncmd))
+ break;
+
+ mdelay(2);
+ }
+ fimg2d_del_context(info, ctx);
+
+ kfree(ctx);
+ return 0;
+}
+
+static int fimg2d_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+static unsigned int fimg2d_poll(struct file *file, struct poll_table_struct *wait)
+{
+ return 0;
+}
+
+static long fimg2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ struct fimg2d_context *ctx;
+ struct fimg2d_platdata *pdata;
+ struct fimg2d_blit blit;
+ struct fimg2d_version ver;
+ struct fimg2d_image dst;
+
+ ctx = file->private_data;
+ if (!ctx) {
+ printk(KERN_ERR "[%s] missing ctx\n", __func__);
+ return -EFAULT;
+ }
+
+ switch (cmd) {
+ case FIMG2D_BITBLT_BLIT:
+ if (info->err) {
+ printk(KERN_ERR "[%s] device error, do sw fallback\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(&blit, (void *)arg, sizeof(blit)))
+ return -EFAULT;
+ if (blit.dst)
+ if (copy_from_user(&dst, (void *)blit.dst, sizeof(dst)))
+ return -EFAULT;
+
+#ifdef CONFIG_BUSFREQ_OPP
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+ dev_lock(info->bus_dev, info->dev, 160160);
+#endif
+#endif
+ if ((blit.dst) && (dst.addr.type == ADDR_USER))
+ if (!down_write_trylock(&page_alloc_slow_rwsem))
+ ret = -EAGAIN;
+
+ if (ret != -EAGAIN)
+ ret = fimg2d_add_command(info, ctx, &blit, dst.addr.type);
+
+ if (!ret) {
+ fimg2d_request_bitblt(ctx);
+ }
+
+#ifdef PERF_PROFILE
+ perf_print(ctx, blit.seq_no);
+ perf_clear(ctx);
+#endif
+ if ((blit.dst) && (dst.addr.type == ADDR_USER) && ret != -EAGAIN)
+ up_write(&page_alloc_slow_rwsem);
+
+#ifdef CONFIG_BUSFREQ_OPP
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+ dev_unlock(info->bus_dev, info->dev);
+#endif
+#endif
+ break;
+
+ case FIMG2D_BITBLT_SYNC:
+ fimg2d_debug("FIMG2D_BITBLT_SYNC ctx: %p\n", ctx);
+ /* FIXME: */
+ break;
+
+ case FIMG2D_BITBLT_VERSION:
+ pdata = to_fimg2d_plat(info->dev);
+ ver.hw = pdata->hw_ver;
+ ver.sw = 0;
+ fimg2d_debug("fimg2d version, hw: 0x%x sw: 0x%x\n",
+ ver.hw, ver.sw);
+ if (copy_to_user((void *)arg, &ver, sizeof(ver)))
+ return -EFAULT;
+ break;
+
+ default:
+ printk(KERN_ERR "[%s] unknown ioctl\n", __func__);
+ ret = -EFAULT;
+ break;
+ }
+
+ return ret;
+}
+
+/* fops */
+static const struct file_operations fimg2d_fops = {
+ .owner = THIS_MODULE,
+ .open = fimg2d_open,
+ .release = fimg2d_release,
+ .mmap = fimg2d_mmap,
+ .poll = fimg2d_poll,
+ .unlocked_ioctl = fimg2d_ioctl,
+};
+
+/* miscdev */
+static struct miscdevice fimg2d_dev = {
+ .minor = FIMG2D_MINOR,
+ .name = "fimg2d",
+ .fops = &fimg2d_fops,
+};
+
+static int fimg2d_setup_controller(struct fimg2d_control *info)
+{
+ atomic_set(&info->suspended, 0);
+ atomic_set(&info->clkon, 0);
+ atomic_set(&info->busy, 0);
+ atomic_set(&info->nctx, 0);
+ atomic_set(&info->active, 0);
+
+ spin_lock_init(&info->bltlock);
+
+ INIT_LIST_HEAD(&info->cmd_q);
+ init_waitqueue_head(&info->wait_q);
+ fimg2d_register_ops(info);
+
+ info->work_q = create_singlethread_workqueue("kfimg2dd");
+ if (!info->work_q)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int fimg2d_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct fimg2d_platdata *pdata;
+ int ret;
+
+ pdata = to_fimg2d_plat(&pdev->dev);
+ if (!pdata) {
+ printk(KERN_ERR "FIMG2D failed to get platform data\n");
+ ret = -ENOMEM;
+ goto err_plat;
+ }
+
+ /* global structure */
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ printk(KERN_ERR "FIMG2D failed to allocate memory for controller\n");
+ ret = -ENOMEM;
+ goto err_plat;
+ }
+
+ /* setup global info */
+ ret = fimg2d_setup_controller(info);
+ if (ret) {
+ printk(KERN_ERR "FIMG2D failed to setup controller\n");
+ goto err_setup;
+ }
+ info->dev = &pdev->dev;
+
+ /* memory region */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ printk(KERN_ERR "FIMG2D failed to get resource\n");
+ ret = -ENOENT;
+ goto err_res;
+ }
+
+ info->mem = request_mem_region(res->start, resource_size(res),
+ pdev->name);
+ if (!info->mem) {
+ printk(KERN_ERR "FIMG2D failed to request memory region\n");
+ ret = -ENOMEM;
+ goto err_region;
+ }
+
+ /* ioremap */
+ info->regs = ioremap(res->start, resource_size(res));
+ if (!info->regs) {
+ printk(KERN_ERR "FIMG2D failed to ioremap for SFR\n");
+ ret = -ENOENT;
+ goto err_map;
+ }
+ fimg2d_debug("device name: %s base address: 0x%lx\n",
+ pdev->name, (unsigned long)res->start);
+
+ /* irq */
+ info->irq = platform_get_irq(pdev, 0);
+ if (!info->irq) {
+ printk(KERN_ERR "FIMG2D failed to get irq resource\n");
+ ret = -ENOENT;
+ goto err_map;
+ }
+ fimg2d_debug("irq: %d\n", info->irq);
+
+ ret = request_irq(info->irq, fimg2d_irq, IRQF_DISABLED, pdev->name, info);
+ if (ret) {
+ printk(KERN_ERR "FIMG2D failed to request irq\n");
+ ret = -ENOENT;
+ goto err_irq;
+ }
+
+ ret = fimg2d_clk_setup(info);
+ if (ret) {
+ printk(KERN_ERR "FIMG2D failed to setup clk\n");
+ ret = -ENOENT;
+ goto err_clk;
+ }
+
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_enable(info->dev);
+ fimg2d_debug("enable runtime pm\n");
+#endif
+
+#ifdef CONFIG_BUSFREQ_OPP
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+ /* To lock bus frequency in OPP mode */
+ info->bus_dev = dev_get("exynos-busfreq");
+#endif
+#endif
+ s5p_sysmmu_set_fault_handler(info->dev, fimg2d_sysmmu_fault_handler);
+ fimg2d_debug("register sysmmu page fault handler\n");
+
+ /* misc register */
+ ret = misc_register(&fimg2d_dev);
+ if (ret) {
+ printk(KERN_ERR "FIMG2D failed to register misc driver\n");
+ goto err_reg;
+ }
+
+ printk(KERN_INFO "Samsung Graphics 2D driver, (c) 2011 Samsung Electronics\n");
+ return 0;
+
+err_reg:
+ fimg2d_clk_release(info);
+
+err_clk:
+ free_irq(info->irq, NULL);
+
+err_irq:
+ iounmap(info->regs);
+
+err_map:
+ kfree(info->mem);
+
+err_region:
+ release_resource(info->mem);
+
+err_res:
+ destroy_workqueue(info->work_q);
+
+err_setup:
+ kfree(info);
+
+err_plat:
+ return ret;
+}
+
+static int fimg2d_remove(struct platform_device *pdev)
+{
+ free_irq(info->irq, NULL);
+
+ if (info->mem) {
+ iounmap(info->regs);
+ release_resource(info->mem);
+ kfree(info->mem);
+ }
+
+ destroy_workqueue(info->work_q);
+ misc_deregister(&fimg2d_dev);
+ kfree(info);
+
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_disable(&pdev->dev);
+ fimg2d_debug("disable runtime pm\n");
+#endif
+
+ return 0;
+}
+
+static int fimg2d_suspend(struct device *dev)
+{
+ fimg2d_debug("suspend... start\n");
+ atomic_set(&info->suspended, 1);
+ while (1) {
+ if (fimg2d_queue_is_empty(&info->cmd_q))
+ break;
+
+ mdelay(2);
+ }
+ fimg2d_debug("suspend... done\n");
+ return 0;
+}
+
+static int fimg2d_resume(struct device *dev)
+{
+ fimg2d_debug("resume... start\n");
+ atomic_set(&info->suspended, 0);
+ fimg2d_debug("resume... done\n");
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimg2d_runtime_suspend(struct device *dev)
+{
+ fimg2d_debug("runtime suspend... done\n");
+ return 0;
+}
+
+static int fimg2d_runtime_resume(struct device *dev)
+{
+ fimg2d_debug("runtime resume... done\n");
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops fimg2d_pm_ops = {
+ .suspend = fimg2d_suspend,
+ .resume = fimg2d_resume,
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = fimg2d_runtime_suspend,
+ .runtime_resume = fimg2d_runtime_resume,
+#endif
+};
+
+static struct platform_driver fimg2d_driver = {
+ .probe = fimg2d_probe,
+ .remove = fimg2d_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "s5p-fimg2d",
+ .pm = &fimg2d_pm_ops,
+ },
+};
+
+static int __init fimg2d_register(void)
+{
+ return platform_driver_register(&fimg2d_driver);
+}
+
+static void __exit fimg2d_unregister(void)
+{
+ platform_driver_unregister(&fimg2d_driver);
+}
+
+module_init(fimg2d_register);
+module_exit(fimg2d_unregister);
+
+MODULE_AUTHOR("Eunseok Choi <es10.choi@samsung.com>");
+MODULE_AUTHOR("Jinsung Yang <jsgood.yang@samsung.com>");
+MODULE_DESCRIPTION("Samsung Graphics 2D driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_helper.c b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_helper.c
new file mode 100644
index 0000000..9346d1b
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_helper.c
@@ -0,0 +1,182 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include "fimg2d.h"
+#include "fimg2d_cache.h"
+#include "fimg2d_helper.h"
+
+static int bpptable[MSK_FORMAT_END+1] = {
+ 32, 32, 16, 16, 16, 16, 16, 24, /* rgb */
+ 8, 8, 8, 8, 8, 0, /* yuv */
+ 1, 4, 8, 16, 16, 16, 32, 0, /* msk */
+};
+
+int pixel2offset(int pixel, enum color_format cf)
+{
+ return (pixel * bpptable[cf]) >> 3;
+}
+
+int width2bytes(int width, enum color_format cf)
+{
+ int bpp = bpptable[cf];
+
+ switch (bpp) {
+ case 1:
+ return (width + 7) >> 3;
+ case 4:
+ return (width + 1) >> 1;
+ case 8:
+ case 16:
+ case 24:
+ case 32:
+ return width * bpp >> 3;
+ default:
+ return 0;
+ }
+}
+
+void perf_print(struct fimg2d_context *ctx, int seq_no)
+{
+ int i;
+ long time;
+ struct fimg2d_perf *perf;
+
+ for (i = 0; i < MAX_PERF_DESCS; i++) {
+ perf = &ctx->perf[i];
+ if (perf->valid != 0x11)
+ continue;
+ time = elapsed_usec(ctx, i);
+ printk(KERN_INFO "[FIMG2D PERF (%8s)] ctx(0x%08x) seq(%d) "
+ "%8ld usec\n",
+ perfname(i), (unsigned int)ctx, seq_no, time);
+ }
+ printk(KERN_INFO "[FIMG2D PERF **]\n");
+}
+
+void fimg2d_print_params(struct fimg2d_blit __user *u)
+{
+ int i;
+ struct fimg2d_param *p = &u->param;
+ struct fimg2d_image *img, *buf[MAX_IMAGES] = image_table(u);
+ struct fimg2d_rect *r;
+
+ printk(KERN_INFO "op: %d\n", u->op);
+ printk(KERN_INFO "solid color: 0x%lx\n", p->solid_color);
+ printk(KERN_INFO "g_alpha: 0x%x\n", p->g_alpha);
+ printk(KERN_INFO "premultiplied: %d\n", p->premult);
+ printk(KERN_INFO "dither: %d\n", p->dither);
+ printk(KERN_INFO "rotate: %d\n", p->rotate);
+ printk(KERN_INFO "repeat mode: %d, pad color: 0x%lx\n",
+ p->repeat.mode, p->repeat.pad_color);
+ printk(KERN_INFO "bluescreen mode: %d, bs_color: 0x%lx "
+ "bg_color: 0x%lx\n",
+ p->bluscr.mode,
+ p->bluscr.bs_color, p->bluscr.bg_color);
+ printk(KERN_INFO "scaling mode: %d, src:%d,%d dst:%d,%d\n",
+ p->scaling.mode,
+ p->scaling.src_w, p->scaling.src_h,
+ p->scaling.dst_w, p->scaling.dst_h);
+ printk(KERN_INFO "clipping mode: %d, LT(%d,%d) RB(%d,%d)\n",
+ p->clipping.enable,
+ p->clipping.x1, p->clipping.y1,
+ p->clipping.x2, p->clipping.y2);
+
+ for (i = 0; i < MAX_IMAGES; i++) {
+ if (!buf[i])
+ continue;
+
+ img = buf[i];
+ r = &img->rect;
+
+ printk(KERN_INFO "%s type: %d addr: 0x%lx\n",
+ imagename(i), img->addr.type,
+ img->addr.start);
+ printk(KERN_INFO "%s width: %d height: %d "
+ "stride: %d order: %d format: %d\n",
+ imagename(i), img->width, img->height,
+ img->stride, img->order, img->fmt);
+ printk(KERN_INFO "%s rect LT(%d,%d) RB(%d,%d) WH(%d,%d)\n",
+ imagename(i), r->x1, r->y1, r->x2, r->y2,
+ rect_w(r), rect_h(r));
+ printk(KERN_INFO "%s cacheopr: %d\n",
+ imagename(i), img->need_cacheopr);
+ }
+}
+
+void fimg2d_dump_command(struct fimg2d_bltcmd *cmd)
+{
+ int i;
+ struct fimg2d_param *p = &cmd->param;
+ struct fimg2d_image *img;
+ struct fimg2d_rect *r;
+ struct fimg2d_dma *c;
+
+ printk(KERN_INFO " op: %d\n", cmd->op);
+ printk(KERN_INFO " solid color: 0x%lx\n", p->solid_color);
+ printk(KERN_INFO " g_alpha: 0x%x\n", p->g_alpha);
+ printk(KERN_INFO " premultiplied: %d\n", p->premult);
+ if (p->dither)
+ printk(KERN_INFO " dither: %d\n", p->dither);
+ if (p->rotate)
+ printk(KERN_INFO " rotate: %d\n", p->rotate);
+ if (p->repeat.mode) {
+ printk(KERN_INFO " repeat mode: %d, pad color: 0x%lx\n",
+ p->repeat.mode, p->repeat.pad_color);
+ }
+ if (p->bluscr.mode) {
+ printk(KERN_INFO " bluescreen mode: %d, bs_color: 0x%lx "
+ "bg_color: 0x%lx\n",
+ p->bluscr.mode, p->bluscr.bs_color,
+ p->bluscr.bg_color);
+ }
+ if (p->scaling.mode) {
+ printk(KERN_INFO " scaling mode: %d, s:%d,%d d:%d,%d\n",
+ p->scaling.mode,
+ p->scaling.src_w, p->scaling.src_h,
+ p->scaling.dst_w, p->scaling.dst_h);
+ }
+ if (p->clipping.enable) {
+ printk(KERN_INFO " clipping mode: %d, LT(%d,%d) RB(%d,%d)\n",
+ p->clipping.enable,
+ p->clipping.x1, p->clipping.y1,
+ p->clipping.x2, p->clipping.y2);
+ }
+
+ for (i = 0; i < MAX_IMAGES; i++) {
+ img = &cmd->image[i];
+ if (!img->addr.type)
+ continue;
+
+ c = &cmd->dma[i];
+ r = &img->rect;
+
+ printk(KERN_INFO " %s type: %d addr: 0x%lx\n",
+ imagename(i), img->addr.type,
+ img->addr.start);
+ printk(KERN_INFO " %s width: %d height: %d "
+ "stride: %d order: %d format: %d\n",
+ imagename(i), img->width, img->height,
+ img->stride, img->order, img->fmt);
+ printk(KERN_INFO " %s rect LT(%d,%d) RB(%d,%d) WH(%d,%d)\n",
+ imagename(i), r->x1, r->y1, r->x2, r->y2,
+ rect_w(r), rect_h(r));
+ printk(KERN_INFO " %s dma addr: 0x%lx "
+ "size: 0x%x cached: 0x%x\n",
+ imagename(i), c->addr, c->size, c->cached);
+ }
+
+ if (cmd->dma_all) {
+ printk(KERN_INFO " dma size all: 0x%x bytes\n", cmd->dma_all);
+ printk(KERN_INFO " ctx: %p seq_no(%u) sync(%d)\n",
+ cmd->ctx, cmd->seq_no, cmd->sync);
+ }
+}
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_helper.h b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_helper.h
new file mode 100644
index 0000000..ebe9eae
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_helper.h
@@ -0,0 +1,151 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D_HELPER_H
+#define __FIMG2D_HELPER_H
+
+#include <linux/sched.h>
+#include "fimg2d.h"
+
+static inline char *perfname(enum perf_desc id)
+{
+ switch (id) {
+ case PERF_INNERCACHE:
+ return "INNER$";
+ case PERF_OUTERCACHE:
+ return "OUTER$";
+ case PERF_BLIT:
+ return "BITBLT";
+ default:
+ return "";
+ }
+}
+
+static inline char *imagename(enum image_object image)
+{
+ switch (image) {
+ case IDST:
+ return "DST";
+ case ISRC:
+ return "SRC";
+ case IMSK:
+ return "MSK";
+ default:
+ return NULL;
+ }
+}
+
+static inline int is_opaque(enum color_format fmt)
+{
+ switch (fmt) {
+ case CF_ARGB_8888:
+ case CF_ARGB_1555:
+ case CF_ARGB_4444:
+ return 0;
+
+ case CF_XRGB_8888:
+ case CF_XRGB_1555:
+ case CF_XRGB_4444:
+ return 1;
+
+ case CF_RGB_565:
+ case CF_RGB_888:
+ return 1;
+
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+static inline unsigned int rect_w(struct fimg2d_rect *r)
+{
+ return r->x2 - r->x1;
+}
+
+static inline unsigned int rect_h(struct fimg2d_rect *r)
+{
+ return r->y2 - r->y1;
+}
+
+static inline long elapsed_usec(struct fimg2d_context *ctx, enum perf_desc desc)
+{
+ struct fimg2d_perf *perf = &ctx->perf[desc];
+#ifdef PERF_TIMEVAL
+ struct timeval *start = &perf->start;
+ struct timeval *end = &perf->end;
+ long sec, usec;
+
+ sec = end->tv_sec - start->tv_sec;
+ if (end->tv_usec >= start->tv_usec) {
+ usec = end->tv_usec - start->tv_usec;
+ } else {
+ usec = end->tv_usec + 1000000 - start->tv_usec;
+ sec--;
+ }
+ return sec * 1000000 + usec;
+#else
+ return (long)(perf->end - perf->start)/1000;
+#endif
+}
+
+static inline void perf_start(struct fimg2d_context *ctx, enum perf_desc desc)
+{
+ struct fimg2d_perf *perf = &ctx->perf[desc];
+
+ if (!perf->valid) {
+#ifdef PERF_TIMEVAL
+ struct timeval time;
+ do_gettimeofday(&time);
+ perf->start = time;
+#else
+ long time;
+ perf->start = sched_clock();
+ time = perf->start / 1000;
+#endif
+ perf->valid = 0x01;
+ }
+}
+
+static inline void perf_end(struct fimg2d_context *ctx, enum perf_desc desc)
+{
+ struct fimg2d_perf *perf = &ctx->perf[desc];
+
+ if (perf->valid == 0x01) {
+#ifdef PERF_TIMEVAL
+ struct timeval time;
+ do_gettimeofday(&time);
+ perf->end = time;
+#else
+ long time;
+ perf->end = sched_clock();
+ time = perf->end / 1000;
+#endif
+ perf->valid |= 0x10;
+ }
+}
+
+static inline void perf_clear(struct fimg2d_context *ctx)
+{
+ int i;
+ for (i = 0; i < MAX_PERF_DESCS; i++)
+ ctx->perf[i].valid = 0;
+}
+
+int pixel2offset(int pixel, enum color_format cf);
+int width2bytes(int width, enum color_format cf);
+void perf_print(struct fimg2d_context *ctx, int seq_no);
+void fimg2d_print_params(struct fimg2d_blit __user *u);
+void fimg2d_dump_command(struct fimg2d_bltcmd *cmd);
+
+#endif /* __FIMG2D_HELPER_H */
diff --git a/drivers/media/video/samsung/fimg2d4x/Kconfig b/drivers/media/video/samsung/fimg2d4x/Kconfig
new file mode 100644
index 0000000..fc6430c
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/Kconfig
@@ -0,0 +1,23 @@
+# drivers/media/video/samsung/fimg2d4x/Kconfig
+#
+# Copyright (c) 2011 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+#
+# Licensed under GPLv2
+
+config VIDEO_FIMG2D4X
+ bool "Samsung Graphics 2D Driver"
+ select VIDEO_FIMG2D
+ depends on VIDEO_SAMSUNG && (CPU_EXYNOS4212 || CPU_EXYNOS4412 || CPU_EXYNOS5210 || CPU_EXYNOS5250)
+ default n
+ ---help---
+ This is a graphics 2D (FIMG2D 4.x) driver for Samsung ARM based SoC.
+
+config VIDEO_FIMG2D4X_DEBUG
+ bool "Enables FIMG2D debug messages"
+ select VIDEO_FIMG2D_DEBUG
+ depends on VIDEO_FIMG2D4X
+ default n
+ ---help---
+ This enables FIMG2D driver debug messages.
+
diff --git a/drivers/media/video/samsung/fimg2d4x/Makefile b/drivers/media/video/samsung/fimg2d4x/Makefile
new file mode 100644
index 0000000..40b93a9
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/Makefile
@@ -0,0 +1,18 @@
+# drivers/media/video/samsung/fimg2d4x/Makefile
+#
+# Copyright (c) 2011 Samsung Electronics Co., Ltd.
+# http://www.samsung.com/
+#
+# Licensed under GPLv2
+
+obj-y :=
+obj-m :=
+obj-n :=
+obj- :=
+
+obj-$(CONFIG_VIDEO_FIMG2D) += fimg2d_drv.o fimg2d_ctx.o fimg2d_cache.o fimg2d_clk.o fimg2d_helper.o
+obj-$(CONFIG_VIDEO_FIMG2D4X) += fimg2d4x_blt.o fimg2d4x_hw.o
+
+ifeq ($(CONFIG_VIDEO_FIMG2D_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d.h b/drivers/media/video/samsung/fimg2d4x/fimg2d.h
new file mode 100644
index 0000000..250dffd
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d.h
@@ -0,0 +1,514 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D_H
+#define __FIMG2D_H __FILE__
+
+#ifdef __KERNEL__
+
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#define FIMG2D_MINOR (240)
+#define to_fimg2d_plat(d) (to_platform_device(d)->dev.platform_data)
+
+#ifdef CONFIG_VIDEO_FIMG2D_DEBUG
+#define fimg2d_debug(fmt, arg...) printk(KERN_INFO "[%s] " fmt, __func__, ## arg)
+#else
+#define fimg2d_debug(fmt, arg...) do { } while (0)
+#endif
+
+#endif /* __KERNEL__ */
+
+/* ioctl commands */
+#define FIMG2D_IOCTL_MAGIC 'F'
+#define FIMG2D_BITBLT_BLIT _IOWR(FIMG2D_IOCTL_MAGIC, 0, struct fimg2d_blit)
+#define FIMG2D_BITBLT_SYNC _IOW(FIMG2D_IOCTL_MAGIC, 1, int)
+#define FIMG2D_BITBLT_VERSION _IOR(FIMG2D_IOCTL_MAGIC, 2, struct fimg2d_version)
+
+struct fimg2d_version {
+ unsigned int hw;
+ unsigned int sw;
+};
+
+/**
+ * @BLIT_SYNC: sync mode, to wait for blit done irq
+ * @BLIT_ASYNC: async mode, not to wait for blit done irq
+ *
+ */
+enum blit_sync {
+ BLIT_SYNC,
+ BLIT_ASYNC,
+};
+
+/**
+ * @ADDR_PHYS: physical address
+ * @ADDR_USER: user virtual address (physically Non-contiguous)
+ * @ADDR_USER_CONTIG: user virtual address (physically Contiguous)
+ * @ADDR_DEVICE: specific device virtual address
+ */
+enum addr_space {
+ ADDR_NONE,
+ ADDR_PHYS,
+ ADDR_KERN,
+ ADDR_USER,
+ ADDR_USER_CONTIG,
+ ADDR_DEVICE,
+};
+
+/**
+ * Pixel order complies with little-endian style
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum pixel_order {
+ AX_RGB = 0,
+ RGB_AX,
+ AX_BGR,
+ BGR_AX,
+ ARGB_ORDER_END,
+
+ P1_CRY1CBY0,
+ P1_CBY1CRY0,
+ P1_Y1CRY0CB,
+ P1_Y1CBY0CR,
+ P1_ORDER_END,
+
+ P2_CRCB,
+ P2_CBCR,
+ P2_ORDER_END,
+};
+
+/**
+ * DO NOT CHANGE THIS ORDER
+ */
+enum color_format {
+ CF_XRGB_8888 = 0,
+ CF_ARGB_8888,
+ CF_RGB_565,
+ CF_XRGB_1555,
+ CF_ARGB_1555,
+ CF_XRGB_4444,
+ CF_ARGB_4444,
+ CF_RGB_888,
+ CF_YCBCR_444,
+ CF_YCBCR_422,
+ CF_YCBCR_420,
+ CF_A8,
+ CF_L8,
+ SRC_DST_FORMAT_END,
+
+ CF_MSK_1BIT,
+ CF_MSK_4BIT,
+ CF_MSK_8BIT,
+ CF_MSK_16BIT_565,
+ CF_MSK_16BIT_1555,
+ CF_MSK_16BIT_4444,
+ CF_MSK_32BIT_8888,
+ MSK_FORMAT_END,
+};
+
+enum rotation {
+ ORIGIN,
+ ROT_90, /* clockwise */
+ ROT_180,
+ ROT_270,
+ XFLIP, /* x-axis flip */
+ YFLIP, /* y-axis flip */
+};
+
+/**
+ * @NO_REPEAT: no effect
+ * @REPEAT_NORMAL: repeat horizontally and vertically
+ * @REPEAT_PAD: pad with pad color
+ * @REPEAT_REFLECT: reflect horizontally and vertically
+ * @REPEAT_CLAMP: pad with edge color of original image
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum repeat {
+ NO_REPEAT = 0,
+ REPEAT_NORMAL, /* default setting */
+ REPEAT_PAD,
+ REPEAT_REFLECT, REPEAT_MIRROR = REPEAT_REFLECT,
+ REPEAT_CLAMP,
+};
+
+enum scaling {
+ NO_SCALING,
+ SCALING_NEAREST,
+ SCALING_BILINEAR,
+};
+
+/**
+ * @SCALING_PIXELS: ratio in pixels
+ * @SCALING_RATIO: ratio in fixed point 16
+ */
+enum scaling_factor {
+ SCALING_PIXELS,
+ SCALING_RATIO,
+};
+
+/**
+ * premultiplied alpha
+ */
+enum premultiplied {
+ PREMULTIPLIED,
+ NON_PREMULTIPLIED,
+};
+
+/**
+ * @TRANSP: discard bluescreen color
+ * @BLUSCR: replace bluescreen color with background color
+ */
+enum bluescreen {
+ OPAQUE,
+ TRANSP,
+ BLUSCR,
+};
+
+/**
+ * DO NOT CHANGE THIS ORDER
+ */
+enum blit_op {
+ BLIT_OP_SOLID_FILL = 0,
+
+ BLIT_OP_CLR,
+ BLIT_OP_SRC, BLIT_OP_SRC_COPY = BLIT_OP_SRC,
+ BLIT_OP_DST,
+ BLIT_OP_SRC_OVER,
+ BLIT_OP_DST_OVER, BLIT_OP_OVER_REV = BLIT_OP_DST_OVER,
+ BLIT_OP_SRC_IN,
+ BLIT_OP_DST_IN, BLIT_OP_IN_REV = BLIT_OP_DST_IN,
+ BLIT_OP_SRC_OUT,
+ BLIT_OP_DST_OUT, BLIT_OP_OUT_REV = BLIT_OP_DST_OUT,
+ BLIT_OP_SRC_ATOP,
+ BLIT_OP_DST_ATOP, BLIT_OP_ATOP_REV = BLIT_OP_DST_ATOP,
+ BLIT_OP_XOR,
+
+ BLIT_OP_ADD,
+ BLIT_OP_MULTIPLY,
+ BLIT_OP_SCREEN,
+ BLIT_OP_DARKEN,
+ BLIT_OP_LIGHTEN,
+
+ BLIT_OP_DISJ_SRC_OVER,
+ BLIT_OP_DISJ_DST_OVER, BLIT_OP_SATURATE = BLIT_OP_DISJ_DST_OVER,
+ BLIT_OP_DISJ_SRC_IN,
+ BLIT_OP_DISJ_DST_IN, BLIT_OP_DISJ_IN_REV = BLIT_OP_DISJ_DST_IN,
+ BLIT_OP_DISJ_SRC_OUT,
+ BLIT_OP_DISJ_DST_OUT, BLIT_OP_DISJ_OUT_REV = BLIT_OP_DISJ_DST_OUT,
+ BLIT_OP_DISJ_SRC_ATOP,
+ BLIT_OP_DISJ_DST_ATOP, BLIT_OP_DISJ_ATOP_REV = BLIT_OP_DISJ_DST_ATOP,
+ BLIT_OP_DISJ_XOR,
+
+ BLIT_OP_CONJ_SRC_OVER,
+ BLIT_OP_CONJ_DST_OVER, BLIT_OP_CONJ_OVER_REV = BLIT_OP_CONJ_DST_OVER,
+ BLIT_OP_CONJ_SRC_IN,
+ BLIT_OP_CONJ_DST_IN, BLIT_OP_CONJ_IN_REV = BLIT_OP_CONJ_DST_IN,
+ BLIT_OP_CONJ_SRC_OUT,
+ BLIT_OP_CONJ_DST_OUT, BLIT_OP_CONJ_OUT_REV = BLIT_OP_CONJ_DST_OUT,
+ BLIT_OP_CONJ_SRC_ATOP,
+ BLIT_OP_CONJ_DST_ATOP, BLIT_OP_CONJ_ATOP_REV = BLIT_OP_CONJ_DST_ATOP,
+ BLIT_OP_CONJ_XOR,
+
+ /* user select coefficient manually */
+ BLIT_OP_USER_COEFF,
+
+ BLIT_OP_USER_SRC_GA,
+
+ /* Add new operation type here */
+
+ /* end of blit operation */
+ BLIT_OP_END,
+};
+#define MAX_FIMG2D_BLIT_OP (int)BLIT_OP_END
+
+#ifdef __KERNEL__
+
+/**
+ * @TMP: temporary buffer for 2-step blit at a single command
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum image_object {
+ IMAGE_SRC = 0,
+ IMAGE_MSK,
+ IMAGE_TMP,
+ IMAGE_DST,
+ IMAGE_END,
+};
+#define MAX_IMAGES IMAGE_END
+#define ISRC IMAGE_SRC
+#define IMSK IMAGE_MSK
+#define ITMP IMAGE_TMP
+#define IDST IMAGE_DST
+#define image_table(u) \
+ { \
+ (u)->src, \
+ (u)->msk, \
+ (u)->tmp, \
+ (u)->dst \
+ }
+
+/**
+ * @size: dma size of image
+ * @cached: cached dma size of image
+ */
+struct fimg2d_dma {
+ unsigned long addr;
+ size_t size;
+ size_t cached;
+};
+
+struct fimg2d_dma_group {
+ struct fimg2d_dma base;
+ struct fimg2d_dma plane2;
+};
+
+#endif /* __KERNEL__ */
+
+/**
+ * @start: start address or unique id of image
+ */
+struct fimg2d_addr {
+ enum addr_space type;
+ unsigned long start;
+};
+
+struct fimg2d_rect {
+ int x1;
+ int y1;
+ int x2; /* x1 + width */
+ int y2; /* y1 + height */
+};
+
+/**
+ * pixels can be different from src, dst or clip rect
+ */
+struct fimg2d_scale {
+ enum scaling mode;
+
+ /* ratio in pixels */
+ int src_w, src_h;
+ int dst_w, dst_h;
+};
+
+struct fimg2d_clip {
+ bool enable;
+ int x1;
+ int y1;
+ int x2; /* x1 + width */
+ int y2; /* y1 + height */
+};
+
+struct fimg2d_repeat {
+ enum repeat mode;
+ unsigned long pad_color;
+};
+
+/**
+ * @bg_color: bg_color is valid only if bluescreen mode is BLUSCR.
+ */
+struct fimg2d_bluscr {
+ enum bluescreen mode;
+ unsigned long bs_color;
+ unsigned long bg_color;
+};
+
+/**
+ * @plane2: address info for CbCr in YCbCr 2plane mode
+ * @rect: crop/clip rect
+ * @need_cacheopr: true if cache coherency is required
+ */
+struct fimg2d_image {
+ int width;
+ int height;
+ int stride;
+ enum pixel_order order;
+ enum color_format fmt;
+ struct fimg2d_addr addr;
+ struct fimg2d_addr plane2;
+ struct fimg2d_rect rect;
+ bool need_cacheopr;
+};
+
+/**
+ * @solid_color:
+ * src color instead of src image
+ * color format and order must be ARGB8888(A is MSB).
+ * @g_alpha: global(constant) alpha. 0xff is opaque, 0 is transparent
+ * @dither: dithering
+ * @rotate: rotation degree in clockwise
+ * @premult: alpha premultiplied mode for read & write
+ * @scaling: common scaling info for src and mask image.
+ * @repeat: repeat type (tile mode)
+ * @bluscr: blue screen and transparent mode
+ * @clipping: clipping rect within dst rect
+ */
+struct fimg2d_param {
+ unsigned long solid_color;
+ unsigned char g_alpha;
+ bool dither;
+ enum rotation rotate;
+ enum premultiplied premult;
+ struct fimg2d_scale scaling;
+ struct fimg2d_repeat repeat;
+ struct fimg2d_bluscr bluscr;
+ struct fimg2d_clip clipping;
+};
+
+/**
+ * @op: blit operation mode
+ * @src: set when using src image
+ * @msk: set when using mask image
+ * @tmp: set when using 2-step blit at a single command
+ * @dst: dst must not be null
+ * * tmp image must be the same to dst except memory address
+ * @seq_no: user debugging info.
+ * for example, user can set sequence number or pid.
+ */
+struct fimg2d_blit {
+ enum blit_op op;
+ struct fimg2d_param param;
+ struct fimg2d_image *src;
+ struct fimg2d_image *msk;
+ struct fimg2d_image *tmp;
+ struct fimg2d_image *dst;
+ enum blit_sync sync;
+ unsigned int seq_no;
+};
+
+#ifdef __KERNEL__
+
+/**
+ * Enables definition to estimate performance.
+ * These debug codes includes printk, so perf
+ * data are unreliable under multi instance environment
+ */
+#undef PERF_PROFILE
+#define PERF_TIMEVAL
+
+enum perf_desc {
+ PERF_INNERCACHE,
+ PERF_OUTERCACHE,
+ PERF_BLIT,
+ PERF_END
+};
+#define MAX_PERF_DESCS PERF_END
+
+struct fimg2d_perf {
+ int valid;
+#ifdef PERF_TIMEVAL
+ struct timeval start;
+ struct timeval end;
+#else
+ unsigned long long start;
+ unsigned long long end;
+#endif
+};
+
+/**
+ * @pgd: base address of arm mmu pagetable
+ * @ncmd: request count in blit command queue
+ * @wait_q: context wait queue head
+*/
+struct fimg2d_context {
+ struct mm_struct *mm;
+ atomic_t ncmd;
+ wait_queue_head_t wait_q;
+ struct fimg2d_perf perf[MAX_PERF_DESCS];
+};
+
+/**
+ * @op: blit operation mode
+ * @sync: sync/async blit mode (currently support sync mode only)
+ * @image: array of image object.
+ * [0] is for src image
+ * [1] is for mask image
+ * [2] is for temporary buffer
+ * set when using 2-step blit at a single command
+ * [3] is for dst, dst must not be null
+ * * tmp image must be the same to dst except memory address
+ * @seq_no: user debugging info.
+ * for example, user can set sequence number or pid.
+ * @dma_all: total dma size of src, msk, dst
+ * @dma: array of dma info for each src, msk, tmp and dst
+ * @ctx: context is created when user open fimg2d device.
+ * @node: list head of blit command queue
+ */
+struct fimg2d_bltcmd {
+ enum blit_op op;
+ enum blit_sync sync;
+ unsigned int seq_no;
+ size_t dma_all;
+ struct fimg2d_param param;
+ struct fimg2d_image image[MAX_IMAGES];
+ struct fimg2d_dma_group dma[MAX_IMAGES];
+ struct fimg2d_context *ctx;
+ struct list_head node;
+};
+
+/**
+ * @suspended: in suspend mode
+ * @clkon: power status for runtime pm
+ * @mem: resource platform device
+ * @regs: base address of hardware
+ * @dev: pointer to device struct
+ * @err: true if hardware is timed out while blitting
+ * @irq: irq number
+ * @nctx: context count
+ * @busy: 1 if hardware is running
+ * @bltlock: spinlock for blit
+ * @wait_q: blit wait queue head
+ * @cmd_q: blit command queue
+ * @workqueue: workqueue_struct for kfimg2dd
+*/
+struct fimg2d_control {
+ atomic_t suspended;
+ atomic_t clkon;
+ struct clk *clock;
+ struct device *dev;
+ struct device *bus_dev;
+ struct resource *mem;
+ void __iomem *regs;
+
+ bool err;
+ int irq;
+ atomic_t nctx;
+ atomic_t busy;
+ atomic_t active;
+ spinlock_t bltlock;
+ wait_queue_head_t wait_q;
+ struct list_head cmd_q;
+ struct workqueue_struct *work_q;
+
+ void (*blit)(struct fimg2d_control *info);
+ int (*configure)(struct fimg2d_control *info,
+ struct fimg2d_bltcmd *cmd);
+ void (*run)(struct fimg2d_control *info);
+ void (*stop)(struct fimg2d_control *info);
+ void (*dump)(struct fimg2d_control *info);
+ void (*finalize)(struct fimg2d_control *info);
+};
+
+int fimg2d_register_ops(struct fimg2d_control *info);
+
+#endif /* __KERNEL__ */
+
+#endif /* __FIMG2D_H */
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d4x.h b/drivers/media/video/samsung/fimg2d4x/fimg2d4x.h
new file mode 100644
index 0000000..0b9128f
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d4x.h
@@ -0,0 +1,225 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d4x.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D4X_H
+#define __FIMG2D4X_H __FILE__
+
+#include "fimg2d4x_regs.h"
+
+/**
+ * @IMG_MEMORY: read from external memory
+ * @IMG_FGCOLOR: read from foreground color
+ * @IMG_BGCOLOR: read from background color
+ */
+enum image_sel {
+ IMG_MEMORY,
+ IMG_FGCOLOR,
+ IMG_BGCOLOR,
+};
+
+/**
+ * @FORWARD_ADDRESSING: read data in forward direction
+ * @REVERSE_ADDRESSING: read data in reverse direction
+ */
+enum addressing {
+ FORWARD_ADDRESSING,
+ REVERSE_ADDRESSING,
+};
+
+/**
+ * The other addressing modes can cause data corruption,
+ * if src and dst are overlapped.
+ */
+enum dir_addressing {
+ UP_FORWARD,
+ DOWN_REVERSE,
+ LEFT_FORWARD,
+ RIGHT_REVERSE,
+ VALID_ADDRESSING_END,
+};
+
+/**
+ * DO NOT CHANGE THIS ORDER
+ */
+enum max_burst_len {
+ MAX_BURST_2 = 0,
+ MAX_BURST_4,
+ MAX_BURST_8, /* initial value */
+ MAX_BURST_16,
+};
+
+#define DEFAULT_MAX_BURST_LEN MAX_BURST_8
+
+/**
+ * mask operation type for 16-bpp, 32-bpp mask image
+ * @MSK_ALPHA: use mask alpha for src argb
+ * @MSK_ARGB: use mask argb for src argb
+ * @MSK_MIXED: use mask alpha for src alpha and mask rgb for src rgb
+ */
+enum mask_opr {
+ MSK_ALPHA, /* initial value */
+ MSK_ARGB,
+ MSK_MIXED,
+};
+
+#define DEFAULT_MSK_OPR MSK_ALPHA
+
+/**
+ * @ALPHA_PERPIXEL: perpixel alpha
+ * @ALPHA_PERPIXEL_SUM_GLOBAL: perpixel + global
+ * @ALPHA_PERPIXEL_MUL_GLOBAL: perpixel x global
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum alpha_opr {
+ ALPHA_PERPIXEL = 0, /* initial value */
+ ALPHA_PERPIXEL_SUM_GLOBAL,
+ ALPHA_PERPIXEL_MUL_GLOBAL,
+};
+
+#define DEFAULT_ALPHA_OPR ALPHA_PERPIXEL
+
+/**
+ * sampling policy at boundary for bilinear scaling
+ * @FOLLOW_REPEAT_MODE: sampling 1 or 2 pixels within bounds
+ * @IGNORE_REPEAT_MODE: sampling 4 pixels according to repeat mode
+ */
+enum boundary_sampling_policy {
+ FOLLOW_REPEAT_MODE,
+ IGNORE_REPEAT_MODE,
+};
+
+#define DEFAULT_BOUNDARY_SAMPLING FOLLOW_REPEAT_MODE
+
+/**
+ * @COEFF_ONE: 1
+ * @COEFF_ZERO: 0
+ * @COEFF_SA: src alpha
+ * @COEFF_SC: src color
+ * @COEFF_DA: dst alpha
+ * @COEFF_DC: dst color
+ * @COEFF_GA: global(constant) alpha
+ * @COEFF_GC: global(constant) color
+ * @COEFF_DISJ_S:
+ * @COEFF_DISJ_D:
+ * @COEFF_CONJ_S:
+ * @COEFF_CONJ_D:
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum fimg2d_coeff {
+ COEFF_ONE = 0,
+ COEFF_ZERO,
+ COEFF_SA,
+ COEFF_SC,
+ COEFF_DA,
+ COEFF_DC,
+ COEFF_GA,
+ COEFF_GC,
+ COEFF_DISJ_S,
+ COEFF_DISJ_D,
+ COEFF_CONJ_S,
+ COEFF_CONJ_D,
+};
+
+/**
+ * @PREMULT_ROUND_0: (A*B) >> 8
+ * @PREMULT_ROUND_1: ((A+1)*B) >> 8
+ * @PREMULT_ROUND_2: ((A+(A>>7))*B) >> 8
+ * @PREMULT_ROUND_3: TMP = A*B + 0x80, (TMP + (TMP >> 8)) >> 8
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum premult_round {
+ PREMULT_ROUND_0 = 0,
+ PREMULT_ROUND_1,
+ PREMULT_ROUND_2,
+ PREMULT_ROUND_3, /* initial value */
+};
+
+#define DEFAULT_PREMULT_ROUND_MODE PREMULT_ROUND_3
+
+/**
+ * @BLEND_ROUND_0: ((A+1)*B) >> 8
+ * @BLEND_ROUND_1: ((A+(A>>7))*B) >> 8
+ * @BLEND_ROUND_2: TMP = A*B + 0x80, (TMP + (TMP >> 8)) >> 8
+ * @BLEND_ROUND_3: TMP = (A*B + C*D + 0x80), (TMP + (TMP >> 8)) >> 8
+ *
+ * DO NOT CHANGE THIS ORDER
+ */
+enum blend_round {
+ BLEND_ROUND_0 = 0,
+ BLEND_ROUND_1,
+ BLEND_ROUND_2,
+ BLEND_ROUND_3, /* initial value */
+};
+
+#define DEFAULT_BLEND_ROUND_MODE BLEND_ROUND_3
+
+struct fimg2d_blend_coeff {
+ bool s_coeff_inv;
+ enum fimg2d_coeff s_coeff;
+ bool d_coeff_inv;
+ enum fimg2d_coeff d_coeff;
+};
+
+void fimg2d4x_reset(struct fimg2d_control *info);
+void fimg2d4x_enable_irq(struct fimg2d_control *info);
+void fimg2d4x_disable_irq(struct fimg2d_control *info);
+void fimg2d4x_clear_irq(struct fimg2d_control *info);
+int fimg2d4x_is_blit_done(struct fimg2d_control *info);
+int fimg2d4x_blit_done_status(struct fimg2d_control *info);
+void fimg2d4x_start_blit(struct fimg2d_control *info);
+void fimg2d4x_set_max_burst_length(struct fimg2d_control *info,
+ enum max_burst_len len);
+void fimg2d4x_set_src_type(struct fimg2d_control *info, enum image_sel type);
+void fimg2d4x_set_src_image(struct fimg2d_control *info,
+ struct fimg2d_image *s);
+void fimg2d4x_set_src_rect(struct fimg2d_control *info, struct fimg2d_rect *r);
+void fimg2d4x_set_dst_type(struct fimg2d_control *info, enum image_sel type);
+void fimg2d4x_set_dst_image(struct fimg2d_control *info,
+ struct fimg2d_image *d);
+void fimg2d4x_set_dst_rect(struct fimg2d_control *info, struct fimg2d_rect *r);
+void fimg2d4x_enable_msk(struct fimg2d_control *info);
+void fimg2d4x_set_msk_image(struct fimg2d_control *info,
+ struct fimg2d_image *m);
+void fimg2d4x_set_msk_rect(struct fimg2d_control *info, struct fimg2d_rect *r);
+void fimg2d4x_set_color_fill(struct fimg2d_control *info, unsigned long color);
+void fimg2d4x_set_premultiplied(struct fimg2d_control *info);
+void fimg2d4x_src_premultiply(struct fimg2d_control *info);
+void fimg2d4x_dst_premultiply(struct fimg2d_control *info);
+void fimg2d4x_dst_depremultiply(struct fimg2d_control *info);
+void fimg2d4x_enable_transparent(struct fimg2d_control *info);
+void fimg2d4x_set_bluescreen(struct fimg2d_control *info,
+ struct fimg2d_bluscr *bluscr);
+void fimg2d4x_enable_clipping(struct fimg2d_control *info,
+ struct fimg2d_clip *clp);
+void fimg2d4x_enable_dithering(struct fimg2d_control *info);
+void fimg2d4x_set_src_scaling(struct fimg2d_control *info,
+ struct fimg2d_scale *scl,
+ struct fimg2d_repeat *rep);
+void fimg2d4x_set_msk_scaling(struct fimg2d_control *info,
+ struct fimg2d_scale *scl,
+ struct fimg2d_repeat *rep);
+void fimg2d4x_set_src_repeat(struct fimg2d_control *info,
+ struct fimg2d_repeat *rep);
+void fimg2d4x_set_msk_repeat(struct fimg2d_control *info,
+ struct fimg2d_repeat *rep);
+void fimg2d4x_set_rotation(struct fimg2d_control *info, enum rotation rot);
+void fimg2d4x_set_fgcolor(struct fimg2d_control *info, unsigned long fg);
+void fimg2d4x_set_bgcolor(struct fimg2d_control *info, unsigned long bg);
+void fimg2d4x_enable_alpha(struct fimg2d_control *info, unsigned char g_alpha);
+void fimg2d4x_set_alpha_composite(struct fimg2d_control *info,
+ enum blit_op op, unsigned char g_alpha);
+void fimg2d4x_dump_regs(struct fimg2d_control *info);
+
+#endif /* __FIMG2D4X_H */
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d4x_blt.c b/drivers/media/video/samsung/fimg2d4x/fimg2d4x_blt.c
new file mode 100644
index 0000000..97ea4c1
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d4x_blt.c
@@ -0,0 +1,334 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d4x_blt.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <plat/sysmmu.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <plat/devs.h>
+#include <linux/pm_runtime.h>
+#endif
+#include "fimg2d.h"
+#include "fimg2d_clk.h"
+#include "fimg2d4x.h"
+#include "fimg2d_ctx.h"
+#include "fimg2d_cache.h"
+#include "fimg2d_helper.h"
+
+#define BLIT_TIMEOUT msecs_to_jiffies(500)
+
+static inline void fimg2d4x_blit_wait(struct fimg2d_control *info, struct fimg2d_bltcmd *cmd)
+{
+ if (!wait_event_timeout(info->wait_q, !atomic_read(&info->busy), BLIT_TIMEOUT)) {
+ printk(KERN_ERR "[%s] blit wait timeout\n", __func__);
+ fimg2d_dump_command(cmd);
+
+ if (!fimg2d4x_blit_done_status(info))
+ info->err = true; /* device error */
+ }
+}
+
+static void fimg2d4x_pre_bitblt(struct fimg2d_control *info, struct fimg2d_bltcmd *cmd)
+{
+ /* TODO */
+}
+
+void fimg2d4x_bitblt(struct fimg2d_control *info)
+{
+ struct fimg2d_context *ctx;
+ struct fimg2d_bltcmd *cmd;
+ unsigned long *pgd;
+ int ret;
+
+ fimg2d_debug("enter blitter\n");
+
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_get_sync(info->dev);
+ fimg2d_debug("pm_runtime_get_sync\n");
+#endif
+ fimg2d_clk_on(info);
+
+ while ((cmd = fimg2d_get_first_command(info))) {
+ ctx = cmd->ctx;
+ if (info->err) {
+ printk(KERN_ERR "[%s] device error\n", __func__);
+ goto blitend;
+ }
+
+ atomic_set(&info->busy, 1);
+
+ ret = info->configure(info, cmd);
+ if (ret)
+ goto blitend;
+
+ if (cmd->image[IDST].addr.type != ADDR_PHYS) {
+ pgd = (unsigned long *)ctx->mm->pgd;
+ exynos_sysmmu_enable(info->dev,
+ (unsigned long)virt_to_phys(pgd));
+ fimg2d_debug("sysmmu enable: pgd %p ctx %p seq_no(%u)\n",
+ pgd, ctx, cmd->seq_no);
+ }
+
+ fimg2d4x_pre_bitblt(info, cmd);
+
+#ifdef PERF_PROFILE
+ perf_start(cmd->ctx, PERF_BLIT);
+#endif
+ /* start blit */
+ info->run(info);
+ fimg2d4x_blit_wait(info, cmd);
+
+#ifdef PERF_PROFILE
+ perf_end(cmd->ctx, PERF_BLIT);
+#endif
+ if (cmd->image[IDST].addr.type != ADDR_PHYS) {
+ exynos_sysmmu_disable(info->dev);
+ fimg2d_debug("sysmmu disable\n");
+ }
+blitend:
+ fimg2d_del_command(info, cmd);
+
+ /* wake up context */
+ if (!atomic_read(&ctx->ncmd))
+ wake_up(&ctx->wait_q);
+ }
+
+ atomic_set(&info->active, 0);
+
+ fimg2d_clk_off(info);
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_put_sync(info->dev);
+ fimg2d_debug("pm_runtime_put_sync\n");
+#endif
+
+ fimg2d_debug("exit blitter\n");
+}
+
+static inline int is_opaque(enum color_format fmt)
+{
+ switch (fmt) {
+ case CF_ARGB_8888:
+ case CF_ARGB_1555:
+ case CF_ARGB_4444:
+ return 0;
+
+ default:
+ return 1;
+ }
+}
+
+static int fast_op(struct fimg2d_bltcmd *cmd)
+{
+ int sa, da, ga;
+ int fop = cmd->op;
+ struct fimg2d_image *src, *msk, *dst;
+ struct fimg2d_param *p = &cmd->param;
+
+ src = &cmd->image[ISRC];
+ msk = &cmd->image[IMSK];
+ dst = &cmd->image[IDST];
+
+ if (msk->addr.type)
+ return fop;
+
+ ga = p->g_alpha;
+ da = is_opaque(dst->fmt) ? 0xff : 0;
+
+ if (!src->addr.type)
+ sa = (p->solid_color >> 24) & 0xff;
+ else
+ sa = is_opaque(src->fmt) ? 0xff : 0;
+
+ switch (cmd->op) {
+ case BLIT_OP_SRC_OVER:
+ /* Sc + (1-Sa)*Dc = Sc */
+ if (sa == 0xff && ga == 0xff)
+ fop = BLIT_OP_SRC;
+ break;
+ case BLIT_OP_DST_OVER:
+ /* (1-Da)*Sc + Dc = Dc */
+ if (da == 0xff)
+ fop = BLIT_OP_DST; /* nop */
+ break;
+ case BLIT_OP_SRC_IN:
+ /* Da*Sc = Sc */
+ if (da == 0xff)
+ fop = BLIT_OP_SRC;
+ break;
+ case BLIT_OP_DST_IN:
+ /* Sa*Dc = Dc */
+ if (sa == 0xff && ga == 0xff)
+ fop = BLIT_OP_DST; /* nop */
+ break;
+ case BLIT_OP_SRC_OUT:
+ /* (1-Da)*Sc = 0 */
+ if (da == 0xff)
+ fop = BLIT_OP_CLR;
+ break;
+ case BLIT_OP_DST_OUT:
+ /* (1-Sa)*Dc = 0 */
+ if (sa == 0xff && ga == 0xff)
+ fop = BLIT_OP_CLR;
+ break;
+ case BLIT_OP_SRC_ATOP:
+ /* Da*Sc + (1-Sa)*Dc = Sc */
+ if (sa == 0xff && da == 0xff && ga == 0xff)
+ fop = BLIT_OP_SRC;
+ break;
+ case BLIT_OP_DST_ATOP:
+ /* (1-Da)*Sc + Sa*Dc = Dc */
+ if (sa == 0xff && da == 0xff && ga == 0xff)
+ fop = BLIT_OP_DST; /* nop */
+ break;
+ default:
+ break;
+ }
+
+ if (fop == BLIT_OP_SRC && !src->addr.type && ga == 0xff)
+ fop = BLIT_OP_SOLID_FILL;
+
+ return fop;
+}
+
+static int fimg2d4x_configure(struct fimg2d_control *info,
+ struct fimg2d_bltcmd *cmd)
+{
+ int op;
+ enum image_sel srcsel, dstsel;
+ struct fimg2d_param *p = &cmd->param;
+ struct fimg2d_image *src, *msk, *dst;
+
+ fimg2d_debug("ctx %p seq_no(%u)\n", cmd->ctx, cmd->seq_no);
+
+ src = &cmd->image[ISRC];
+ msk = &cmd->image[IMSK];
+ dst = &cmd->image[IDST];
+
+ /* TODO: batch blit */
+ fimg2d4x_reset(info);
+
+ /* src and dst select */
+ srcsel = dstsel = IMG_MEMORY;
+
+ op = fast_op(cmd);
+
+ switch (op) {
+ case BLIT_OP_SOLID_FILL:
+ srcsel = dstsel = IMG_FGCOLOR;
+ fimg2d4x_set_fgcolor(info, p->solid_color);
+ break;
+ case BLIT_OP_CLR:
+ srcsel = dstsel = IMG_FGCOLOR;
+ fimg2d4x_set_color_fill(info, 0);
+ break;
+ case BLIT_OP_DST:
+ return -1; /* nop */
+ default:
+ if (!src->addr.type) {
+ srcsel = IMG_FGCOLOR;
+ fimg2d4x_set_fgcolor(info, p->solid_color);
+ }
+
+ if (op == BLIT_OP_SRC)
+ dstsel = IMG_FGCOLOR;
+
+ fimg2d4x_enable_alpha(info, p->g_alpha);
+ fimg2d4x_set_alpha_composite(info, op, p->g_alpha);
+ if (p->premult == NON_PREMULTIPLIED)
+ fimg2d4x_set_premultiplied(info);
+ break;
+ }
+
+ fimg2d4x_set_src_type(info, srcsel);
+ fimg2d4x_set_dst_type(info, dstsel);
+
+ /* src */
+ if (src->addr.type) {
+ fimg2d4x_set_src_image(info, src);
+ fimg2d4x_set_src_rect(info, &src->rect);
+ fimg2d4x_set_src_repeat(info, &p->repeat);
+ if (p->scaling.mode)
+ fimg2d4x_set_src_scaling(info, &p->scaling, &p->repeat);
+ }
+
+ /* msk */
+ if (msk->addr.type) {
+ fimg2d4x_enable_msk(info);
+ fimg2d4x_set_msk_image(info, msk);
+ fimg2d4x_set_msk_rect(info, &msk->rect);
+ fimg2d4x_set_msk_repeat(info, &p->repeat);
+ if (p->scaling.mode)
+ fimg2d4x_set_msk_scaling(info, &p->scaling, &p->repeat);
+ }
+
+ /* dst */
+ if (dst->addr.type) {
+ fimg2d4x_set_dst_image(info, dst);
+ fimg2d4x_set_dst_rect(info, &dst->rect);
+ if (p->clipping.enable)
+ fimg2d4x_enable_clipping(info, &p->clipping);
+ }
+
+ /* bluescreen */
+ if (p->bluscr.mode)
+ fimg2d4x_set_bluescreen(info, &p->bluscr);
+
+ /* rotation */
+ if (p->rotate)
+ fimg2d4x_set_rotation(info, p->rotate);
+
+ /* dithering */
+ if (p->dither)
+ fimg2d4x_enable_dithering(info);
+
+ return 0;
+}
+
+static void fimg2d4x_run(struct fimg2d_control *info)
+{
+ fimg2d_debug("start blit\n");
+ fimg2d4x_enable_irq(info);
+ fimg2d4x_clear_irq(info);
+ fimg2d4x_start_blit(info);
+}
+
+static void fimg2d4x_stop(struct fimg2d_control *info)
+{
+ if (fimg2d4x_is_blit_done(info)) {
+ fimg2d_debug("blit done\n");
+ fimg2d4x_disable_irq(info);
+ fimg2d4x_clear_irq(info);
+ atomic_set(&info->busy, 0);
+ wake_up(&info->wait_q);
+ }
+}
+
+static void fimg2d4x_dump(struct fimg2d_control *info)
+{
+ fimg2d4x_dump_regs(info);
+}
+
+int fimg2d_register_ops(struct fimg2d_control *info)
+{
+ info->blit = fimg2d4x_bitblt;
+ info->configure = fimg2d4x_configure;
+ info->run = fimg2d4x_run;
+ info->dump = fimg2d4x_dump;
+ info->stop = fimg2d4x_stop;
+
+ return 0;
+}
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d4x_hw.c b/drivers/media/video/samsung/fimg2d4x/fimg2d4x_hw.c
new file mode 100644
index 0000000..155c67b
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d4x_hw.c
@@ -0,0 +1,839 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d4x_hw.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/io.h>
+#include <linux/sched.h>
+
+#include "fimg2d.h"
+#include "fimg2d4x.h"
+#include "fimg2d_clk.h"
+
+#define wr(d, a) writel((d), info->regs + (a))
+#define rd(a) readl(info->regs + (a))
+
+#undef SOFT_RESET_ENABLED
+#undef FIMG2D_RESET_WA
+
+static const int a8_rgbcolor = (int)0x0;
+static const int msk_oprmode = (int)MSK_ARGB;
+static const int premult_round_mode = (int)PREMULT_ROUND_1;	/* ((A+1)*B) >> 8 */
+static const int blend_round_mode = (int)BLEND_ROUND_0;	/* ((A+1)*B) >> 8 */
+
+void fimg2d4x_reset(struct fimg2d_control *info)
+{
+#ifdef SOFT_RESET_ENABLED
+#ifdef FIMG2D_RESET_WA
+ fimg2d_clk_save(info);
+#endif
+ wr(FIMG2D_SOFT_RESET, FIMG2D_SOFT_RESET_REG);
+#ifdef FIMG2D_RESET_WA
+ fimg2d_clk_restore(info);
+#endif
+#else
+ wr(FIMG2D_SFR_CLEAR, FIMG2D_SOFT_RESET_REG);
+#endif
+ /* turn off wince option */
+ wr(0x0, FIMG2D_BLEND_FUNCTION_REG);
+
+ /* set default repeat mode to reflect(mirror) */
+ wr(FIMG2D_SRC_REPEAT_REFLECT, FIMG2D_SRC_REPEAT_MODE_REG);
+ wr(FIMG2D_MSK_REPEAT_REFLECT, FIMG2D_MSK_REPEAT_MODE_REG);
+}
+
+void fimg2d4x_enable_irq(struct fimg2d_control *info)
+{
+ wr(FIMG2D_BLIT_INT_ENABLE, FIMG2D_INTEN_REG);
+}
+
+void fimg2d4x_disable_irq(struct fimg2d_control *info)
+{
+ wr(0, FIMG2D_INTEN_REG);
+}
+
+void fimg2d4x_clear_irq(struct fimg2d_control *info)
+{
+ wr(FIMG2D_BLIT_INT_FLAG, FIMG2D_INTC_PEND_REG);
+}
+
+int fimg2d4x_is_blit_done(struct fimg2d_control *info)
+{
+ return rd(FIMG2D_INTC_PEND_REG) & FIMG2D_BLIT_INT_FLAG;
+}
+
+int fimg2d4x_blit_done_status(struct fimg2d_control *info)
+{
+ volatile unsigned long sts;
+
+ /* read twice */
+ sts = rd(FIMG2D_FIFO_STAT_REG);
+ sts = rd(FIMG2D_FIFO_STAT_REG);
+
+ return (int)(sts & FIMG2D_BLIT_FINISHED);
+}
+
+void fimg2d4x_start_blit(struct fimg2d_control *info)
+{
+ wr(FIMG2D_START_BITBLT, FIMG2D_BITBLT_START_REG);
+}
+
+void fimg2d4x_set_max_burst_length(struct fimg2d_control *info, enum max_burst_len len)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_AXI_MODE_REG);
+
+	cfg = (cfg & ~FIMG2D_MAX_BURST_LEN_MASK) | (len << FIMG2D_MAX_BURST_LEN_SHIFT);
+	wr(cfg, FIMG2D_AXI_MODE_REG);	/* write back: cfg was computed but never written */
+}
+
+void fimg2d4x_set_src_type(struct fimg2d_control *info, enum image_sel type)
+{
+ unsigned long cfg;
+
+ if (type == IMG_MEMORY)
+ cfg = FIMG2D_IMAGE_TYPE_MEMORY;
+ else if (type == IMG_FGCOLOR)
+ cfg = FIMG2D_IMAGE_TYPE_FGCOLOR;
+ else
+ cfg = FIMG2D_IMAGE_TYPE_BGCOLOR;
+
+ wr(cfg, FIMG2D_SRC_SELECT_REG);
+}
+
+void fimg2d4x_set_src_image(struct fimg2d_control *info, struct fimg2d_image *s)
+{
+ unsigned long cfg;
+
+ wr(FIMG2D_ADDR(s->addr.start), FIMG2D_SRC_BASE_ADDR_REG);
+ wr(FIMG2D_STRIDE(s->stride), FIMG2D_SRC_STRIDE_REG);
+
+ if (s->order < ARGB_ORDER_END) { /* argb */
+ cfg = s->order << FIMG2D_RGB_ORDER_SHIFT;
+ if (s->fmt == CF_A8)
+ wr(a8_rgbcolor, FIMG2D_SRC_A8_RGB_EXT_REG);
+	} else if (s->order < P1_ORDER_END) {	/* YCbCr 1plane */
+ cfg = (s->order - P1_CRY1CBY0) << FIMG2D_YCBCR_ORDER_SHIFT;
+ } else { /* YCbCr 2plane */
+ cfg = (s->order - P2_CRCB) << FIMG2D_YCBCR_ORDER_SHIFT;
+ cfg |= FIMG2D_YCBCR_2PLANE;
+
+ wr(FIMG2D_ADDR(s->plane2.start),
+ FIMG2D_SRC_PLANE2_BASE_ADDR_REG);
+ }
+
+ cfg |= s->fmt << FIMG2D_COLOR_FORMAT_SHIFT;
+
+ wr(cfg, FIMG2D_SRC_COLOR_MODE_REG);
+}
+
+void fimg2d4x_set_src_rect(struct fimg2d_control *info, struct fimg2d_rect *r)
+{
+ wr(FIMG2D_OFFSET(r->x1, r->y1), FIMG2D_SRC_LEFT_TOP_REG);
+ wr(FIMG2D_OFFSET(r->x2, r->y2), FIMG2D_SRC_RIGHT_BOTTOM_REG);
+}
+
+void fimg2d4x_set_dst_type(struct fimg2d_control *info, enum image_sel type)
+{
+ unsigned long cfg;
+
+ if (type == IMG_MEMORY)
+ cfg = FIMG2D_IMAGE_TYPE_MEMORY;
+ else if (type == IMG_FGCOLOR)
+ cfg = FIMG2D_IMAGE_TYPE_FGCOLOR;
+ else
+ cfg = FIMG2D_IMAGE_TYPE_BGCOLOR;
+
+ wr(cfg, FIMG2D_DST_SELECT_REG);
+}
+
+/**
+ * @d: set base address, stride, color format, order
+ */
+void fimg2d4x_set_dst_image(struct fimg2d_control *info, struct fimg2d_image *d)
+{
+ unsigned long cfg;
+
+ wr(FIMG2D_ADDR(d->addr.start), FIMG2D_DST_BASE_ADDR_REG);
+ wr(FIMG2D_STRIDE(d->stride), FIMG2D_DST_STRIDE_REG);
+
+ if (d->order < ARGB_ORDER_END) {
+ cfg = d->order << FIMG2D_RGB_ORDER_SHIFT;
+ if (d->fmt == CF_A8)
+ wr(a8_rgbcolor, FIMG2D_DST_A8_RGB_EXT_REG);
+ } else if (d->order < P1_ORDER_END) {
+ cfg = (d->order - P1_CRY1CBY0) << FIMG2D_YCBCR_ORDER_SHIFT;
+ } else {
+ cfg = (d->order - P2_CRCB) << FIMG2D_YCBCR_ORDER_SHIFT;
+ cfg |= FIMG2D_YCBCR_2PLANE;
+
+ wr(FIMG2D_ADDR(d->plane2.start),
+ FIMG2D_DST_PLANE2_BASE_ADDR_REG);
+ }
+
+ cfg |= d->fmt << FIMG2D_COLOR_FORMAT_SHIFT;
+
+ wr(cfg, FIMG2D_DST_COLOR_MODE_REG);
+}
+
+void fimg2d4x_set_dst_rect(struct fimg2d_control *info, struct fimg2d_rect *r)
+{
+ wr(FIMG2D_OFFSET(r->x1, r->y1), FIMG2D_DST_LEFT_TOP_REG);
+ wr(FIMG2D_OFFSET(r->x2, r->y2), FIMG2D_DST_RIGHT_BOTTOM_REG);
+}
+
+void fimg2d4x_enable_msk(struct fimg2d_control *info)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+ cfg |= FIMG2D_ENABLE_NORMAL_MSK;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+}
+
+void fimg2d4x_set_msk_image(struct fimg2d_control *info, struct fimg2d_image *m)
+{
+ unsigned long cfg;
+
+ wr(FIMG2D_ADDR(m->addr.start), FIMG2D_MSK_BASE_ADDR_REG);
+ wr(FIMG2D_STRIDE(m->stride), FIMG2D_MSK_STRIDE_REG);
+
+ cfg = m->order << FIMG2D_MSK_ORDER_SHIFT;
+ cfg |= (m->fmt - CF_MSK_1BIT) << FIMG2D_MSK_FORMAT_SHIFT;
+
+ /* 16, 32bit mask only */
+ if (m->fmt >= CF_MSK_16BIT_565) {
+ if (msk_oprmode == MSK_ALPHA)
+ cfg |= FIMG2D_MSK_TYPE_ALPHA;
+ else if (msk_oprmode == MSK_ARGB)
+ cfg |= FIMG2D_MSK_TYPE_ARGB;
+ else
+ cfg |= FIMG2D_MSK_TYPE_MIXED;
+ }
+
+ wr(cfg, FIMG2D_MSK_MODE_REG);
+}
+
+void fimg2d4x_set_msk_rect(struct fimg2d_control *info, struct fimg2d_rect *r)
+{
+ wr(FIMG2D_OFFSET(r->x1, r->y1), FIMG2D_MSK_LEFT_TOP_REG);
+ wr(FIMG2D_OFFSET(r->x2, r->y2), FIMG2D_MSK_RIGHT_BOTTOM_REG);
+}
+
+/**
+ * If solid color fill is enabled, other blit command is ignored.
+ * Color format of solid color is considered to be
+ * the same as destination color format
+ * Channel order of solid color is A-R-G-B or Y-Cb-Cr
+ */
+void fimg2d4x_set_color_fill(struct fimg2d_control *info, unsigned long color)
+{
+ wr(FIMG2D_SOLID_FILL, FIMG2D_BITBLT_COMMAND_REG);
+
+ /* sf color */
+ wr(color, FIMG2D_SF_COLOR_REG);
+
+ /* set 16 burst for performance */
+ fimg2d4x_set_max_burst_length(info, MAX_BURST_16);
+}
+
+/**
+ * set alpha-multiply mode for src, dst, pat read (pre-bitblt)
+ * set alpha-demultiply for dst write (post-bitblt)
+ */
+void fimg2d4x_set_premultiplied(struct fimg2d_control *info)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+ cfg |= FIMG2D_PREMULT_ALL;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+}
+
+void fimg2d4x_src_premultiply(struct fimg2d_control *info)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+ cfg |= FIMG2D_SRC_PREMULT;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+}
+
+void fimg2d4x_dst_premultiply(struct fimg2d_control *info)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+ cfg |= FIMG2D_DST_RD_PREMULT;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+}
+
+void fimg2d4x_dst_depremultiply(struct fimg2d_control *info)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+ cfg |= FIMG2D_DST_WR_DEPREMULT;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+}
+
+/**
+ * set transp/bluscr mode, bs color, bg color
+ */
+void fimg2d4x_set_bluescreen(struct fimg2d_control *info,
+ struct fimg2d_bluscr *bluscr)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+
+ if (bluscr->mode == TRANSP)
+ cfg |= FIMG2D_TRANSP_MODE;
+ else if (bluscr->mode == BLUSCR)
+ cfg |= FIMG2D_BLUSCR_MODE;
+ else /* opaque: initial value */
+ return;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+
+ /* bs color */
+ if (bluscr->bs_color)
+ wr(bluscr->bs_color, FIMG2D_BS_COLOR_REG);
+
+ /* bg color */
+ if (bluscr->mode == BLUSCR && bluscr->bg_color)
+ wr(bluscr->bg_color, FIMG2D_BG_COLOR_REG);
+}
+
+/**
+ * @c: destination clipping region
+ */
+void fimg2d4x_enable_clipping(struct fimg2d_control *info,
+ struct fimg2d_clip *clp)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+ cfg |= FIMG2D_ENABLE_CW;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+
+ wr(FIMG2D_OFFSET(clp->x1, clp->y1), FIMG2D_CW_LT_REG);
+ wr(FIMG2D_OFFSET(clp->x2, clp->y2), FIMG2D_CW_RB_REG);
+}
+
+void fimg2d4x_enable_dithering(struct fimg2d_control *info)
+{
+ unsigned long cfg;
+
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+ cfg |= FIMG2D_ENABLE_DITHER;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+}
+
+#define MAX_PRECISION 16
+#define DEFAULT_SCALE_RATIO 0x10000
+
+/**
+ * scale_factor_to_fixed16 - convert scale factor to fixed point 16
+ * @n: numerator
+ * @d: denominator
+ */
+static inline unsigned long scale_factor_to_fixed16(int n, int d)
+{
+ int i;
+ u32 fixed16;
+
+ if (!d)
+ return DEFAULT_SCALE_RATIO;
+
+ fixed16 = (n/d) << 16;
+ n %= d;
+
+ for (i = 0; i < MAX_PRECISION; i++) {
+ if (!n)
+ break;
+ n <<= 1;
+ if (n/d)
+ fixed16 |= 1 << (15-i);
+ n %= d;
+ }
+
+ return fixed16;
+}
+
+void fimg2d4x_set_src_scaling(struct fimg2d_control *info,
+ struct fimg2d_scale *scl,
+ struct fimg2d_repeat *rep)
+{
+ unsigned long wcfg, hcfg;
+ unsigned long mode;
+
+ /*
+ * scaling ratio in pixels
+ * e.g scale-up: src(1,1)-->dst(2,2), src factor: 0.5 (0x000080000)
+ * scale-down: src(2,2)-->dst(1,1), src factor: 2.0 (0x000200000)
+ */
+
+ /* inversed scaling factor: src is numerator */
+ wcfg = scale_factor_to_fixed16(scl->src_w, scl->dst_w);
+ hcfg = scale_factor_to_fixed16(scl->src_h, scl->dst_h);
+
+ if (wcfg == DEFAULT_SCALE_RATIO && hcfg == DEFAULT_SCALE_RATIO)
+ return;
+
+ wr(wcfg, FIMG2D_SRC_XSCALE_REG);
+ wr(hcfg, FIMG2D_SRC_YSCALE_REG);
+
+ /* scaling algorithm */
+ if (scl->mode == SCALING_NEAREST)
+ mode = FIMG2D_SCALE_MODE_NEAREST;
+ else {
+ /* 0x3: ignore repeat mode at boundary */
+ if (rep->mode == REPEAT_PAD || rep->mode == REPEAT_CLAMP)
+ mode = 0x3; /* hidden */
+ else
+ mode = FIMG2D_SCALE_MODE_BILINEAR;
+ }
+
+ wr(mode, FIMG2D_SRC_SCALE_CTRL_REG);
+}
+
+void fimg2d4x_set_msk_scaling(struct fimg2d_control *info,
+ struct fimg2d_scale *scl,
+ struct fimg2d_repeat *rep)
+{
+ unsigned long wcfg, hcfg;
+ unsigned long mode;
+
+ /*
+ * scaling ratio in pixels
+ * e.g scale-up: src(1,1)-->dst(2,2), msk factor: 0.5 (0x000080000)
+ * scale-down: src(2,2)-->dst(1,1), msk factor: 2.0 (0x000200000)
+ */
+
+ /* inversed scaling factor: src is numerator */
+ wcfg = scale_factor_to_fixed16(scl->src_w, scl->dst_w);
+ hcfg = scale_factor_to_fixed16(scl->src_h, scl->dst_h);
+
+ if (wcfg == DEFAULT_SCALE_RATIO && hcfg == DEFAULT_SCALE_RATIO)
+ return;
+
+ wr(wcfg, FIMG2D_MSK_XSCALE_REG);
+ wr(hcfg, FIMG2D_MSK_YSCALE_REG);
+
+ /* scaling algorithm */
+ if (scl->mode == SCALING_NEAREST)
+ mode = FIMG2D_SCALE_MODE_NEAREST;
+ else {
+ /* 0x3: ignore repeat mode at boundary */
+ if (rep->mode == REPEAT_PAD || rep->mode == REPEAT_CLAMP)
+ mode = 0x3; /* hidden */
+ else
+ mode = FIMG2D_SCALE_MODE_BILINEAR;
+ }
+
+ wr(mode, FIMG2D_MSK_SCALE_CTRL_REG);
+}
+
+void fimg2d4x_set_src_repeat(struct fimg2d_control *info,
+ struct fimg2d_repeat *rep)
+{
+ unsigned long cfg;
+
+ if (rep->mode == NO_REPEAT)
+ return;
+
+ cfg = (rep->mode - REPEAT_NORMAL) << FIMG2D_SRC_REPEAT_SHIFT;
+
+ wr(cfg, FIMG2D_SRC_REPEAT_MODE_REG);
+
+ /* src pad color */
+ if (rep->mode == REPEAT_PAD)
+ wr(rep->pad_color, FIMG2D_SRC_PAD_VALUE_REG);
+}
+
+void fimg2d4x_set_msk_repeat(struct fimg2d_control *info,
+ struct fimg2d_repeat *rep)
+{
+ unsigned long cfg;
+
+ if (rep->mode == NO_REPEAT)
+ return;
+
+ cfg = (rep->mode - REPEAT_NORMAL) << FIMG2D_MSK_REPEAT_SHIFT;
+
+ wr(cfg, FIMG2D_MSK_REPEAT_MODE_REG);
+
+ /* mask pad color */
+ if (rep->mode == REPEAT_PAD)
+ wr(rep->pad_color, FIMG2D_MSK_PAD_VALUE_REG);
+}
+
+void fimg2d4x_set_rotation(struct fimg2d_control *info, enum rotation rot)
+{
+ int rev_rot90; /* counter clockwise, 4.1 specific */
+ unsigned long cfg;
+ enum addressing dirx, diry;
+
+ rev_rot90 = 0;
+ dirx = diry = FORWARD_ADDRESSING;
+
+ switch (rot) {
+ case ROT_90: /* -270 degree */
+ rev_rot90 = 1; /* fall through */
+ case ROT_180:
+ dirx = REVERSE_ADDRESSING;
+ diry = REVERSE_ADDRESSING;
+ break;
+ case ROT_270: /* -90 degree */
+ rev_rot90 = 1;
+ break;
+ case XFLIP:
+ diry = REVERSE_ADDRESSING;
+ break;
+ case YFLIP:
+ dirx = REVERSE_ADDRESSING;
+ break;
+ case ORIGIN:
+ default:
+ break;
+ }
+
+ /* destination direction */
+ if (dirx == REVERSE_ADDRESSING || diry == REVERSE_ADDRESSING) {
+ cfg = rd(FIMG2D_DST_PAT_DIRECT_REG);
+
+ if (dirx == REVERSE_ADDRESSING)
+ cfg |= FIMG2D_DST_X_DIR_NEGATIVE;
+
+ if (diry == REVERSE_ADDRESSING)
+ cfg |= FIMG2D_DST_Y_DIR_NEGATIVE;
+
+ wr(cfg, FIMG2D_DST_PAT_DIRECT_REG);
+ }
+
+ /* rotation -90 */
+ if (rev_rot90) {
+ cfg = rd(FIMG2D_ROTATE_REG);
+ cfg |= FIMG2D_SRC_ROTATE_90;
+ cfg |= FIMG2D_MSK_ROTATE_90;
+
+ wr(cfg, FIMG2D_ROTATE_REG);
+ }
+}
+
+void fimg2d4x_set_fgcolor(struct fimg2d_control *info, unsigned long fg)
+{
+ wr(fg, FIMG2D_FG_COLOR_REG);
+}
+
+void fimg2d4x_set_bgcolor(struct fimg2d_control *info, unsigned long bg)
+{
+ wr(bg, FIMG2D_BG_COLOR_REG);
+}
+
+void fimg2d4x_enable_alpha(struct fimg2d_control *info, unsigned char g_alpha)
+{
+ unsigned long cfg;
+
+ /* enable alpha */
+ cfg = rd(FIMG2D_BITBLT_COMMAND_REG);
+ cfg |= FIMG2D_ALPHA_BLEND_MODE;
+
+ wr(cfg, FIMG2D_BITBLT_COMMAND_REG);
+
+ /*
+ * global(constant) alpha
+ * ex. if global alpha is 0x80, must set 0x80808080
+ */
+ cfg = g_alpha;
+ cfg |= g_alpha << 8;
+ cfg |= g_alpha << 16;
+ cfg |= g_alpha << 24;
+ wr(cfg, FIMG2D_ALPHA_REG);
+}
+
+/**
+ * Four channels of the image are computed with:
+ * R = [ coeff(S)*Sc + coeff(D)*Dc ]
+ * where
+ * Rc is result color or alpha
+ * Sc is source color or alpha
+ * Dc is destination color or alpha
+ *
+ * Caution: supposed that Sc and Dc are perpixel-alpha-premultiplied value
+ *
+ * MODE: Formula
+ * ----------------------------------------------------------------------------
+ * FILL:
+ * CLEAR: R = 0
+ * SRC: R = Sc
+ * DST: R = Dc
+ * SRC_OVER: R = Sc + (1-Sa)*Dc
+ * DST_OVER: R = (1-Da)*Sc + Dc
+ * SRC_IN: R = Da*Sc
+ * DST_IN: R = Sa*Dc
+ * SRC_OUT: R = (1-Da)*Sc
+ * DST_OUT: R = (1-Sa)*Dc
+ * SRC_ATOP: R = Da*Sc + (1-Sa)*Dc
+ * DST_ATOP: R = (1-Da)*Sc + Sa*Dc
+ * XOR: R = (1-Da)*Sc + (1-Sa)*Dc
+ * ADD: R = Sc + Dc
+ * MULTIPLY: R = Sc*Dc
+ * SCREEN: R = Sc + (1-Sc)*Dc
+ * DARKEN: R = (Da*Sc<Sa*Dc)? Sc+(1-Sa)*Dc : (1-Da)*Sc+Dc
+ * LIGHTEN: R = (Da*Sc>Sa*Dc)? Sc+(1-Sa)*Dc : (1-Da)*Sc+Dc
+ * DISJ_SRC_OVER: R = Sc + (min(1,(1-Sa)/Da))*Dc
+ * DISJ_DST_OVER: R = (min(1,(1-Da)/Sa))*Sc + Dc
+ * DISJ_SRC_IN: R = (max(1-(1-Da)/Sa,0))*Sc
+ * DISJ_DST_IN: R = (max(1-(1-Sa)/Da,0))*Dc
+ * DISJ_SRC_OUT: R = (min(1,(1-Da)/Sa))*Sc
+ * DISJ_DST_OUT: R = (min(1,(1-Sa)/Da))*Dc
+ * DISJ_SRC_ATOP: R = (max(1-(1-Da)/Sa,0))*Sc + (min(1,(1-Sa)/Da))*Dc
+ * DISJ_DST_ATOP: R = (min(1,(1-Da)/Sa))*Sc + (max(1-(1-Sa)/Da,0))*Dc
+ * DISJ_XOR: R = (min(1,(1-Da)/Sa))*Sc + (min(1,(1-Sa)/Da))*Dc
+ * CONJ_SRC_OVER: R = Sc + (max(1-Sa/Da,0))*Dc
+ * CONJ_DST_OVER: R = (max(1-Da/Sa,0))*Sc + Dc
+ * CONJ_SRC_IN: R = (min(1,Da/Sa))*Sc
+ * CONJ_DST_IN: R = (min(1,Sa/Da))*Dc
+ * CONJ_SRC_OUT:	R = (max(1-Da/Sa,0))*Sc
+ * CONJ_DST_OUT: R = (max(1-Sa/Da,0))*Dc
+ * CONJ_SRC_ATOP: R = (min(1,Da/Sa))*Sc + (max(1-Sa/Da,0))*Dc
+ * CONJ_DST_ATOP: R = (max(1-Da/Sa,0))*Sc + (min(1,Sa/Da))*Dc
+ * CONJ_XOR: R = (max(1-Da/Sa,0))*Sc + (max(1-Sa/Da,0))*Dc
+ */
+static struct fimg2d_blend_coeff const coeff_table[MAX_FIMG2D_BLIT_OP] = {
+ { 0, 0, 0, 0 }, /* FILL */
+ { 0, COEFF_ZERO, 0, COEFF_ZERO }, /* CLEAR */
+ { 0, COEFF_ONE, 0, COEFF_ZERO }, /* SRC */
+ { 0, COEFF_ZERO, 0, COEFF_ONE }, /* DST */
+ { 0, COEFF_ONE, 1, COEFF_SA }, /* SRC_OVER */
+ { 1, COEFF_DA, 0, COEFF_ONE }, /* DST_OVER */
+ { 0, COEFF_DA, 0, COEFF_ZERO }, /* SRC_IN */
+ { 0, COEFF_ZERO, 0, COEFF_SA }, /* DST_IN */
+ { 1, COEFF_DA, 0, COEFF_ZERO }, /* SRC_OUT */
+ { 0, COEFF_ZERO, 1, COEFF_SA }, /* DST_OUT */
+ { 0, COEFF_DA, 1, COEFF_SA }, /* SRC_ATOP */
+ { 1, COEFF_DA, 0, COEFF_SA }, /* DST_ATOP */
+ { 1, COEFF_DA, 1, COEFF_SA }, /* XOR */
+ { 0, COEFF_ONE, 0, COEFF_ONE }, /* ADD */
+ { 0, COEFF_DC, 0, COEFF_ZERO }, /* MULTIPLY */
+ { 0, COEFF_ONE, 1, COEFF_SC }, /* SCREEN */
+ { 0, 0, 0, 0 }, /* DARKEN */
+ { 0, 0, 0, 0 }, /* LIGHTEN */
+ { 0, COEFF_ONE, 0, COEFF_DISJ_S }, /* DISJ_SRC_OVER */
+ { 0, COEFF_DISJ_D, 0, COEFF_ONE }, /* DISJ_DST_OVER */
+ { 1, COEFF_DISJ_D, 0, COEFF_ZERO }, /* DISJ_SRC_IN */
+ { 0, COEFF_ZERO, 1, COEFF_DISJ_S }, /* DISJ_DST_IN */
+ { 0, COEFF_DISJ_D, 0, COEFF_ONE }, /* DISJ_SRC_OUT */
+ { 0, COEFF_ZERO, 0, COEFF_DISJ_S }, /* DISJ_DST_OUT */
+ { 1, COEFF_DISJ_D, 0, COEFF_DISJ_S }, /* DISJ_SRC_ATOP */
+ { 0, COEFF_DISJ_D, 1, COEFF_DISJ_S }, /* DISJ_DST_ATOP */
+ { 0, COEFF_DISJ_D, 0, COEFF_DISJ_S }, /* DISJ_XOR */
+ { 0, COEFF_ONE, 1, COEFF_DISJ_S }, /* CONJ_SRC_OVER */
+ { 1, COEFF_DISJ_D, 0, COEFF_ONE }, /* CONJ_DST_OVER */
+ { 0, COEFF_CONJ_D, 0, COEFF_ONE }, /* CONJ_SRC_IN */
+ { 0, COEFF_ZERO, 0, COEFF_CONJ_S }, /* CONJ_DST_IN */
+ { 1, COEFF_CONJ_D, 0, COEFF_ZERO }, /* CONJ_SRC_OUT */
+ { 0, COEFF_ZERO, 1, COEFF_CONJ_S }, /* CONJ_DST_OUT */
+ { 0, COEFF_CONJ_D, 1, COEFF_CONJ_S }, /* CONJ_SRC_ATOP */
+	{ 1, COEFF_CONJ_D,	0, COEFF_CONJ_S },	/* CONJ_DST_ATOP (d_coeff was COEFF_CONJ_D: typo per formula above) */
+ { 1, COEFF_CONJ_D, 1, COEFF_CONJ_S }, /* CONJ_XOR */
+ { 0, 0, 0, 0 }, /* USER */
+ { 1, COEFF_GA, 1, COEFF_ZERO }, /* USER_SRC_GA */
+};
+
+/*
+ * coefficient table with global (constant) alpha
+ * replace COEFF_ONE with COEFF_GA
+ *
+ * MODE: Formula with Global Alpha (Ga is multiplied to both Sc and Sa)
+ * ----------------------------------------------------------------------------
+ * FILL:
+ * CLEAR: R = 0
+ * SRC: R = Ga*Sc
+ * DST: R = Dc
+ * SRC_OVER: R = Ga*Sc + (1-Sa*Ga)*Dc
+ * DST_OVER: R = (1-Da)*Ga*Sc + Dc --> (W/A) 1st:Ga*Sc, 2nd:DST_OVER
+ * SRC_IN: R = Da*Ga*Sc
+ * DST_IN: R = Sa*Ga*Dc
+ * SRC_OUT: R = (1-Da)*Ga*Sc --> (W/A) 1st: Ga*Sc, 2nd:SRC_OUT
+ * DST_OUT: R = (1-Sa*Ga)*Dc
+ * SRC_ATOP: R = Da*Ga*Sc + (1-Sa*Ga)*Dc
+ * DST_ATOP: R = (1-Da)*Ga*Sc + Sa*Ga*Dc --> (W/A) 1st: Ga*Sc, 2nd:DST_ATOP
+ * XOR: R = (1-Da)*Ga*Sc + (1-Sa*Ga)*Dc --> (W/A) 1st: Ga*Sc, 2nd:XOR
+ * ADD: R = Ga*Sc + Dc
+ * MULTIPLY: R = Ga*Sc*Dc --> (W/A) 1st: Ga*Sc, 2nd: MULTIPLY
+ * SCREEN: R = Ga*Sc + (1-Ga*Sc)*Dc --> (W/A) 1st: Ga*Sc, 2nd: SCREEN
+ * DARKEN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * LIGHTEN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_SRC_OVER: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_DST_OVER: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_SRC_IN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_DST_IN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_SRC_OUT: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_DST_OUT: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_SRC_ATOP: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_DST_ATOP: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * DISJ_XOR: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_SRC_OVER: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_DST_OVER: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_SRC_IN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_DST_IN: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_SRC_OUT: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_DST_OUT: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_SRC_ATOP: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_DST_ATOP: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ * CONJ_XOR: R = (W/A) 1st: Ga*Sc, 2nd: OP
+ */
+static struct fimg2d_blend_coeff const ga_coeff_table[MAX_FIMG2D_BLIT_OP] = {
+ { 0, 0, 0, 0 }, /* FILL */
+ { 0, COEFF_ZERO, 0, COEFF_ZERO }, /* CLEAR */
+ { 0, COEFF_GA, 0, COEFF_ZERO }, /* SRC */
+ { 0, COEFF_ZERO, 0, COEFF_ONE }, /* DST */
+ { 0, COEFF_GA, 1, COEFF_SA }, /* SRC_OVER */
+ { 1, COEFF_DA, 0, COEFF_ONE }, /* DST_OVER (use W/A) */
+ { 0, COEFF_DA, 0, COEFF_ZERO }, /* SRC_IN */
+ { 0, COEFF_ZERO, 0, COEFF_SA }, /* DST_IN */
+ { 1, COEFF_DA, 0, COEFF_ZERO }, /* SRC_OUT (use W/A) */
+ { 0, COEFF_ZERO, 1, COEFF_SA }, /* DST_OUT */
+ { 0, COEFF_DA, 1, COEFF_SA }, /* SRC_ATOP */
+ { 1, COEFF_DA, 0, COEFF_SA }, /* DST_ATOP (use W/A) */
+ { 1, COEFF_DA, 1, COEFF_SA }, /* XOR (use W/A) */
+ { 0, COEFF_GA, 0, COEFF_ONE }, /* ADD */
+ { 0, COEFF_DC, 0, COEFF_ZERO }, /* MULTIPLY (use W/A) */
+ { 0, COEFF_ONE, 1, COEFF_SC }, /* SCREEN (use W/A) */
+ { 0, 0, 0, 0 }, /* DARKEN (use W/A) */
+ { 0, 0, 0, 0 }, /* LIGHTEN (use W/A) */
+ { 0, COEFF_ONE, 0, COEFF_DISJ_S }, /* DISJ_SRC_OVER (use W/A) */
+ { 0, COEFF_DISJ_D, 0, COEFF_ONE }, /* DISJ_DST_OVER (use W/A) */
+ { 1, COEFF_DISJ_D, 0, COEFF_ZERO }, /* DISJ_SRC_IN (use W/A) */
+ { 0, COEFF_ZERO, 1, COEFF_DISJ_S }, /* DISJ_DST_IN (use W/A) */
+ { 0, COEFF_DISJ_D, 0, COEFF_ONE }, /* DISJ_SRC_OUT (use W/A) */
+ { 0, COEFF_ZERO, 0, COEFF_DISJ_S }, /* DISJ_DST_OUT (use W/A) */
+ { 1, COEFF_DISJ_D, 0, COEFF_DISJ_S }, /* DISJ_SRC_ATOP (use W/A) */
+ { 0, COEFF_DISJ_D, 1, COEFF_DISJ_S }, /* DISJ_DST_ATOP (use W/A) */
+ { 0, COEFF_DISJ_D, 0, COEFF_DISJ_S }, /* DISJ_XOR (use W/A) */
+ { 0, COEFF_ONE, 1, COEFF_DISJ_S }, /* CONJ_SRC_OVER (use W/A) */
+ { 1, COEFF_DISJ_D, 0, COEFF_ONE }, /* CONJ_DST_OVER (use W/A) */
+ { 0, COEFF_CONJ_D, 0, COEFF_ONE }, /* CONJ_SRC_IN (use W/A) */
+ { 0, COEFF_ZERO, 0, COEFF_CONJ_S }, /* CONJ_DST_IN (use W/A) */
+ { 1, COEFF_CONJ_D, 0, COEFF_ZERO }, /* CONJ_SRC_OUT (use W/A) */
+ { 0, COEFF_ZERO, 1, COEFF_CONJ_S }, /* CONJ_DST_OUT (use W/A) */
+ { 0, COEFF_CONJ_D, 1, COEFF_CONJ_S }, /* CONJ_SRC_ATOP (use W/A) */
+	{ 1, COEFF_CONJ_D,	0, COEFF_CONJ_S },	/* CONJ_DST_ATOP (use W/A; d_coeff was COEFF_CONJ_D: typo per formula) */
+ { 1, COEFF_CONJ_D, 1, COEFF_CONJ_S }, /* CONJ_XOR (use W/A) */
+ { 0, 0, 0, 0 }, /* USER */
+ { 1, COEFF_GA, 1, COEFF_ZERO }, /* USER_SRC_GA */
+};
+
+void fimg2d4x_set_alpha_composite(struct fimg2d_control *info,
+ enum blit_op op, unsigned char g_alpha)
+{
+ int alphamode;
+ unsigned long cfg = 0;
+ struct fimg2d_blend_coeff const *tbl;
+
+ switch (op) {
+ case BLIT_OP_SOLID_FILL:
+ case BLIT_OP_CLR:
+ /* nop */
+ return;
+ case BLIT_OP_DARKEN:
+ cfg |= FIMG2D_DARKEN;
+ break;
+ case BLIT_OP_LIGHTEN:
+ cfg |= FIMG2D_LIGHTEN;
+ break;
+ case BLIT_OP_USER_COEFF:
+ /* TODO */
+ return;
+ default:
+ if (g_alpha < 0xff) { /* with global alpha */
+ tbl = &ga_coeff_table[op];
+ alphamode = ALPHA_PERPIXEL_MUL_GLOBAL;
+ } else {
+ tbl = &coeff_table[op];
+ alphamode = ALPHA_PERPIXEL;
+ }
+
+ /* src coefficient */
+ cfg |= tbl->s_coeff << FIMG2D_SRC_COEFF_SHIFT;
+
+ cfg |= alphamode << FIMG2D_SRC_COEFF_SA_SHIFT;
+ cfg |= alphamode << FIMG2D_SRC_COEFF_DA_SHIFT;
+
+ if (tbl->s_coeff_inv)
+ cfg |= FIMG2D_INV_SRC_COEFF;
+
+ /* dst coefficient */
+ cfg |= tbl->d_coeff << FIMG2D_DST_COEFF_SHIFT;
+
+ cfg |= alphamode << FIMG2D_DST_COEFF_DA_SHIFT;
+ cfg |= alphamode << FIMG2D_DST_COEFF_SA_SHIFT;
+
+ if (tbl->d_coeff_inv)
+ cfg |= FIMG2D_INV_DST_COEFF;
+
+ break;
+ }
+
+ wr(cfg, FIMG2D_BLEND_FUNCTION_REG);
+
+ /* round mode: depremult round mode is not used */
+ cfg = rd(FIMG2D_ROUND_MODE_REG);
+
+ /* premult */
+ cfg &= ~FIMG2D_PREMULT_ROUND_MASK;
+ cfg |= premult_round_mode << FIMG2D_PREMULT_ROUND_SHIFT;
+
+ /* blend */
+ cfg &= ~FIMG2D_BLEND_ROUND_MASK;
+ cfg |= blend_round_mode << FIMG2D_BLEND_ROUND_SHIFT;
+
+ wr(cfg, FIMG2D_ROUND_MODE_REG);
+}
+
+void fimg2d4x_dump_regs(struct fimg2d_control *info)
+{
+ int i, offset;
+ unsigned long table[][2] = {
+ /* start, end */
+ {0x0000, 0x0030}, /* general */
+ {0x0080, 0x00a0}, /* host dma */
+ {0x0100, 0x0110}, /* commands */
+ {0x0200, 0x0210}, /* rotation & direction */
+ {0x0300, 0x0340}, /* source */
+ {0x0400, 0x0420}, /* dest */
+ {0x0500, 0x0550}, /* pattern & mask */
+ {0x0600, 0x0710}, /* clip, rop, alpha and color */
+ {0x0, 0x0}
+ };
+
+ for (i = 0; table[i][1] != 0x0; i++) {
+ offset = table[i][0];
+ do {
+ printk(KERN_INFO "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n", offset,
+ rd(offset),
+ rd(offset+0x4),
+ rd(offset+0x8),
+ rd(offset+0xc));
+ offset += 0x10;
+ } while (offset < table[i][1]);
+ }
+}
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d4x_regs.h b/drivers/media/video/samsung/fimg2d4x/fimg2d4x_regs.h
new file mode 100644
index 0000000..91c7ac8
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d4x_regs.h
@@ -0,0 +1,460 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d4x_regs.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register Definitions for Samsung Graphics 2D Hardware
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D4X_REGS_H
+/* guard macro must match the #ifndef name exactly (was __FIMG2D4x_REGS_H
+ * with a lowercase 'x', so the include guard never took effect) */
+#define __FIMG2D4X_REGS_H __FILE__
+
+/* Macros */
+#define FIMG2D_ADDR(v) ((v) << 0)
+#define FIMG2D_STRIDE(v) (((v) & (0xffff)) << 0)
+#define FIMG2D_OFFSET(x, y) ((((y) & 0x1fff) << 16) | (((x) & 0x1fff) << 0))
+#define FIMG2D_SIZE(w, h) ((((h) & 0x1fff) << 16) | (((w) & 0x1fff) << 0))
+#define FIMG2D_COLOR(v) ((v) << 0)
+
+/* Registers */
+#define FIMG2D_SOFT_RESET_REG (0x000)
+#define FIMG2D_INTEN_REG (0x004)
+#define FIMG2D_INTC_PEND_REG (0x00c)
+#define FIMG2D_FIFO_STAT_REG (0x010)
+#define FIMG2D_AXI_MODE_REG (0x01c)
+#define FIMG2D_DMA_SFR_BASE_ADDR_REG (0x080)
+#define FIMG2D_DMA_COMMAND_REG (0x084)
+#define FIMG2D_DMA_EXE_LIST_NUM_REG (0x088)
+#define FIMG2D_DMA_STATUS_REG (0x08c)
+#define FIMG2D_DMA_HOLD_CMD_REG (0x090)
+#define FIMG2D_BITBLT_START_REG (0x100)
+#define FIMG2D_BITBLT_COMMAND_REG (0x104)
+#define FIMG2D_BLEND_FUNCTION_REG (0x108)
+#define FIMG2D_ROUND_MODE_REG (0x10c)
+#define FIMG2D_ROTATE_REG (0x200)
+#define FIMG2D_SRC_MSK_DIRECT_REG (0x204)
+#define FIMG2D_DST_PAT_DIRECT_REG (0x208)
+#define FIMG2D_SRC_SELECT_REG (0x300)
+#define FIMG2D_SRC_BASE_ADDR_REG (0x304)
+#define FIMG2D_SRC_STRIDE_REG (0x308)
+#define FIMG2D_SRC_COLOR_MODE_REG (0x30c)
+#define FIMG2D_SRC_LEFT_TOP_REG (0x310)
+#define FIMG2D_SRC_RIGHT_BOTTOM_REG (0x314)
+#define FIMG2D_SRC_PLANE2_BASE_ADDR_REG (0x318)
+#define FIMG2D_SRC_REPEAT_MODE_REG (0x31c)
+#define FIMG2D_SRC_PAD_VALUE_REG (0x320)
+#define FIMG2D_SRC_A8_RGB_EXT_REG (0x324)
+#define FIMG2D_SRC_SCALE_CTRL_REG (0x328)
+#define FIMG2D_SRC_XSCALE_REG (0x32c)
+#define FIMG2D_SRC_YSCALE_REG (0x330)
+#define FIMG2D_DST_SELECT_REG (0x400)
+#define FIMG2D_DST_BASE_ADDR_REG (0x404)
+#define FIMG2D_DST_STRIDE_REG (0x408)
+#define FIMG2D_DST_COLOR_MODE_REG (0x40c)
+#define FIMG2D_DST_LEFT_TOP_REG (0x410)
+#define FIMG2D_DST_RIGHT_BOTTOM_REG (0x414)
+#define FIMG2D_DST_PLANE2_BASE_ADDR_REG (0x418)
+#define FIMG2D_DST_A8_RGB_EXT_REG (0x41c)
+#define FIMG2D_PAT_BASE_ADDR_REG (0x500)
+#define FIMG2D_PAT_SIZE_REG (0x504)
+#define FIMG2D_PAT_COLOR_MODE_REG (0x508)
+#define FIMG2D_PAT_OFFSET_REG (0x50c)
+#define FIMG2D_PAT_STRIDE_REG (0x510)
+#define FIMG2D_MSK_BASE_ADDR_REG (0x520)
+#define FIMG2D_MSK_STRIDE_REG (0x524)
+#define FIMG2D_MSK_LEFT_TOP_REG (0x528)
+#define FIMG2D_MSK_RIGHT_BOTTOM_REG (0x52c)
+#define FIMG2D_MSK_MODE_REG (0x530)
+#define FIMG2D_MSK_REPEAT_MODE_REG (0x534)
+#define FIMG2D_MSK_PAD_VALUE_REG (0x538)
+#define FIMG2D_MSK_SCALE_CTRL_REG (0x53c)
+#define FIMG2D_MSK_XSCALE_REG (0x540)
+#define FIMG2D_MSK_YSCALE_REG (0x544)
+#define FIMG2D_CW_LT_REG (0x600)
+#define FIMG2D_CW_RB_REG (0x604)
+#define FIMG2D_THIRD_OPERAND_REG (0x610)
+#define FIMG2D_ROP4_REG (0x614)
+#define FIMG2D_ALPHA_REG (0x618)
+#define FIMG2D_FG_COLOR_REG (0x700)
+#define FIMG2D_BG_COLOR_REG (0x704)
+#define FIMG2D_BS_COLOR_REG (0x708)
+#define FIMG2D_SF_COLOR_REG (0x70c)
+#define FIMG2D_SRC_COLORKEY_CTRL_REG (0x710)
+#define FIMG2D_SRC_COLORKEY_DR_MIN_REG (0x714)
+#define FIMG2D_SRC_COLORKEY_DR_MAX_REG (0x718)
+#define FIMG2D_DST_COLORKEY_CTRL_REG (0x71c)
+#define FIMG2D_DST_COLORKEY_DR_MIN_REG (0x720)
+#define FIMG2D_DST_COLORKEY_DR_MAX_REG (0x724)
+#define FIMG2D_YCBCR_SRC_COLORKEY_CTRL_REG (0x728)
+#define FIMG2D_YCBCR_SRC_COLORKEY_DR_MIN_REG (0x72c)
+#define FIMG2D_YCBCR_SRC_COLORKEY_DR_MAX_REG (0x730)
+#define FIMG2D_YCBCR_DST_COLORKEY_CTRL_REG (0x734)
+#define FIMG2D_YCBCR_DST_COLORKEY_DR_MIN_REG (0x738)
+#define FIMG2D_YCBCR_DST_COLORKEY_DR_MAX_REG (0x73c)
+#define FIMG2D_GAMMA_TABLE0_0_REG (0x800)
+#define FIMG2D_GAMMA_TABLE0_1_REG (0x804)
+#define FIMG2D_GAMMA_TABLE0_2_REG (0x808)
+#define FIMG2D_GAMMA_TABLE0_3_REG (0x80c)
+#define FIMG2D_GAMMA_TABLE0_4_REG (0x810)
+#define FIMG2D_GAMMA_TABLE0_5_REG (0x814)
+#define FIMG2D_GAMMA_TABLE0_6_REG (0x818)
+#define FIMG2D_GAMMA_TABLE0_7_REG (0x81c)
+#define FIMG2D_GAMMA_TABLE0_8_REG (0x820)
+#define FIMG2D_GAMMA_TABLE0_9_REG (0x824)
+#define FIMG2D_GAMMA_TABLE0_10_REG (0x828)
+#define FIMG2D_GAMMA_TABLE0_11_REG (0x82c)
+#define FIMG2D_GAMMA_TABLE0_12_REG (0x830)
+#define FIMG2D_GAMMA_TABLE0_13_REG (0x834)
+#define FIMG2D_GAMMA_TABLE0_14_REG (0x838)
+#define FIMG2D_GAMMA_TABLE0_15_REG (0x83c)
+#define FIMG2D_GAMMA_TABLE1_0_REG (0x840)
+#define FIMG2D_GAMMA_TABLE1_1_REG (0x844)
+#define FIMG2D_GAMMA_TABLE1_2_REG (0x848)
+#define FIMG2D_GAMMA_TABLE1_3_REG (0x84c)
+#define FIMG2D_GAMMA_TABLE1_4_REG (0x850)
+#define FIMG2D_GAMMA_TABLE1_5_REG (0x854)
+#define FIMG2D_GAMMA_TABLE1_6_REG (0x858)
+#define FIMG2D_GAMMA_TABLE1_7_REG (0x85c)
+#define FIMG2D_GAMMA_TABLE1_8_REG (0x860)
+#define FIMG2D_GAMMA_TABLE1_9_REG (0x864)
+#define FIMG2D_GAMMA_TABLE1_10_REG (0x868)
+#define FIMG2D_GAMMA_TABLE1_11_REG (0x86c)
+#define FIMG2D_GAMMA_TABLE1_12_REG (0x870)
+#define FIMG2D_GAMMA_TABLE1_13_REG (0x874)
+#define FIMG2D_GAMMA_TABLE1_14_REG (0x878)
+#define FIMG2D_GAMMA_TABLE1_15_REG (0x87c)
+#define FIMG2D_GAMMA_REF_COLOR_REG (0x880)
+
+/* Bit Definitions */
+
+/* FIMG2D_SOFT_RESET_REG */
+#define FIMG2D_SFR_CLEAR (1 << 1)
+#define FIMG2D_SOFT_RESET (1 << 0)
+
+/* FIMG2D_INTEN_REG */
+#define FIMG2D_INT_TYPE_EDGE (1 << 4)
+#define FIMG2D_INT_TYPE_LEVEL (0 << 4)
+
+#define FIMG2D_ACF_INT_ENABLE (1 << 3)
+#define FIMG2D_UCF_INT_ENABLE (1 << 2)
+#define FIMG2D_GCF_INT_ENABLE (1 << 1)
+#define FIMG2D_BLIT_INT_ENABLE (1 << 0)
+
+/* FIMG2D_INTC_PEND_REG */
+#define FIMG2D_ACMD_INT_FLAG (1 << 3)
+#define FIMG2D_UCMD_INT_FLAG (1 << 2)
+#define FIMG2D_GCMD_INT_FLAG (1 << 1)
+#define FIMG2D_BLIT_INT_FLAG (1 << 0)
+
+/* FIMG2D_FIFO_STAT_REG */
+#define FIMG2D_BLIT_FINISHED (1 << 0)
+
+/* FIMG2D_AXI_MODE_REG */
+#define FIMG2D_MAX_BURST_LEN_2 (0 << 24)
+#define FIMG2D_MAX_BURST_LEN_4 (1 << 24)
+#define FIMG2D_MAX_BURST_LEN_8 (2 << 24)
+#define FIMG2D_MAX_BURST_LEN_16 (3 << 24)
+#define FIMG2D_MAX_BURST_LEN_MASK (3 << 24)
+#define FIMG2D_MAX_BURST_LEN_SHIFT (24)
+
+#define FIMG2D_AXI_AWUSERS_SHIFT (16)
+#define FIMG2D_AXI_ARUSERS_SHIFT (8)
+#define FIMG2D_AXI_AWCACHE_SHIFT (4)
+#define FIMG2D_AXI_ARCACHE_SHIFT (0)
+
+/* FIMG2D_DMA_SFR_BASE_ADDR_REG */
+
+/* FIMG2D_DMA_COMMAND_REG */
+#define FIMG2D_BATCH_BLIT_HALT (1 << 2)
+#define FIMG2D_BATCH_BLIT_CONT (1 << 1)
+#define FIMG2D_BATCH_BLIT_START (1 << 0)
+
+/* FIMG2D_DMA_EXE_LIST_NUM_REG */
+#define FIMG2D_BATCH_BLIT_EXELIST_NUM_MASK (0xff)
+#define FIMG2D_BATCH_BLIT_EXELIST_NUM_SHIFT (0)
+
+/* FIMG2D_DMA_STATUS_REG */
+#define FIMG2D_BATCH_BLIT_DONELIST_CNT_MASK (0xff)
+#define FIMG2D_BATCH_BLIT_DONELIST_CNT_SHIFT (17)
+
+#define FIMG2D_BATCH_BLIT_DONEBLIT_CNT_MASK (0xffff)
+#define FIMG2D_BATCH_BLIT_DONEBLIT_CNT_SHIFT (1)
+
+#define FIMG2D_BATCH_BLIT_DONE_MASK (1)
+#define FIMG2D_BATCH_BLIT_DONE_SHIFT (0)
+
+/* FIMG2D_DMA_HOLD_CMD_REG */
+#define FIMG2D_BATCH_BLIT_USER_HOLD (1 << 2)
+#define FIMG2D_BATCH_BLIT_LIST_HOLD (1 << 1)
+#define FIMG2D_BATCH_BLIT_BLIT_HOLD (1 << 0)
+
+/* FIMG2D_BITBLT_START_REG */
+#define FIMG2D_START_N_HOLD (1 << 1)
+#define FIMG2D_START_BITBLT (1 << 0)
+
+/* FIMG2D_BITBLT_COMMAND_REG */
+#define FIMG2D_SOLID_FILL (1 << 28)
+
+#define FIMG2D_DST_WR_DEPREMULT (1 << 27)
+#define FIMG2D_DST_RD_PREMULT (1 << 26)
+#define FIMG2D_PAT_PREMULT (1 << 25)
+#define FIMG2D_SRC_PREMULT (1 << 24)
+#define FIMG2D_PREMULT_ALL (0xf << 24)
+
+#define FIMG2D_ALPHA_BLEND_MODE (1 << 20)
+
+#define FIMG2D_COLORKEY_SRC_RGBA (1 << 16)
+#define FIMG2D_COLORKEY_DST_RGBA (2 << 16)
+#define FIMG2D_COLORKEY_SRC_YCBCR (4 << 16)
+#define FIMG2D_COLORKEY_DST_YCBCR (8 << 16)
+
+#define FIMG2D_OPAQUE_MODE (0 << 12)
+#define FIMG2D_TRANSP_MODE (1 << 12)
+#define FIMG2D_BLUSCR_MODE (2 << 12)
+
+#define FIMG2D_ENABLE_CW (1 << 8)
+#define FIMG2D_ENABLE_DITHER (1 << 3)
+
+#define FIMG2D_ENABLE_SRC_ALPHA (0 << 2)
+#define FIMG2D_ENABLE_ROP_ALPHA (1 << 2)
+
+#define FIMG2D_ENABLE_ROP4_MSK (1 << 0)
+#define FIMG2D_ENABLE_NORMAL_MSK (2 << 0)
+
+/* FIMG2D_BLEND_FUNCTION_REG */
+#define FIMG2D_WINCE_SRC_OVER (1 << 22)
+#define FIMG2D_DARKEN (1 << 21)
+#define FIMG2D_LIGHTEN (1 << 20)
+#define FIMG2D_INV_DST_COEFF (1 << 18)
+#define FIMG2D_INV_SRC_COEFF (1 << 16)
+
+#define FIMG2D_DST_COEFF_DA_SHIFT (14)
+#define FIMG2D_DST_COEFF_SA_SHIFT (12)
+#define FIMG2D_SRC_COEFF_DA_SHIFT (6)
+#define FIMG2D_SRC_COEFF_SA_SHIFT (4)
+
+#define FIMG2D_DST_COEFF_SHIFT (8)
+#define FIMG2D_SRC_COEFF_SHIFT (0)
+
+/* FIMG2D_ROUND_MODE_REG */
+#define FIMG2D_PREMULT_ROUND_MASK (3 << 4)
+#define FIMG2D_PREMULT_ROUND_SHIFT (4)
+
+#define FIMG2D_BLEND_ROUND_MASK (3 << 0)
+#define FIMG2D_BLEND_ROUND_SHIFT (0)
+
+/* FIMG2D_ROTATE_REG */
+#define FIMG2D_MSK_ROTATE_90 (1 << 8)
+#define FIMG2D_PAT_ROTATE_90 (1 << 4)
+#define FIMG2D_SRC_ROTATE_90 (1 << 0)
+
+/* FIMG2D_SRC_MSK_DIRECT_REG */
+#define FIMG2D_MSK_Y_DIR_NEGATIVE (1 << 5)
+#define FIMG2D_MSK_X_DIR_NEGATIVE (1 << 4)
+
+#define FIMG2D_SRC_Y_DIR_NEGATIVE (1 << 1)
+#define FIMG2D_SRC_X_DIR_NEGATIVE (1 << 0)
+
+/* FIMG2D_DST_PAT_DIRECT_REG */
+#define FIMG2D_PAT_Y_DIR_NEGATIVE (1 << 5)
+#define FIMG2D_PAT_X_DIR_NEGATIVE (1 << 4)
+
+#define FIMG2D_DST_Y_DIR_NEGATIVE (1 << 1)
+#define FIMG2D_DST_X_DIR_NEGATIVE (1 << 0)
+
+/* FIMG2D_SRC_SELECT_REG & FIMG2D_DST_SELECT_REG */
+#define FIMG2D_IMAGE_TYPE_MEMORY (0 << 0)
+#define FIMG2D_IMAGE_TYPE_FGCOLOR (1 << 0)
+#define FIMG2D_IMAGE_TYPE_BGCOLOR (2 << 0)
+
+/* FIMG2D_SRC_BASE_ADDR_REG */
+/* FIMG2D_DST_BASE_ADDR_REG */
+/* FIMG2D_PAT_BASE_ADDR_REG */
+/* FIMG2D_MSK_BASE_ADDR_REG */
+
+/* FIMG2D_SRC_STRIDE_REG */
+/* FIMG2D_DST_STRIDE_REG */
+/* FIMG2D_PAT_STRIDE_REG */
+/* FIMG2D_MSK_STRIDE_REG */
+
+/* FIMG2D_SRC_COLOR_MODE_REG & FIMG2D_DST_COLOR_MODE_REG */
+#define FIMG2D_YCBCR_NARROW (0 << 17)
+#define FIMG2D_YCBCR_WIDE (1 << 17)
+
+#define FIMG2D_CSC_601 (0 << 16)
+#define FIMG2D_CSC_709 (1 << 16)
+
+#define FIMG2D_YCBCR_ORDER_P1_CRY1CBY0 (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P1_CBY1CRY0 (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P1_Y1CRY0CB (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P1_Y1CBY0CR (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P2_CRCB (0 << 12)
+#define FIMG2D_YCBCR_ORDER_P2_CBCR (1 << 12)
+#define FIMG2D_YCBCR_ORDER_SHIFT (12)
+
+#define FIMG2D_YCBCR_1PLANE (0 << 8)
+#define FIMG2D_YCBCR_2PLANE (1 << 8)
+
+#define FIMG2D_RGB_ORDER_AXRGB (0 << 4)
+#define FIMG2D_RGB_ORDER_RGBAX (1 << 4)
+#define FIMG2D_RGB_ORDER_AXBGR (2 << 4)
+#define FIMG2D_RGB_ORDER_BGRAX (3 << 4)
+#define FIMG2D_RGB_ORDER_SHIFT (4)
+
+#define FIMG2D_COLOR_FORMAT_XRGB_8888 (0 << 0)
+#define FIMG2D_COLOR_FORMAT_ARGB_8888 (1 << 0)
+#define FIMG2D_COLOR_FORMAT_RGB_565 (2 << 0)
+#define FIMG2D_COLOR_FORMAT_XRGB_1555 (3 << 0)
+#define FIMG2D_COLOR_FORMAT_ARGB_1555 (4 << 0)
+#define FIMG2D_COLOR_FORMAT_XRGB_4444 (5 << 0)
+#define FIMG2D_COLOR_FORMAT_ARGB_4444 (6 << 0)
+#define FIMG2D_COLOR_FORMAT_PACKED_RGB_888 (7 << 0)
+#define FIMG2D_COLOR_FORMAT_YCBCR_444 (8 << 0)
+#define FIMG2D_COLOR_FORMAT_YCBCR_422 (9 << 0)
+#define FIMG2D_COLOR_FORMAT_YCBCR_420 (10 << 0)
+#define FIMG2D_COLOR_FORMAT_A8 (11 << 0)
+#define FIMG2D_COLOR_FORMAT_L8 (12 << 0)
+#define FIMG2D_COLOR_FORMAT_SHIFT (0)
+
+/* FIMG2D_PAT_COLOR_MODE_REG */
+#define FIMG2D_PAT_ORDER_AXRGB (0 << 4)
+#define FIMG2D_PAT_ORDER_RGBAX (1 << 4)
+#define FIMG2D_PAT_ORDER_AXBGR (2 << 4)
+#define FIMG2D_PAT_ORDER_BGRAX (3 << 4)
+
+#define FIMG2D_PAT_FORMAT_XRGB_8888 (0 << 0)
+#define FIMG2D_PAT_FORMAT_ARGB_8888 (1 << 0)
+#define FIMG2D_PAT_FORMAT_RGB_565 (2 << 0)
+#define FIMG2D_PAT_FORMAT_XRGB_1555 (3 << 0)
+#define FIMG2D_PAT_FORMAT_ARGB_1555 (4 << 0)
+#define FIMG2D_PAT_FORMAT_XRGB_4444 (5 << 0)
+#define FIMG2D_PAT_FORMAT_ARGB_4444 (6 << 0)
+#define FIMG2D_PAT_FORMAT_PACKED_RGB_888 (7 << 0)
+
+/* FIMG2D_SRC_LEFT_TOP_REG & FIMG2D_SRC_RIGHT_BOTTOM_REG */
+/* FIMG2D_DST_LEFT_TOP_REG & FIMG2D_DST_RIGHT_BOTTOM_REG */
+/* FIMG2D_MSK_LEFT_TOP_REG & FIMG2D_MSK_RIGHT_BOTTOM_REG */
+#define FIMG2D_COORD_LT_Y_SHIFT (16)
+#define FIMG2D_COORD_LT_X_SHIFT (0)
+#define FIMG2D_COORD_RB_Y_SHIFT (16)
+#define FIMG2D_COORD_RB_X_SHIFT (0)
+#define FIMG2D_COORD_MAX_HEIGHT (8000)
+#define FIMG2D_COORD_MAX_WIDTH (8000)
+
+/* FIMG2D_SRC_PLANE2_BASE_ADDR_REG */
+/* FIMG2D_DST_PLANE2_BASE_ADDR_REG */
+
+/* FIMG2D_SRC_REPEAT_MODE_REG */
+#define FIMG2D_SRC_REPEAT_NORMAL (0 << 0)
+#define FIMG2D_SRC_REPEAT_PAD (1 << 0)
+#define FIMG2D_SRC_REPEAT_REFLECT (2 << 0)
+#define FIMG2D_SRC_REPEAT_CLAMP (3 << 0)
+#define FIMG2D_SRC_REPEAT_NONE (4 << 0)
+#define FIMG2D_SRC_REPEAT_SHIFT (0)
+
+/* FIMG2D_MSK_REPEAT_MODE_REG */
+#define FIMG2D_MSK_REPEAT_NORMAL (0 << 0)
+#define FIMG2D_MSK_REPEAT_PAD (1 << 0)
+#define FIMG2D_MSK_REPEAT_REFLECT (2 << 0)
+#define FIMG2D_MSK_REPEAT_CLAMP (3 << 0)
+#define FIMG2D_MSK_REPEAT_SHIFT (0)
+
+/* FIMG2D_SRC_PAD_VALUE_REG */
+/* FIMG2D_MSK_PAD_VALUE_REG */
+
+/* FIMG2D_SRC_A8_RGB_EXT_REG */
+/* FIMG2D_DST_A8_RGB_EXT_REG */
+
+/* FIMG2D_SRC_SCALE_CTRL_REG & FIMG2D_MSK_SCALE_CTRL_REG */
+#define FIMG2D_SCALE_MODE_NEAREST (1 << 0)
+#define FIMG2D_SCALE_MODE_BILINEAR (2 << 0)
+
+/* FIMG2D_SRC_XSCALE_REG & FIMG2D_SRC_YSCALE_REG */
+/* FIMG2D_MSK_XSCALE_REG & FIMG2D_MSK_YSCALE_REG */
+#define FIMG2D_SCALE_FACTOR_INTG_SHIFT (16)
+#define FIMG2D_SCALE_FACTOR_FRAC_SHIFT (0)
+
+/* FIMG2D_PAT_SIZE_REG */
+#define FIMG2D_PAT_HEIGHT_SHIFT (16)
+#define FIMG2D_PAT_WIDTH_SHIFT (0)
+#define FIMG2D_MAX_PAT_HEIGHT (8000)
+#define FIMG2D_MAX_PAT_WIDTH (8000)
+
+/* FIMG2D_PAT_OFFSET_REG */
+#define FIMG2D_PAT_Y_OFFSET_SHIFT (16)
+#define FIMG2D_PAT_X_OFFSET_SHIFT (0)
+#define FIMG2D_MAX_PAT_Y_OFFSET (7999)
+#define FIMG2D_MAX_PAT_X_OFFSET (7999)
+
+/* FIMG2D_MSK_MODE_REG */
+#define FIMG2D_MSK_TYPE_ALPHA (0 << 8)
+#define FIMG2D_MSK_TYPE_ARGB (1 << 8)
+#define FIMG2D_MSK_TYPE_MIXED (2 << 8)
+
+#define FIMG2D_MSK_ORDER_AXRGB (0 << 4)
+#define FIMG2D_MSK_ORDER_RGBAX (1 << 4)
+#define FIMG2D_MSK_ORDER_AXBGR (2 << 4)
+#define FIMG2D_MSK_ORDER_BGRAX (3 << 4)
+#define FIMG2D_MSK_ORDER_SHIFT (4)
+
+#define FIMG2D_1BIT_MSK (0 << 0)
+#define FIMG2D_4BIT_MSK (1 << 0)
+#define FIMG2D_8BIT_MSK (2 << 0)
+#define FIMG2D_16BIT_MSK_565 (3 << 0)
+#define FIMG2D_16BIT_MSK_1555 (4 << 0)
+#define FIMG2D_16BIT_MSK_4444 (5 << 0)
+#define FIMG2D_32BIT_MSK_8888 (6 << 0)
+#define FIMG2D_4BIT_MSK_WINCE_AA_FONT (7 << 0)
+#define FIMG2D_MSK_FORMAT_SHIFT (0)
+
+/* FIMG2D_CW_LT_REG */
+#define FIMG2D_CW_COORD_LT_Y_SHIFT (16)
+#define FIMG2D_CW_COORD_LT_X_SHIFT (0)
+#define FIMG2D_CW_COORD_RB_Y_SHIFT (16)
+#define FIMG2D_CW_COORD_RB_X_SHIFT (0)
+
+/* FIMG2D_THIRD_OPERAND_REG */
+#define FIMG2D_OPR3_MSKSEL_PAT (0 << 4)
+#define FIMG2D_OPR3_MSKSEL_FGCOLOR (1 << 4)
+#define FIMG2D_OPR3_MSKSEL_BGCOLOR (2 << 4)
+#define FIMG2D_OPR3_UNMSKSEL_PAT (0 << 0)
+#define FIMG2D_OPR3_UNMSKSEL_FGCOLOR (1 << 0)
+#define FIMG2D_OPR3_UNMSKSEL_BGCOLOR (2 << 0)
+
+/* FIMG2D_ROP4_REG */
+#define FIMG2D_MASKED_ROP3_SHIFT (8)
+#define FIMG2D_UNMASKED_ROP3_SHIFT (0)
+
+/* FIMG2D_ALPHA_REG */
+#define FIMG2D_GCOLOR_RGB_MASK (0xffffff)
+#define FIMG2D_GCOLOR_SHIFT (8)
+
+#define FIMG2D_GALPHA_MASK (0xff)
+#define FIMG2D_GALPHA_SHIFT (0)
+
+/* FIMG2D_FG_COLOR_REG */
+/* FIMG2D_BG_COLOR_REG */
+/* FIMG2D_BS_COLOR_REG */
+/* FIMG2D_SF_COLOR_REG */
+
+/* FIMG2D_SRC_COLORKEY_CTRL_REG */
+/* FIMG2D_SRC_COLORKEY_DR_MIN_REG */
+/* FIMG2D_SRC_COLORKEY_DR_MAX_REG */
+
+/* FIMG2D_DST_COLORKEY_CTRL_REG */
+/* FIMG2D_DST_COLORKEY_DR_MIN_REG */
+/* FIMG2D_DST_COLORKEY_DR_MAX_REG */
+
+/* FIMG2D_YCBCR_SRC_COLORKEY_CTRL_REG */
+/* FIMG2D_YCBCR_SRC_COLORKEY_DR_MIN_REG */
+/* FIMG2D_YCBCR_SRC_COLORKEY_DR_MAX_REG */
+
+/* FIMG2D_YCBCR_DST_COLORKEY_CTRL_REG */
+/* FIMG2D_YCBCR_DST_COLORKEY_DR_MIN_REG */
+/* FIMG2D_YCBCR_DST_COLORKEY_DR_MAX_REG */
+
+#endif /* __FIMG2D4X_REGS_H */
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.c b/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.c
new file mode 100644
index 0000000..b494175
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.c
@@ -0,0 +1,168 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/dma-mapping.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+
+#include "fimg2d.h"
+#include "fimg2d_cache.h"
+
+#define LV1_SHIFT 20
+#define LV1_PT_SIZE SZ_1M
+#define LV2_PT_SIZE SZ_1K
+#define LV2_BASE_MASK 0x3ff
+#define LV2_PT_MASK 0xff000
+#define LV2_SHIFT 12
+#define LV1_DESC_MASK 0x3
+#define LV2_DESC_MASK 0x2
+
+/*
+ * virt2phys - translate a virtual address to a physical address by
+ * manually walking the ARM short-descriptor page tables of @mm.
+ *
+ * Returns 0 when either level of the walk finds a descriptor that is
+ * not the expected type (LV1 coarse page table, LV2 small page);
+ * callers treat 0 as "no valid mapping".
+ */
+static inline unsigned long virt2phys(struct mm_struct *mm, unsigned long vaddr)
+{
+	unsigned long *pgd;
+	unsigned long *lv1d, *lv2d;
+
+	pgd = (unsigned long *)mm->pgd;
+
+	/* index LV1 table by the top 12 bits (1MiB sections) */
+	lv1d = pgd + (vaddr >> LV1_SHIFT);
+
+	/* desc[1:0] == 01 means "points to a coarse page table" */
+	if ((*lv1d & LV1_DESC_MASK) != 0x1) {
+		fimg2d_debug("invalid LV1 descriptor, "
+				"pgd %p lv1d 0x%lx vaddr 0x%lx\n",
+				pgd, *lv1d, vaddr);
+		return 0;
+	}
+
+	/* coarse PT base is 1KiB aligned; index it by vaddr bits [19:12] */
+	lv2d = (unsigned long *)phys_to_virt(*lv1d & ~LV2_BASE_MASK) +
+		((vaddr & LV2_PT_MASK) >> LV2_SHIFT);
+
+	/* desc bit1 set means small page (see table in fimg2d_check_pagetable) */
+	if ((*lv2d & LV2_DESC_MASK) != 0x2) {
+		fimg2d_debug("invalid LV2 descriptor, "
+				"pgd %p lv2d 0x%lx vaddr 0x%lx\n",
+				pgd, *lv2d, vaddr);
+		return 0;
+	}
+
+	/* combine 4KiB page frame with the in-page offset */
+	return (*lv2d & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
+}
+
+#ifdef CONFIG_OUTER_CACHE
+/*
+ * fimg2d_dma_sync_outer - clean or flush [@vaddr, @vaddr + @size) in
+ * the outer (L2) cache, one page at a time.
+ *
+ * The range is walked page by page because contiguous virtual pages
+ * may map to discontiguous physical pages; virt2phys() is consulted
+ * for each chunk and unmapped pages are skipped.  Operations other
+ * than CACHE_CLEAN/CACHE_FLUSH are a no-op.
+ */
+void fimg2d_dma_sync_outer(struct mm_struct *mm, unsigned long vaddr,
+		size_t size, enum cache_opr opr)
+{
+	int len;
+	unsigned long pos, limit, chunk_end, paddr;
+
+	if (opr != CACHE_CLEAN && opr != CACHE_FLUSH)
+		return;
+
+	pos = vaddr;
+	limit = vaddr + size;
+
+	while (pos < limit) {
+		/* advance to the next page boundary, capped at the range end */
+		chunk_end = (pos + PAGE_SIZE) & PAGE_MASK;
+		if (chunk_end > limit)
+			chunk_end = limit;
+		len = chunk_end - pos;
+
+		paddr = virt2phys(mm, pos);
+		if (paddr) {
+			if (opr == CACHE_CLEAN)
+				outer_clean_range(paddr, paddr + len);
+			else
+				outer_flush_range(paddr, paddr + len);
+		}
+		pos += len;
+	}
+}
+
+/*
+ * fimg2d_clean_outer_pagetable - clean the page-table entries covering
+ * [@vaddr, @vaddr + @size) out of the outer (L2) cache, so a non-CPU
+ * walker reading the tables from memory sees up-to-date descriptors.
+ */
+void fimg2d_clean_outer_pagetable(struct mm_struct *mm, unsigned long vaddr,
+		size_t size)
+{
+	unsigned long *pgd;
+	unsigned long *lv1, *lv1end;
+	unsigned long lv2pa;
+
+	pgd = (unsigned long *)mm->pgd;
+
+	/* LV1 entries spanning the range, end rounded up to a 1MiB section */
+	lv1 = pgd + (vaddr >> LV1_SHIFT);
+	lv1end = pgd + ((vaddr + size + LV1_PT_SIZE-1) >> LV1_SHIFT);
+
+	/* clean level1 page table */
+	outer_clean_range(virt_to_phys(lv1), virt_to_phys(lv1end));
+
+	do {
+		/* NOTE(review): assumes every *lv1 here is a valid coarse-PT
+		 * descriptor — presumably guaranteed by a prior
+		 * fimg2d_check_pagetable() call; confirm, otherwise lv2pa
+		 * is garbage. */
+		lv2pa = *lv1 & ~LV2_BASE_MASK;	/* lv2 pt base */
+		/* clean level2 page table */
+		outer_clean_range(lv2pa, lv2pa + LV2_PT_SIZE);
+		lv1++;
+	} while (lv1 != lv1end);
+}
+#endif /* CONFIG_OUTER_CACHE */
+
+/*
+ * fimg2d_check_pagetable - verify that every page in
+ * [@vaddr, @vaddr + @size) has a valid LV1 coarse-table descriptor and
+ * a valid LV2 small-page descriptor in @mm's page tables.
+ *
+ * Returns PT_NORMAL when the whole (page-aligned, extended to cover
+ * the leading in-page offset) range is mapped, PT_FAULT on the first
+ * invalid descriptor.
+ */
+enum pt_status fimg2d_check_pagetable(struct mm_struct *mm,
+		unsigned long vaddr, size_t size)
+{
+	unsigned long *pgd;
+	unsigned long *lv1d, *lv2d;
+
+	pgd = (unsigned long *)mm->pgd;
+
+	/* widen the range so the walk covers whole pages */
+	size += offset_in_page(vaddr);
+	size = PAGE_ALIGN(size);
+
+	while ((long)size > 0) {
+		lv1d = pgd + (vaddr >> LV1_SHIFT);
+
+		/*
+		 * check level 1 descriptor
+		 * lv1 desc[1:0] = 00 --> fault
+		 * lv1 desc[1:0] = 01 --> page table
+		 * lv1 desc[1:0] = 10 --> section or supersection
+		 * lv1 desc[1:0] = 11 --> reserved
+		 */
+		if ((*lv1d & LV1_DESC_MASK) != 0x1) {
+			fimg2d_debug("invalid LV1 descriptor, "
+					"pgd %p lv1d 0x%lx vaddr 0x%lx\n",
+					pgd, *lv1d, vaddr);
+			return PT_FAULT;
+		}
+
+		lv2d = (unsigned long *)phys_to_virt(*lv1d & ~LV2_BASE_MASK) +
+				((vaddr & LV2_PT_MASK) >> LV2_SHIFT);
+
+		/*
+		 * check level 2 descriptor
+		 * lv2 desc[1:0] = 00 --> fault
+		 * lv2 desc[1:0] = 01 --> 64k page
+		 * lv2 desc[1:0] = 1x --> 4k page
+		 */
+		if ((*lv2d & LV2_DESC_MASK) != 0x2) {
+			fimg2d_debug("invalid LV2 descriptor, "
+					"pgd %p lv2d 0x%lx vaddr 0x%lx\n",
+					pgd, *lv2d, vaddr);
+			return PT_FAULT;
+		}
+
+		vaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	return PT_NORMAL;
+}
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.h b/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.h
new file mode 100644
index 0000000..ac3cbe4
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.h
@@ -0,0 +1,101 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_cache.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+#include <plat/cpu.h>
+#include "fimg2d.h"
+
+#define L1_CACHE_SIZE SZ_64K
+#define L2_CACHE_SIZE SZ_1M
+#define LINE_FLUSH_THRESHOLD SZ_1K
+
+/**
+ * cache_opr - [kernel] cache operation mode
+ * @CACHE_INVAL: do cache invalidate
+ * @CACHE_CLEAN: do cache clean for src and msk image
+ * @CACHE_FLUSH: do cache clean and invalidate for dst image
+ * @CACHE_FLUSH_INNER_ALL: clean and invalidate for innercache
+ * @CACHE_FLUSH_ALL: clean and invalidate for whole caches
+ */
+enum cache_opr {
+ CACHE_INVAL,
+ CACHE_CLEAN,
+ CACHE_FLUSH,
+ CACHE_FLUSH_INNER_ALL,
+ CACHE_FLUSH_ALL
+};
+
+/**
+ * @PT_NORMAL: pagetable exists
+ * @PT_FAULT: invalid pagetable
+ */
+enum pt_status {
+ PT_NORMAL,
+ PT_FAULT,
+};
+
+/* true when @size is large enough that flushing the whole inner cache
+ * beats a ranged flush; exynos5250 uses a much larger break-even point */
+static inline bool is_inner_flushall(size_t size)
+{
+	size_t threshold;
+
+	threshold = soc_is_exynos5250() ? SZ_1M * 25 : L1_CACHE_SIZE;
+	return size >= threshold;
+}
+
+/* true when @size reaches the outer-cache capacity, i.e. a full flush
+ * is cheaper than a ranged one */
+static inline bool is_outer_flushall(size_t size)
+{
+	return size >= L2_CACHE_SIZE;
+}
+
+/* decide whether a hole of @hole bytes should be swallowed into one
+ * ranged inner flush (true) or flushed line-by-line (false) */
+static inline bool is_inner_flushrange(size_t hole)
+{
+	/* non-5250 SoCs always take the single-range path */
+	if (!soc_is_exynos5250())
+		return true;
+
+	return hole < LINE_FLUSH_THRESHOLD;	/* big holes: line-by-line */
+}
+
+/* decide whether a hole of @hole bytes should be swallowed into one
+ * ranged outer flush (true) or flushed line-by-line (false) */
+static inline bool is_outer_flushrange(size_t hole)
+{
+	return hole < LINE_FLUSH_THRESHOLD;
+}
+
+/* sync [@addr, @addr + @size) in the inner cache for DMA: clean for
+ * device-bound data, clean+invalidate for bidirectional buffers;
+ * other directions are left to fimg2d_dma_unsync_inner() */
+static inline void fimg2d_dma_sync_inner(unsigned long addr, size_t size,
+		int dir)
+{
+	void *start = (void *)addr;
+
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		dmac_map_area(start, size, dir);
+		break;
+	case DMA_BIDIRECTIONAL:
+		dmac_flush_range(start, (void *)(addr + size));
+		break;
+	default:
+		/* DMA_FROM_DEVICE and others: nothing to do here */
+		break;
+	}
+}
+
+/* post-DMA counterpart of fimg2d_dma_sync_inner(): only the
+ * DMA_TO_DEVICE mapping needs an unmap step */
+static inline void fimg2d_dma_unsync_inner(unsigned long addr, size_t size,
+		int dir)
+{
+	if (dir != DMA_TO_DEVICE)
+		return;
+
+	dmac_unmap_area((void *)addr, size, dir);
+}
+
+void fimg2d_clean_outer_pagetable(struct mm_struct *mm, unsigned long addr,
+ size_t size);
+void fimg2d_dma_sync_outer(struct mm_struct *mm, unsigned long addr,
+ size_t size, enum cache_opr opr);
+enum pt_status fimg2d_check_pagetable(struct mm_struct *mm, unsigned long addr,
+ size_t size);
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.c b/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.c
new file mode 100644
index 0000000..24a80ae
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.c
@@ -0,0 +1,170 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/clk.h>
+#include <linux/atomic.h>
+#include <linux/sched.h>
+#include <plat/cpu.h>
+#include <plat/fimg2d.h>
+#include "fimg2d.h"
+#include "fimg2d_clk.h"
+
+/*
+ * fimg2d_clk_on - ungate the FIMG2D clock and publish the clkon flag.
+ *
+ * The enable and the flag update happen together under bltlock so
+ * other paths never observe clkon==1 with the clock still gated.
+ * NOTE(review): clk_enable() is called under a spinlock, so it is
+ * assumed non-sleeping on this platform — confirm against the clk
+ * implementation.
+ */
+void fimg2d_clk_on(struct fimg2d_control *info)
+{
+	spin_lock(&info->bltlock);
+	clk_enable(info->clock);
+	atomic_set(&info->clkon, 1);
+	spin_unlock(&info->bltlock);
+
+	fimg2d_debug("clock enable\n");
+}
+
+/*
+ * fimg2d_clk_off - clear the clkon flag, then gate the FIMG2D clock.
+ *
+ * Mirror image of fimg2d_clk_on(): the flag is cleared *before* the
+ * clock stops, and both happen under bltlock, so no path sees
+ * clkon==1 after the clock is gated.
+ */
+void fimg2d_clk_off(struct fimg2d_control *info)
+{
+	spin_lock(&info->bltlock);
+	atomic_set(&info->clkon, 0);
+	clk_disable(info->clock);
+	spin_unlock(&info->bltlock);
+
+	fimg2d_debug("clock disable\n");
+}
+
+/*
+ * fimg2d_clk_save - drop the source clock to 50MHz on exynos4212/4412
+ * (e.g. around suspend); no-op on other SoCs.
+ *
+ * Fixes vs. original: clk_get() may sleep, so it must not be called
+ * under the bltlock spinlock; its result was also never checked for
+ * IS_ERR() and the acquired reference was never released (refcount
+ * leak on every call).
+ */
+void fimg2d_clk_save(struct fimg2d_control *info)
+{
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		struct fimg2d_platdata *pdata;
+		struct clk *sclk;
+
+		pdata = to_fimg2d_plat(info->dev);
+
+		/* look the clock up before taking the spinlock */
+		sclk = clk_get(info->dev, pdata->clkname);
+		if (IS_ERR(sclk)) {
+			printk(KERN_ERR "FIMG2D failed to get sclk\n");
+			return;
+		}
+
+		spin_lock(&info->bltlock);
+		clk_set_rate(sclk, 50*MHZ);	/* 800MHz/16=50MHz */
+		spin_unlock(&info->bltlock);
+
+		fimg2d_debug("%s clkrate=%lu\n", pdata->clkname,
+				clk_get_rate(sclk));
+
+		clk_put(sclk);	/* balance the clk_get() above */
+	}
+}
+
+/*
+ * fimg2d_clk_restore - restore the source clock to 2 * pclk_acp on
+ * exynos4212/4412 (counterpart of fimg2d_clk_save); no-op elsewhere.
+ *
+ * Fixes vs. original: clk_get() may sleep, so both lookups are done
+ * before taking the bltlock spinlock; IS_ERR() is now checked and the
+ * acquired references are released (they previously leaked on every
+ * call).
+ */
+void fimg2d_clk_restore(struct fimg2d_control *info)
+{
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		struct fimg2d_platdata *pdata;
+		struct clk *sclk, *pclk;
+
+		pdata = to_fimg2d_plat(info->dev);
+
+		/* look both clocks up before taking the spinlock */
+		sclk = clk_get(info->dev, pdata->clkname);
+		if (IS_ERR(sclk)) {
+			printk(KERN_ERR "FIMG2D failed to get sclk\n");
+			return;
+		}
+		pclk = clk_get(NULL, "pclk_acp");
+		if (IS_ERR(pclk)) {
+			printk(KERN_ERR "FIMG2D failed to get pclk_acp\n");
+			clk_put(sclk);
+			return;
+		}
+
+		spin_lock(&info->bltlock);
+		clk_set_rate(sclk, clk_get_rate(pclk) * 2);
+		spin_unlock(&info->bltlock);
+
+		fimg2d_debug("%s(%lu) pclk_acp(%lu)\n", pdata->clkname,
+				clk_get_rate(sclk), clk_get_rate(pclk));
+
+		/* balance the clk_get() calls above */
+		clk_put(pclk);
+		clk_put(sclk);
+	}
+}
+
+/*
+ * fimg2d_clk_dump - print the current FIMG2D-related clock rates.
+ *
+ * Fixes vs. original: every clk_get() result is now IS_ERR()-checked
+ * before clk_get_rate() dereferences it, and the references are
+ * released with clk_put() (they previously leaked on every call).
+ */
+void fimg2d_clk_dump(struct fimg2d_control *info)
+{
+	struct fimg2d_platdata *pdata;
+	struct clk *sclk, *pclk, *aclk;
+
+	pdata = to_fimg2d_plat(info->dev);
+
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		sclk = clk_get(info->dev, pdata->clkname);
+		pclk = clk_get(NULL, "pclk_acp");
+
+		if (!IS_ERR(sclk) && !IS_ERR(pclk))
+			printk(KERN_INFO "%s(%lu) pclk_acp(%lu)\n",
+				pdata->clkname,
+				clk_get_rate(sclk), clk_get_rate(pclk));
+
+		if (!IS_ERR(pclk))
+			clk_put(pclk);
+		if (!IS_ERR(sclk))
+			clk_put(sclk);
+	} else {
+		aclk = clk_get(NULL, "aclk_acp");
+		pclk = clk_get(NULL, "pclk_acp");
+
+		if (!IS_ERR(aclk) && !IS_ERR(pclk))
+			printk(KERN_INFO "aclk_acp(%lu) pclk_acp(%lu)\n",
+				clk_get_rate(aclk), clk_get_rate(pclk));
+
+		if (!IS_ERR(pclk))
+			clk_put(pclk);
+		if (!IS_ERR(aclk))
+			clk_put(aclk);
+	}
+}
+
+/*
+ * fimg2d_clk_setup - probe-time clock setup.
+ *
+ * On exynos4212/4412: reparent the source clock and program its rate
+ * from platform data, then acquire the gate clock used by
+ * fimg2d_clk_on()/off().  On other SoCs only the gate clock is taken.
+ *
+ * Returns 0 on success, -ENOENT when any clk_get() fails.
+ *
+ * NOTE(review): on success, the sclk/parent references are deliberately
+ * kept (not clk_put) — presumably held for the device lifetime and
+ * dropped in fimg2d_clk_release(); confirm the put/get counts balance.
+ * The clk_get() calls in the else-branch debug print are never put.
+ */
+int fimg2d_clk_setup(struct fimg2d_control *info)
+{
+	struct fimg2d_platdata *pdata;
+	struct clk *parent, *sclk;
+	int ret = 0;
+
+	sclk = parent = NULL;
+	pdata = to_fimg2d_plat(info->dev);
+
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		/* clock for setting parent and rate */
+		parent = clk_get(info->dev, pdata->parent_clkname);
+		if (IS_ERR(parent)) {
+			printk(KERN_ERR "FIMG2D failed to get parent clk\n");
+			ret = -ENOENT;
+			goto err_clk1;
+		}
+		fimg2d_debug("parent clk: %s\n", pdata->parent_clkname);
+
+		sclk = clk_get(info->dev, pdata->clkname);
+		if (IS_ERR(sclk)) {
+			printk(KERN_ERR "FIMG2D failed to get sclk\n");
+			ret = -ENOENT;
+			goto err_clk2;
+		}
+		fimg2d_debug("sclk: %s\n", pdata->clkname);
+
+		/* reparent failure is logged but not treated as fatal */
+		if (clk_set_parent(sclk, parent))
+			printk(KERN_ERR "FIMG2D failed to set parent\n");
+
+		clk_set_rate(sclk, pdata->clkrate);
+		fimg2d_debug("clkrate: %ld parent clkrate: %ld\n",
+				clk_get_rate(sclk), clk_get_rate(parent));
+	} else {
+		fimg2d_debug("aclk_acp(%lu) pclk_acp(%lu)\n",
+				clk_get_rate(clk_get(NULL, "aclk_acp")),
+				clk_get_rate(clk_get(NULL, "pclk_acp")));
+	}
+
+	/* clock for gating */
+	info->clock = clk_get(info->dev, pdata->gate_clkname);
+	if (IS_ERR(info->clock)) {
+		printk(KERN_ERR "FIMG2D failed to get gate clk\n");
+		ret = -ENOENT;
+		goto err_clk3;
+	}
+	fimg2d_debug("gate clk: %s\n", pdata->gate_clkname);
+	return ret;
+
+	/* goto-ladder unwind: release in reverse acquisition order */
+err_clk3:
+	if (sclk)
+		clk_put(sclk);
+
+err_clk2:
+	if (parent)
+		clk_put(parent);
+
+err_clk1:
+	return ret;
+}
+
+/*
+ * fimg2d_clk_release - remove-time counterpart of fimg2d_clk_setup():
+ * drop the gate clock and, on exynos4212/4412, the sclk/parent clocks.
+ *
+ * NOTE(review): clk_put(clk_get(...)) takes a fresh reference and puts
+ * it once, so it balances only one of the two gets (setup + here) —
+ * confirm this matches how the clk core refcounts on this kernel.
+ */
+void fimg2d_clk_release(struct fimg2d_control *info)
+{
+	clk_put(info->clock);
+	if (soc_is_exynos4212() || soc_is_exynos4412()) {
+		struct fimg2d_platdata *pdata;
+		pdata = to_fimg2d_plat(info->dev);
+		clk_put(clk_get(info->dev, pdata->clkname));
+		clk_put(clk_get(info->dev, pdata->parent_clkname));
+	}
+}
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.h b/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.h
new file mode 100644
index 0000000..c3fbf67
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.h
@@ -0,0 +1,26 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_clk.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D_CLK_H__
+#define __FIMG2D_CLK_H__
+
+#include "fimg2d.h"
+
+int fimg2d_clk_setup(struct fimg2d_control *info);
+void fimg2d_clk_release(struct fimg2d_control *info);
+void fimg2d_clk_on(struct fimg2d_control *info);
+void fimg2d_clk_off(struct fimg2d_control *info);
+void fimg2d_clk_save(struct fimg2d_control *info);
+void fimg2d_clk_restore(struct fimg2d_control *info);
+void fimg2d_clk_dump(struct fimg2d_control *info);
+
+#endif /* __FIMG2D_CLK_H__ */
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.c b/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.c
new file mode 100644
index 0000000..63fd483
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.c
@@ -0,0 +1,551 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <plat/fimg2d.h>
+#include "fimg2d.h"
+#include "fimg2d_ctx.h"
+#include "fimg2d_cache.h"
+#include "fimg2d_helper.h"
+
+/*
+ * Bits per pixel for each color/mask format, indexed by enum color_format.
+ * Entries of 0 mark formats the generic byte-size helpers below do not
+ * handle (multi-plane YUV strides are computed by yuv_stride() instead).
+ */
+static int bpptable[MSK_FORMAT_END+1] = {
+	32, 32, 16, 16, 16, 16, 16, 24, /* rgb */
+	0, 0, 0, 8, 8, 0, /* yuv */
+	1, 4, 8, 16, 16, 16, 32, 0, /* msk */
+};
+
+/*
+ * fimg2d_check_params - sanity-check a blit command copied from user space
+ *
+ * Returns 0 when the command is usable, -1 otherwise.  A destination image
+ * and a valid blit op are mandatory.  Note that only x1/y1 are bounded by
+ * the image dimensions here; oversized x2/y2 are clamped afterwards by
+ * fimg2d_fixup_params().
+ */
+static int fimg2d_check_params(struct fimg2d_bltcmd *cmd)
+{
+	int w, h, i;
+	struct fimg2d_param *p = &cmd->param;
+	struct fimg2d_image *img;
+	struct fimg2d_scale *scl;
+	struct fimg2d_clip *clp;
+	struct fimg2d_rect *r;
+
+	/* dst is mandatory */
+	if (!cmd->image[IDST].addr.type)
+		return -1;
+
+	/* DST op makes no effect */
+	if (cmd->op < 0 || cmd->op == BLIT_OP_DST || cmd->op >= BLIT_OP_END)
+		return -1;
+
+	for (i = 0; i < MAX_IMAGES; i++) {
+		img = &cmd->image[i];
+		if (!img->addr.type)
+			continue;
+
+		w = img->width;
+		h = img->height;
+		r = &img->rect;
+
+		/* 8000: max width & height */
+		if (w > 8000 || h > 8000)
+			return -1;
+
+		/* rect must start inside the image and be non-empty */
+		if (r->x1 < 0 || r->y1 < 0 ||
+			r->x1 >= w || r->y1 >= h ||
+			r->x1 >= r->x2 || r->y1 >= r->y2)
+			return -1;
+	}
+
+	/* clip rect must be non-empty, start inside dst and overlap dst rect */
+	clp = &p->clipping;
+	if (clp->enable) {
+		img = &cmd->image[IDST];
+
+		w = img->width;
+		h = img->height;
+		r = &img->rect;
+
+		if (clp->x1 < 0 || clp->y1 < 0 ||
+			clp->x1 >= w || clp->y1 >= h ||
+			clp->x1 >= clp->x2 || clp->y1 >= clp->y2 ||
+			clp->x1 >= r->x2 || clp->x2 <= r->x1 ||
+			clp->y1 >= r->y2 || clp->y2 <= r->y1)
+			return -1;
+	}
+
+	/* all four scaling factors must be non-zero when scaling is on */
+	scl = &p->scaling;
+	if (scl->mode) {
+		if (!scl->src_w || !scl->src_h || !scl->dst_w || !scl->dst_h)
+			return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * fimg2d_fixup_params - clamp user-supplied rects to image bounds
+ *
+ * Clamps each image rect (or the clip rect, for a clipped destination) to
+ * the image width/height, and degrades 1:1 scaling to NO_SCALING.  Called
+ * only after fimg2d_check_params() has accepted the command.
+ */
+static void fimg2d_fixup_params(struct fimg2d_bltcmd *cmd)
+{
+	struct fimg2d_param *p = &cmd->param;
+	struct fimg2d_image *img;
+	struct fimg2d_scale *scl;
+	struct fimg2d_clip *clp;
+	struct fimg2d_rect *r;
+	int i;
+
+	clp = &p->clipping;
+	scl = &p->scaling;
+
+	/* fix dst/clip rect */
+	for (i = 0; i < MAX_IMAGES; i++) {
+		img = &cmd->image[i];
+		if (!img->addr.type)
+			continue;
+
+		r = &img->rect;
+
+		if (i == IMAGE_DST && clp->enable) {
+			if (clp->x2 > img->width)
+				clp->x2 = img->width;
+			if (clp->y2 > img->height)
+				clp->y2 = img->height;
+		} else {
+			if (r->x2 > img->width)
+				r->x2 = img->width;
+			if (r->y2 > img->height)
+				r->y2 = img->height;
+		}
+	}
+
+	/*
+	 * avoid divide-by-zero
+	 * NOTE(review): the condition actually detects the 1:1 (no-op)
+	 * scaling case rather than zero factors; zero factors are already
+	 * rejected by fimg2d_check_params() -- confirm intent.
+	 */
+	if (scl->mode &&
+		(scl->src_w == scl->dst_w && scl->src_h == scl->dst_h))
+		scl->mode = NO_SCALING;
+}
+
+/* byte offset of pixel column @pixel for format @cf (bpp/8 per pixel) */
+static int pixel2offset(int pixel, enum color_format cf)
+{
+	return (pixel * bpptable[cf]) >> 3;
+}
+
+/*
+ * width2bytes - bytes needed to hold @width pixels of format @cf
+ *
+ * Sub-byte formats (1bpp, 4bpp masks) round up to whole bytes.
+ * Returns 0 for formats with no entry in bpptable (e.g. planar YUV).
+ */
+static int width2bytes(int width, enum color_format cf)
+{
+	int bpp = bpptable[cf];
+
+	switch (bpp) {
+	case 1:
+		return (width + 7) >> 3;
+	case 4:
+		return (width + 1) >> 1;
+	case 8:
+	case 16:
+	case 24:
+	case 32:
+		return width * bpp >> 3;
+	default:
+		return 0;
+	}
+}
+
+/* true for the planar YCbCr formats, which use yuv_stride() not bpptable */
+static inline int is_yuvfmt(enum color_format fmt)
+{
+	switch (fmt) {
+	case CF_YCBCR_420:
+	case CF_YCBCR_422:
+	case CF_YCBCR_444:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * yuv_stride - per-plane line stride in bytes for a planar YCbCr image
+ * @width: image width in pixels
+ * @cf: planar color format (CF_YCBCR_420/422/444)
+ * @order: pixel/plane ordering; P2_CRCB and P2_CBCR denote 2-plane layouts
+ * @plane: 0 for 1st (luma) plane, 1 for 2nd (chroma) plane
+ *
+ * Returns 0 for plane/format combinations that do not exist
+ * (e.g. a 2nd plane of interleaved 422).
+ */
+static int yuv_stride(int width, enum color_format cf, enum pixel_order order,
+		int plane)
+{
+	int bpp;
+
+	switch (cf) {
+	case CF_YCBCR_420:
+		bpp = (!plane) ? 8 : 4;
+		break;
+	case CF_YCBCR_422:
+		if (order == P2_CRCB || order == P2_CBCR)
+			bpp = 8;
+		else
+			bpp = (!plane) ? 16 : 0;
+		break;
+	case CF_YCBCR_444:
+		bpp = (!plane) ? 8 : 16;
+		break;
+	default:
+		bpp = 0;
+		break;
+	}
+
+	return width * bpp >> 3;
+}
+
+/*
+ * fimg2d_calc_dma_size - compute per-image DMA ranges for cache maintenance
+ *
+ * Only user-mapped buffers (ADDR_USER / ADDR_USER_CONTIG) are considered.
+ * For packed RGB images the range covers just the rows touched by the
+ * dst/clip rect; planar YUV images are covered whole, per plane.  When an
+ * image requests cache maintenance (need_cacheopr) its range is added to
+ * cmd->dma_all, which later decides flush-all vs. flush-by-range.
+ */
+static inline void fimg2d_calc_dma_size(struct fimg2d_bltcmd *cmd)
+{
+	struct fimg2d_image *img;
+	struct fimg2d_clip *clp;
+	struct fimg2d_rect *r;
+	struct fimg2d_dma *c;
+	int i, y1, y2, stride;
+
+	clp = &cmd->param.clipping;
+
+	for (i = 0; i < MAX_IMAGES; i++) {
+		img = &cmd->image[i];
+		if (img->addr.type != ADDR_USER &&
+			img->addr.type != ADDR_USER_CONTIG)
+			continue;
+
+		/* ! yuv format */
+		if (!is_yuvfmt(img->fmt)) {
+			r = &img->rect;
+
+			/* a clipped dst only touches the clip rows */
+			if (i == IMAGE_DST && clp->enable) {
+				y1 = clp->y1;
+				y2 = clp->y2;
+			} else {
+				y1 = r->y1;
+				y2 = r->y2;
+			}
+
+			c = &cmd->dma[i].base;
+			c->addr = img->addr.start + (img->stride * y1);
+			c->size = img->stride * (y2 - y1);
+
+			if (img->need_cacheopr) {
+				c->cached = c->size;
+				cmd->dma_all += c->cached;
+			}
+			continue;
+		}
+
+		/* yuv: whole 1st plane */
+		stride = yuv_stride(img->width, img->fmt, img->order, 0);
+
+		c = &cmd->dma[i].base;
+		c->addr = img->addr.start;
+		c->size = stride * img->height;
+
+		if (img->need_cacheopr) {
+			c->cached = c->size;
+			cmd->dma_all += c->cached;
+		}
+
+		/* 2-plane layouts also cover the chroma plane */
+		if (img->order == P2_CRCB || img->order == P2_CBCR) {
+			stride = yuv_stride(img->width, img->fmt,
+					img->order, 1);
+
+			c = &cmd->dma[i].plane2;
+			c->addr = img->plane2.start;
+			c->size = stride * img->height;
+
+			if (img->need_cacheopr) {
+				c->cached = c->size;
+				cmd->dma_all += c->cached;
+			}
+		}
+	}
+}
+
+/*
+ * inner_flush_clip_range - inner (L1) cache maintenance for a blit command
+ *
+ * The destination is synced bidirectionally (CPU may read the result);
+ * sources/masks only need to be cleaned to the device.  Packed images are
+ * synced row by row over the dst/clip rect unless the per-row gap is small
+ * enough (is_inner_flushrange) that one contiguous sync is cheaper.
+ */
+static inline void inner_flush_clip_range(struct fimg2d_bltcmd *cmd)
+{
+	struct fimg2d_image *img;
+	struct fimg2d_clip *clp;
+	struct fimg2d_rect *r;
+	struct fimg2d_dma *c;
+	int clp_x, clp_w, clp_h, y, i, dir;
+	int x1, y1, x2, y2;
+	unsigned long start;
+
+	clp = &cmd->param.clipping;
+
+	for (i = 0; i < MAX_IMAGES; i++) {
+		img = &cmd->image[i];
+
+		dir = (i == IMAGE_DST) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+		/* yuv format: whole planes, as sized by calc_dma_size */
+		if (is_yuvfmt(img->fmt)) {
+			c = &cmd->dma[i].base;
+			if (c->cached)
+				fimg2d_dma_sync_inner(c->addr, c->cached, dir);
+
+			c = &cmd->dma[i].plane2;
+			if (c->cached)
+				fimg2d_dma_sync_inner(c->addr, c->cached, dir);
+
+			continue;
+		}
+
+		c = &cmd->dma[i].base;
+		if (!c->cached)
+			continue;
+
+		r = &img->rect;
+
+		if (i == IMAGE_DST && clp->enable) {
+			x1 = clp->x1;
+			y1 = clp->y1;
+			x2 = clp->x2;
+			y2 = clp->y2;
+		} else {
+			x1 = r->x1;
+			y1 = r->y1;
+			x2 = r->x2;
+			y2 = r->y2;
+		}
+
+		clp_x = pixel2offset(x1, img->fmt);
+		clp_w = width2bytes(x2 - x1, img->fmt);
+		clp_h = y2 - y1;
+
+		if (is_inner_flushrange(img->stride - clp_w))
+			fimg2d_dma_sync_inner(c->addr, c->cached, dir);
+		else {
+			/* note: c->addr already starts at row y1 */
+			for (y = 0; y < clp_h; y++) {
+				start = c->addr + (img->stride * y) + clp_x;
+				fimg2d_dma_sync_inner(start, clp_w, dir);
+			}
+		}
+	}
+}
+
+/*
+ * outer_flush_clip_range - outer (L2) cache maintenance for a blit command
+ *
+ * Mirrors inner_flush_clip_range() for the outer cache, with two
+ * differences: the page tables covering every DMA range are cleaned first
+ * (the sysmmu walks them), and the direction is expressed as
+ * CACHE_FLUSH/CACHE_CLEAN instead of DMA directions.
+ */
+static inline void outer_flush_clip_range(struct fimg2d_bltcmd *cmd)
+{
+	struct mm_struct *mm = cmd->ctx->mm;
+	struct fimg2d_image *img;
+	struct fimg2d_clip *clp;
+	struct fimg2d_rect *r;
+	struct fimg2d_dma *c;
+	int clp_x, clp_w, clp_h, y, i, dir;
+	int x1, y1, x2, y2;
+	unsigned long start;
+
+	clp = &cmd->param.clipping;
+
+	for (i = 0; i < MAX_IMAGES; i++) {
+		img = &cmd->image[i];
+
+		/* clean pagetable on outercache */
+		c = &cmd->dma[i].base;
+		if (c->size)
+			fimg2d_clean_outer_pagetable(mm, c->addr, c->size);
+
+		c = &cmd->dma[i].plane2;
+		if (c->size)
+			fimg2d_clean_outer_pagetable(mm, c->addr, c->size);
+
+		dir = (i == IMAGE_DST) ? CACHE_FLUSH : CACHE_CLEAN;
+
+		/* yuv format: whole planes, as sized by calc_dma_size */
+		if (is_yuvfmt(img->fmt)) {
+			c = &cmd->dma[i].base;
+			if (c->cached) {
+				fimg2d_dma_sync_outer(mm, c->addr, c->cached,
+						dir);
+			}
+
+			c = &cmd->dma[i].plane2;
+			if (c->cached) {
+				fimg2d_dma_sync_outer(mm, c->addr, c->cached,
+						dir);
+			}
+
+			continue;
+		}
+
+		c = &cmd->dma[i].base;
+		if (!c->cached)
+			continue;
+
+		r = &img->rect;
+
+		if (i == IMAGE_DST && clp->enable) {
+			x1 = clp->x1;
+			y1 = clp->y1;
+			x2 = clp->x2;
+			y2 = clp->y2;
+		} else {
+			x1 = r->x1;
+			y1 = r->y1;
+			x2 = r->x2;
+			y2 = r->y2;
+		}
+
+		clp_x = pixel2offset(x1, img->fmt);
+		clp_w = width2bytes(x2 - x1, img->fmt);
+		clp_h = y2 - y1;
+
+		if (is_outer_flushrange(img->stride - clp_w))
+			fimg2d_dma_sync_outer(mm, c->addr, c->cached, dir);
+		else {
+			for (y = 0; y < clp_h; y++) {
+				start = c->addr + (img->stride * y) + clp_x;
+				fimg2d_dma_sync_outer(mm, start, clp_w, dir);
+			}
+		}
+	}
+}
+
+/*
+ * fimg2d_check_dma_sync - validate page tables and sync caches for a command
+ *
+ * Computes the DMA ranges, verifies each range is fully mapped in the
+ * caller's mm (-EFAULT otherwise), then performs inner and (if configured)
+ * outer cache maintenance.  Whole-cache flushes are used when the total
+ * cached size crosses the is_*_flushall threshold.
+ */
+static int fimg2d_check_dma_sync(struct fimg2d_bltcmd *cmd)
+{
+	struct mm_struct *mm = cmd->ctx->mm;
+	struct fimg2d_dma *c;
+	enum pt_status pt;
+	int i, ret;
+
+	fimg2d_calc_dma_size(cmd);
+
+	for (i = 0; i < MAX_IMAGES; i++) {
+		c = &cmd->dma[i].base;
+		if (!c->size)
+			continue;
+
+		pt = fimg2d_check_pagetable(mm, c->addr, c->size);
+		if (pt == PT_FAULT) {
+			ret = -EFAULT;
+			goto err_pgtable;
+		}
+	}
+
+#ifdef PERF_PROFILE
+	perf_start(cmd->ctx, PERF_INNERCACHE);
+#endif
+	if (is_inner_flushall(cmd->dma_all))
+		flush_all_cpu_caches();
+	else
+		inner_flush_clip_range(cmd);
+#ifdef PERF_PROFILE
+	perf_end(cmd->ctx, PERF_INNERCACHE);
+#endif
+
+#ifdef CONFIG_OUTER_CACHE
+#ifdef PERF_PROFILE
+	perf_start(cmd->ctx, PERF_OUTERCACHE);
+#endif
+	if (is_outer_flushall(cmd->dma_all))
+		outer_flush_all();
+	else
+		outer_flush_clip_range(cmd);
+#ifdef PERF_PROFILE
+	perf_end(cmd->ctx, PERF_OUTERCACHE);
+#endif
+#endif
+	return 0;
+
+err_pgtable:
+	return ret;
+}
+
+/*
+ * fimg2d_add_command - build a blit command from user space and enqueue it
+ *
+ * Copies the blit descriptor and its images from user memory, validates
+ * and fixes up the parameters, performs cache/pagetable preparation, then
+ * enqueues the command under bltlock.  Returns 0 on success; -EFAULT on
+ * copy/sync failure or while suspended (caller falls back to SW rendering),
+ * -EINVAL on bad parameters, -ENOMEM on allocation failure.
+ *
+ * NOTE(review): blit->dst is copied from user space here and again by the
+ * ioctl handler; a racing user thread could change dst.addr.type between
+ * the two reads -- confirm this is acceptable.
+ */
+int fimg2d_add_command(struct fimg2d_control *info, struct fimg2d_context *ctx,
+		struct fimg2d_blit *blit)
+{
+	int i, ret;
+	struct fimg2d_image *buf[MAX_IMAGES] = image_table(blit);
+	struct fimg2d_bltcmd *cmd;
+	struct fimg2d_image dst;
+
+	if (blit->dst)
+		if (copy_from_user(&dst, (void *)blit->dst, sizeof(dst)))
+			return -EFAULT;
+
+	/*
+	 * The ioctl holds page_alloc_slow_rwsem for ADDR_USER dst buffers;
+	 * drop it around the allocation so kzalloc may sleep/reclaim.
+	 */
+	if ((blit->dst) && (dst.addr.type == ADDR_USER))
+		up_write(&page_alloc_slow_rwsem);
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if ((blit->dst) && (dst.addr.type == ADDR_USER))
+		down_write(&page_alloc_slow_rwsem);
+
+	if (!cmd)
+		return -ENOMEM;
+
+	for (i = 0; i < MAX_IMAGES; i++) {
+		if (!buf[i])
+			continue;
+
+		if (copy_from_user(&cmd->image[i], buf[i],
+					sizeof(struct fimg2d_image))) {
+			ret = -EFAULT;
+			goto err_user;
+		}
+	}
+
+	cmd->ctx = ctx;
+	cmd->op = blit->op;
+	cmd->sync = blit->sync;
+	cmd->seq_no = blit->seq_no;
+	memcpy(&cmd->param, &blit->param, sizeof(cmd->param));
+
+#ifdef CONFIG_VIDEO_FIMG2D_DEBUG
+	fimg2d_dump_command(cmd);
+#endif
+
+	if (fimg2d_check_params(cmd)) {
+		printk(KERN_ERR "[%s] invalid params\n", __func__);
+		fimg2d_dump_command(cmd);
+		ret = -EINVAL;
+		goto err_user;
+	}
+
+	fimg2d_fixup_params(cmd);
+
+	if (fimg2d_check_dma_sync(cmd)) {
+		ret = -EFAULT;
+		goto err_user;
+	}
+
+	/* add command node and increase ncmd */
+	spin_lock(&info->bltlock);
+	if (atomic_read(&info->suspended)) {
+		fimg2d_debug("fimg2d suspended, do sw fallback\n");
+		spin_unlock(&info->bltlock);
+		ret = -EFAULT;
+		goto err_user;
+	}
+	atomic_inc(&ctx->ncmd);
+	fimg2d_enqueue(&cmd->node, &info->cmd_q);
+	fimg2d_debug("ctx %p pgd %p ncmd(%d) seq_no(%u)\n",
+			cmd->ctx, (unsigned long *)cmd->ctx->mm->pgd,
+			atomic_read(&ctx->ncmd), cmd->seq_no);
+	spin_unlock(&info->bltlock);
+
+	return 0;
+
+err_user:
+	kfree(cmd);
+	return ret;
+}
+
+/* dequeue and free a finished command; drops the context's ncmd count */
+void fimg2d_del_command(struct fimg2d_control *info, struct fimg2d_bltcmd *cmd)
+{
+	struct fimg2d_context *ctx = cmd->ctx;
+
+	spin_lock(&info->bltlock);
+	fimg2d_dequeue(&cmd->node);
+	kfree(cmd);
+	atomic_dec(&ctx->ncmd);
+	spin_unlock(&info->bltlock);
+}
+
+/* initialize a new per-open context and bump the global context count */
+void fimg2d_add_context(struct fimg2d_control *info, struct fimg2d_context *ctx)
+{
+	atomic_set(&ctx->ncmd, 0);
+	init_waitqueue_head(&ctx->wait_q);
+
+	atomic_inc(&info->nctx);
+	fimg2d_debug("ctx %p nctx(%d)\n", ctx, atomic_read(&info->nctx));
+}
+
+/* unregister a context; caller must have drained its pending commands */
+void fimg2d_del_context(struct fimg2d_control *info, struct fimg2d_context *ctx)
+{
+	atomic_dec(&info->nctx);
+	fimg2d_debug("ctx %p nctx(%d)\n", ctx, atomic_read(&info->nctx));
+}
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.h b/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.h
new file mode 100644
index 0000000..8a14607
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.h
@@ -0,0 +1,46 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_ctx.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include "fimg2d.h"
+#include "fimg2d_helper.h"
+
+/* append a command node to the tail of queue @q (caller holds bltlock) */
+static inline void fimg2d_enqueue(struct list_head *node, struct list_head *q)
+{
+	list_add_tail(node, q);
+}
+
+/* unlink a command node from its queue (caller holds bltlock) */
+static inline void fimg2d_dequeue(struct list_head *node)
+{
+	list_del(node);
+}
+
+/* non-zero when no blit commands are queued */
+static inline int fimg2d_queue_is_empty(struct list_head *q)
+{
+	return list_empty(q);
+}
+
+/* peek at the oldest queued command, or NULL when the queue is empty */
+static inline
+struct fimg2d_bltcmd *fimg2d_get_first_command(struct fimg2d_control *info)
+{
+	if (list_empty(&info->cmd_q))
+		return NULL;
+
+	return list_first_entry(&info->cmd_q, struct fimg2d_bltcmd, node);
+}
+
+void fimg2d_add_context(struct fimg2d_control *info,
+ struct fimg2d_context *ctx);
+void fimg2d_del_context(struct fimg2d_control *info,
+ struct fimg2d_context *ctx);
+int fimg2d_add_command(struct fimg2d_control *info,
+ struct fimg2d_context *ctx, struct fimg2d_blit *blit);
+void fimg2d_del_command(struct fimg2d_control *info, struct fimg2d_bltcmd *cmd);
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d_drv.c b/drivers/media/video/samsung/fimg2d4x/fimg2d_drv.c
new file mode 100644
index 0000000..0dc277e
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d_drv.c
@@ -0,0 +1,507 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_drv.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <asm/cacheflush.h>
+#include <plat/cpu.h>
+#include <plat/fimg2d.h>
+#include <plat/sysmmu.h>
+#include <mach/dev.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include "fimg2d.h"
+#include "fimg2d_clk.h"
+#include "fimg2d_ctx.h"
+#include "fimg2d_helper.h"
+
+#define CTX_TIMEOUT msecs_to_jiffies(5000)
+
+static struct fimg2d_control *info;
+
+/* workqueue handler: drain the command queue via the registered blit op */
+static void fimg2d_worker(struct work_struct *work)
+{
+	fimg2d_debug("start kernel thread\n");
+	info->blit(info);
+}
+
+static DECLARE_WORK(fimg2d_work, fimg2d_worker);
+
+/**
+ * fimg2d_irq - G2D interrupt handler
+ * @irq: irq number
+ * @dev_id: pointer to private data
+ *
+ * Delegates to the registered stop op, which acknowledges the hardware.
+ */
+static irqreturn_t fimg2d_irq(int irq, void *dev_id)
+{
+	fimg2d_debug("irq\n");
+	info->stop(info);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * fimg2d_sysmmu_fault_handler - diagnose a sysmmu fault raised by G2D
+ *
+ * Logs the fault, dumps the current command (if its pgd matches the
+ * faulting page table), the clock state and the register state, then
+ * deliberately BUG()s: a sysmmu fault here is unrecoverable.
+ */
+static int fimg2d_sysmmu_fault_handler(enum exynos_sysmmu_inttype itype,
+		unsigned long pgtable_base, unsigned long fault_addr)
+{
+	struct fimg2d_bltcmd *cmd;
+
+	if (itype == SYSMMU_PAGEFAULT) {
+		printk(KERN_ERR "[%s] sysmmu page fault(0x%lx), pgd(0x%lx)\n",
+				__func__, fault_addr, pgtable_base);
+	} else {
+		printk(KERN_ERR "[%s] sysmmu interrupt "
+				"type(%d) pgd(0x%lx) addr(0x%lx)\n",
+				__func__, itype, pgtable_base, fault_addr);
+	}
+
+	cmd = fimg2d_get_first_command(info);
+	if (!cmd) {
+		printk(KERN_ERR "[%s] null command\n", __func__);
+		goto next;
+	}
+
+	if (cmd->ctx->mm->pgd != phys_to_virt(pgtable_base)) {
+		printk(KERN_ERR "[%s] pgtable base is different from current command\n",
+				__func__);
+		goto next;
+	}
+
+	fimg2d_dump_command(cmd);
+
+next:
+	fimg2d_clk_dump(info);
+	info->dump(info);
+
+	BUG();
+	return 0;
+}
+
+/*
+ * Block until all of @ctx's queued commands complete.  On a 5s timeout the
+ * wait is retried unless the device has flagged an error (info->err), in
+ * which case we give up so the caller can fall back to SW rendering.
+ */
+static void fimg2d_context_wait(struct fimg2d_context *ctx)
+{
+	while (atomic_read(&ctx->ncmd)) {
+		if (!wait_event_timeout(ctx->wait_q, !atomic_read(&ctx->ncmd), CTX_TIMEOUT)) {
+			fimg2d_debug("[%s] ctx %p blit wait timeout\n", __func__, ctx);
+			if (info->err)
+				break;
+		}
+	}
+}
+
+/*
+ * Kick the worker if it is not already running, then wait for this
+ * context's commands to finish.  `active` is reset by the worker side.
+ */
+static void fimg2d_request_bitblt(struct fimg2d_context *ctx)
+{
+	if (!atomic_read(&info->active)) {
+		atomic_set(&info->active, 1);
+		fimg2d_debug("dispatch ctx %p to kernel thread\n", ctx);
+		queue_work(info->work_q, &fimg2d_work);
+	}
+	fimg2d_context_wait(ctx);
+}
+
+/*
+ * open(): allocate a per-fd context bound to the opener's mm (needed later
+ * for pagetable checks and cache maintenance) and register it.
+ */
+static int fimg2d_open(struct inode *inode, struct file *file)
+{
+	struct fimg2d_context *ctx;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		printk(KERN_ERR "[%s] not enough memory for ctx\n", __func__);
+		return -ENOMEM;
+	}
+	file->private_data = (void *)ctx;
+
+	ctx->mm = current->mm;
+	fimg2d_debug("ctx %p current pgd %p init_mm pgd %p\n",
+			ctx, (unsigned long *)ctx->mm->pgd,
+			(unsigned long *)init_mm.pgd);
+
+	fimg2d_add_context(info, ctx);
+	return 0;
+}
+
+/*
+ * release(): drain this context's in-flight commands, then free it.
+ * NOTE(review): the drain is a busy-wait (mdelay(2) per iteration) with no
+ * upper bound; it spins forever if ncmd never drops -- consider whether a
+ * waitqueue/timeout would be safer.
+ */
+static int fimg2d_release(struct inode *inode, struct file *file)
+{
+	struct fimg2d_context *ctx = file->private_data;
+
+	fimg2d_debug("ctx %p\n", ctx);
+	while (1) {
+		if (!atomic_read(&ctx->ncmd))
+			break;
+
+		mdelay(2);
+	}
+	fimg2d_del_context(info, ctx);
+
+	kfree(ctx);
+	return 0;
+}
+
+/* mmap is accepted but intentionally a no-op: nothing is mapped */
+static int fimg2d_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	return 0;
+}
+
+/* poll stub: reports no events; synchronization happens via the ioctl */
+static unsigned int fimg2d_poll(struct file *file, struct poll_table_struct *wait)
+{
+	return 0;
+}
+
+/*
+ * fimg2d_ioctl - user-space entry point
+ *
+ * FIMG2D_BITBLT_BLIT: copy in the blit request, lock bus frequency (OPP
+ * builds), serialize ADDR_USER destinations against slow page allocation
+ * via page_alloc_slow_rwsem, enqueue and wait for the blit.
+ * FIMG2D_BITBLT_SYNC: currently a no-op stub (see FIXME).
+ * FIMG2D_BITBLT_VERSION: report hw version from platform data.
+ * NOTE(review): unknown ioctls return -EFAULT; -ENOTTY is the customary
+ * return -- confirm user space does not depend on -EFAULT.
+ */
+static long fimg2d_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct fimg2d_context *ctx;
+	struct fimg2d_platdata *pdata;
+	struct fimg2d_blit blit;
+	struct fimg2d_version ver;
+	struct fimg2d_image dst;
+
+	ctx = file->private_data;
+	if (!ctx) {
+		printk(KERN_ERR "[%s] missing ctx\n", __func__);
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case FIMG2D_BITBLT_BLIT:
+		if (info->err) {
+			printk(KERN_ERR "[%s] device error, do sw fallback\n",
+					__func__);
+			return -EFAULT;
+		}
+
+		if (copy_from_user(&blit, (void *)arg, sizeof(blit)))
+			return -EFAULT;
+		if (blit.dst)
+			if (copy_from_user(&dst, (void *)blit.dst, sizeof(dst)))
+				return -EFAULT;
+
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+		dev_lock(info->bus_dev, info->dev, 160160);
+#endif
+#endif
+		if ((blit.dst) && (dst.addr.type == ADDR_USER))
+			down_write(&page_alloc_slow_rwsem);
+		ret = fimg2d_add_command(info, ctx, &blit);
+		if (!ret) {
+			fimg2d_request_bitblt(ctx);
+		}
+
+#ifdef PERF_PROFILE
+		perf_print(ctx, blit.seq_no);
+		perf_clear(ctx);
+#endif
+		if ((blit.dst) && (dst.addr.type == ADDR_USER))
+			up_write(&page_alloc_slow_rwsem);
+
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+		dev_unlock(info->bus_dev, info->dev);
+#endif
+#endif
+		break;
+
+	case FIMG2D_BITBLT_SYNC:
+		fimg2d_debug("FIMG2D_BITBLT_SYNC ctx: %p\n", ctx);
+		/* FIXME: */
+		break;
+
+	case FIMG2D_BITBLT_VERSION:
+		pdata = to_fimg2d_plat(info->dev);
+		ver.hw = pdata->hw_ver;
+		ver.sw = 0;
+		fimg2d_debug("fimg2d version, hw: 0x%x sw: 0x%x\n",
+				ver.hw, ver.sw);
+		if (copy_to_user((void *)arg, &ver, sizeof(ver)))
+			return -EFAULT;
+		break;
+
+	default:
+#if 0
+		printk(KERN_ERR "[%s] unknown ioctl\n", __func__);
+#endif
+		ret = -EFAULT;
+		break;
+	}
+
+	return ret;
+}
+
+/* file operations exposed through the /dev/fimg2d misc device */
+static const struct file_operations fimg2d_fops = {
+	.owner = THIS_MODULE,
+	.open = fimg2d_open,
+	.release = fimg2d_release,
+	.mmap = fimg2d_mmap,
+	.poll = fimg2d_poll,
+	.unlocked_ioctl = fimg2d_ioctl,
+};
+
+/* misc character device: /dev/fimg2d */
+static struct miscdevice fimg2d_dev = {
+	.minor = FIMG2D_MINOR,
+	.name = "fimg2d",
+	.fops = &fimg2d_fops,
+};
+
+/*
+ * Initialize the controller state: counters, locks, the command queue,
+ * the hardware-op table (fimg2d_register_ops) and the single-threaded
+ * blit workqueue.  Returns 0 or -ENOMEM if the workqueue cannot be made.
+ */
+static int fimg2d_setup_controller(struct fimg2d_control *info)
+{
+	atomic_set(&info->suspended, 0);
+	atomic_set(&info->clkon, 0);
+	atomic_set(&info->busy, 0);
+	atomic_set(&info->nctx, 0);
+	atomic_set(&info->active, 0);
+
+	spin_lock_init(&info->bltlock);
+
+	INIT_LIST_HEAD(&info->cmd_q);
+	init_waitqueue_head(&info->wait_q);
+	fimg2d_register_ops(info);
+
+	info->work_q = create_singlethread_workqueue("kfimg2dd");
+	if (!info->work_q)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/*
+ * fimg2d_probe - bind the G2D platform device
+ *
+ * Acquires, in order: platform data, the global controller struct, the
+ * MMIO region and mapping, the interrupt, clocks, runtime PM, the bus
+ * frequency device (OPP builds), the sysmmu fault handler, and finally
+ * the misc device.  Error paths unwind in reverse order.
+ * NOTE(review): the err_clk path calls free_irq(info->irq, NULL) although
+ * the irq was requested with dev_id == info -- free_irq matches on dev_id,
+ * so this looks like it would fail to free; confirm.
+ */
+static int fimg2d_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct fimg2d_platdata *pdata;
+	int ret;
+
+	pdata = to_fimg2d_plat(&pdev->dev);
+	if (!pdata) {
+		printk(KERN_ERR "FIMG2D failed to get platform data\n");
+		ret = -ENOMEM;
+		goto err_plat;
+	}
+
+	/* global structure */
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		printk(KERN_ERR "FIMG2D failed to allocate memory for controller\n");
+		ret = -ENOMEM;
+		goto err_plat;
+	}
+
+	/* setup global info */
+	ret = fimg2d_setup_controller(info);
+	if (ret) {
+		printk(KERN_ERR "FIMG2D failed to setup controller\n");
+		goto err_setup;
+	}
+	info->dev = &pdev->dev;
+
+	/* memory region */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		printk(KERN_ERR "FIMG2D failed to get resource\n");
+		ret = -ENOENT;
+		goto err_region;
+	}
+
+	info->mem = request_mem_region(res->start, resource_size(res),
+			pdev->name);
+	if (!info->mem) {
+		printk(KERN_ERR "FIMG2D failed to request memory region\n");
+		ret = -ENOMEM;
+		goto err_region;
+	}
+
+	/* ioremap */
+	info->regs = ioremap(res->start, resource_size(res));
+	if (!info->regs) {
+		printk(KERN_ERR "FIMG2D failed to ioremap for SFR\n");
+		ret = -ENOENT;
+		goto err_map;
+	}
+	fimg2d_debug("device name: %s base address: 0x%lx\n",
+			pdev->name, (unsigned long)res->start);
+
+	/* irq */
+	info->irq = platform_get_irq(pdev, 0);
+	if (!info->irq) {
+		printk(KERN_ERR "FIMG2D failed to get irq resource\n");
+		ret = -ENOENT;
+		goto err_irq;
+	}
+	fimg2d_debug("irq: %d\n", info->irq);
+
+	ret = request_irq(info->irq, fimg2d_irq, IRQF_DISABLED, pdev->name, info);
+	if (ret) {
+		printk(KERN_ERR "FIMG2D failed to request irq\n");
+		ret = -ENOENT;
+		goto err_irq;
+	}
+
+	ret = fimg2d_clk_setup(info);
+	if (ret) {
+		printk(KERN_ERR "FIMG2D failed to setup clk\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_enable(info->dev);
+	fimg2d_debug("enable runtime pm\n");
+#endif
+
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+	/* To lock bus frequency in OPP mode */
+	info->bus_dev = dev_get("exynos-busfreq");
+#endif
+#endif
+	exynos_sysmmu_set_fault_handler(info->dev, fimg2d_sysmmu_fault_handler);
+	fimg2d_debug("register sysmmu page fault handler\n");
+
+	/* misc register */
+	ret = misc_register(&fimg2d_dev);
+	if (ret) {
+		printk(KERN_ERR "FIMG2D failed to register misc driver\n");
+		goto err_reg;
+	}
+
+	printk(KERN_INFO "Samsung Graphics 2D driver, (c) 2011 Samsung Electronics\n");
+	return 0;
+
+err_reg:
+	fimg2d_clk_release(info);
+
+err_clk:
+	free_irq(info->irq, NULL);
+
+err_irq:
+	iounmap(info->regs);
+
+err_map:
+	release_resource(info->mem);
+	kfree(info->mem);
+
+err_region:
+	destroy_workqueue(info->work_q);
+
+err_setup:
+	kfree(info);
+
+err_plat:
+	return ret;
+}
+
+/*
+ * fimg2d_remove - unbind: release irq, mappings, workqueue and misc device.
+ * NOTE(review): free_irq() is passed a NULL dev_id although the irq was
+ * requested with dev_id == info; also fimg2d_clk_release() is never called
+ * here -- confirm both against the probe path.
+ */
+static int fimg2d_remove(struct platform_device *pdev)
+{
+	free_irq(info->irq, NULL);
+
+	if (info->mem) {
+		iounmap(info->regs);
+		release_resource(info->mem);
+		kfree(info->mem);
+	}
+
+	destroy_workqueue(info->work_q);
+	misc_deregister(&fimg2d_dev);
+	kfree(info);
+
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_disable(&pdev->dev);
+	fimg2d_debug("disable runtime pm\n");
+#endif
+
+	return 0;
+}
+
+/*
+ * System suspend: flag the device suspended (new commands are rejected in
+ * fimg2d_add_command) and busy-wait until the command queue drains.
+ */
+static int fimg2d_suspend(struct device *dev)
+{
+	fimg2d_debug("suspend... start\n");
+	atomic_set(&info->suspended, 1);
+	while (1) {
+		if (fimg2d_queue_is_empty(&info->cmd_q))
+			break;
+
+		mdelay(2);
+	}
+	fimg2d_debug("suspend... done\n");
+	return 0;
+}
+
+/* System resume: clear the suspended flag so commands are accepted again */
+static int fimg2d_resume(struct device *dev)
+{
+	fimg2d_debug("resume... start\n");
+	atomic_set(&info->suspended, 0);
+	fimg2d_debug("resume... done\n");
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/* runtime PM hooks are logging-only; clock gating is handled elsewhere */
+static int fimg2d_runtime_suspend(struct device *dev)
+{
+	fimg2d_debug("runtime suspend... done\n");
+	return 0;
+}
+
+static int fimg2d_runtime_resume(struct device *dev)
+{
+	fimg2d_debug("runtime resume... done\n");
+	return 0;
+}
+#endif
+
+/* system sleep and (optional) runtime PM callbacks */
+static const struct dev_pm_ops fimg2d_pm_ops = {
+	.suspend = fimg2d_suspend,
+	.resume = fimg2d_resume,
+#ifdef CONFIG_PM_RUNTIME
+	.runtime_suspend = fimg2d_runtime_suspend,
+	.runtime_resume = fimg2d_runtime_resume,
+#endif
+};
+
+/* platform driver bound to the "s5p-fimg2d" platform device */
+static struct platform_driver fimg2d_driver = {
+	.probe = fimg2d_probe,
+	.remove = fimg2d_remove,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "s5p-fimg2d",
+		.pm = &fimg2d_pm_ops,
+	},
+};
+
+/* module init: register the platform driver */
+static int __init fimg2d_register(void)
+{
+	return platform_driver_register(&fimg2d_driver);
+}
+
+/* module exit: unregister the platform driver */
+static void __exit fimg2d_unregister(void)
+{
+	platform_driver_unregister(&fimg2d_driver);
+}
+
+module_init(fimg2d_register);
+module_exit(fimg2d_unregister);
+
+MODULE_AUTHOR("Eunseok Choi <es10.choi@samsung.com>");
+MODULE_AUTHOR("Jinsung Yang <jsgood.yang@samsung.com>");
+MODULE_DESCRIPTION("Samsung Graphics 2D driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.c b/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.c
new file mode 100644
index 0000000..61e2d53
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.c
@@ -0,0 +1,118 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include "fimg2d.h"
+#include "fimg2d_cache.h"
+#include "fimg2d_helper.h"
+
+/*
+ * perf_print - report elapsed time for each completed perf descriptor
+ *
+ * Only descriptors with valid == 0x11 (both start and end stamped, see
+ * perf_start/perf_end) are printed.
+ */
+void perf_print(struct fimg2d_context *ctx, int seq_no)
+{
+	int i;
+	long time;
+	struct fimg2d_perf *perf;
+
+	for (i = 0; i < MAX_PERF_DESCS; i++) {
+		perf = &ctx->perf[i];
+		if (perf->valid != 0x11)
+			continue;
+		time = elapsed_usec(ctx, i);
+		printk(KERN_INFO "[FIMG2D PERF (%8s)] ctx(0x%08x) seq(%d) "
+				"%8ld usec\n",
+				perfname(i), (unsigned int)ctx, seq_no, time);
+	}
+	printk(KERN_INFO "[FIMG2D PERF **]\n");
+}
+
+/*
+ * fimg2d_dump_command - log a blit command's full state for debugging
+ *
+ * Prints the op, blend/scale/clip/repeat parameters (optional ones only
+ * when set), then for every present image its addresses, geometry and
+ * computed DMA ranges.  Used on invalid commands and sysmmu faults.
+ */
+void fimg2d_dump_command(struct fimg2d_bltcmd *cmd)
+{
+	int i;
+	struct fimg2d_param *p = &cmd->param;
+	struct fimg2d_image *img;
+	struct fimg2d_rect *r;
+	struct fimg2d_dma *c;
+
+	printk(KERN_INFO " op: %d\n", cmd->op);
+	printk(KERN_INFO " solid color: 0x%lx\n", p->solid_color);
+	printk(KERN_INFO " g_alpha: 0x%x\n", p->g_alpha);
+	printk(KERN_INFO " premultiplied: %d\n", p->premult);
+	if (p->dither)
+		printk(KERN_INFO " dither: %d\n", p->dither);
+	if (p->rotate)
+		printk(KERN_INFO " rotate: %d\n", p->rotate);
+	if (p->repeat.mode) {
+		printk(KERN_INFO " repeat: %d, pad color: 0x%lx\n",
+				p->repeat.mode, p->repeat.pad_color);
+	}
+	if (p->bluscr.mode) {
+		printk(KERN_INFO " bluescreen: %d, bs_color: 0x%lx "
+				"bg_color: 0x%lx\n",
+				p->bluscr.mode, p->bluscr.bs_color,
+				p->bluscr.bg_color);
+	}
+	if (p->scaling.mode) {
+		printk(KERN_INFO " scaling %d, s:%d,%d d:%d,%d\n",
+				p->scaling.mode,
+				p->scaling.src_w, p->scaling.src_h,
+				p->scaling.dst_w, p->scaling.dst_h);
+	}
+	if (p->clipping.enable) {
+		printk(KERN_INFO " clipping LT(%d,%d) RB(%d,%d) WH(%d,%d)\n",
+				p->clipping.x1, p->clipping.y1,
+				p->clipping.x2, p->clipping.y2,
+				rect_w(&p->clipping), rect_h(&p->clipping));
+	}
+
+	for (i = 0; i < MAX_IMAGES; i++) {
+		img = &cmd->image[i];
+		if (!img->addr.type)
+			continue;
+
+		r = &img->rect;
+
+		printk(KERN_INFO " %s type: %d addr: 0x%lx\n",
+				imagename(i), img->addr.type, img->addr.start);
+		if (img->plane2.type) {
+			printk(KERN_INFO " %s type: %d plane2: 0x%lx\n",
+					imagename(i), img->plane2.type,
+					img->plane2.start);
+		}
+		printk(KERN_INFO " %s width: %d height: %d "
+				"stride: %d order: %d format: %d\n",
+				imagename(i), img->width, img->height,
+				img->stride, img->order, img->fmt);
+		printk(KERN_INFO " %s rect LT(%d,%d) RB(%d,%d) WH(%d,%d)\n",
+				imagename(i), r->x1, r->y1, r->x2, r->y2,
+				rect_w(r), rect_h(r));
+
+		c = &cmd->dma[i].base;
+		if (c->size) {
+			printk(KERN_INFO " %s dma base addr: 0x%lx "
+					"size: 0x%x cached: 0x%x\n",
+					imagename(i), c->addr, c->size,
+					c->cached);
+		}
+
+		c = &cmd->dma[i].plane2;
+		if (c->size) {
+			printk(KERN_INFO " %s dma plane2 addr: 0x%lx "
+					"size: 0x%x cached: 0x%x\n",
+					imagename(i), c->addr, c->size,
+					c->cached);
+		}
+	}
+
+	if (cmd->dma_all)
+		printk(KERN_INFO " dma size all: 0x%x bytes\n", cmd->dma_all);
+
+	printk(KERN_INFO " ctx: %p seq_no(%u) sync(%d)\n",
+			cmd->ctx, cmd->seq_no, cmd->sync);
+}
diff --git a/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.h b/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.h
new file mode 100644
index 0000000..83f794c
--- /dev/null
+++ b/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.h
@@ -0,0 +1,117 @@
+/* linux/drivers/media/video/samsung/fimg2d4x/fimg2d_helper.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Samsung Graphics 2D driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __FIMG2D_HELPER_H
+#define __FIMG2D_HELPER_H
+
+#include <linux/sched.h>
+#include "fimg2d.h"
+
+/* width/height of a fimg2d_rect (x2/y2 are exclusive) */
+#define rect_w(r) ((r)->x2 - (r)->x1)
+#define rect_h(r) ((r)->y2 - (r)->y1)
+
+/* printable name of a perf descriptor; "" for unknown ids */
+static inline char *perfname(enum perf_desc id)
+{
+	switch (id) {
+	case PERF_INNERCACHE:
+		return "INNER$";
+	case PERF_OUTERCACHE:
+		return "OUTER$";
+	case PERF_BLIT:
+		return "BITBLT";
+	default:
+		return "";
+	}
+}
+
+/* printable name of an image slot; NULL for unknown slots */
+static inline char *imagename(enum image_object image)
+{
+	switch (image) {
+	case IDST:
+		return "DST";
+	case ISRC:
+		return "SRC";
+	case IMSK:
+		return "MSK";
+	default:
+		return NULL;
+	}
+}
+
+/*
+ * Microseconds between perf_start() and perf_end() for descriptor @desc.
+ * With PERF_TIMEVAL the stamps are struct timeval; otherwise they are
+ * sched_clock() nanoseconds, divided down to usec.
+ */
+static inline long elapsed_usec(struct fimg2d_context *ctx, enum perf_desc desc)
+{
+	struct fimg2d_perf *perf = &ctx->perf[desc];
+#ifdef PERF_TIMEVAL
+	struct timeval *start = &perf->start;
+	struct timeval *end = &perf->end;
+	long sec, usec;
+
+	sec = end->tv_sec - start->tv_sec;
+	if (end->tv_usec >= start->tv_usec) {
+		usec = end->tv_usec - start->tv_usec;
+	} else {
+		usec = end->tv_usec + 1000000 - start->tv_usec;
+		sec--;
+	}
+	return sec * 1000000 + usec;
+#else
+	return (long)(perf->end - perf->start)/1000;
+#endif
+}
+
+/*
+ * Stamp the start time of descriptor @desc once per measurement cycle
+ * (valid 0 -> 0x01); later calls are ignored until perf_clear().
+ * NOTE(review): the local `time` in the sched_clock branch is computed
+ * but never used -- presumably debug leftover.
+ */
+static inline void perf_start(struct fimg2d_context *ctx, enum perf_desc desc)
+{
+	struct fimg2d_perf *perf = &ctx->perf[desc];
+
+	if (!perf->valid) {
+#ifdef PERF_TIMEVAL
+		struct timeval time;
+		do_gettimeofday(&time);
+		perf->start = time;
+#else
+		long time;
+		perf->start = sched_clock();
+		time = perf->start / 1000;
+#endif
+		perf->valid = 0x01;
+	}
+}
+
+/*
+ * Stamp the end time of descriptor @desc, but only after perf_start()
+ * (valid 0x01 -> 0x11); perf_print() reports only 0x11 entries.
+ */
+static inline void perf_end(struct fimg2d_context *ctx, enum perf_desc desc)
+{
+	struct fimg2d_perf *perf = &ctx->perf[desc];
+
+	if (perf->valid == 0x01) {
+#ifdef PERF_TIMEVAL
+		struct timeval time;
+		do_gettimeofday(&time);
+		perf->end = time;
+#else
+		long time;
+		perf->end = sched_clock();
+		time = perf->end / 1000;
+#endif
+		perf->valid |= 0x10;
+	}
+}
+
+/* reset all perf descriptors so the next blit starts a fresh measurement */
+static inline void perf_clear(struct fimg2d_context *ctx)
+{
+	int i;
+	for (i = 0; i < MAX_PERF_DESCS; i++)
+		ctx->perf[i].valid = 0;
+}
+
+void perf_print(struct fimg2d_context *ctx, int seq_no);
+void fimg2d_dump_command(struct fimg2d_bltcmd *cmd);
+
+#endif /* __FIMG2D_HELPER_H */
diff --git a/drivers/media/video/samsung/jpeg/Kconfig b/drivers/media/video/samsung/jpeg/Kconfig
new file mode 100644
index 0000000..35ba86d
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/Kconfig
@@ -0,0 +1,15 @@
+#
+# Configuration for JPEG
+#
+
+config VIDEO_JPEG
+ bool "Samsung JPEG driver"
+ depends on VIDEO_SAMSUNG
+ default n
+ ---help---
+	  This is a JPEG driver for Samsung S5PV210 or S5PV310
+
+config VIDEO_JPEG_DEBUG
+ bool "print JPEG debug message"
+ depends on VIDEO_JPEG
+ default n
diff --git a/drivers/media/video/samsung/jpeg/Makefile b/drivers/media/video/samsung/jpeg/Makefile
new file mode 100644
index 0000000..0f166c9
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/Makefile
@@ -0,0 +1,10 @@
+#################################################
+# Makefile for JPEG
+# 2009 (C) Samsung Electronics
+# Author : Hyunmin Kwak <hyunmin.kwak@samsung.com>
+#################################################
+
+obj-$(CONFIG_VIDEO_JPEG) += jpeg_dev.o jpeg_mem.o jpeg_core.o jpeg_regs.o
+
+EXTRA_CFLAGS += -Idrivers/media/video
+
diff --git a/drivers/media/video/samsung/jpeg/jpeg_conf.h b/drivers/media/video/samsung/jpeg/jpeg_conf.h
new file mode 100644
index 0000000..a82d1f6
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/jpeg_conf.h
@@ -0,0 +1,363 @@
+/* linux/drivers/media/video/samsung/jpeg/jpeg_conf.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Definition of quantization tables for the JPEG encoder/decoder
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __JPEG_CONF_H__
+#define __JPEG_CONF_H__
+
+/* coefficients of color space converter. */
+#define COEF1_RGB_2_YUV 0x4d971e
+#define COEF2_RGB_2_YUV 0x2c5783
+#define COEF3_RGB_2_YUV 0x836e13
+
+/*
+ * Luminance quantization tables, one 8x8 table per
+ * jpeg_img_quality_level (index 0 = highest quality, 3 = lowest).
+ * Element order is presumably the raster order the IP's Q-table
+ * registers expect — confirm against jpeg_set_enc_qtbl() in
+ * jpeg_regs.c.
+ */
+const unsigned char qtbl_luminance[4][64] = {
+#if 1
+	/* LSI Q-table has very high compression rate.
+	 * It does not satisfy the spec of Reliability Test Group
+	 * belonging to TN. So We use the below Q-table.
+	 * - DSLIM
+	 */
+	{ /* Very high quality Customized by Samsung TN */
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x03, 0x03,
+	0x01, 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03,
+	0x01, 0x01, 0x01, 0x01, 0x02, 0x03, 0x03, 0x03,
+	0x01, 0x01, 0x01, 0x01, 0x02, 0x04, 0x04, 0x03,
+	0x01, 0x01, 0x03, 0x04, 0x04, 0x06, 0x06, 0x04,
+	0x01, 0x02, 0x03, 0x03, 0x04, 0x05, 0x06, 0x05,
+	0x02, 0x03, 0x04, 0x04, 0x05, 0x06, 0x06, 0x05,
+	0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05
+	},
+	{
+	0x03, 0x02, 0x02, 0x02, 0x03, 0x08, 0x0B, 0x10,
+	0x03, 0x02, 0x02, 0x02, 0x03, 0x0F, 0x10, 0x0E,
+	0x02, 0x02, 0x02, 0x03, 0x08, 0x0E, 0x13, 0x0E,
+	0x02, 0x02, 0x02, 0x04, 0x0C, 0x18, 0x17, 0x10,
+	0x03, 0x07, 0x0E, 0x11, 0x17, 0x1F, 0x23, 0x1B,
+	0x03, 0x07, 0x0E, 0x11, 0x17, 0x1F, 0x23, 0x1B,
+	0x0C, 0x11, 0x16, 0x19, 0x1F, 0x1C, 0x1C, 0x1E,
+	0x14, 0x1B, 0x1C, 0x1D, 0x23, 0x20, 0x1F, 0x1E
+	},
+	{
+	0x05, 0x02, 0x02, 0x05, 0x0A, 0x16, 0x1E, 0x25,
+	0x02, 0x02, 0x03, 0x07, 0x0C, 0x23, 0x25, 0x21,
+	0x03, 0x02, 0x05, 0x0A, 0x16, 0x22, 0x2B, 0x22,
+	0x03, 0x05, 0x09, 0x0E, 0x1E, 0x39, 0x33, 0x26,
+	0x06, 0x09, 0x14, 0x22, 0x2A, 0x38, 0x44, 0x31,
+	0x0A, 0x12, 0x21, 0x18, 0x34, 0x45, 0x4B, 0x3C,
+	0x1D, 0x28, 0x32, 0x38, 0x44, 0x52, 0x51, 0x42,
+	0x2D, 0x3C, 0x3E, 0x40, 0x4A, 0x42, 0x44, 0x41,
+	},
+	{/* LSI - level 1 - high quality */
+	8, 6, 6, 8, 12, 14, 16, 17,
+	6, 6, 6, 8, 10, 13, 12, 15,
+	6, 6, 7, 8, 13, 14, 18, 24,
+	8, 8, 8, 14, 13, 19, 24, 35,
+	12, 10, 13, 13, 20, 26, 34, 39,
+	14, 13, 14, 19, 26, 34, 39, 39,
+	16, 12, 18, 24, 34, 39, 39, 39,
+	17, 15, 24, 35, 39, 39, 39, 39
+	},
+#else
+	{/* level 1 - high quality */
+	8, 6, 6, 8, 12, 14, 16, 17,
+	6, 6, 6, 8, 10, 13, 12, 15,
+	6, 6, 7, 8, 13, 14, 18, 24,
+	8, 8, 8, 14, 13, 19, 24, 35,
+	12, 10, 13, 13, 20, 26, 34, 39,
+	14, 13, 14, 19, 26, 34, 39, 39,
+	16, 12, 18, 24, 34, 39, 39, 39,
+	17, 15, 24, 35, 39, 39, 39, 39
+	},
+	{/* level 2 */
+	12, 8, 8, 12, 17, 21, 24, 23,
+	8, 9, 9, 11, 15, 19, 18, 23,
+	8, 9, 10, 12, 19, 20, 27, 36,
+	12, 11, 12, 21, 20, 28, 36, 53,
+	17, 15, 19, 20, 30, 39, 51, 59,
+	21, 19, 20, 28, 39, 51, 59, 59,
+	24, 18, 27, 36, 51, 59, 59, 59,
+	23, 23, 36, 53, 59, 59, 59, 59
+	},
+	{/* level 3 */
+	16, 11, 11, 16, 23, 27, 31, 30,
+	11, 12, 12, 15, 20, 23, 23, 30,
+	11, 12, 13, 16, 23, 26, 35, 47,
+	16, 15, 16, 23, 26, 37, 47, 64,
+	23, 20, 23, 26, 39, 51, 64, 64,
+	27, 23, 26, 37, 51, 64, 64, 64,
+	31, 23, 35, 47, 64, 64, 64, 64,
+	30, 30, 47, 64, 64, 64, 64, 64
+
+	},
+	{/*level 4 - low quality */
+	20, 16, 25, 39, 50, 46, 62, 68,
+	16, 18, 23, 38, 38, 53, 65, 68,
+	25, 23, 31, 38, 53, 65, 68, 68,
+	39, 38, 38, 53, 65, 68, 68, 68,
+	50, 38, 53, 65, 68, 68, 68, 68,
+	46, 53, 65, 68, 68, 68, 68, 68,
+	62, 65, 68, 68, 68, 68, 68, 68,
+	68, 68, 68, 68, 68, 68, 68, 68
+	}
+#endif
+};
+
+/*
+ * Chrominance quantization tables, one 8x8 table per
+ * jpeg_img_quality_level (index 0 = highest quality, 3 = lowest).
+ * Companion of qtbl_luminance above; same element ordering applies.
+ */
+const unsigned char qtbl_chrominance[4][64] = {
+#if 1
+	/* LSI Q-table has very high compression rate.
+	 * It does not satisfy the spec of Reliability Test Group
+	 * belonging to TN. So We use the below Q-table.
+	 * - DSLIM
+	 */
+	{ /* Very high quality Customized by Samsung TN */
+	0x01, 0x01, 0x02, 0x04, 0x06, 0x11, 0x11, 0x11,
+	0x01, 0x01, 0x02, 0x04, 0x08, 0x11, 0x11, 0x11,
+	0x02, 0x02, 0x03, 0x04, 0x11, 0x11, 0x11, 0x11,
+	0x04, 0x04, 0x04, 0x05, 0x11, 0x11, 0x11, 0x11,
+	0x06, 0x08, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+	0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+	0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+	0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11
+	},
+	{
+	0x02, 0x02, 0x05, 0x0B, 0x1E, 0x1E, 0x1E, 0x1E,
+	0x02, 0x02, 0x03, 0x12, 0x1E, 0x1E, 0x1E, 0x1E,
+	0x03, 0x03, 0x10, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E,
+	0x0B, 0x14, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E,
+	0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E,
+	0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E,
+	0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E,
+	0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E, 0x1E,
+	},
+	{
+	0x05, 0x06, 0x0A, 0x1B, 0x41, 0x41, 0x41, 0x41,
+	0x06, 0x08, 0x0C, 0x29, 0x41, 0x41, 0x41, 0x41,
+	0x09, 0x0C, 0x22, 0x41, 0x41, 0x41, 0x41, 0x41,
+	0x1B, 0x29, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
+	0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
+	0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
+	0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
+	0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
+	},
+	{/* LSI - level 1 - high quality */
+	9, 8, 9, 11, 14, 17, 19, 24,
+	8, 10, 9, 11, 14, 13, 17, 22,
+	9, 9, 13, 14, 13, 15, 23, 26,
+	11, 11, 14, 14, 15, 20, 26, 33,
+	14, 14, 13, 15, 20, 24, 33, 39,
+	17, 13, 15, 20, 24, 32, 39, 39,
+	19, 17, 23, 26, 33, 39, 39, 39,
+	24, 22, 26, 33, 39, 39, 39, 39
+	},
+#else
+	{/* level 1 - high quality */
+	9, 8, 9, 11, 14, 17, 19, 24,
+	8, 10, 9, 11, 14, 13, 17, 22,
+	9, 9, 13, 14, 13, 15, 23, 26,
+	11, 11, 14, 14, 15, 20, 26, 33,
+	14, 14, 13, 15, 20, 24, 33, 39,
+	17, 13, 15, 20, 24, 32, 39, 39,
+	19, 17, 23, 26, 33, 39, 39, 39,
+	24, 22, 26, 33, 39, 39, 39, 39
+	},
+	{/* level 2 */
+	13, 11, 13, 16, 20, 20, 29, 37,
+	11, 14, 14, 14, 16, 20, 26, 32,
+	13, 14, 15, 17, 20, 23, 35, 40,
+	16, 14, 17, 21, 23, 30, 40, 50,
+	20, 16, 20, 23, 30, 37, 50, 59,
+	20, 20, 23, 30, 37, 48, 59, 59,
+	29, 26, 35, 40, 50, 59, 59, 59,
+	37, 32, 40, 50, 59, 59, 59, 59
+	},
+	{/* level 3 */
+	17, 15, 17, 21, 20, 26, 38, 48,
+	15, 19, 18, 17, 20, 26, 35, 43,
+	17, 18, 20, 22, 26, 30, 46, 53,
+	21, 17, 22, 28, 30, 39, 53, 64,
+	20, 20, 26, 30, 39, 48, 64, 64,
+	26, 26, 30, 39, 48, 63, 64, 64,
+	38, 35, 46, 53, 64, 64, 64, 64,
+	48, 43, 53, 64, 64, 64, 64, 64
+
+
+	},
+	{/*level 4 - low quality */
+	21, 25, 32, 38, 54, 68, 68, 68,
+	25, 28, 24, 38, 54, 68, 68, 68,
+	32, 24, 32, 43, 66, 68, 68, 68,
+	38, 38, 43, 53, 68, 68, 68, 68,
+	54, 54, 66, 68, 68, 68, 68, 68,
+	68, 68, 68, 68, 68, 68, 68, 68,
+	68, 68, 68, 68, 68, 68, 68, 68,
+	68, 68, 68, 68, 68, 68, 68, 68
+
+	}
+#endif
+};
+
+/* Standard JPEG luminance quantization table: the values equal
+ * ITU-T T.81 Annex K, Table K.1 (16, 11, 10, 16, 24, ...). */
+const unsigned char qtbl0[64] = {
+	0x10, 0x0B, 0x0A, 0x10, 0x18, 0x28, 0x33, 0x3D,
+	0x0C, 0x0C, 0x0E, 0x13, 0x1A, 0x3A, 0x3C, 0x37,
+	0x0E, 0x0D, 0x10, 0x18, 0x28, 0x39, 0x45, 0x38,
+	0x0E, 0x11, 0x16, 0x1D, 0x33, 0x57, 0x50, 0x3E,
+	0x12, 0x16, 0x25, 0x38, 0x44, 0x6D, 0x67, 0x4D,
+	0x18, 0x23, 0x37, 0x40, 0x51, 0x68, 0x71, 0x5C,
+	0x31, 0x40, 0x4E, 0x57, 0x67, 0x79, 0x78, 0x65,
+	0x48, 0x5C, 0x5F, 0x62, 0x70, 0x64, 0x67, 0x63
+};
+
+/* Added Quantization Table: standard JPEG chrominance values,
+ * ITU-T T.81 Annex K, Table K.2 (17, 18, 24, 47, 99, ...). */
+const unsigned char std_chrominance_quant_tbl_plus[64] = {
+	0x11, 0x12, 0x18, 0x2F, 0x63, 0x63, 0x63, 0x63,
+	0x12, 0x15, 0x1A, 0x42, 0x63, 0x63, 0x63, 0x63,
+	0x18, 0x1A, 0x38, 0x63, 0x63, 0x63, 0x63, 0x63,
+	0x2F, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+	0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
+};
+
+/* Quantization Table0: vendor-tuned (heavily scaled-down) luminance
+ * quantization values.
+ * NOTE(review): the final entry (1) breaks the pattern of its
+ * neighbours (15, 14, 17, 13, 14, 14, 14) — verify against the
+ * vendor's reference table. */
+unsigned char std_luminance_quant_tbl[64] = {
+	1,  1,  2,  1,  1,  2,  2,  2,
+	2,  3,  2,  2,  3,  3,  6,  4,
+	3,  3,  3,  3,  7,  5,  8,  4,
+	6,  8,  8, 10,  9,  8,  7, 11,
+	8, 10, 14, 13, 11, 10, 10, 12,
+	10,  8,  8, 11, 16, 12, 12, 13,
+	15, 15, 15, 15,  9, 11, 16, 17,
+	15, 14, 17, 13, 14, 14, 14, 1
+};
+
+/* Quantization Table1 */
+/* Vendor-tuned chrominance quantization values (Quantization
+ * Table1), clamped at 26 for the high-frequency coefficients. */
+unsigned char std_chrominance_quant_tbl[64] = {
+	4,  4,  4,  5,  4,  5,  9,  5,
+	5,  9, 15, 10,  8, 10, 15, 26,
+	19,  9,  9, 19, 26, 26, 26, 26,
+	13, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26
+};
+
+/* Huffman Table: standard luminance Huffman tables from ITU-T T.81
+ * Annex K — hdctbl0 = DC code-length counts (Table K.3), hdctblg0 =
+ * DC symbol values, hactbl0 = AC code-length counts (Table K.5),
+ * hactblg0 = AC symbol values (run/size pairs). */
+unsigned char hdctbl0[16] = {0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0};
+unsigned char hdctblg0[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb};
+
+unsigned char hactbl0[16] = {0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d};
+const unsigned char hactblg0[162] = {
+	0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+	0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+	0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+	0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+	0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+	0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+	0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+	0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+	0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+	0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+	0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+	0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+	0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+	0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+	0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+	0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+	0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+	0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+	0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+	0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+	0xf9, 0xfa
+};
+
+/* Huffman Table0: the same standard luminance tables as
+ * hdctbl0/hactbl0 above under the len_*/val_* naming used by the
+ * register-programming code (ITU-T T.81 Tables K.3/K.5). */
+unsigned char len_dc_luminance[16] = {
+	0, 1, 5, 1, 1, 1, 1, 1,
+	1, 0, 0, 0, 0, 0, 0, 0
+};
+
+unsigned char val_dc_luminance[12] = {
+	0, 1, 2, 3, 4, 5,
+	6, 7, 8, 9, 10, 11
+};
+
+unsigned char len_ac_luminance[16] = {
+	0, 2, 1, 3, 3, 2, 4, 3,
+	5, 5, 4, 4, 0, 0, 1, 0x7d
+};
+
+unsigned char val_ac_luminance[162] = {
+	0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+	0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+	0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+	0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+	0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+	0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+	0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+	0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+	0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+	0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+	0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+	0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+	0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+	0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+	0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+	0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+	0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+	0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+	0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+	0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+	0xf9, 0xfa
+};
+
+/* Huffman Table1: standard chrominance Huffman code-length counts
+ * and DC symbol values (ITU-T T.81 Tables K.4/K.6). */
+unsigned char len_dc_chrominance[16] = {
+	0, 3, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 0, 0, 0, 0, 0
+};
+
+unsigned char val_dc_chrominance[12] = {
+	0, 1, 2, 3, 4, 5,
+	6, 7, 8, 9, 10, 11
+};
+
+unsigned char len_ac_chrominance[16] = {
+	0, 2, 1, 2, 4, 4, 3, 4,
+	7, 5, 4, 4, 0, 1, 2, 0x77
+};
+
+/* Standard chrominance AC Huffman symbol values, ITU-T T.81 Annex K
+ * Table K.6.  The original table had 0x81 duplicated at index 20,
+ * which shifted every later symbol by one and dropped the final
+ * standard value 0xfa — decoded/encoded streams past that symbol
+ * would use wrong run/size codes.  Every other table in this file is
+ * the unmodified Annex K table, so restore the standard sequence. */
+unsigned char val_ac_chrominance[162] = {
+	0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+	0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+	0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+	0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
+	0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
+	0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
+	0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
+	0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+	0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+	0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+	0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+	0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+	0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
+	0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+	0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
+	0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
+	0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
+	0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
+	0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
+	0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+	0xf9, 0xfa
+};
+
+#endif /* __JPEG_CONF_H__ */
diff --git a/drivers/media/video/samsung/jpeg/jpeg_core.c b/drivers/media/video/samsung/jpeg/jpeg_core.c
new file mode 100644
index 0000000..86203ca
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/jpeg_core.c
@@ -0,0 +1,126 @@
+/* linux/drivers/media/video/samsung/jpeg/jpeg_core.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Core file for operation of the jpeg driver encoder/decoder
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+
+#include "jpeg_core.h"
+#include "jpeg_regs.h"
+#include "jpeg_mem.h"
+
+/**
+ * jpeg_int_pending - read and acknowledge the JPEG interrupt status
+ * @ctrl: driver control block (supplies the register base)
+ *
+ * Returns the raw interrupt status register value.  The interrupt is
+ * cleared before returning, so the status can be read only once per
+ * hardware event.
+ */
+int jpeg_int_pending(struct jpeg_control *ctrl)
+{
+	unsigned int int_status;
+
+	int_status = jpeg_get_int_status(ctrl->reg_base);
+	jpeg_dbg("state(%d)\n", int_status);
+
+	/* acknowledge so the next event can be latched */
+	jpeg_clear_int(ctrl->reg_base);
+
+	return int_status;
+}
+
+/**
+ * jpeg_set_dec_param - program the IP for a decode run
+ * @ctrl: driver control block carrying dec_param and reserved memory
+ *
+ * Resets the block, selects decode mode (mode bit 1), the requested
+ * output raw format and the stream/frame DMA addresses carved out of
+ * the reserved memory pool.  Returns 0 on success, -1 when @ctrl is
+ * NULL.
+ */
+int jpeg_set_dec_param(struct jpeg_control *ctrl)
+{
+	if (ctrl) {
+		jpeg_sw_reset(ctrl->reg_base);
+	} else {
+		jpeg_err("jpeg ctrl is NULL\n");
+		return -1;
+	}
+
+	jpeg_set_clk_power_on(ctrl->reg_base);
+	/* 1 = decode mode (jpeg_set_enc_param() passes 0) */
+	jpeg_set_mode(ctrl->reg_base, 1);
+	jpeg_set_dec_out_fmt(ctrl->reg_base, ctrl->dec_param.out_fmt);
+	jpeg_set_stream_buf(&ctrl->mem.stream_data_addr, ctrl->mem.base);
+	jpeg_set_stream_addr(ctrl->reg_base, ctrl->mem.stream_data_addr);
+	jpeg_set_frame_buf(&ctrl->mem.frame_data_addr, ctrl->mem.base);
+	jpeg_set_frame_addr(ctrl->reg_base, ctrl->mem.frame_data_addr);
+
+	/* NOTE(review): the backslash line-continuation embeds the next
+	 * line's leading whitespace in the log string */
+	jpeg_info("jpeg_set_dec_param fmt(%d)\
+	img_addr(0x%08x) jpeg_addr(0x%08x)\n",
+	ctrl->dec_param.out_fmt,
+	ctrl->mem.frame_data_addr,
+	ctrl->mem.stream_data_addr);
+
+	return 0;
+}
+
+/**
+ * jpeg_set_enc_param - program the IP for an encode run
+ * @ctrl: driver control block carrying enc_param and reserved memory
+ *
+ * Resets the block, selects encode mode, input/output formats, frame
+ * geometry, DMA buffers and the quality-dependent quantization and
+ * Huffman tables.  Returns 0 on success, -1 when @ctrl is NULL.
+ */
+int jpeg_set_enc_param(struct jpeg_control *ctrl)
+{
+	if (!ctrl) {
+		jpeg_err("jpeg ctrl is NULL\n");
+		return -1;
+	}
+
+	jpeg_sw_reset(ctrl->reg_base);
+	jpeg_set_clk_power_on(ctrl->reg_base);
+	/* 0 = encode mode (jpeg_set_dec_param() passes 1) */
+	jpeg_set_mode(ctrl->reg_base, 0);
+	jpeg_set_enc_in_fmt(ctrl->reg_base, ctrl->enc_param.in_fmt);
+	jpeg_set_enc_out_fmt(ctrl->reg_base, ctrl->enc_param.out_fmt);
+	jpeg_set_enc_dri(ctrl->reg_base, 2);
+	jpeg_set_frame_size(ctrl->reg_base,
+		ctrl->enc_param.width, ctrl->enc_param.height);
+	jpeg_set_stream_buf(&ctrl->mem.stream_data_addr, ctrl->mem.base);
+	jpeg_set_stream_addr(ctrl->reg_base, ctrl->mem.stream_data_addr);
+	jpeg_set_frame_buf(&ctrl->mem.frame_data_addr, ctrl->mem.base);
+	jpeg_set_frame_addr(ctrl->reg_base, ctrl->mem.frame_data_addr);
+	jpeg_set_enc_coef(ctrl->reg_base);
+	jpeg_set_enc_qtbl(ctrl->reg_base, ctrl->enc_param.quality);
+	jpeg_set_enc_htbl(ctrl->reg_base);
+
+	return 0;
+}
+
+/**
+ * jpeg_exe_dec - kick a decode and wait for the completion interrupt
+ * @ctrl: driver control block (must have been set up by
+ *        jpeg_set_dec_param())
+ *
+ * On success fills dec_param with the parsed image width/height and
+ * stream format and returns 0; returns -1 when the IRQ handler
+ * reported an error or a timeout left irq_ret stale.
+ *
+ * NOTE(review): interruptible_sleep_on_timeout() is racy (the irq can
+ * fire between jpeg_start_decode() and the sleep) and was removed from
+ * later kernels — consider wait_event_interruptible_timeout() on an
+ * explicit done flag.
+ */
+int jpeg_exe_dec(struct jpeg_control *ctrl)
+{
+
+	jpeg_start_decode(ctrl->reg_base);
+
+	if (interruptible_sleep_on_timeout(&ctrl->wq, INT_TIMEOUT) == 0)
+		jpeg_err("waiting for interrupt is timeout\n");
+
+
+	if (ctrl->irq_ret != OK_ENC_OR_DEC) {
+		jpeg_err("jpeg decode error(%d)\n", ctrl->irq_ret);
+		return -1;
+	}
+
+	/* the IP reports the decoded geometry and stream format */
+	jpeg_get_frame_size(ctrl->reg_base,
+		&ctrl->dec_param.width, &ctrl->dec_param.height);
+
+	ctrl->dec_param.in_fmt = jpeg_get_stream_fmt(ctrl->reg_base);
+
+	jpeg_info("decode img in_fmt(%d) width(%d) height(%d)\n",
+		ctrl->dec_param.in_fmt , ctrl->dec_param.width,
+		ctrl->dec_param.height);
+	return 0;
+}
+
+/**
+ * jpeg_exe_enc - kick an encode and wait for the completion interrupt
+ * @ctrl: driver control block (must have been set up by
+ *        jpeg_set_enc_param())
+ *
+ * On success stores the produced stream size in enc_param.size and
+ * returns 0; returns -1 on hardware error or stale irq_ret after a
+ * timeout.  Same interruptible_sleep_on_timeout() race caveat as
+ * jpeg_exe_dec().
+ */
+int jpeg_exe_enc(struct jpeg_control *ctrl)
+{
+
+	jpeg_start_encode(ctrl->reg_base);
+
+	if (interruptible_sleep_on_timeout(&ctrl->wq, INT_TIMEOUT) == 0)
+		jpeg_err("waiting for interrupt is timeout\n");
+
+	if (ctrl->irq_ret != OK_ENC_OR_DEC) {
+		jpeg_err("jpeg encode error(%d)\n", ctrl->irq_ret);
+		return -1;
+	}
+
+	ctrl->enc_param.size = jpeg_get_stream_size(ctrl->reg_base);
+
+	return 0;
+}
+
diff --git a/drivers/media/video/samsung/jpeg/jpeg_core.h b/drivers/media/video/samsung/jpeg/jpeg_core.h
new file mode 100644
index 0000000..73c44e2
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/jpeg_core.h
@@ -0,0 +1,138 @@
+/* linux/drivers/media/video/samsung/jpeg/jpeg_core.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Definition for core file of the jpeg operation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __JPEG_CORE_H__
+#define __JPEG_CORE_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+
+#include "jpeg_mem.h"
+
+#define INT_TIMEOUT 1000
+
+/* Completion status set by the IRQ handler and consumed by
+ * jpeg_exe_dec()/jpeg_exe_enc(). */
+enum jpeg_result {
+	OK_ENC_OR_DEC,
+	ERR_ENC_OR_DEC,
+	ERR_UNKNOWN,
+};
+
+/* Encoder quality levels; index into qtbl_luminance/qtbl_chrominance. */
+enum jpeg_img_quality_level {
+	QUALITY_LEVEL_1 = 0,		/* high */
+	QUALITY_LEVEL_2,
+	QUALITY_LEVEL_3,
+	QUALITY_LEVEL_4,		/* low */
+};
+
+/* raw data image format */
+enum jpeg_frame_format {
+	YUV_422,	/* decode output, encode input */
+	YUV_420,	/* decode output, encode output */
+	RGB_565,	/* encode input */
+};
+
+/* jpeg data format */
+enum jpeg_stream_format {
+	JPEG_422,	/* decode input, encode output */
+	JPEG_420,	/* decode input, encode output */
+	JPEG_444,	/* decode input*/
+	JPEG_GRAY,	/* decode input*/
+	JPEG_RESERVED,
+};
+
+/* Decode request/result: width/height and in_fmt are filled in by
+ * jpeg_exe_dec() from the parsed stream. */
+struct jpeg_dec_param {
+	unsigned int width;
+	unsigned int height;
+	unsigned int size;
+	enum jpeg_stream_format in_fmt;
+	enum jpeg_frame_format out_fmt;
+};
+
+/* Encode request/result: size is filled in by jpeg_exe_enc(). */
+struct jpeg_enc_param {
+	unsigned int width;
+	unsigned int height;
+	unsigned int size;
+	enum jpeg_frame_format in_fmt;
+	enum jpeg_stream_format out_fmt;
+	enum jpeg_img_quality_level quality;
+};
+
+/* Per-device state; a single global instance is allocated in probe. */
+struct jpeg_control {
+	struct clk		*clk;		/* "jpeg" gate clock */
+	atomic_t		in_use;		/* open() reference count */
+	struct mutex		lock;		/* serializes open() */
+	int			irq_no;
+	enum jpeg_result	irq_ret;	/* last IRQ outcome */
+	wait_queue_head_t	wq;		/* woken by jpeg_irq() */
+	void __iomem		*reg_base;	/* register i/o */
+	struct jpeg_mem		mem;		/* for reserved memory */
+	struct jpeg_dec_param	dec_param;
+	struct jpeg_enc_param	enc_param;
+};
+
+/* Log severity bits; enabled set is JPEG_LOG_DEFAULT below. */
+enum jpeg_log {
+	JPEG_LOG_DEBUG		= 0x1000,
+	JPEG_LOG_INFO		= 0x0100,
+	JPEG_LOG_WARN		= 0x0010,
+	JPEG_LOG_ERR		= 0x0001,
+};
+
+/* debug macro */
+#define JPEG_LOG_DEFAULT (JPEG_LOG_WARN | JPEG_LOG_ERR)
+
+#define JPEG_DEBUG(fmt, ...) \
+ do { \
+ if (JPEG_LOG_DEFAULT & JPEG_LOG_DEBUG) \
+ printk(KERN_DEBUG "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define JPEG_INFO(fmt, ...) \
+ do { \
+ if (JPEG_LOG_DEFAULT & JPEG_LOG_INFO) \
+ printk(KERN_INFO "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define JPEG_WARN(fmt, ...) \
+ do { \
+ if (JPEG_LOG_DEFAULT & JPEG_LOG_WARN) \
+ printk(KERN_WARNING "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+
+#define JPEG_ERROR(fmt, ...) \
+ do { \
+ if (JPEG_LOG_DEFAULT & JPEG_LOG_ERR) \
+ printk(KERN_ERR "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+
+#define jpeg_dbg(fmt, ...) JPEG_DEBUG(fmt, ##__VA_ARGS__)
+#define jpeg_info(fmt, ...) JPEG_INFO(fmt, ##__VA_ARGS__)
+#define jpeg_warn(fmt, ...) JPEG_WARN(fmt, ##__VA_ARGS__)
+#define jpeg_err(fmt, ...) JPEG_ERROR(fmt, ##__VA_ARGS__)
+
+/*=====================================================================*/
+int jpeg_int_pending(struct jpeg_control *ctrl);
+int jpeg_set_dec_param(struct jpeg_control *ctrl);
+int jpeg_set_enc_param(struct jpeg_control *ctrl);
+int jpeg_exe_dec(struct jpeg_control *ctrl);
+int jpeg_exe_enc(struct jpeg_control *ctrl);
+
+
+#endif /*__JPEG_CORE_H__*/
+
diff --git a/drivers/media/video/samsung/jpeg/jpeg_dev.c b/drivers/media/video/samsung/jpeg/jpeg_dev.c
new file mode 100644
index 0000000..4038fd2
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/jpeg_dev.c
@@ -0,0 +1,556 @@
+/* linux/drivers/media/video/samsung/jpeg/jpeg_dev.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Core file for Samsung Jpeg Interface driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/time.h>
+#include <linux/clk.h>
+#include <linux/semaphore.h>
+#include <linux/vmalloc.h>
+#include <asm/page.h>
+#include <linux/sched.h>
+
+#include <plat/regs_jpeg.h>
+#include <mach/irqs.h>
+#if defined(CONFIG_CPU_S5PV210)
+#include <mach/pd.h>
+#endif
+
+#if defined(CONFIG_S5P_SYSMMU_JPEG)
+#include <plat/sysmmu.h>
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+
+#include "jpeg_core.h"
+#include "jpeg_dev.h"
+#include "jpeg_mem.h"
+
+struct jpeg_control *jpeg_ctrl;
+static struct device *jpeg_pm;
+
+/**
+ * jpeg_open - char-device open: reserve an instance slot and power up
+ *
+ * The instance check was "in_use > JPEG_MAX_INSTANCE", which admitted
+ * JPEG_MAX_INSTANCE + 1 concurrent openers; it must reject once the
+ * count has reached the limit.  Also drop the reservation again if
+ * enabling the power domain fails, so a failed open no longer leaks
+ * an in_use reference.
+ */
+static int jpeg_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	int in_use;
+
+	mutex_lock(&jpeg_ctrl->lock);
+
+	in_use = atomic_read(&jpeg_ctrl->in_use);
+
+	if (in_use >= JPEG_MAX_INSTANCE) {
+		ret = -EBUSY;
+		goto resource_busy;
+	}
+	atomic_inc(&jpeg_ctrl->in_use);
+	jpeg_info("jpeg driver opened.\n");
+
+	mutex_unlock(&jpeg_ctrl->lock);
+#if defined(CONFIG_CPU_S5PV210)
+	ret = s5pv210_pd_enable("jpeg_pd");
+	if (ret < 0) {
+		jpeg_err("failed to enable jpeg power domain\n");
+		/* undo the reservation taken above */
+		atomic_dec(&jpeg_ctrl->in_use);
+		return -EINVAL;
+	}
+#endif
+
+	/* clock enable */
+	clk_enable(jpeg_ctrl->clk);
+
+	file->private_data = (struct jpeg_control *)jpeg_ctrl;
+
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_get_sync(jpeg_pm);
+#endif
+
+	return 0;
+resource_busy:
+	mutex_unlock(&jpeg_ctrl->lock);
+	return ret;
+}
+
+/*
+ * Release handler: drops the open reference, frees the reserved JPEG
+ * memory and gates the clock / power domain back off (mirror of
+ * jpeg_open()).
+ */
+static int jpeg_release(struct inode *inode, struct file *file)
+{
+	atomic_dec(&jpeg_ctrl->in_use);
+
+	jpeg_mem_free();
+
+	clk_disable(jpeg_ctrl->clk);
+
+#if defined(CONFIG_CPU_S5PV210)
+	if (s5pv210_pd_disable("jpeg_pd") < 0) {
+		jpeg_err("failed to disable jpeg power domain\n");
+		return -EINVAL;
+	}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_put_sync(jpeg_pm);
+#endif
+
+	return 0;
+}
+
+/**
+ * jpeg_ioctl - userspace entry point for codec setup and execution
+ *
+ * copy_from_user()/copy_to_user() results were previously assigned to
+ * a local and silently discarded; a failed copy now returns -EFAULT.
+ * The GET_* commands deliberately return buffer offsets/addresses in
+ * the ioctl return value — that existing ABI is kept unchanged, as is
+ * the "unknown cmd returns 0" behavior callers may rely on.
+ */
+static long jpeg_ioctl(struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	int ret;
+	struct jpeg_control *ctrl;
+
+	ctrl = (struct jpeg_control *)file->private_data;
+	if (!ctrl) {
+		jpeg_err("jpeg invalid input argument\n");
+		return -1;
+	}
+
+	switch (cmd) {
+
+	case IOCTL_JPEG_DEC_EXE:
+		if (copy_from_user(&ctrl->dec_param,
+			(struct jpeg_dec_param *)arg,
+			sizeof(struct jpeg_dec_param)))
+			return -EFAULT;
+
+		jpeg_exe_dec(ctrl);
+		if (copy_to_user((void *)arg,
+			(void *) &ctrl->dec_param,
+			sizeof(struct jpeg_dec_param)))
+			return -EFAULT;
+		break;
+
+	case IOCTL_JPEG_ENC_EXE:
+		if (copy_from_user(&ctrl->enc_param,
+			(struct jpeg_enc_param *)arg,
+			sizeof(struct jpeg_enc_param)))
+			return -EFAULT;
+
+		jpeg_exe_enc(ctrl);
+		if (copy_to_user((void *)arg,
+			(void *) &ctrl->enc_param,
+			sizeof(struct jpeg_enc_param)))
+			return -EFAULT;
+		break;
+
+	case IOCTL_GET_DEC_IN_BUF:
+	case IOCTL_GET_ENC_OUT_BUF:
+		return jpeg_get_stream_buf(arg);
+
+	case IOCTL_GET_DEC_OUT_BUF:
+	case IOCTL_GET_ENC_IN_BUF:
+		return jpeg_get_frame_buf(arg);
+
+	case IOCTL_GET_PHYADDR:
+		return jpeg_ctrl->mem.frame_data_addr;
+
+	case IOCTL_GET_PHYMEM_BASE:
+#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_JPEG
+		if (copy_to_user((void *)arg, &jpeg_ctrl->mem.base, sizeof(unsigned int))) {
+			jpeg_err("IOCTL_GET_PHYMEM_BASE:::copy_to_user error\n");
+			return -1;
+		}
+		return 0;
+#else
+		return -1;
+#endif
+
+	case IOCTL_GET_PHYMEM_SIZE:
+#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_JPEG
+		ret = CONFIG_VIDEO_SAMSUNG_MEMSIZE_JPEG * 1024;
+		if (copy_to_user((void *)arg, &ret, sizeof(unsigned int))) {
+			jpeg_err("IOCTL_GET_PHYMEM_SIZE:::copy_to_user error\n");
+			return -1;
+		}
+		return 0;
+#else
+		return -1;
+#endif
+
+	case IOCTL_SET_DEC_PARAM:
+		if (copy_from_user(&ctrl->dec_param,
+			(struct jpeg_dec_param *)arg,
+			sizeof(struct jpeg_dec_param)))
+			return -EFAULT;
+
+		/* NOTE(review): result still not propagated — the
+		 * original always returned 0 here */
+		ret = jpeg_set_dec_param(ctrl);
+
+		break;
+
+	case IOCTL_SET_ENC_PARAM:
+		if (copy_from_user(&ctrl->enc_param,
+			(struct jpeg_enc_param *)arg,
+			sizeof(struct jpeg_enc_param)))
+			return -EFAULT;
+
+		ret = jpeg_set_enc_param(ctrl);
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
+/**
+ * jpeg_mmap - map the reserved JPEG buffer into userspace
+ *
+ * Two strategies, chosen at build time:
+ *  - with the sysmmu (and no S5P_VMEM): mem.base is a kernel virtual
+ *    (vmalloc) address, so it is remapped page-by-page via
+ *    vmalloc_to_pfn();
+ *  - otherwise mem.base is a physical address and the whole range is
+ *    remapped in one remap_pfn_range() call.
+ * All mappings are uncached.
+ *
+ * NOTE(review): the vma length is not validated against the reserved
+ * region size — userspace could map past the buffer; verify callers.
+ */
+int jpeg_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+#if defined(CONFIG_S5P_SYSMMU_JPEG)
+#if !defined(CONFIG_S5P_VMEM)
+	unsigned long page_frame_no;
+	unsigned long start;
+	unsigned long size;
+	char *ptr; /* vmalloc */
+
+	size = vma->vm_end - vma->vm_start;
+	ptr = (char *)jpeg_ctrl->mem.base;
+	start = 0;
+
+	vma->vm_flags |= VM_RESERVED | VM_IO;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	while (size > 0) {
+		page_frame_no = vmalloc_to_pfn(ptr);
+		if (remap_pfn_range(vma, vma->vm_start + start, page_frame_no,
+			PAGE_SIZE, vma->vm_page_prot)) {
+			jpeg_err("failed to remap jpeg pfn range.\n");
+			return -ENOMEM;
+		}
+
+		start += PAGE_SIZE;
+		ptr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+#endif /* CONFIG_S5P_VMEM */
+#else
+	unsigned long page_frame_no;
+	unsigned long size;
+	int ret;
+
+	size = vma->vm_end - vma->vm_start;
+
+	vma->vm_flags |= VM_RESERVED | VM_IO;
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	page_frame_no = __phys_to_pfn(jpeg_ctrl->mem.base);
+	ret = remap_pfn_range(vma, vma->vm_start, page_frame_no,
+		size, vma->vm_page_prot);
+	if (ret != 0) {
+		jpeg_err("failed to remap jpeg pfn range.\n");
+		return -ENOMEM;
+	}
+#endif /* SYSMMU_JPEG_ON */
+
+	return 0;
+}
+
+/* Char-device entry points exposed via the misc device below. */
+static const struct file_operations jpeg_fops = {
+	.owner		= THIS_MODULE,
+	.open		= jpeg_open,
+	.release	= jpeg_release,
+	.unlocked_ioctl	= jpeg_ioctl,
+	.mmap		= jpeg_mmap,
+};
+
+/* Registered in probe; surfaces as /dev/<JPEG_NAME>. */
+static struct miscdevice jpeg_miscdev = {
+	.minor		= JPEG_MINOR_NUMBER,
+	.name		= JPEG_NAME,
+	.fops		= &jpeg_fops,
+};
+
+/*
+ * IRQ handler: translate the (read-and-cleared) hardware status into
+ * a jpeg_result and wake whoever is sleeping in jpeg_exe_dec() /
+ * jpeg_exe_enc().  Status 0x40 = done, 0x20 = hardware error; any
+ * other value (including 0) is reported as ERR_UNKNOWN.
+ */
+static irqreturn_t jpeg_irq(int irq, void *dev_id)
+{
+	struct jpeg_control *ctrl = (struct jpeg_control *) dev_id;
+
+	switch (jpeg_int_pending(ctrl)) {
+	case 0x40:
+		ctrl->irq_ret = OK_ENC_OR_DEC;
+		break;
+	case 0x20:
+		ctrl->irq_ret = ERR_ENC_OR_DEC;
+		break;
+	default:
+		ctrl->irq_ret = ERR_UNKNOWN;
+		break;
+	}
+
+	wake_up_interruptible(&ctrl->wq);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * One-time controller init: enable the sysmmu (when configured) and
+ * initialize the open counter, lock and wait queue.  Always returns 0.
+ */
+static int jpeg_setup_controller(struct jpeg_control *ctrl)
+{
+#if defined(CONFIG_S5P_SYSMMU_JPEG)
+	s5p_sysmmu_enable(jpeg_pm);
+	jpeg_dbg("sysmmu on\n");
+	/* jpeg hw uses kernel virtual address */
+	s5p_sysmmu_set_tablebase_pgd(jpeg_pm, __pa(swapper_pg_dir));
+#endif
+	atomic_set(&ctrl->in_use, 0);
+	mutex_init(&ctrl->lock);
+	init_waitqueue_head(&ctrl->wq);
+
+	return 0;
+}
+
+/**
+ * jpeg_probe - platform probe: map registers, hook the IRQ, grab the
+ * clock, reserve memory and register the misc device.
+ *
+ * The original error unwinding had three defects, all fixed here:
+ *  - kfree(res) on a platform resource (resources belong to the
+ *    device and must never be kfree'd; the held mem region was the
+ *    thing that needed releasing),
+ *  - free_irq(..., NULL) whose dev_id did not match the jpeg_ctrl
+ *    passed to request_irq(), so the handler was never removed,
+ *  - the jpeg_init_mem() failure path skipped clk_put(), leaking the
+ *    clock reference.
+ */
+static int jpeg_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct resource *mem_res;
+	int ret;
+
+	/* global structure */
+	jpeg_ctrl = kzalloc(sizeof(*jpeg_ctrl), GFP_KERNEL);
+	if (!jpeg_ctrl) {
+		dev_err(&pdev->dev, "%s: not enough memory\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	/* setup jpeg control */
+	ret = jpeg_setup_controller(jpeg_ctrl);
+	if (ret) {
+		jpeg_err("failed to setup controller\n");
+		goto err_setup;
+	}
+
+	/* memory region: keep mem_res so the exact region can be
+	 * released on the error paths below */
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_res) {
+		jpeg_err("failed to get jpeg memory region resource\n");
+		ret = -ENOENT;
+		goto err_res;
+	}
+
+	if (!request_mem_region(mem_res->start,
+		mem_res->end - mem_res->start + 1, pdev->name)) {
+		jpeg_err("failed to request jpeg io memory region\n");
+		ret = -ENOMEM;
+		goto err_res;
+	}
+
+	/* ioremap */
+	jpeg_ctrl->reg_base = ioremap(mem_res->start,
+		mem_res->end - mem_res->start + 1);
+	if (!jpeg_ctrl->reg_base) {
+		jpeg_err("failed to remap jpeg io region\n");
+		ret = -ENOENT;
+		goto err_map;
+	}
+
+	/* irq */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		jpeg_err("failed to request jpeg irq resource\n");
+		ret = -ENOENT;
+		goto err_irq_res;
+	}
+
+	jpeg_ctrl->irq_no = res->start;
+	ret = request_irq(jpeg_ctrl->irq_no, (void *)jpeg_irq,
+			IRQF_DISABLED, pdev->name, jpeg_ctrl);
+	if (ret != 0) {
+		jpeg_err("failed to jpeg request irq\n");
+		ret = -ENOENT;
+		goto err_irq_res;
+	}
+
+	/* clock */
+	jpeg_ctrl->clk = clk_get(&pdev->dev, "jpeg");
+	if (IS_ERR(jpeg_ctrl->clk)) {
+		jpeg_err("failed to find jpeg clock source\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	ret = jpeg_init_mem(&pdev->dev, &jpeg_ctrl->mem.base);
+	if (ret != 0) {
+		jpeg_err("failed to init. jpeg mem");
+		ret = -ENOMEM;
+		goto err_mem;
+	}
+
+	ret = misc_register(&jpeg_miscdev);
+	if (ret) {
+		jpeg_err("failed to register misc driver\n");
+		goto err_mem;
+	}
+
+	jpeg_pm = &pdev->dev;
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_enable(jpeg_pm);
+#endif
+	return 0;
+
+err_mem:
+	clk_put(jpeg_ctrl->clk);
+err_clk:
+	/* dev_id must match the request_irq() call above */
+	free_irq(jpeg_ctrl->irq_no, jpeg_ctrl);
+err_irq_res:
+	iounmap(jpeg_ctrl->reg_base);
+err_map:
+	release_mem_region(mem_res->start, mem_res->end - mem_res->start + 1);
+err_res:
+	mutex_destroy(&jpeg_ctrl->lock);
+err_setup:
+	kfree(jpeg_ctrl);
+err_alloc:
+	return ret;
+
+}
+
+/**
+ * jpeg_remove - platform remove: tear down in reverse probe order.
+ *
+ * Fixes vs. the original: the misc device is deregistered first (it
+ * was previously deregistered after kfree(jpeg_ctrl), leaving a live
+ * char device whose fops touched freed state); free_irq() now passes
+ * the jpeg_ctrl dev_id that request_irq() registered (the platform
+ * device pointer never matched, so the handler was never removed);
+ * and the clock reference taken by clk_get() is dropped.
+ */
+static int jpeg_remove(struct platform_device *dev)
+{
+#if defined(CONFIG_S5P_SYSMMU_JPEG)
+	s5p_sysmmu_disable(jpeg_pm);
+	jpeg_dbg("sysmmu off\n");
+#endif
+	misc_deregister(&jpeg_miscdev);
+
+	free_irq(jpeg_ctrl->irq_no, jpeg_ctrl);
+	clk_put(jpeg_ctrl->clk);
+	iounmap(jpeg_ctrl->reg_base);
+	mutex_destroy(&jpeg_ctrl->lock);
+	kfree(jpeg_ctrl);
+
+#ifdef CONFIG_PM_RUNTIME
+	pm_runtime_disable(jpeg_pm);
+#endif
+	return 0;
+}
+
+/* System suspend: gate the clock and (on S5PV210) the power domain. */
+static int jpeg_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	/* clock disable */
+	clk_disable(jpeg_ctrl->clk);
+#if defined(CONFIG_CPU_S5PV210)
+	if (s5pv210_pd_disable("jpeg_pd") < 0) {
+		jpeg_err("failed to disable jpeg power domain\n");
+		return -EINVAL;
+	}
+#endif
+	return 0;
+}
+
+/* System resume: re-enable the power domain (S5PV210) and clock.
+ * NOTE(review): register state is not reprogrammed here — callers
+ * presumably re-issue SET_*_PARAM after resume; confirm. */
+static int jpeg_resume(struct platform_device *pdev)
+{
+#if defined(CONFIG_CPU_S5PV210)
+	if (s5pv210_pd_enable("jpeg_pd") < 0) {
+		jpeg_err("failed to enable jpeg power domain\n");
+		return -EINVAL;
+	}
+#endif
+	/* clock enable */
+	clk_enable(jpeg_ctrl->clk);
+
+	return 0;
+}
+
+/**
+ * jpeg_suspend_pd - dev_pm_ops .suspend adapter around jpeg_suspend()
+ *
+ * The original computed @ret and then returned a hard-coded 0, so a
+ * failed suspend was invisible to the PM core; propagate it instead.
+ */
+int jpeg_suspend_pd(struct device *dev)
+{
+	struct platform_device *pdev;
+	int ret;
+	pm_message_t state;
+
+	state.event = 0;
+	pdev = to_platform_device(dev);
+	ret = jpeg_suspend(pdev, state);
+
+	return ret;
+}
+
+/**
+ * jpeg_resume_pd - dev_pm_ops .resume adapter around jpeg_resume()
+ *
+ * Propagate the result instead of discarding it (the original always
+ * returned 0, masking resume failures).
+ */
+int jpeg_resume_pd(struct device *dev)
+{
+	struct platform_device *pdev;
+	int ret;
+
+	pdev = to_platform_device(dev);
+	ret = jpeg_resume(pdev);
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime-PM stubs: the clock/power handling for this driver lives in
+ * jpeg_open()/jpeg_release(), so these only satisfy the PM core. */
+static int jpeg_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int jpeg_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+/* dev_pm_ops used only when the S5PV310 power-domain + runtime-PM
+ * combination is configured (see the .pm assignment below). */
+static const struct dev_pm_ops jpeg_pm_ops = {
+	.suspend		= jpeg_suspend_pd,
+	.resume			= jpeg_resume_pd,
+#ifdef CONFIG_PM_RUNTIME
+	.runtime_suspend	= jpeg_runtime_suspend,
+	.runtime_resume		= jpeg_runtime_resume,
+#endif
+};
+/* Legacy suspend/resume callbacks and dev_pm_ops are mutually
+ * exclusive here, selected by the same config combination. */
+static struct platform_driver jpeg_driver = {
+	.probe		= jpeg_probe,
+	.remove		= jpeg_remove,
+#if (!defined(CONFIG_S5PV310_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
+	.suspend	= jpeg_suspend,
+	.resume		= jpeg_resume,
+#endif
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= JPEG_NAME,
+#if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+		.pm	= &jpeg_pm_ops,
+#else
+		.pm	= NULL,
+#endif
+	},
+};
+
+/*
+ * Module init: register the platform driver.  The registration result
+ * was previously discarded (module load reported success even when
+ * registration failed); propagate it.  Also give the printk an
+ * explicit log level.
+ */
+static int __init jpeg_init(void)
+{
+	printk(KERN_INFO "Initialize JPEG driver\n");
+
+	return platform_driver_register(&jpeg_driver);
+}
+
+/* Module exit: unregister the platform driver (remove() handles the
+ * per-device teardown). */
+static void __exit jpeg_exit(void)
+{
+	platform_driver_unregister(&jpeg_driver);
+}
+
+module_init(jpeg_init);
+module_exit(jpeg_exit);
+
+MODULE_AUTHOR("Hyunmin, Kwak <hyunmin.kwak@samsung.com>");
+MODULE_DESCRIPTION("JPEG Codec Device Driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/media/video/samsung/jpeg/jpeg_dev.h b/drivers/media/video/samsung/jpeg/jpeg_dev.h
new file mode 100644
index 0000000..20fba9c
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/jpeg_dev.h
@@ -0,0 +1,36 @@
+/* linux/drivers/media/video/samsung/jpeg/jpeg_dev.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Header file for Samsung Jpeg Interface driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#ifndef __JPEG_DEV_H__
+#define __JPEG_DEV_H__
+
+
+#define JPEG_MINOR_NUMBER 254
+#define JPEG_NAME "s5p-jpeg"
+#define JPEG_MAX_INSTANCE 1
+
+#define JPEG_IOCTL_MAGIC 'J'
+
+#define IOCTL_JPEG_DEC_EXE _IO(JPEG_IOCTL_MAGIC, 1)
+#define IOCTL_JPEG_ENC_EXE _IO(JPEG_IOCTL_MAGIC, 2)
+#define IOCTL_GET_DEC_IN_BUF _IO(JPEG_IOCTL_MAGIC, 3)
+#define IOCTL_GET_DEC_OUT_BUF _IO(JPEG_IOCTL_MAGIC, 4)
+#define IOCTL_GET_ENC_IN_BUF _IO(JPEG_IOCTL_MAGIC, 5)
+#define IOCTL_GET_ENC_OUT_BUF _IO(JPEG_IOCTL_MAGIC, 6)
+#define IOCTL_SET_DEC_PARAM _IO(JPEG_IOCTL_MAGIC, 7)
+#define IOCTL_SET_ENC_PARAM _IO(JPEG_IOCTL_MAGIC, 8)
+#define IOCTL_GET_PHYADDR _IO(JPEG_IOCTL_MAGIC, 9)
+#define IOCTL_GET_PHYMEM_BASE _IOR(JPEG_IOCTL_MAGIC, 10, unsigned int)
+#define IOCTL_GET_PHYMEM_SIZE _IOR(JPEG_IOCTL_MAGIC, 11, unsigned int)
+#endif /*__JPEG_DEV_H__*/
+
diff --git a/drivers/media/video/samsung/jpeg/jpeg_mem.c b/drivers/media/video/samsung/jpeg/jpeg_mem.c
new file mode 100644
index 0000000..9214c10
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/jpeg_mem.c
@@ -0,0 +1,131 @@
+/* linux/drivers/media/video/samsung/jpeg/jpeg_mem.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Memory management for the JPEG driver encoder/decoder.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <asm/page.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+
+#if defined(CONFIG_S5P_MEM_CMA)
+#include <linux/cma.h>
+#elif defined(CONFIG_S5P_MEM_BOOTMEM)
+#include <mach/media.h>
+#include <plat/media.h>
+#endif
+#include "jpeg_mem.h"
+#include "jpeg_core.h"
+
+#if defined(CONFIG_S5P_SYSMMU_JPEG) && defined(CONFIG_S5P_VMEM)
+unsigned int s_cookie; /* for stream buffer */
+unsigned int f_cookie; /* for frame buffer */
+#endif
+
+/*
+ * jpeg_init_mem - obtain the JPEG working buffer
+ * @dev:  device used for the CMA allocation (CMA configs only)
+ * @base: out parameter receiving the buffer base address
+ *
+ * Depending on the kernel configuration the buffer comes from
+ * vmalloc (SYSMMU without S5P_VMEM), from CMA, or from the bootmem
+ * media region. Returns 0 on success, -1 on allocation failure.
+ *
+ * Fixes: the CMA region info was previously logged *before* the
+ * cma_info() error check, printing a possibly-uninitialized
+ * mem_info on failure; the check now comes first. The failure
+ * printk also carries an explicit KERN_ERR level.
+ */
+int jpeg_init_mem(struct device *dev, unsigned int *base)
+{
+#ifdef CONFIG_S5P_MEM_CMA
+	struct cma_info mem_info;
+	int err;
+	int size;
+	char cma_name[8];
+#endif
+#if defined(CONFIG_S5P_SYSMMU_JPEG)
+#if !defined(CONFIG_S5P_VMEM)
+	unsigned char *addr;
+	addr = vmalloc(JPEG_MEM_SIZE);
+	if (addr == NULL)
+		return -1;
+	*base = (unsigned int)addr;
+#endif /* CONFIG_S5P_VMEM */
+#else
+#ifdef CONFIG_S5P_MEM_CMA
+	/* CMA */
+	sprintf(cma_name, "jpeg");
+	err = cma_info(&mem_info, dev, 0);
+	if (err) {
+		printk(KERN_ERR "%s: get cma info failed\n", __func__);
+		return -1;
+	}
+	jpeg_info("[cma_info] start_addr : 0x%x, end_addr : 0x%x, "
+			"total_size : 0x%x, free_size : 0x%x\n",
+			mem_info.lower_bound, mem_info.upper_bound,
+			mem_info.total_size, mem_info.free_size);
+	size = mem_info.total_size;
+	/* NOTE(review): cma_alloc() failure is not checked here - the
+	 * original behaved the same; confirm the error convention of
+	 * this kernel's cma_alloc() before adding a check. */
+	*base = (dma_addr_t)cma_alloc
+		(dev, cma_name, (size_t)size, 0);
+	jpeg_info("size = 0x%x\n", size);
+	jpeg_info("*base = 0x%x\n", *base);
+#else
+	*base = s5p_get_media_memory_bank(S5P_MDEV_JPEG, 0);
+#endif
+#endif /* CONFIG_S5P_SYSMMU_JPEG */
+	return 0;
+}
+
+/*
+ * jpeg_mem_free - release the s5p_vmem stream/frame allocations
+ *
+ * Only does work on SYSMMU + S5P_VMEM configurations; the cookies
+ * are zeroed after freeing so a repeated call is harmless.
+ * Always returns 0.
+ */
+int jpeg_mem_free(void)
+{
+#if defined(CONFIG_S5P_SYSMMU_JPEG) && defined(CONFIG_S5P_VMEM)
+	if (s_cookie != 0) {
+		s5p_vfree(s_cookie);
+		s_cookie = 0;
+	}
+	if (f_cookie != 0) {
+		s5p_vfree(f_cookie);
+		f_cookie = 0;
+	}
+#endif
+	return 0;
+}
+
+/*
+ * jpeg_get_stream_buf - allocate/locate the compressed-stream buffer
+ * @arg: requested size (vmem path) or base offset (reserved path)
+ *
+ * On the vmem path the size is rounded up to whole pages (note: a
+ * full extra page is added even when already aligned) and allocated
+ * via s5p_vmalloc; returns -1 (as unsigned long) on failure.
+ * Otherwise the stream area simply starts at JPEG_MAIN_START within
+ * the reserved region.
+ */
+unsigned long jpeg_get_stream_buf(unsigned long arg)
+{
+#if defined(CONFIG_S5P_SYSMMU_JPEG) && defined(CONFIG_S5P_VMEM)
+	arg = ((arg / PAGE_SIZE + 1) * PAGE_SIZE);
+	s_cookie = (unsigned int)s5p_vmalloc(arg);
+	if (s_cookie == 0)
+		return -1;
+	return (unsigned long)s_cookie;
+#else
+	return arg + JPEG_MAIN_START;
+#endif
+}
+
+/*
+ * jpeg_get_frame_buf - allocate/locate the raw frame buffer
+ * @arg: requested size (vmem path) or base offset (reserved path)
+ *
+ * Mirrors jpeg_get_stream_buf() but tracks the frame cookie; on the
+ * reserved-memory path the frame area sits JPEG_S_BUF_SIZE past the
+ * given offset (i.e. right after the stream buffer).
+ */
+unsigned long jpeg_get_frame_buf(unsigned long arg)
+{
+#if defined(CONFIG_S5P_SYSMMU_JPEG) && defined(CONFIG_S5P_VMEM)
+	arg = ((arg / PAGE_SIZE + 1) * PAGE_SIZE);
+	f_cookie = (unsigned int)s5p_vmalloc(arg);
+	if (f_cookie == 0)
+		return -1;
+	return (unsigned long)f_cookie;
+#else
+	return arg + JPEG_S_BUF_SIZE;
+#endif
+}
+
+/*
+ * jpeg_set_stream_buf - resolve the device address of the stream buffer
+ * @str_buf: out parameter receiving the address
+ * @base:    reserved-region base (unused on the vmem path)
+ */
+void jpeg_set_stream_buf(unsigned int *str_buf, unsigned int base)
+{
+#if defined(CONFIG_S5P_SYSMMU_JPEG) && defined(CONFIG_S5P_VMEM)
+	*str_buf = (unsigned int)s5p_getaddress(s_cookie);
+#else
+	*str_buf = base;
+#endif
+}
+
+/*
+ * jpeg_set_frame_buf - resolve the device address of the frame buffer
+ * @fra_buf: out parameter receiving the address
+ * @base:    reserved-region base; the frame area starts
+ *           JPEG_S_BUF_SIZE past it (after the stream buffer)
+ */
+void jpeg_set_frame_buf(unsigned int *fra_buf, unsigned int base)
+{
+#if defined(CONFIG_S5P_SYSMMU_JPEG) && defined(CONFIG_S5P_VMEM)
+	*fra_buf = (unsigned int)s5p_getaddress(f_cookie);
+#else
+	*fra_buf = base + JPEG_S_BUF_SIZE;
+#endif
+}
+
diff --git a/drivers/media/video/samsung/jpeg/jpeg_mem.h b/drivers/media/video/samsung/jpeg/jpeg_mem.h
new file mode 100644
index 0000000..6874992
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/jpeg_mem.h
@@ -0,0 +1,66 @@
+/* linux/drivers/media/video/samsung/jpeg/jpeg_mem.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Definitions for JPEG encoder/decoder memory operations
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __JPEG_MEM_H__
+#define __JPEG_MEM_H__
+
+// JYSHIN for google demo 20101014
+#define MAX_JPEG_WIDTH 3264
+#define MAX_JPEG_HEIGHT 2448
+
+//#define MAX_JPEG_WIDTH 3072
+//#define MAX_JPEG_HEIGHT 2048
+#ifdef CONFIG_UMP_VCM_ALLOC
+#include <plat/s5p-vcm.h>
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#endif
+
+#define MAX_JPEG_RES (MAX_JPEG_WIDTH * MAX_JPEG_HEIGHT)
+
+/* jpeg stream buf */
+#define JPEG_S_BUF_SIZE ((MAX_JPEG_RES / PAGE_SIZE + 1) * PAGE_SIZE)
+/* jpeg frame buf */
+#define JPEG_F_BUF_SIZE (((MAX_JPEG_RES * 3) / PAGE_SIZE + 1) * PAGE_SIZE)
+
+#define JPEG_MEM_SIZE (JPEG_S_BUF_SIZE + JPEG_F_BUF_SIZE)
+#define JPEG_MAIN_START 0x00
+
+#define SYSMMU_JPEG_ON
+
+/* for reserved memory */
+struct jpeg_mem {
+ /* buffer base */
+ unsigned int base;
+ /* for jpeg stream data */
+ unsigned int stream_data_addr;
+ unsigned int stream_data_size;
+ /* for raw data */
+ unsigned int frame_data_addr;
+ unsigned int frame_data_size;
+};
+
+int jpeg_init_mem(struct device *dev, unsigned int *base);
+int jpeg_mem_free(void);
+unsigned long jpeg_get_stream_buf(unsigned long arg);
+unsigned long jpeg_get_frame_buf(unsigned long arg);
+void jpeg_set_stream_buf(unsigned int *str_buf, unsigned int base);
+void jpeg_set_frame_buf(unsigned int *fra_buf, unsigned int base);
+
+#if defined(CONFIG_S5P_SYSMMU_JPEG) && defined(CONFIG_S5P_VMEM)
+extern unsigned int *s5p_vmalloc(size_t size);
+extern void *s5p_getaddress(unsigned int cookie);
+extern void s5p_vfree(unsigned int cookie);
+#endif
+
+#endif /* __JPEG_MEM_H__ */
+
diff --git a/drivers/media/video/samsung/jpeg/jpeg_regs.c b/drivers/media/video/samsung/jpeg/jpeg_regs.c
new file mode 100644
index 0000000..d83326f
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/jpeg_regs.c
@@ -0,0 +1,289 @@
+/* linux/drivers/media/video/samsung/jpeg/jpeg_regs.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register interface file for jpeg driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#include <linux/io.h>
+#include <plat/regs_jpeg.h>
+
+#include "jpeg_regs.h"
+#include "jpeg_conf.h"
+
+/*
+ * jpeg_sw_reset - soft-reset the JPEG core
+ *
+ * Sets the software-reset bit, then polls until the hardware clears
+ * it. NOTE(review): the reset bit is re-written on every poll
+ * iteration (once before the loop and again inside it) - confirm
+ * against the IP manual that repeated writes are required rather
+ * than a single write followed by a plain poll. The loop also has
+ * no timeout, so a wedged core would hang here.
+ */
+void jpeg_sw_reset(void __iomem *base)
+{
+	writel(S5P_JPEG_SW_RESET_REG_ENABLE,
+		base + S5P_JPEG_SW_RESET_REG);
+
+	do {
+		writel(S5P_JPEG_SW_RESET_REG_ENABLE,
+			base + S5P_JPEG_SW_RESET_REG);
+	} while (((readl(base + S5P_JPEG_SW_RESET_REG))
+		& S5P_JPEG_SW_RESET_REG_ENABLE)
+		== S5P_JPEG_SW_RESET_REG_ENABLE);
+
+}
+
+/*
+ * jpeg_set_clk_power_on - assert the power-on bit in the clock
+ * control register (read-modify-write, other bits preserved)
+ */
+void jpeg_set_clk_power_on(void __iomem *base)
+{
+	u32 clkcon;
+
+	clkcon = readl(base + S5P_JPEG_CLKCON_REG);
+	clkcon |= S5P_JPEG_CLKCON_REG_POWER_ON_ACTIVATE;
+	writel(clkcon, base + S5P_JPEG_CLKCON_REG);
+}
+
+/*
+ * jpeg_set_mode - select the processing direction in the MOD register
+ * @mode: non-zero selects decode, zero selects encode
+ *
+ * Read-modify-write: the direction bit is OR-ed into the current
+ * register value (one read, one write - same as the original).
+ */
+void jpeg_set_mode(void __iomem *base, int mode)
+{
+	u32 mod;
+
+	mod = readl(base + S5P_JPEG_MOD_REG);
+	if (mode)
+		mod |= S5P_JPEG_MOD_REG_PROC_DEC;
+	else
+		mod |= S5P_JPEG_MOD_REG_PROC_ENC;
+	writel(mod, base + S5P_JPEG_MOD_REG);
+}
+
+/*
+ * jpeg_set_dec_out_fmt - select the decoder output pixel format
+ * @out_fmt: YUV_422 selects 4:2:2 output; any other value falls
+ *           back to the default 4:2:0 output
+ */
+void jpeg_set_dec_out_fmt(void __iomem *base,
+	enum jpeg_frame_format out_fmt)
+{
+	/* clear the decode output-format field, then set the mode */
+	writel(readl(base + S5P_JPEG_OUTFORM_REG) &
+			~(S5P_JPEG_OUTFORM_REG_YCBCY420),
+			base + S5P_JPEG_OUTFORM_REG);
+	if (out_fmt == YUV_422) {
+		writel(readl(base + S5P_JPEG_OUTFORM_REG) |
+			(S5P_JPEG_OUTFORM_REG_YCBCY422),
+			base + S5P_JPEG_OUTFORM_REG);
+	} else { /* default YUV420 */
+		writel(readl(base + S5P_JPEG_OUTFORM_REG) |
+			(S5P_JPEG_OUTFORM_REG_YCBCY420),
+			base + S5P_JPEG_OUTFORM_REG);
+	}
+
+}
+
+/*
+ * jpeg_set_enc_in_fmt - select the encoder input pixel format
+ * @in_fmt: YUV_422 selects YCbCr 4:2:2 input; any other value
+ *          selects RGB input
+ *
+ * Read-modify-write on the CMOD register (one read, one write -
+ * same as the original).
+ */
+void jpeg_set_enc_in_fmt(void __iomem *base,
+	enum jpeg_frame_format in_fmt)
+{
+	u32 cmod;
+
+	cmod = readl(base + S5P_JPEG_CMOD_REG);
+	if (in_fmt == YUV_422)
+		cmod |= S5P_JPEG_CMOD_REG_MOD_SEL_YCBCR422;
+	else
+		cmod |= S5P_JPEG_CMOD_REG_MOD_SEL_RGB;
+	writel(cmod, base + S5P_JPEG_CMOD_REG);
+}
+
+/*
+ * jpeg_set_enc_out_fmt - select the encoder subsampling mode
+ * @out_fmt: JPEG_422 selects 4:2:2 subsampling; any other value
+ *           selects 4:2:0
+ *
+ * Read-modify-write on the MOD register (one read, one write -
+ * same as the original).
+ */
+void jpeg_set_enc_out_fmt(void __iomem *base,
+	enum jpeg_stream_format out_fmt)
+{
+	u32 mod;
+
+	mod = readl(base + S5P_JPEG_MOD_REG);
+	if (out_fmt == JPEG_422)
+		mod |= S5P_JPEG_MOD_REG_SUBSAMPLE_422;
+	else
+		mod |= S5P_JPEG_MOD_REG_SUBSAMPLE_420;
+	writel(mod, base + S5P_JPEG_MOD_REG);
+}
+
+/*
+ * jpeg_set_enc_dri - program the JPEG restart interval (DRI)
+ * @value: restart interval; the low byte goes to the L register,
+ *         the next byte to the U register
+ */
+void jpeg_set_enc_dri(void __iomem *base, unsigned int value)
+{
+	/* set JPEG Restart Interval */
+	writel(value, base + S5P_JPEG_DRI_L_REG);
+	writel((value >> 8), base + S5P_JPEG_DRI_U_REG);
+}
+
+/*
+ * jpeg_set_enc_qtbl - load the encoder quantization tables
+ * @level: quality level selecting which table pair to use; any
+ *         out-of-range value falls back to QUALITY_LEVEL_1
+ *
+ * Selects the table index in the QTBL register, then writes the
+ * 64-entry luminance table followed by the 64-entry chrominance
+ * table (same register order as the original).
+ */
+void jpeg_set_enc_qtbl(void __iomem *base,
+	enum jpeg_img_quality_level level)
+{
+	unsigned int qt_num;
+	int idx;
+
+	switch (level) {
+	case QUALITY_LEVEL_2:
+		qt_num = S5P_JPEG_QHTBL_REG_QT_NUM2;
+		break;
+	case QUALITY_LEVEL_3:
+		qt_num = S5P_JPEG_QHTBL_REG_QT_NUM3;
+		break;
+	case QUALITY_LEVEL_4:
+		qt_num = S5P_JPEG_QHTBL_REG_QT_NUM4;
+		break;
+	case QUALITY_LEVEL_1:
+	default:
+		qt_num = S5P_JPEG_QHTBL_REG_QT_NUM1;
+		break;
+	}
+	writel(qt_num, base + S5P_JPEG_QTBL_REG);
+
+	for (idx = 0; idx < 64; idx++) {
+		writel((unsigned int)qtbl_luminance[level][idx],
+			base + S5P_JPEG_QTBL0_REG + (idx * 0x04));
+	}
+	for (idx = 0; idx < 64; idx++) {
+		writel((unsigned int)qtbl_chrominance[level][idx],
+			base + S5P_JPEG_QTBL1_REG + (idx * 0x04));
+	}
+}
+
+/*
+ * jpeg_set_enc_htbl - load the encoder Huffman tables
+ *
+ * Clears the table-select register, then writes the DC bit-length
+ * table (16 words), DC value table (12 words), AC bit-length table
+ * (16 words) and AC value table (162 words) in that order.
+ */
+void jpeg_set_enc_htbl(void __iomem *base)
+{
+	int i;
+
+	/* set huffman table index for jpeg encode */
+	writel(0x00, base + S5P_JPEG_HTBL_REG);
+
+	for (i = 0; i < 16; i++) {
+		writel((unsigned int)hdctbl0[i],
+			base + S5P_JPEG_HDCTBL0_REG + (i*0x04));
+	}
+	for (i = 0; i < 12; i++) {
+		writel((unsigned int)hdctblg0[i],
+			base + S5P_JPEG_HDCTBLG0_REG + (i*0x04));
+	}
+	for (i = 0; i < 16; i++) {
+		writel((unsigned int)hactbl0[i],
+			base + S5P_JPEG_HACTBL0_REG + (i*0x04));
+	}
+	for (i = 0; i < 162; i++) {
+		writel((unsigned int)hactblg0[i],
+			base + S5P_JPEG_HACTBLG0_REG + (i*0x04));
+	}
+}
+
+/*
+ * jpeg_set_enc_coef - program the RGB-to-YCbCr conversion
+ * coefficients into the three COEF registers
+ */
+void jpeg_set_enc_coef(void __iomem *base)
+{
+	/* set coefficient value for RGB-to-YCbCr */
+	writel(COEF1_RGB_2_YUV, base + S5P_JPEG_COEF1_REG);
+	writel(COEF2_RGB_2_YUV, base + S5P_JPEG_COEF2_REG);
+	writel(COEF3_RGB_2_YUV, base + S5P_JPEG_COEF3_REG);
+}
+
+/*
+ * jpeg_set_frame_addr - program the frame (image) buffer address
+ *
+ * Writes IMGADR. The original comment here said "compressed input
+ * data", which describes the stream register (JPGADR, see
+ * jpeg_set_stream_addr), not this one.
+ */
+void jpeg_set_frame_addr(void __iomem *base, unsigned int fra_addr)
+{
+	/* set the address of the image (frame) buffer */
+	writel(fra_addr, base + S5P_JPEG_IMGADR_REG);
+}
+
+/*
+ * jpeg_set_stream_addr - program the compressed-stream buffer
+ * address (JPGADR register)
+ */
+void jpeg_set_stream_addr(void __iomem *base, unsigned int str_addr)
+{
+	/* set the address of compressed input data */
+	writel(str_addr, base + S5P_JPEG_JPGADR_REG);
+}
+
+/*
+ * jpeg_get_frame_size - read the image dimensions from the hardware
+ * @width:  out parameter, assembled from the X upper/lower byte regs
+ * @height: out parameter, assembled from the Y upper/lower byte regs
+ */
+void jpeg_get_frame_size(void __iomem *base,
+	unsigned int *width, unsigned int *height)
+{
+	*width = (readl(base + S5P_JPEG_X_U_REG)<<8)|
+		readl(base + S5P_JPEG_X_L_REG);
+	*height = (readl(base + S5P_JPEG_Y_U_REG)<<8)|
+		readl(base + S5P_JPEG_Y_L_REG);
+}
+
+/*
+ * jpeg_set_frame_size - program the image dimensions
+ * @width:  horizontal resolution, split over the X upper/lower regs
+ * @height: vertical resolution, split over the Y upper/lower regs
+ */
+void jpeg_set_frame_size(void __iomem *base,
+	unsigned int width, unsigned int height)
+{
+	/* Horizontal resolution */
+	writel((width >> 8), base + S5P_JPEG_X_U_REG);
+	writel(width, base + S5P_JPEG_X_L_REG);
+
+	/* Vertical resolution */
+	writel((height >> 8), base + S5P_JPEG_Y_U_REG);
+	writel(height, base + S5P_JPEG_Y_L_REG);
+}
+
+/*
+ * jpeg_get_stream_fmt - read the subsampling mode of the parsed
+ * stream from the low three bits of the MOD register
+ *
+ * Returns JPEG_444/422/420/GRAY for modes 0-3, JPEG_RESERVED for
+ * anything else.
+ */
+enum jpeg_stream_format jpeg_get_stream_fmt(void __iomem *base)
+{
+	unsigned long jpeg_mode;
+
+	jpeg_mode = readl(base + S5P_JPEG_MOD_REG);
+
+	switch (jpeg_mode & 0x07) {
+	case 0x00:
+		return JPEG_444;
+	case 0x01:
+		return JPEG_422;
+	case 0x02:
+		return JPEG_420;
+	case 0x03:
+		return JPEG_GRAY;
+	default:
+		return JPEG_RESERVED;
+	}
+}
+
+/*
+ * jpeg_get_stream_size - read the generated stream byte count
+ *
+ * The 24-bit count is split across the upper/middle/lower CNT
+ * registers; they are read in the same order as the original and
+ * recombined here.
+ */
+unsigned int jpeg_get_stream_size(void __iomem *base)
+{
+	unsigned int upper, middle, lower;
+
+	upper = readl(base + S5P_JPEG_CNT_U_REG);
+	middle = readl(base + S5P_JPEG_CNT_M_REG);
+	lower = readl(base + S5P_JPEG_CNT_L_REG);
+
+	return (upper << 16) | (middle << 8) | lower;
+}
+
+/*
+ * jpeg_start_decode - enable interrupts and kick off decoding
+ *
+ * NOTE(review): the start sequence reads S5P_JPEG_JRSTART_REG but
+ * writes the OR-ed value to S5P_JPEG_JSTART_REG (the encode path
+ * reads and writes JSTART consistently). The enable bit still lands
+ * in JSTART, but the read of the wrong register looks like a
+ * copy-paste slip - confirm against the IP manual whether decode
+ * should use JRSTART throughout.
+ */
+void jpeg_start_decode(void __iomem *base)
+{
+	/* set jpeg interrupt */
+	writel(readl(base + S5P_JPEG_INTSE_REG) |
+		(S5P_JPEG_INTSE_REG_RSTM_INT_EN |
+		S5P_JPEG_INTSE_REG_DATA_NUM_INT_EN |
+		S5P_JPEG_INTSE_REG_FINAL_MCU_NUM_INT_EN),
+		base + S5P_JPEG_INTSE_REG);
+
+	/* start decoding */
+	writel(readl(base + S5P_JPEG_JRSTART_REG) |
+		S5P_JPEG_JRSTART_REG_ENABLE,
+		base + S5P_JPEG_JSTART_REG);
+}
+
+/*
+ * jpeg_start_encode - enable interrupts and kick off encoding
+ *
+ * Unmasks the restart-marker, data-count and final-MCU interrupts,
+ * then sets the enable bit in the JSTART register.
+ */
+void jpeg_start_encode(void __iomem *base)
+{
+	/* set jpeg interrupt */
+	writel(readl(base + S5P_JPEG_INTSE_REG) |
+		(S5P_JPEG_INTSE_REG_RSTM_INT_EN |
+		S5P_JPEG_INTSE_REG_DATA_NUM_INT_EN |
+		S5P_JPEG_INTSE_REG_FINAL_MCU_NUM_INT_EN),
+		base + S5P_JPEG_INTSE_REG);
+
+	/* start encoding */
+	writel(readl(base + S5P_JPEG_JSTART_REG) |
+		S5P_JPEG_JSTART_REG_ENABLE,
+		base + S5P_JPEG_JSTART_REG);
+}
+
+/*
+ * jpeg_get_int_status - read and return the interrupt status
+ *
+ * Reads INTST, then busy-waits until the OPR (operation) register
+ * reads zero before returning.
+ * NOTE(review): the OPR poll has no timeout - if the core never
+ * goes idle this spins forever (likely in interrupt context).
+ * Consider a bounded poll.
+ */
+unsigned int jpeg_get_int_status(void __iomem *base)
+{
+	unsigned int int_status;
+	unsigned int status;
+
+	int_status = readl(base + S5P_JPEG_INTST_REG);
+
+	do {
+		status = readl(base + S5P_JPEG_OPR_REG);
+	} while (status);
+
+	return int_status;
+}
+
+/*
+ * jpeg_clear_int - acknowledge the pending interrupt by writing the
+ * release value to the COM register
+ */
+void jpeg_clear_int(void __iomem *base)
+{
+	writel(S5P_JPEG_COM_INT_RELEASE, base + S5P_JPEG_COM_REG);
+}
+
diff --git a/drivers/media/video/samsung/jpeg/jpeg_regs.h b/drivers/media/video/samsung/jpeg/jpeg_regs.h
new file mode 100644
index 0000000..93ac2d0
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg/jpeg_regs.h
@@ -0,0 +1,46 @@
+/* linux/drivers/media/video/samsung/jpeg/jpeg_regs.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Header file of the register interface for jpeg driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __JPEG_REGS_H__
+#define __JPEG_REGS_H__
+
+#include "jpeg_core.h"
+
+void jpeg_sw_reset(void __iomem *base);
+void jpeg_set_clk_power_on(void __iomem *base);
+void jpeg_set_mode(void __iomem *base, int mode);
+void jpeg_set_dec_out_fmt(void __iomem *base,
+ enum jpeg_frame_format out_fmt);
+void jpeg_set_enc_in_fmt(void __iomem *base,
+ enum jpeg_frame_format in_fmt);
+void jpeg_set_enc_out_fmt(void __iomem *base,
+ enum jpeg_stream_format out_fmt);
+void jpeg_set_enc_dri(void __iomem *base, unsigned int value);
+void jpeg_set_enc_qtbl(void __iomem *base,
+ enum jpeg_img_quality_level level);
+void jpeg_set_enc_htbl(void __iomem *base);
+void jpeg_set_enc_coef(void __iomem *base);
+void jpeg_set_frame_addr(void __iomem *base, unsigned int fra_addr);
+void jpeg_set_stream_addr(void __iomem *base, unsigned int str_addr);
+void jpeg_get_frame_size(void __iomem *base,
+ unsigned int *width, unsigned int *height);
+void jpeg_set_frame_size(void __iomem *base,
+ unsigned int width, unsigned int height);
+enum jpeg_stream_format jpeg_get_stream_fmt(void __iomem *base);
+unsigned int jpeg_get_stream_size(void __iomem *base);
+void jpeg_start_decode(void __iomem *base);
+void jpeg_start_encode(void __iomem *base);
+unsigned int jpeg_get_int_status(void __iomem *base);
+void jpeg_clear_int(void __iomem *base);
+
+#endif /* __JPEG_REGS_H__ */
+
diff --git a/drivers/media/video/samsung/jpeg_v2x/Kconfig b/drivers/media/video/samsung/jpeg_v2x/Kconfig
new file mode 100644
index 0000000..2738ca3
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/Kconfig
@@ -0,0 +1,29 @@
+#
+# Configuration for JPEG
+#
+
+config VIDEO_JPEG_V2X
+ bool "Samsung JPEG_v2.x driver"
+ depends on VIDEO_SAMSUNG
+ default n
+ depends on VIDEO_DEV && VIDEO_V4L2
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ This is a Samsung JPEG H/W driver for V2.x
+choice
+ prompt "JPEG V2X VERSION"
+ default S5P_JPEG_V2_1
+ depends on VIDEO_JPEG_V2X
+ ---help---
+ Select version of JPEG driver
+
+config JPEG_V2_1
+ bool "JPEG 2.1"
+ ---help---
+ Use JPEG 2.1 Pegasus/Gaia evt 0.0
+
+config JPEG_V2_2
+ bool "JPEG 2.2"
+ ---help---
+ Use JPEG 2.2 Gaia evt 1.0
+endchoice
diff --git a/drivers/media/video/samsung/jpeg_v2x/Makefile b/drivers/media/video/samsung/jpeg_v2x/Makefile
new file mode 100644
index 0000000..369ee75
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/Makefile
@@ -0,0 +1,8 @@
+#################################################
+# Makefile for JPEG
+# 2009 (C) Samsung Electronics
+#################################################
+
+obj-$(CONFIG_VIDEO_JPEG_V2X) += jpeg_dev.o jpeg_dec.o jpeg_enc.o jpeg_regs.o jpeg_mem.o
+
+EXTRA_CFLAGS += -Idrivers/media/video
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_conf.h b/drivers/media/video/samsung/jpeg_v2x/jpeg_conf.h
new file mode 100644
index 0000000..6fcc276
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_conf.h
@@ -0,0 +1,92 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_conf.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Quantization table definitions for the JPEG encoder/decoder
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __JPEG_CONF_H__
+#define __JPEG_CONF_H__
+
+/* Q-table for JPEG */
+/* ITU standard Q-table */
+const unsigned int ITU_Q_tbl[4][16] = {
+ {
+ 0x01010101, 0x01020303, 0x01010101, 0x01030303, /* Y */
+ 0x01010101, 0x02030303, 0x01010101, 0x03040403,
+ 0x01010203, 0x03050504, 0x01020303, 0x04050605,
+ 0x02030404, 0x05060605, 0x04050505, 0x06050505
+ } , {
+ 0x01010102, 0x05050505, 0x01010103, 0x05050505, /* CbCr */
+ 0x01010503, 0x05050505, 0x02030505, 0x05050505,
+ 0x05050505, 0x05050505, 0x05050505, 0x05050505,
+ 0x05050505, 0x05050505, 0x05050505, 0x05050505
+ } , {
+ 0x05020205, 0x0a161e25, 0x02020307, 0x0c232521, /* Y */
+ 0x0302050a, 0x16222b22, 0x0305090e, 0x1e393326,
+ 0x06091422, 0x2a384431, 0x0a122118, 0x34454b3c,
+ 0x1d283238, 0x44525142, 0x2d3c3e40, 0x4a424441
+ } , {
+ 0x05020205, 0x251e160a, 0x07030202, 0x2125230c, /* CbCr */
+ 0x0a050203, 0x222b2216, 0x0e090503, 0x2633391e,
+ 0x22140906, 0x3144382a, 0x1821120a, 0x3c4b4534,
+ 0x3832281d, 0x42515244, 0x403e3c2d, 0x4144424a
+ }
+};
+
+/* ITU Luminance Huffman Table */
+static unsigned int ITU_H_tbl_len_DC_luminance[4] = {
+ 0x01050100, 0x01010101, 0x00000001, 0x00000000
+};
+static unsigned int ITU_H_tbl_val_DC_luminance[3] = {
+ 0x03020100, 0x07060504, 0x0b0a0908
+};
+
+/* ITU Chrominance Huffman Table */
+static unsigned int ITU_H_tbl_len_DC_chrominance[4] = {
+ 0x01010300, 0x01010101, 0x00010101, 0x00000000
+};
+static unsigned int ITU_H_tbl_val_DC_chrominance[3] = {
+ 0x03020100, 0x07060504, 0x0b0a0908
+};
+
+static unsigned int ITU_H_tbl_len_AC_luminance[4] = {
+ 0x03010200, 0x03040203, 0x04040505, 0x7d010000
+};
+
+static unsigned int ITU_H_tbl_val_AC_luminance[41] = {
+ 0x00030201, 0x12051104, 0x06413121, 0x07615113,
+ 0x32147122, 0x08a19181, 0xc1b14223, 0xf0d15215,
+ 0x72623324, 0x160a0982, 0x1a191817, 0x28272625,
+ 0x35342a29, 0x39383736, 0x4544433a, 0x49484746,
+ 0x5554534a, 0x59585756, 0x6564635a, 0x69686766,
+ 0x7574736a, 0x79787776, 0x8584837a, 0x89888786,
+ 0x9493928a, 0x98979695, 0xa3a29a99, 0xa7a6a5a4,
+ 0xb2aaa9a8, 0xb6b5b4b3, 0xbab9b8b7, 0xc5c4c3c2,
+ 0xc9c8c7c6, 0xd4d3d2ca, 0xd8d7d6d5, 0xe2e1dad9,
+ 0xe6e5e4e3, 0xeae9e8e7, 0xf4f3f2f1, 0xf8f7f6f5,
+ 0x0000faf9
+};
+
+static u32 ITU_H_tbl_len_AC_chrominance[4] = {
+ 0x02010200, 0x04030404, 0x04040507, 0x77020100
+};
+static u32 ITU_H_tbl_val_AC_chrominance[41] = {
+ 0x03020100, 0x21050411, 0x41120631, 0x71610751,
+ 0x81322213, 0x91421408, 0x09c1b1a1, 0xf0523323,
+ 0xd1726215, 0x3424160a, 0x17f125e1, 0x261a1918,
+ 0x2a292827, 0x38373635, 0x44433a39, 0x48474645,
+ 0x54534a49, 0x58575655, 0x64635a59, 0x68676665,
+ 0x74736a69, 0x78777675, 0x83827a79, 0x87868584,
+ 0x928a8988, 0x96959493, 0x9a999897, 0xa5a4a3a2,
+ 0xa9a8a7a6, 0xb4b3b2aa, 0xb8b7b6b5, 0xc3c2bab9,
+ 0xc7c6c5c4, 0xd2cac9c8, 0xd6d5d4d3, 0xdad9d8d7,
+ 0xe5e4e3e2, 0xe9e8e7e6, 0xf4f3f2ea, 0xf8f7f6f5,
+ 0x0000faf9
+};
+#endif /* __JPEG_CONF_H__ */
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_core.h b/drivers/media/video/samsung/jpeg_v2x/jpeg_core.h
new file mode 100644
index 0000000..8208235
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_core.h
@@ -0,0 +1,278 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_core.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Definition for core file of the jpeg operation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __JPEG_CORE_H__
+#define __JPEG_CORE_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+
+#include <linux/videodev2.h>
+#include <linux/videodev2_exynos_media.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-ioctl.h>
+
+#include <media/videobuf2-core.h>
+#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
+#include <media/videobuf2-cma-phys.h>
+#elif defined(CONFIG_VIDEOBUF2_ION)
+#include <media/videobuf2-ion.h>
+#endif
+#include "jpeg_mem.h"
+
+#define INT_TIMEOUT 1000
+
+#define JPEG_NUM_INST 4
+#define JPEG_MAX_PLANE 3
+
+enum jpeg_state {
+ JPEG_IDLE,
+ JPEG_SRC_ADDR,
+ JPEG_DST_ADDR,
+ JPEG_ISR,
+ JPEG_STREAM,
+};
+
+enum jpeg_mode {
+ ENCODING,
+ DECODING,
+};
+
+enum jpeg_result {
+ OK_ENC_OR_DEC,
+ ERR_PROT,
+ ERR_DEC_INVALID_FORMAT,
+ ERR_MULTI_SCAN,
+ ERR_FRAME,
+ ERR_TIME_OUT,
+ ERR_UNKNOWN,
+};
+
+enum jpeg_img_quality_level {
+ QUALITY_LEVEL_1 = 0, /* high */
+ QUALITY_LEVEL_2,
+ QUALITY_LEVEL_3,
+ QUALITY_LEVEL_4, /* low */
+};
+
+/* raw data image format */
+enum jpeg_frame_format {
+ YCRCB_444_2P,
+ YCBCR_444_2P,
+ YCBCR_444_3P,
+ YCBYCR_422_1P,
+ YCRYCB_422_1P,
+ CBYCRY_422_1P,
+ CRYCBY_422_1P,
+ YCBCR_422_2P,
+ YCRCB_422_2P,
+ YCBYCR_422_3P,
+ YCBCR_420_3P,
+ YCRCB_420_3P,
+ YCBCR_420_2P,
+ YCRCB_420_2P,
+ YCBCR_420_2P_M,
+ YCRCB_420_2P_M,
+ RGB_565,
+ RGB_888,
+ BGR_888,
+ GRAY,
+};
+
+/* jpeg data format */
+enum jpeg_stream_format {
+ JPEG_422, /* decode input, encode output */
+ JPEG_420, /* decode input, encode output */
+ JPEG_444, /* decode input*/
+ JPEG_GRAY, /* decode input*/
+ JPEG_RESERVED,
+};
+
+enum jpeg_scale_value {
+ JPEG_SCALE_NORMAL,
+ JPEG_SCALE_2,
+ JPEG_SCALE_4,
+};
+
+enum jpeg_interface {
+ M2M_OUTPUT,
+ M2M_CAPTURE,
+};
+
+enum jpeg_node_type {
+ JPEG_NODE_INVALID = -1,
+ JPEG_NODE_DECODER = 11,
+ JPEG_NODE_ENCODER = 12,
+};
+
+struct jpeg_fmt {
+ char *name;
+ unsigned int fourcc;
+ int depth[JPEG_MAX_PLANE];
+ int color;
+ int memplanes;
+ int colplanes;
+ enum jpeg_interface types;
+};
+
+struct jpeg_dec_param {
+ unsigned int in_width;
+ unsigned int in_height;
+ unsigned int out_width;
+ unsigned int out_height;
+ unsigned int size;
+ unsigned int mem_size;
+ unsigned int in_plane;
+ unsigned int out_plane;
+ unsigned int in_depth;
+ unsigned int out_depth[JPEG_MAX_PLANE];
+
+ enum jpeg_stream_format in_fmt;
+ enum jpeg_frame_format out_fmt;
+};
+
+struct jpeg_enc_param {
+ unsigned int in_width;
+ unsigned int in_height;
+ unsigned int out_width;
+ unsigned int out_height;
+ unsigned int size;
+ unsigned int in_plane;
+ unsigned int out_plane;
+ unsigned int in_depth[JPEG_MAX_PLANE];
+ unsigned int out_depth;
+
+ enum jpeg_frame_format in_fmt;
+ enum jpeg_stream_format out_fmt;
+ enum jpeg_img_quality_level quality;
+};
+
+struct jpeg_ctx {
+ spinlock_t slock;
+ struct jpeg_dev *dev;
+ struct v4l2_m2m_ctx *m2m_ctx;
+
+ union {
+ struct jpeg_dec_param dec_param;
+ struct jpeg_enc_param enc_param;
+ } param;
+
+ int index;
+ unsigned long payload[VIDEO_MAX_PLANES];
+ bool input_cacheable;
+ bool output_cacheable;
+};
+
+struct jpeg_vb2 {
+ const struct vb2_mem_ops *ops;
+ void *(*init)(struct jpeg_dev *dev);
+ void (*cleanup)(void *alloc_ctx);
+
+ unsigned long (*plane_addr)(struct vb2_buffer *vb, u32 plane_no);
+
+ int (*resume)(void *alloc_ctx);
+ void (*suspend)(void *alloc_ctx);
+
+ int (*cache_flush)(struct vb2_buffer *vb, u32 num_planes);
+ void (*set_cacheable)(void *alloc_ctx, bool cacheable);
+};
+
+struct jpeg_dev {
+ spinlock_t slock;
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_enc;
+ struct video_device *vfd_dec;
+ struct v4l2_m2m_dev *m2m_dev_enc;
+ struct v4l2_m2m_dev *m2m_dev_dec;
+ struct jpeg_ctx *ctx;
+ struct vb2_alloc_ctx *alloc_ctx;
+
+ struct platform_device *plat_dev;
+
+ struct clk *clk;
+
+ struct mutex lock;
+
+ int irq_no;
+ enum jpeg_result irq_ret;
+ wait_queue_head_t wq;
+ void __iomem *reg_base; /* register i/o */
+ enum jpeg_mode mode;
+ const struct jpeg_vb2 *vb2;
+
+ unsigned long hw_run;
+ atomic_t watchdog_cnt;
+ struct timer_list watchdog_timer;
+ struct workqueue_struct *watchdog_workqueue;
+ struct work_struct watchdog_work;
+ struct device *bus_dev;
+};
+
+enum jpeg_log {
+ JPEG_LOG_DEBUG = 0x1000,
+ JPEG_LOG_INFO = 0x0100,
+ JPEG_LOG_WARN = 0x0010,
+ JPEG_LOG_ERR = 0x0001,
+};
+
+/* debug macro */
+#define JPEG_LOG_DEFAULT (JPEG_LOG_WARN | JPEG_LOG_ERR)
+
+#define JPEG_DEBUG(fmt, ...) \
+ do { \
+ if (JPEG_LOG_DEFAULT & JPEG_LOG_DEBUG) \
+ printk(KERN_DEBUG "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define JPEG_INFO(fmt, ...) \
+ do { \
+ if (JPEG_LOG_DEFAULT & JPEG_LOG_INFO) \
+ printk(KERN_INFO "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define JPEG_WARN(fmt, ...) \
+ do { \
+ if (JPEG_LOG_DEFAULT & JPEG_LOG_WARN) \
+ printk(KERN_WARNING "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+
+#define JPEG_ERROR(fmt, ...) \
+ do { \
+ if (JPEG_LOG_DEFAULT & JPEG_LOG_ERR) \
+ printk(KERN_ERR "%s: " \
+ fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+
+#define jpeg_dbg(fmt, ...) JPEG_DEBUG(fmt, ##__VA_ARGS__)
+#define jpeg_info(fmt, ...) JPEG_INFO(fmt, ##__VA_ARGS__)
+#define jpeg_warn(fmt, ...) JPEG_WARN(fmt, ##__VA_ARGS__)
+#define jpeg_err(fmt, ...) JPEG_ERROR(fmt, ##__VA_ARGS__)
+
+/*=====================================================================*/
+const struct v4l2_ioctl_ops *get_jpeg_dec_v4l2_ioctl_ops(void);
+const struct v4l2_ioctl_ops *get_jpeg_enc_v4l2_ioctl_ops(void);
+
+int jpeg_int_pending(struct jpeg_dev *ctrl);
+
+#endif /*__JPEG_CORE_H__*/
+
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_dec.c b/drivers/media/video/samsung/jpeg_v2x/jpeg_dec.c
new file mode 100644
index 0000000..7020f39
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_dec.c
@@ -0,0 +1,542 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_dec.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Core file for Samsung Jpeg v2.x Interface driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/time.h>
+#include <linux/clk.h>
+#include <linux/semaphore.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <asm/page.h>
+
+#include <plat/regs_jpeg_v2_x.h>
+#include <mach/irqs.h>
+
+#include <media/v4l2-ioctl.h>
+
+#include "jpeg_core.h"
+#include "jpeg_dev.h"
+
+#include "jpeg_mem.h"
+#include "jpeg_regs.h"
+
+/*
+ * Static table of every pixel format the decoder handles.
+ * M2M_OUTPUT entries are compressed JPEG inputs; M2M_CAPTURE entries
+ * are the raw output formats. depth[] is bits-per-pixel per memory plane.
+ */
+static struct jpeg_fmt formats[] = {
+	{
+		.name		= "JPEG compressed format",
+		.fourcc		= V4L2_PIX_FMT_JPEG_444,
+		.depth		= {8},
+		.color		= JPEG_444,
+		.memplanes	= 1,
+		.types		= M2M_OUTPUT,
+	}, {
+		.name		= "JPEG compressed format",
+		.fourcc		= V4L2_PIX_FMT_JPEG_422,
+		.depth		= {8},
+		.color		= JPEG_422,
+		.memplanes	= 1,
+		.types		= M2M_OUTPUT,
+	}, {
+		.name		= "JPEG compressed format",
+		.fourcc		= V4L2_PIX_FMT_JPEG_420,
+		.depth		= {8},
+		.color		= JPEG_420,
+		.memplanes	= 1,
+		.types		= M2M_OUTPUT,
+	}, {
+		.name		= "JPEG compressed format",
+		.fourcc		= V4L2_PIX_FMT_JPEG_GRAY,
+		.depth		= {8},
+		.color		= JPEG_GRAY,
+		.memplanes	= 1,
+		.types		= M2M_OUTPUT,
+	}, {
+		.name		= "RGB565",
+		.fourcc		= V4L2_PIX_FMT_RGB565X,
+		.depth		= {16},
+		.color		= RGB_565,
+		.memplanes	= 1,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "XRGB-8-8-8-8, 32 bpp",
+		.fourcc		= V4L2_PIX_FMT_RGB32,
+		.depth		= {32},
+		.color		= RGB_888,
+		.memplanes	= 1,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:4:4 packed, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_YUV444_2P,
+		.depth		= {8, 16},
+		.color		= YCBCR_444_2P,
+		.memplanes	= 2,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:4:4 packed, Y/CrCb",
+		.fourcc		= V4L2_PIX_FMT_YVU444_2P,
+		.depth		= {8, 16},
+		.color		= YCRCB_444_2P,
+		.memplanes	= 2,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:4:4 packed, Y/Cb/Cr",
+		.fourcc		= V4L2_PIX_FMT_YUV444_3P,
+		.depth		= {8, 8, 8},
+		.color		= YCBCR_444_3P,
+		.memplanes	= 3,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:2:2 packed, YCrYCb",
+		.fourcc		= V4L2_PIX_FMT_YVYU,
+		.depth		= {16},
+		.color		= YCRYCB_422_1P,
+		.memplanes	= 1,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:2:2 packed, YCbYCr",
+		.fourcc		= V4L2_PIX_FMT_YUYV,
+		.depth		= {16},
+		.color		= YCBYCR_422_1P,
+		.memplanes	= 1,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:2:2 planar, Y/CrCb",
+		.fourcc		= V4L2_PIX_FMT_NV61,
+		.depth		= {8, 8},
+		.color		= YCRCB_422_2P,
+		.memplanes	= 2,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:2:2 planar, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_NV16,
+		.depth		= {8, 8},
+		.color		= YCBCR_422_2P,
+		.memplanes	= 2,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:2:0 planar, Y/CbCr",
+		.fourcc		= V4L2_PIX_FMT_NV12,
+		.depth		= {8, 4},
+		.color		= YCBCR_420_2P,
+		.memplanes	= 2,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:2:0 planar, Y/CrCb",
+		.fourcc		= V4L2_PIX_FMT_NV21,
+		.depth		= {8, 4},
+		.color		= YCRCB_420_2P,
+		.memplanes	= 2,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
+		.fourcc		= V4L2_PIX_FMT_YUV420,
+		.depth		= {8, 2, 2},
+		.color		= YCBCR_420_3P,
+		.memplanes	= 3,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:2:0 contiguous 3-planar, Y/Cr/Cb",
+		.fourcc		= V4L2_PIX_FMT_YVU420,
+		.depth		= {8, 2, 2},
+		.color		= YCRCB_420_3P,
+		.memplanes	= 3,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "Gray",
+		.fourcc		= V4L2_PIX_FMT_GREY,
+		.depth		= {8},
+		.color		= GRAY,
+		.memplanes	= 1,
+		.types		= M2M_CAPTURE,
+	},
+#ifdef CONFIG_JPEG_V2_2
+	{
+		.name		= "YUV 4:2:2 packed, CrYCbY",
+		.fourcc		= V4L2_PIX_FMT_VYUY,
+		.depth		= {16},
+		.color		= CRYCBY_422_1P,
+		.memplanes	= 1,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "YUV 4:2:2 packed, CbYCrY",
+		.fourcc		= V4L2_PIX_FMT_UYVY,
+		.depth		= {16},
+		/*
+		 * NOTE(review): same .color as the VYUY entry above even
+		 * though the name says CbYCrY — looks like a copy/paste
+		 * slip; confirm whether a CBYCRY_422_1P enum exists.
+		 */
+		.color		= CRYCBY_422_1P,
+		.memplanes	= 1,
+		.types		= M2M_CAPTURE,
+	}, {
+		.name		= "XBGR-8-8-8-8, 32 bpp",
+		.fourcc		= V4L2_PIX_FMT_BGR32,
+		.depth		= {32},
+		.color		= BGR_888,
+		.memplanes	= 1,
+		.types		= M2M_CAPTURE,
+	},
+#endif
+};
+
+/*
+ * Look up the formats[] entry whose fourcc matches the multiplanar
+ * pixelformat in @f. Returns NULL if the fourcc is not supported.
+ */
+static struct jpeg_fmt *find_format(struct v4l2_format *f)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(formats); idx++) {
+		if (formats[idx].fourcc == f->fmt.pix_mp.pixelformat)
+			return &formats[idx];
+	}
+
+	return NULL;
+}
+
+/*
+ * VIDIOC_QUERYCAP: report driver/card names and the streaming m2m
+ * capabilities of the decoder node.
+ * Relies on the V4L2 core handing in a zeroed *cap so the size-1
+ * strncpy copies stay NUL-terminated — TODO confirm against the core.
+ */
+static int jpeg_dec_vidioc_querycap(struct file *file, void *priv,
+			   struct v4l2_capability *cap)
+{
+	struct jpeg_ctx *ctx = file->private_data;
+	struct jpeg_dev *dev = ctx->dev;
+
+	strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
+	strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+	cap->bus_info[0] = 0;
+	cap->version = KERNEL_VERSION(1, 0, 0);
+	cap->capabilities = V4L2_CAP_STREAMING |
+		V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+		V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+	return 0;
+}
+
+/*
+ * VIDIOC_ENUM_FMT: report the f->index'th entry of the formats[] table.
+ * Returns -EINVAL once the index runs past the end of the table.
+ */
+int jpeg_dec_vidioc_enum_fmt(struct file *file, void *priv,
+		   struct v4l2_fmtdesc *f)
+{
+	struct jpeg_fmt *fmt;
+
+	if (f->index >= ARRAY_SIZE(formats))
+		return -EINVAL;
+
+	fmt = &formats[f->index];
+	strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+	/*
+	 * Bug fix: userspace expects the V4L2 fourcc here, not the
+	 * driver-internal color enum the original assigned.
+	 */
+	f->pixelformat = fmt->fourcc;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_G_FMT: return the currently configured format.
+ * OUTPUT queue reports the negotiated compressed input; CAPTURE queue
+ * reports the decoded frame size read back from the hardware.
+ * Returns -EINVAL for any other buffer type (the original silently
+ * returned 0 with *f untouched).
+ */
+int jpeg_dec_vidioc_g_fmt(struct file *file, void *priv,
+		struct v4l2_format *f)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct v4l2_pix_format_mplane *pixm;
+	struct jpeg_dec_param *dec_param = &ctx->param.dec_param;
+	unsigned int width, height;
+
+	pixm = &f->fmt.pix_mp;
+
+	pixm->field	= V4L2_FIELD_NONE;
+
+	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		pixm->pixelformat = dec_param->in_fmt;
+		pixm->num_planes = dec_param->in_plane;
+		pixm->width = dec_param->in_width;
+		pixm->height = dec_param->in_height;
+	} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		/* actual decoded dimensions come from the hardware */
+		jpeg_get_frame_size(ctx->dev->reg_base, &width, &height);
+		pixm->pixelformat =
+			dec_param->out_fmt;
+		pixm->num_planes = dec_param->out_plane;
+		pixm->width = width;
+		pixm->height = height;
+	} else {
+		v4l2_err(&ctx->dev->v4l2_dev,
+			"Wrong buffer/video queue type (%d)\n", f->type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * VIDIOC_TRY_FMT: validate a requested multiplanar format.
+ * Checks the fourcc against formats[], forces progressive field order,
+ * fills in per-plane bytesperline/sizeimage defaults and clamps the
+ * dimensions to the hardware maximum.
+ */
+static int jpeg_dec_vidioc_try_fmt(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct jpeg_fmt *fmt;
+	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+	struct jpeg_ctx *ctx = priv;
+	int i;
+
+	if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+	    f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		return -EINVAL;
+
+	fmt = find_format(f);
+
+	if (!fmt) {
+		v4l2_err(&ctx->dev->v4l2_dev,
+			 "Fourcc format (0x%08x) invalid.\n",
+			 f->fmt.pix.pixelformat);
+		return -EINVAL;
+	}
+
+	if (pix->field == V4L2_FIELD_ANY)
+		pix->field = V4L2_FIELD_NONE;
+	else if (V4L2_FIELD_NONE != pix->field)
+		return -EINVAL;
+
+	pix->num_planes = fmt->memplanes;
+
+	for (i = 0; i < pix->num_planes; ++i) {
+		int bpl = pix->plane_fmt[i].bytesperline;
+
+		jpeg_dbg("[%d] bpl: %d, depth: %d, w: %d, h: %d",
+		    i, bpl, fmt->depth[i], pix->width, pix->height);
+		/* reject nonsense strides and derive one from the depth */
+		if (!bpl || (bpl * 8 / fmt->depth[i]) > pix->width)
+			bpl = (pix->width * fmt->depth[i]) >> 3;
+
+		if (!pix->plane_fmt[i].sizeimage)
+			pix->plane_fmt[i].sizeimage = pix->height * bpl;
+
+		pix->plane_fmt[i].bytesperline = bpl;
+
+		jpeg_dbg("[%d]: bpl: %d, sizeimage: %d",
+		    i, pix->plane_fmt[i].bytesperline,
+		    pix->plane_fmt[i].sizeimage);
+	}
+
+	/*
+	 * Clamp through pix_mp for consistency with the rest of this
+	 * function (the original wrote f->fmt.pix, which only worked
+	 * because the union members share their leading width/height
+	 * layout). NOTE: plane sizes above are computed from the
+	 * unclamped dimensions — pre-existing ordering, left as-is.
+	 */
+	if (pix->height > MAX_JPEG_HEIGHT)
+		pix->height = MAX_JPEG_HEIGHT;
+
+	if (pix->width > MAX_JPEG_WIDTH)
+		pix->width = MAX_JPEG_WIDTH;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_S_FMT (capture): commit the decoded-output format.
+ * Stores per-plane payload sizes and the output geometry in the
+ * per-context decode parameters. Fails while the queue has buffers.
+ */
+static int jpeg_dec_vidioc_s_fmt_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct vb2_queue *vq;
+	struct v4l2_pix_format_mplane *pix;
+	struct jpeg_fmt *fmt;
+	int ret;
+	int i;
+
+	ret = jpeg_dec_vidioc_try_fmt(file, priv, f);
+	if (ret)
+		return ret;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->dev->v4l2_dev, "queue (%d) busy\n", f->type);
+		return -EBUSY;
+	}
+
+	/* TODO: width & height has to be multiple of two */
+	pix = &f->fmt.pix_mp;
+	fmt = find_format(f);
+	/* cannot fail after a successful try_fmt, but guard anyway */
+	if (!fmt)
+		return -EINVAL;
+
+	for (i = 0; i < fmt->memplanes; i++) {
+		ctx->payload[i] =
+			pix->plane_fmt[i].bytesperline * pix->height;
+		ctx->param.dec_param.out_depth[i] = fmt->depth[i];
+	}
+	ctx->param.dec_param.out_width = pix->width;
+	ctx->param.dec_param.out_height = pix->height;
+	ctx->param.dec_param.out_plane = fmt->memplanes;
+	ctx->param.dec_param.out_fmt = fmt->color;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_S_FMT (output): commit the compressed-input format.
+ * Records input geometry and the bitstream size (in 32-byte units,
+ * as the hardware counts it). Fails while the queue has buffers.
+ */
+static int jpeg_dec_vidioc_s_fmt_out(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct vb2_queue *vq;
+	struct v4l2_pix_format_mplane *pix;
+	struct jpeg_fmt *fmt;
+	int ret;
+	int i;
+
+	ret = jpeg_dec_vidioc_try_fmt(file, priv, f);
+	if (ret)
+		return ret;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->dev->v4l2_dev, "queue (%d) busy\n", f->type);
+		return -EBUSY;
+	}
+
+	/* TODO: width & height has to be multiple of two */
+	pix = &f->fmt.pix_mp;
+	fmt = find_format(f);
+	/* cannot fail after a successful try_fmt, but guard anyway */
+	if (!fmt)
+		return -EINVAL;
+
+	for (i = 0; i < fmt->memplanes; i++)
+		ctx->payload[i] =
+			pix->plane_fmt[i].bytesperline * pix->height;
+
+	ctx->param.dec_param.in_width = pix->width;
+	ctx->param.dec_param.in_height = pix->height;
+	ctx->param.dec_param.in_plane = fmt->memplanes;
+	ctx->param.dec_param.in_depth = fmt->depth[0];
+	ctx->param.dec_param.in_fmt = fmt->color;
+	/* hardware takes the bitstream length in 32-byte units, rounded up */
+	ctx->param.dec_param.size =
+		DIV_ROUND_UP(pix->plane_fmt[0].sizeimage, 32);
+	ctx->param.dec_param.mem_size = pix->plane_fmt[0].sizeimage;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_REQBUFS: propagate the per-context cacheability choice to the
+ * vb2 allocator for the addressed queue, then defer to the m2m core.
+ */
+static int jpeg_dec_m2m_reqbufs(struct file *file, void *priv,
+			  struct v4l2_requestbuffers *reqbufs)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct vb2_queue *vq;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, reqbufs->type);
+	/* guard: unknown buffer types yield no queue */
+	if (!vq)
+		return -EINVAL;
+
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+		ctx->dev->vb2->set_cacheable(ctx->dev->alloc_ctx, ctx->input_cacheable);
+	else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+		ctx->dev->vb2->set_cacheable(ctx->dev->alloc_ctx, ctx->output_cacheable);
+
+	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+/* Thin wrappers delegating the standard buffer ioctls to the m2m core. */
+static int jpeg_dec_m2m_querybuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct jpeg_ctx *ctx = priv;
+	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int jpeg_dec_m2m_qbuf(struct file *file, void *priv,
+			  struct v4l2_buffer *buf)
+{
+	struct jpeg_ctx *ctx = priv;
+	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int jpeg_dec_m2m_dqbuf(struct file *file, void *priv,
+			   struct v4l2_buffer *buf)
+{
+	struct jpeg_ctx *ctx = priv;
+	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int jpeg_dec_m2m_streamon(struct file *file, void *priv,
+			   enum v4l2_buf_type type)
+{
+	struct jpeg_ctx *ctx = priv;
+	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int jpeg_dec_m2m_streamoff(struct file *file, void *priv,
+			    enum v4l2_buf_type type)
+{
+	struct jpeg_ctx *ctx = priv;
+	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+/*
+ * VIDIOC_S_JPEGCOMP on the decoder node.
+ * NOTE(review): this writes enc_param.quality even though it lives in
+ * the decoder ioctl table — possibly intentional (shared context) but
+ * worth confirming; quality has no meaning for decode.
+ */
+static int vidioc_dec_s_jpegcomp(struct file *file, void *priv,
+			struct v4l2_jpegcompression *jpegcomp)
+{
+	struct jpeg_ctx *ctx = priv;
+	ctx->param.enc_param.quality = jpegcomp->quality;
+	return 0;
+}
+
+/*
+ * VIDIOC_S_CTRL: map V4L2_CID_CACHEABLE values onto the per-context
+ * input/output cacheability flags:
+ *   0 - input/output noncacheable
+ *   1 - input/output cacheable
+ *   2 - input cacheable / output noncacheable
+ *   3 - input noncacheable / output cacheable
+ * Any other value falls back to fully noncacheable. Unknown control
+ * ids are logged but still return 0 (kept from the original).
+ */
+static int jpeg_dec_vidioc_s_ctrl(struct file *file, void *priv,
+			struct v4l2_control *ctrl)
+{
+	struct jpeg_ctx *ctx = priv;
+
+	switch (ctrl->id) {
+	case V4L2_CID_CACHEABLE:
+		switch (ctrl->value) {
+		case 1:
+			ctx->input_cacheable = 1;
+			ctx->output_cacheable = 1;
+			break;
+		case 2:
+			ctx->input_cacheable = 1;
+			ctx->output_cacheable = 0;
+			break;
+		case 3:
+			ctx->input_cacheable = 0;
+			ctx->output_cacheable = 1;
+			break;
+		default:	/* 0 and out-of-range values */
+			ctx->input_cacheable = 0;
+			ctx->output_cacheable = 0;
+			break;
+		}
+		break;
+	default:
+		v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+		break;
+	}
+
+	return 0;
+}
+
+/* V4L2 ioctl dispatch table for the decoder video node. */
+static const struct v4l2_ioctl_ops jpeg_dec_ioctl_ops = {
+	.vidioc_querycap		= jpeg_dec_vidioc_querycap,
+
+	.vidioc_enum_fmt_vid_cap_mplane	= jpeg_dec_vidioc_enum_fmt,
+	.vidioc_enum_fmt_vid_out_mplane	= jpeg_dec_vidioc_enum_fmt,
+
+	.vidioc_g_fmt_vid_cap_mplane	= jpeg_dec_vidioc_g_fmt,
+	.vidioc_g_fmt_vid_out_mplane	= jpeg_dec_vidioc_g_fmt,
+
+	.vidioc_try_fmt_vid_cap_mplane	= jpeg_dec_vidioc_try_fmt,
+	.vidioc_try_fmt_vid_out_mplane	= jpeg_dec_vidioc_try_fmt,
+	.vidioc_s_fmt_vid_cap_mplane	= jpeg_dec_vidioc_s_fmt_cap,
+	.vidioc_s_fmt_vid_out_mplane	= jpeg_dec_vidioc_s_fmt_out,
+
+	.vidioc_reqbufs			= jpeg_dec_m2m_reqbufs,
+	.vidioc_querybuf		= jpeg_dec_m2m_querybuf,
+	.vidioc_qbuf			= jpeg_dec_m2m_qbuf,
+	.vidioc_dqbuf			= jpeg_dec_m2m_dqbuf,
+	.vidioc_streamon		= jpeg_dec_m2m_streamon,
+	.vidioc_streamoff		= jpeg_dec_m2m_streamoff,
+	.vidioc_s_jpegcomp		= vidioc_dec_s_jpegcomp,
+	.vidioc_s_ctrl			= jpeg_dec_vidioc_s_ctrl,
+};
+/* Accessor used by jpeg_dev.c when registering the decoder node. */
+const struct v4l2_ioctl_ops *get_jpeg_dec_v4l2_ioctl_ops(void)
+{
+	return &jpeg_dec_ioctl_ops;
+}
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_dev.c b/drivers/media/video/samsung/jpeg_v2x/jpeg_dev.c
new file mode 100644
index 0000000..1e6b085
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_dev.c
@@ -0,0 +1,1122 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_dev.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Core file for Samsung Jpeg v2.x Interface driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/time.h>
+#include <linux/clk.h>
+#include <linux/semaphore.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <asm/page.h>
+
+#include <plat/regs_jpeg_v2_x.h>
+#include <plat/cpu.h>
+#include <mach/irqs.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+
+#include <media/v4l2-ioctl.h>
+#include <mach/dev.h>
+
+#include "jpeg_core.h"
+#include "jpeg_dev.h"
+
+#include "jpeg_mem.h"
+#include "jpeg_regs.h"
+
+#if defined (CONFIG_JPEG_V2_1)
+/*
+ * Periodic watchdog timer callback. While the hardware is running
+ * (hw_run bit set) it counts ticks; once the count reaches
+ * JPEG_WATCHDOG_CNT the recovery worker is queued. The timer re-arms
+ * itself every JPEG_WATCHDOG_INTERVAL ms.
+ */
+void jpeg_watchdog(unsigned long arg)
+{
+	struct jpeg_dev *dev = (struct jpeg_dev *)arg;
+
+	printk(KERN_DEBUG "jpeg_watchdog\n");
+	if (test_bit(0, &dev->hw_run)) {
+		atomic_inc(&dev->watchdog_cnt);
+		printk(KERN_DEBUG "jpeg_watchdog_count.\n");
+	}
+
+	if (atomic_read(&dev->watchdog_cnt) >= JPEG_WATCHDOG_CNT)
+		queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
+
+	/* mod_timer() is the idiomatic re-arm (was expires + add_timer) */
+	mod_timer(&dev->watchdog_timer, jiffies +
+			msecs_to_jiffies(JPEG_WATCHDOG_INTERVAL));
+}
+
+/*
+ * Watchdog recovery worker: the hardware is assumed stuck, so fail the
+ * in-flight src/dst buffers with VB2_BUF_STATE_ERROR and finish the
+ * current m2m job so the queue can make progress again.
+ */
+static void jpeg_watchdog_worker(struct work_struct *work)
+{
+	struct jpeg_dev *dev;
+	struct jpeg_ctx *ctx;
+	unsigned long flags;
+	struct vb2_buffer *src_vb, *dst_vb;
+
+	printk(KERN_DEBUG "jpeg_watchdog_worker\n");
+	dev = container_of(work, struct jpeg_dev, watchdog_work);
+
+	clear_bit(0, &dev->hw_run);
+	if (dev->mode == ENCODING)
+		ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev_enc);
+	else
+		ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev_dec);
+
+	if (ctx) {
+		spin_lock_irqsave(&ctx->slock, flags);
+		src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+		dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+		/* either queue may already be empty; don't deref NULL */
+		if (src_vb)
+			v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+		if (dst_vb)
+			v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+		if (dev->mode == ENCODING)
+			v4l2_m2m_job_finish(dev->m2m_dev_enc, ctx->m2m_ctx);
+		else
+			v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+		spin_unlock_irqrestore(&ctx->slock, flags);
+	} else {
+		printk(KERN_ERR "watchdog_ctx is NULL\n");
+	}
+}
+#endif
+/*
+ * vb2 queue_setup for the decoder: plane count and per-plane sizes come
+ * from the parameters committed by S_FMT. Output planes get the full
+ * bitstream buffer size; capture planes are sized from width*height*depth.
+ */
+static int jpeg_dec_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+			  unsigned int *num_planes, unsigned long sizes[],
+			  void *allocators[])
+{
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+
+	int i;
+
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		*num_planes = ctx->param.dec_param.in_plane;
+		for (i = 0; i < ctx->param.dec_param.in_plane; i++) {
+			sizes[i] = ctx->param.dec_param.mem_size;
+			allocators[i] = ctx->dev->alloc_ctx;
+		}
+	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		*num_planes = ctx->param.dec_param.out_plane;
+		for (i = 0; i < ctx->param.dec_param.out_plane; i++) {
+			sizes[i] = (ctx->param.dec_param.out_width *
+				ctx->param.dec_param.out_height *
+				ctx->param.dec_param.out_depth[i]) / 8;
+			allocators[i] = ctx->dev->alloc_ctx;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * vb2 buf_prepare for the decoder: flush CPU caches for cacheable
+ * buffers and set each plane's payload to the size recorded at S_FMT.
+ */
+static int jpeg_dec_buf_prepare(struct vb2_buffer *vb)
+{
+	int i;
+	int num_plane = 0;
+
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		num_plane = ctx->param.dec_param.in_plane;
+		if (ctx->input_cacheable == 1)
+			ctx->dev->vb2->cache_flush(vb, num_plane);
+	} else if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		num_plane = ctx->param.dec_param.out_plane;
+		if (ctx->output_cacheable == 1)
+			ctx->dev->vb2->cache_flush(vb, num_plane);
+	}
+
+	for (i = 0; i < num_plane; i++)
+		vb2_set_plane_payload(vb, i, ctx->payload[i]);
+
+	return 0;
+}
+
+/* Hand a prepared buffer to the m2m core's ready queue. */
+static void jpeg_dec_buf_queue(struct vb2_buffer *vb)
+{
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	if (ctx->m2m_ctx)
+		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+/* vb2 wait_prepare/wait_finish pair: drop/retake the device mutex. */
+static void jpeg_dec_lock(struct vb2_queue *vq)
+{
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+	mutex_lock(&ctx->dev->lock);
+}
+
+static void jpeg_dec_unlock(struct vb2_queue *vq)
+{
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+	mutex_unlock(&ctx->dev->lock);
+}
+
+/* On streamoff, let the m2m core schedule any other pending job. */
+static int jpeg_dec_stop_streaming(struct vb2_queue *q)
+{
+	struct jpeg_ctx *ctx = q->drv_priv;
+	struct jpeg_dev *dev = ctx->dev;
+
+	v4l2_m2m_get_next_job(dev->m2m_dev_dec, ctx->m2m_ctx);
+
+	return 0;
+}
+
+/*
+ * vb2 queue_setup for the encoder. Output (raw input) planes are sized
+ * from width*height*depth; the capture (compressed) plane gets a 2.2x
+ * worst-case JPEG size margin.
+ */
+static int jpeg_enc_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+			  unsigned int *num_planes, unsigned long sizes[],
+			  void *allocators[])
+{
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+
+	int i;
+	if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		*num_planes = ctx->param.enc_param.in_plane;
+		for (i = 0; i < ctx->param.enc_param.in_plane; i++) {
+			sizes[i] = (ctx->param.enc_param.in_width *
+				ctx->param.enc_param.in_height *
+				ctx->param.enc_param.in_depth[i]) / 8;
+			allocators[i] = ctx->dev->alloc_ctx;
+		}
+
+	} else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		*num_planes = ctx->param.enc_param.out_plane;
+		/* bug fix: loop bound was in_plane, mismatching *num_planes */
+		for (i = 0; i < ctx->param.enc_param.out_plane; i++) {
+			sizes[i] = (ctx->param.enc_param.out_width *
+				ctx->param.enc_param.out_height *
+				ctx->param.enc_param.out_depth * 22 / 10) / 8;
+			allocators[i] = ctx->dev->alloc_ctx;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * vb2 buf_prepare for the encoder: flush CPU caches for cacheable
+ * buffers and set each plane's payload to the size recorded at S_FMT.
+ */
+static int jpeg_enc_buf_prepare(struct vb2_buffer *vb)
+{
+	int i;
+	int num_plane = 0;
+
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+		num_plane = ctx->param.enc_param.in_plane;
+		if (ctx->input_cacheable == 1)
+			ctx->dev->vb2->cache_flush(vb, num_plane);
+	} else if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+		num_plane = ctx->param.enc_param.out_plane;
+		if (ctx->output_cacheable == 1)
+			ctx->dev->vb2->cache_flush(vb, num_plane);
+	}
+
+	for (i = 0; i < num_plane; i++)
+		vb2_set_plane_payload(vb, i, ctx->payload[i]);
+
+	return 0;
+}
+
+/* Hand a prepared buffer to the m2m core's ready queue. */
+static void jpeg_enc_buf_queue(struct vb2_buffer *vb)
+{
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+	if (ctx->m2m_ctx)
+		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+/* vb2 wait_prepare/wait_finish pair: drop/retake the device mutex. */
+static void jpeg_enc_lock(struct vb2_queue *vq)
+{
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+	mutex_lock(&ctx->dev->lock);
+}
+
+static void jpeg_enc_unlock(struct vb2_queue *vq)
+{
+	struct jpeg_ctx *ctx = vb2_get_drv_priv(vq);
+	mutex_unlock(&ctx->dev->lock);
+}
+
+/* On streamoff, let the m2m core schedule any other pending job. */
+static int jpeg_enc_stop_streaming(struct vb2_queue *q)
+{
+	struct jpeg_ctx *ctx = q->drv_priv;
+	struct jpeg_dev *dev = ctx->dev;
+
+	v4l2_m2m_get_next_job(dev->m2m_dev_enc, ctx->m2m_ctx);
+
+	return 0;
+}
+
+/* vb2 queue operation tables for the encoder and decoder queues. */
+static struct vb2_ops jpeg_enc_vb2_qops = {
+	.queue_setup		= jpeg_enc_queue_setup,
+	.buf_prepare		= jpeg_enc_buf_prepare,
+	.buf_queue		= jpeg_enc_buf_queue,
+	.wait_prepare		= jpeg_enc_lock,
+	.wait_finish		= jpeg_enc_unlock,
+	.stop_streaming		= jpeg_enc_stop_streaming,
+};
+
+static struct vb2_ops jpeg_dec_vb2_qops = {
+	.queue_setup		= jpeg_dec_queue_setup,
+	.buf_prepare		= jpeg_dec_buf_prepare,
+	.buf_queue		= jpeg_dec_buf_queue,
+	.wait_prepare		= jpeg_dec_lock,
+	.wait_finish		= jpeg_dec_unlock,
+	.stop_streaming		= jpeg_dec_stop_streaming,
+};
+
+/*
+ * Identify whether an opened file refers to the decoder or encoder
+ * video node, keyed off the fixed video device minor numbers.
+ */
+static inline enum jpeg_node_type jpeg_get_node_type(struct file *file)
+{
+	struct video_device *vdev = video_devdata(file);
+
+	if (!vdev) {
+		jpeg_err("failed to get video_device\n");
+		return JPEG_NODE_INVALID;
+	}
+
+	jpeg_dbg("video_device index: %d\n", vdev->num);
+
+	/* node numbers double as the enum values (11 = dec, 12 = enc) */
+	if (vdev->num == JPEG_NODE_DECODER)
+		return JPEG_NODE_DECODER;
+	else if (vdev->num == JPEG_NODE_ENCODER)
+		return JPEG_NODE_ENCODER;
+	else
+		return JPEG_NODE_INVALID;
+}
+
+/*
+ * m2m queue-init callback (decoder): configure the OUTPUT (bitstream
+ * in) and CAPTURE (raw out) vb2 queues with the decoder qops and the
+ * platform-selected vb2 memory ops.
+ */
+static int queue_init_dec(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq)
+{
+	struct jpeg_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &jpeg_dec_vb2_qops;
+	src_vq->mem_ops = ctx->dev->vb2->ops;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &jpeg_dec_vb2_qops;
+	dst_vq->mem_ops = ctx->dev->vb2->ops;
+
+	return vb2_queue_init(dst_vq);
+}
+
+/* Encoder counterpart of queue_init_dec(), using the encoder qops. */
+static int queue_init_enc(void *priv, struct vb2_queue *src_vq,
+		      struct vb2_queue *dst_vq)
+{
+	struct jpeg_ctx *ctx = priv;
+	int ret;
+
+	memset(src_vq, 0, sizeof(*src_vq));
+	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	src_vq->drv_priv = ctx;
+	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	src_vq->ops = &jpeg_enc_vb2_qops;
+	src_vq->mem_ops = ctx->dev->vb2->ops;
+
+	ret = vb2_queue_init(src_vq);
+	if (ret)
+		return ret;
+
+	memset(dst_vq, 0, sizeof(*dst_vq));
+	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
+	dst_vq->drv_priv = ctx;
+	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+	dst_vq->ops = &jpeg_enc_vb2_qops;
+	dst_vq->mem_ops = ctx->dev->vb2->ops;
+
+	return vb2_queue_init(dst_vq);
+}
+/*
+ * open() handler shared by both video nodes: allocate a per-fd context,
+ * bind it to the right m2m device (dec vs enc) and power up the block.
+ */
+static int jpeg_m2m_open(struct file *file)
+{
+	struct jpeg_dev *dev = video_drvdata(file);
+	struct jpeg_ctx *ctx = NULL;
+	int ret = 0;
+	enum jpeg_node_type node;
+
+	node = jpeg_get_node_type(file);
+
+	if (node == JPEG_NODE_INVALID) {
+		jpeg_err("cannot specify node type\n");
+		ret = -ENOENT;
+		/* ctx is still NULL here; kfree(NULL) below is a no-op */
+		goto err_node_type;
+	}
+
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	file->private_data = ctx;
+	ctx->dev = dev;
+
+	if (node == JPEG_NODE_DECODER)
+		ctx->m2m_ctx =
+			v4l2_m2m_ctx_init(dev->m2m_dev_dec, ctx,
+				queue_init_dec);
+	else
+		ctx->m2m_ctx =
+			v4l2_m2m_ctx_init(dev->m2m_dev_enc, ctx,
+				queue_init_enc);
+
+	if (IS_ERR(ctx->m2m_ctx)) {
+		int err = PTR_ERR(ctx->m2m_ctx);
+		kfree(ctx);
+		return err;
+	}
+
+	/*
+	 * Power management: EXYNOS5250 enables the clock and locks the bus
+	 * frequency directly; other SoCs rely on runtime PM.
+	 */
+#ifdef CONFIG_PM_RUNTIME
+#if defined (CONFIG_CPU_EXYNOS5250)
+	clk_enable(dev->clk);
+	dev->vb2->resume(dev->alloc_ctx);
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	/* lock bus frequency */
+	dev_lock(dev->bus_dev, &dev->plat_dev->dev, BUSFREQ_400MHZ);
+#endif
+#else
+	pm_runtime_get_sync(&dev->plat_dev->dev);
+#endif
+#endif
+
+	return 0;
+
+err_node_type:
+	kfree(ctx);
+	return ret;
+}
+
+/*
+ * release() handler: tear down the m2m context, stop the watchdog if
+ * the hardware is idle, and undo the power-up done in open().
+ *
+ * NOTE(review): del_timer_sync() is called while holding slock with
+ * IRQs disabled; if the timer callback ever contends for the same data
+ * this can deadlock — verify against the timer handler.
+ */
+static int jpeg_m2m_release(struct file *file)
+{
+	struct jpeg_ctx *ctx = file->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->dev->slock, flags);
+#ifdef CONFIG_JPEG_V2_1
+	if (test_bit(0, &ctx->dev->hw_run) == 0)
+		del_timer_sync(&ctx->dev->watchdog_timer);
+#endif
+	v4l2_m2m_ctx_release(ctx->m2m_ctx);
+	spin_unlock_irqrestore(&ctx->dev->slock, flags);
+
+	/* mirror of the power-up sequence in jpeg_m2m_open() */
+#ifdef CONFIG_PM_RUNTIME
+#if defined (CONFIG_CPU_EXYNOS5250)
+	ctx->dev->vb2->suspend(ctx->dev->alloc_ctx);
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	/* Unlock bus frequency */
+	dev_unlock(ctx->dev->bus_dev, &ctx->dev->plat_dev->dev);
+#endif
+	clk_disable(ctx->dev->clk);
+#else
+	pm_runtime_put_sync(&ctx->dev->plat_dev->dev);
+#endif
+#endif
+	kfree(ctx);
+
+	return 0;
+}
+
+/* poll(): delegate readiness to the m2m core's buffer queues. */
+static unsigned int jpeg_m2m_poll(struct file *file,
+				     struct poll_table_struct *wait)
+{
+	struct jpeg_ctx *ctx = file->private_data;
+
+	return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+
+/* mmap(): delegate buffer mapping to the m2m core. */
+static int jpeg_m2m_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct jpeg_ctx *ctx = file->private_data;
+
+	return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
+}
+
+/* File operations shared by both video nodes. */
+static const struct v4l2_file_operations jpeg_fops = {
+	.owner		= THIS_MODULE,
+	.open		= jpeg_m2m_open,
+	.release	= jpeg_m2m_release,
+	.poll		= jpeg_m2m_poll,
+	.unlocked_ioctl = video_ioctl2,
+	.mmap		= jpeg_m2m_mmap,
+};
+
+/* Video node templates; minors match jpeg_get_node_type()'s mapping. */
+static struct video_device jpeg_enc_videodev = {
+	.name = JPEG_ENC_NAME,
+	.fops = &jpeg_fops,
+	.minor = 12,
+	.release = video_device_release,
+};
+
+static struct video_device jpeg_dec_videodev = {
+	.name = JPEG_DEC_NAME,
+	.fops = &jpeg_fops,
+	.minor = 11,
+	.release = video_device_release,
+};
+
+/*
+ * m2m device_run for encoding: program the whole register sequence
+ * (reset, IRQs, huffman/quant tables, geometry, buffer addresses) and
+ * kick the hardware. Register order follows the hardware programming
+ * model — do not reorder.
+ */
+static void jpeg_device_enc_run(void *priv)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct jpeg_dev *dev = ctx->dev;
+	struct jpeg_enc_param enc_param;
+	struct vb2_buffer *vb = NULL;
+	unsigned long flags;
+
+	dev = ctx->dev;
+	spin_lock_irqsave(&ctx->dev->slock, flags);
+
+	dev->mode = ENCODING;
+	enc_param = ctx->param.enc_param;
+
+	jpeg_sw_reset(dev->reg_base);
+	jpeg_set_interrupt(dev->reg_base);
+	jpeg_set_huf_table_enable(dev->reg_base, 1);
+	jpeg_set_enc_tbl(dev->reg_base);
+	jpeg_set_encode_tbl_select(dev->reg_base, enc_param.quality);
+	jpeg_set_stream_size(dev->reg_base,
+		enc_param.in_width, enc_param.in_height);
+	jpeg_set_enc_out_fmt(dev->reg_base, enc_param.out_fmt);
+	jpeg_set_enc_in_fmt(dev->reg_base, enc_param.in_fmt);
+	/* destination: compressed stream buffer */
+	vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	jpeg_set_stream_buf_address(dev->reg_base, dev->vb2->plane_addr(vb, 0));
+
+	/* source: raw frame, 1-3 planes depending on the input format */
+	vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	if (enc_param.in_plane == 1)
+		jpeg_set_frame_buf_address(dev->reg_base,
+		enc_param.in_fmt, dev->vb2->plane_addr(vb, 0), 0, 0);
+	if (enc_param.in_plane == 2)
+		jpeg_set_frame_buf_address(dev->reg_base,
+			enc_param.in_fmt, dev->vb2->plane_addr(vb, 0),
+			dev->vb2->plane_addr(vb, 1), 0);
+	if (enc_param.in_plane == 3)
+		jpeg_set_frame_buf_address(dev->reg_base,
+			enc_param.in_fmt, dev->vb2->plane_addr(vb, 0),
+			dev->vb2->plane_addr(vb, 1), dev->vb2->plane_addr(vb, 2));
+
+	jpeg_set_encode_hoff_cnt(dev->reg_base, enc_param.out_fmt);
+
+#ifdef CONFIG_JPEG_V2_2
+	/* hardware timeout guard, scaled by the frame size */
+	jpeg_set_timer_count(dev->reg_base, enc_param.in_width * enc_param.in_height * 32 + 0xff);
+#endif
+	jpeg_set_enc_dec_mode(dev->reg_base, ENCODING);
+
+	spin_unlock_irqrestore(&ctx->dev->slock, flags);
+}
+
+/*
+ * m2m device_run for decoding: arm the watchdog (V2.1), program the
+ * register sequence (reset, IRQs, stream/frame addresses, optional 1/2
+ * or 1/4 downscale, bitstream size) and kick the hardware. Register
+ * order follows the hardware programming model — do not reorder.
+ */
+static void jpeg_device_dec_run(void *priv)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct jpeg_dev *dev = ctx->dev;
+	struct jpeg_dec_param dec_param;
+	struct vb2_buffer *vb = NULL;
+	unsigned long flags;
+
+	dev = ctx->dev;
+
+	spin_lock_irqsave(&ctx->dev->slock, flags);
+
+#ifdef CONFIG_JPEG_V2_1
+	printk(KERN_DEBUG "dec_run.\n");
+
+	/* (re)arm the stall watchdog if it is not already pending */
+	if (timer_pending(&ctx->dev->watchdog_timer) == 0) {
+		ctx->dev->watchdog_timer.expires = jiffies +
+			msecs_to_jiffies(JPEG_WATCHDOG_INTERVAL);
+		add_timer(&ctx->dev->watchdog_timer);
+	}
+
+	set_bit(0, &ctx->dev->hw_run);
+#endif
+	dev->mode = DECODING;
+	dec_param = ctx->param.dec_param;
+
+	jpeg_sw_reset(dev->reg_base);
+	jpeg_set_interrupt(dev->reg_base);
+
+	jpeg_set_encode_tbl_select(dev->reg_base, 0);
+
+	/* source: compressed stream buffer */
+	vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+	jpeg_set_stream_buf_address(dev->reg_base, dev->vb2->plane_addr(vb, 0));
+
+	/* destination: raw frame, 1-3 planes depending on output format */
+	vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+	if (dec_param.out_plane == 1)
+		jpeg_set_frame_buf_address(dev->reg_base,
+		dec_param.out_fmt, dev->vb2->plane_addr(vb, 0), 0, 0);
+	else if (dec_param.out_plane == 2)
+		jpeg_set_frame_buf_address(dev->reg_base,
+		dec_param.out_fmt, dev->vb2->plane_addr(vb, 0), dev->vb2->plane_addr(vb, 1), 0);
+	else if (dec_param.out_plane == 3)
+		jpeg_set_frame_buf_address(dev->reg_base,
+		dec_param.out_fmt, dev->vb2->plane_addr(vb, 0),
+		dev->vb2->plane_addr(vb, 1), dev->vb2->plane_addr(vb, 2));
+
+	/* hardware supports exact 1/2 and 1/4 downscaling only */
+	if (dec_param.out_width > 0 && dec_param.out_height > 0) {
+		if ((dec_param.out_width * 2 == dec_param.in_width) &&
+			(dec_param.out_height * 2 == dec_param.in_height))
+			jpeg_set_dec_scaling(dev->reg_base, JPEG_SCALE_2, JPEG_SCALE_2);
+		else if ((dec_param.out_width * 4 == dec_param.in_width) &&
+			(dec_param.out_height * 4 == dec_param.in_height))
+			jpeg_set_dec_scaling(dev->reg_base, JPEG_SCALE_4, JPEG_SCALE_4);
+		else
+			jpeg_set_dec_scaling(dev->reg_base, JPEG_SCALE_NORMAL, JPEG_SCALE_NORMAL);
+	}
+
+	jpeg_set_dec_out_fmt(dev->reg_base, dec_param.out_fmt);
+	jpeg_set_dec_bitstream_size(dev->reg_base, dec_param.size);
+#ifdef CONFIG_JPEG_V2_2
+	/* hardware timeout guard, scaled by the frame size */
+	jpeg_set_timer_count(dev->reg_base, dec_param.in_width * dec_param.in_height * 8 + 0xff);
+#endif
+	jpeg_set_enc_dec_mode(dev->reg_base, DECODING);
+
+	spin_unlock_irqrestore(&ctx->dev->slock, flags);
+}
+
+/* job_abort callbacks: simply let the m2m core pick the next job. */
+static void jpeg_job_enc_abort(void *priv)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct jpeg_dev *dev = ctx->dev;
+	v4l2_m2m_get_next_job(dev->m2m_dev_enc, ctx->m2m_ctx);
+}
+
+static void jpeg_job_dec_abort(void *priv)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct jpeg_dev *dev = ctx->dev;
+	v4l2_m2m_get_next_job(dev->m2m_dev_dec, ctx->m2m_ctx);
+}
+
+/* m2m framework hooks for the encoder and decoder devices. */
+static struct v4l2_m2m_ops jpeg_m2m_enc_ops = {
+	.device_run	= jpeg_device_enc_run,
+	.job_abort	= jpeg_job_enc_abort,
+};
+
+static struct v4l2_m2m_ops jpeg_m2m_dec_ops = {
+	.device_run	= jpeg_device_dec_run,
+	.job_abort	= jpeg_job_dec_abort,
+};
+
+/* Read and return the raw interrupt status bits from the hardware. */
+int jpeg_int_pending(struct jpeg_dev *ctrl)
+{
+	unsigned int status = jpeg_get_int_status(ctrl->reg_base);
+
+	jpeg_dbg("state(%d)\n", status);
+
+	return status;
+}
+
+/*
+ * Interrupt handler: decode the status bits into an enum jpeg_result,
+ * complete the in-flight src/dst buffers (DONE on success, ERROR on any
+ * failure) and finish the current m2m job.
+ *
+ * NOTE(review): src_vb/dst_vb from the *_buf_remove() calls are used
+ * without NULL checks — confirm the m2m core guarantees both queues are
+ * non-empty whenever this IRQ fires.
+ */
+static irqreturn_t jpeg_irq(int irq, void *priv)
+{
+	unsigned int int_status;
+	struct vb2_buffer *src_vb, *dst_vb;
+	struct jpeg_dev *ctrl = priv;
+	struct jpeg_ctx *ctx;
+
+	spin_lock(&ctrl->slock);
+
+#ifdef CONFIG_JPEG_V2_2
+	jpeg_clean_interrupt(ctrl->reg_base);
+#endif
+
+	if (ctrl->mode == ENCODING)
+		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_enc);
+	else
+		ctx = v4l2_m2m_get_curr_priv(ctrl->m2m_dev_dec);
+
+	if (ctx == 0) {
+		/* spurious IRQ with no active job: ack and reset */
+		printk(KERN_ERR "ctx is null.\n");
+		int_status = jpeg_int_pending(ctrl);
+		jpeg_sw_reset(ctrl->reg_base);
+		goto ctx_err;
+	}
+
+	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+	int_status = jpeg_int_pending(ctrl);
+
+	/* map the 6 hardware status bits to driver result codes */
+	if (int_status) {
+		switch (int_status & 0x1f) {
+		case 0x1:
+			ctrl->irq_ret = ERR_PROT;
+			break;
+		case 0x2:
+			ctrl->irq_ret = OK_ENC_OR_DEC;
+			break;
+		case 0x4:
+			ctrl->irq_ret = ERR_DEC_INVALID_FORMAT;
+			break;
+		case 0x8:
+			ctrl->irq_ret = ERR_MULTI_SCAN;
+			break;
+		case 0x10:
+			ctrl->irq_ret = ERR_FRAME;
+			break;
+		case 0x20:
+			ctrl->irq_ret = ERR_TIME_OUT;
+			break;
+		default:
+			ctrl->irq_ret = ERR_UNKNOWN;
+			break;
+		}
+	} else {
+		ctrl->irq_ret = ERR_UNKNOWN;
+	}
+
+	if (ctrl->irq_ret == OK_ENC_OR_DEC) {
+		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
+		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
+	} else {
+		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
+		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
+	}
+
+#ifdef CONFIG_JPEG_V2_1
+	/* hardware is idle again; the watchdog stops counting */
+	clear_bit(0, &ctx->dev->hw_run);
+#endif
+	if (ctrl->mode == ENCODING)
+		v4l2_m2m_job_finish(ctrl->m2m_dev_enc, ctx->m2m_ctx);
+	else
+		v4l2_m2m_job_finish(ctrl->m2m_dev_dec, ctx->m2m_ctx);
+ctx_err:
+	spin_unlock(&ctrl->slock);
+	return IRQ_HANDLED;
+}
+
+/* One-time init of the per-device mutex and wait queue; always 0. */
+static int jpeg_setup_controller(struct jpeg_dev *ctrl)
+{
+ mutex_init(&ctrl->lock);
+ init_waitqueue_head(&ctrl->wq);
+
+ return 0;
+}
+
+/*
+ * jpeg_probe - acquire resources for the JPEG IP and register one
+ * encoder and one decoder video node backed by v4l2-mem2mem.
+ *
+ * Fixes over the original flow:
+ *  - the I/O memory region is released with release_mem_region() on
+ *    the error path; it was kfree()d before, which corrupts the
+ *    platform resource tree (the resource is not a kmalloc object),
+ *  - free_irq() now uses the same dev_id that request_irq() was given
+ *    (it was NULL, so the handler was never actually freed),
+ *  - real error codes from request_irq()/clk_get() are propagated
+ *    instead of being overwritten with -ENOENT.
+ */
+static int jpeg_probe(struct platform_device *pdev)
+{
+	struct jpeg_dev *dev;
+	struct video_device *vfd;
+	struct resource *res, *mem_res;
+	int ret;
+
+	/* global structure */
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "%s: not enough memory\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_alloc;
+	}
+
+	dev->plat_dev = pdev;
+
+	/* setup jpeg control */
+	ret = jpeg_setup_controller(dev);
+	if (ret) {
+		jpeg_err("failed to setup controller\n");
+		goto err_setup;
+	}
+
+	/* memory region; keep the platform resource for cleanup */
+	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_res) {
+		jpeg_err("failed to get jpeg memory region resource\n");
+		ret = -ENOENT;
+		goto err_res;
+	}
+
+	res = request_mem_region(mem_res->start, resource_size(mem_res),
+			pdev->name);
+	if (!res) {
+		jpeg_err("failed to request jpeg io memory region\n");
+		ret = -ENOMEM;
+		goto err_region;
+	}
+
+	/* ioremap */
+	dev->reg_base = ioremap(mem_res->start, resource_size(mem_res));
+	if (!dev->reg_base) {
+		jpeg_err("failed to remap jpeg io region\n");
+		ret = -ENOENT;
+		goto err_map;
+	}
+
+	spin_lock_init(&dev->slock);
+	/* irq */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		jpeg_err("failed to request jpeg irq resource\n");
+		ret = -ENOENT;
+		goto err_irq;
+	}
+
+	dev->irq_no = res->start;
+	/* dev is the dev_id; the matching free_irq() must use it too */
+	ret = request_irq(dev->irq_no, jpeg_irq,
+			IRQF_DISABLED, pdev->name, dev);
+	if (ret != 0) {
+		jpeg_err("failed to jpeg request irq\n");
+		goto err_irq;
+	}
+
+	/* clock */
+	dev->clk = clk_get(&pdev->dev, "jpeg");
+	if (IS_ERR(dev->clk)) {
+		jpeg_err("failed to find jpeg clock source\n");
+		ret = PTR_ERR(dev->clk);
+		goto err_clk;
+	}
+
+#ifdef CONFIG_PM_RUNTIME
+#ifndef CONFIG_CPU_EXYNOS5250
+	pm_runtime_enable(&pdev->dev);
+#endif
+#endif
+
+	/* clock enable */
+	clk_enable(dev->clk);
+
+	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev, "Failed to register v4l2 device\n");
+		goto err_v4l2;
+	}
+
+	/* encoder */
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto err_vd_alloc_enc;
+	}
+
+	*vfd = jpeg_enc_videodev;
+	vfd->ioctl_ops = get_jpeg_enc_v4l2_ioctl_ops();
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 12);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev,
+			"%s(): failed to register video device\n", __func__);
+		video_device_release(vfd);
+		goto err_vd_alloc_enc;
+	}
+	v4l2_info(&dev->v4l2_dev,
+		"JPEG driver is registered to /dev/video%d\n", vfd->num);
+
+	dev->vfd_enc = vfd;
+	dev->m2m_dev_enc = v4l2_m2m_init(&jpeg_m2m_enc_ops);
+	if (IS_ERR(dev->m2m_dev_enc)) {
+		v4l2_err(&dev->v4l2_dev,
+			"failed to initialize v4l2-m2m device\n");
+		ret = PTR_ERR(dev->m2m_dev_enc);
+		goto err_m2m_init_enc;
+	}
+	video_set_drvdata(vfd, dev);
+
+	/* decoder */
+	vfd = video_device_alloc();
+	if (!vfd) {
+		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
+		ret = -ENOMEM;
+		goto err_vd_alloc_dec;
+	}
+
+	*vfd = jpeg_dec_videodev;
+	vfd->ioctl_ops = get_jpeg_dec_v4l2_ioctl_ops();
+	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 11);
+	if (ret) {
+		v4l2_err(&dev->v4l2_dev,
+			"%s(): failed to register video device\n", __func__);
+		video_device_release(vfd);
+		goto err_vd_alloc_dec;
+	}
+	v4l2_info(&dev->v4l2_dev,
+		"JPEG driver is registered to /dev/video%d\n", vfd->num);
+
+	dev->vfd_dec = vfd;
+	dev->m2m_dev_dec = v4l2_m2m_init(&jpeg_m2m_dec_ops);
+	if (IS_ERR(dev->m2m_dev_dec)) {
+		v4l2_err(&dev->v4l2_dev,
+			"failed to initialize v4l2-m2m device\n");
+		ret = PTR_ERR(dev->m2m_dev_dec);
+		goto err_m2m_init_dec;
+	}
+	video_set_drvdata(vfd, dev);
+
+	platform_set_drvdata(pdev, dev);
+
+	/* select the vb2 allocator backend compiled into the kernel */
+#ifdef CONFIG_VIDEOBUF2_CMA_PHYS
+	dev->vb2 = &jpeg_vb2_cma;
+#elif defined(CONFIG_VIDEOBUF2_ION)
+	dev->vb2 = &jpeg_vb2_ion;
+#endif
+	dev->alloc_ctx = dev->vb2->init(dev);
+
+	if (IS_ERR(dev->alloc_ctx)) {
+		ret = PTR_ERR(dev->alloc_ctx);
+		goto err_video_reg;
+	}
+
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	/* To lock bus frequency in OPP mode */
+	dev->bus_dev = dev_get("exynos-busfreq");
+#endif
+
+#ifdef CONFIG_JPEG_V2_1
+	/* watchdog recovers the IP when an operation never interrupts */
+	dev->watchdog_workqueue = create_singlethread_workqueue(JPEG_NAME);
+	INIT_WORK(&dev->watchdog_work, jpeg_watchdog_worker);
+	atomic_set(&dev->watchdog_cnt, 0);
+	init_timer(&dev->watchdog_timer);
+	dev->watchdog_timer.data = (unsigned long)dev;
+	dev->watchdog_timer.function = jpeg_watchdog;
+#endif
+	/* clock disable: the clock is re-enabled per job / runtime-PM */
+	clk_disable(dev->clk);
+
+	return 0;
+
+err_video_reg:
+	v4l2_m2m_release(dev->m2m_dev_dec);
+err_m2m_init_dec:
+	video_unregister_device(dev->vfd_dec);
+	video_device_release(dev->vfd_dec);
+err_vd_alloc_dec:
+	v4l2_m2m_release(dev->m2m_dev_enc);
+err_m2m_init_enc:
+	video_unregister_device(dev->vfd_enc);
+	video_device_release(dev->vfd_enc);
+err_vd_alloc_enc:
+	v4l2_device_unregister(&dev->v4l2_dev);
+err_v4l2:
+	clk_disable(dev->clk);
+	clk_put(dev->clk);
+err_clk:
+	free_irq(dev->irq_no, dev);
+err_irq:
+	iounmap(dev->reg_base);
+err_map:
+	release_mem_region(mem_res->start, resource_size(mem_res));
+err_region:
+err_res:
+	mutex_destroy(&dev->lock);
+err_setup:
+	kfree(dev);
+err_alloc:
+	return ret;
+}
+
+/*
+ * jpeg_remove - undo everything done in probe.
+ *
+ * Fixes over the original: free_irq() now receives the jpeg_dev
+ * pointer that was used as dev_id in request_irq() (passing pdev made
+ * the kernel refuse to release the handler), and the I/O memory region
+ * claimed with request_mem_region() is given back.
+ */
+static int jpeg_remove(struct platform_device *pdev)
+{
+	struct jpeg_dev *dev = platform_get_drvdata(pdev);
+	struct resource *res;
+#ifdef CONFIG_JPEG_V2_1
+	del_timer_sync(&dev->watchdog_timer);
+	flush_workqueue(dev->watchdog_workqueue);
+	destroy_workqueue(dev->watchdog_workqueue);
+#endif
+	v4l2_m2m_release(dev->m2m_dev_enc);
+	video_unregister_device(dev->vfd_enc);
+
+	v4l2_m2m_release(dev->m2m_dev_dec);
+	video_unregister_device(dev->vfd_dec);
+
+	v4l2_device_unregister(&dev->v4l2_dev);
+
+	dev->vb2->cleanup(dev->alloc_ctx);
+
+	free_irq(dev->irq_no, dev);
+	mutex_destroy(&dev->lock);
+	iounmap(dev->reg_base);
+
+	/* release the I/O region claimed in probe */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res)
+		release_mem_region(res->start, resource_size(res));
+
+	clk_put(dev->clk);
+#ifdef CONFIG_PM_RUNTIME
+#if defined (CONFIG_CPU_EXYNOS5250)
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	/* lock bus frequency */
+	dev_unlock(dev->bus_dev, &pdev->dev);
+#endif
+#else
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+#endif
+#endif
+	kfree(dev);
+	return 0;
+}
+
+/*
+ * Legacy platform suspend. On EXYNOS5250 the driver quiesces the vb2
+ * allocator and gates the clock itself (runtime PM is bypassed there);
+ * on other SoCs it just drops its runtime-PM reference.
+ */
+static int jpeg_suspend(struct platform_device *pdev, pm_message_t state)
+{
+#ifdef CONFIG_PM_RUNTIME
+#if defined (CONFIG_CPU_EXYNOS5250)
+ struct jpeg_dev *dev = platform_get_drvdata(pdev);
+
+ /* only quiesce when a context is open (IP may have been active) */
+ if (dev->ctx) {
+  dev->vb2->suspend(dev->alloc_ctx);
+  clk_disable(dev->clk);
+ }
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+ /* lock bus frequency */
+ dev_unlock(dev->bus_dev, &pdev->dev);
+#endif
+#else
+ pm_runtime_put_sync(&pdev->dev);
+#endif
+#endif
+ return 0;
+}
+
+/*
+ * Legacy platform resume: mirror of jpeg_suspend - re-enable the clock
+ * and resume the vb2 allocator on EXYNOS5250, otherwise take a
+ * runtime-PM reference.
+ */
+static int jpeg_resume(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_RUNTIME
+#if defined (CONFIG_CPU_EXYNOS5250)
+ struct jpeg_dev *dev = platform_get_drvdata(pdev);
+
+ if (dev->ctx) {
+  clk_enable(dev->clk);
+  dev->vb2->resume(dev->alloc_ctx);
+ }
+#else
+ pm_runtime_get_sync(&pdev->dev);
+#endif
+#endif
+ return 0;
+}
+
+/*
+ * Power-domain suspend hook: translate the struct device callback into
+ * the platform-driver suspend and propagate its result (the original
+ * computed ret and then discarded it, always returning 0).
+ */
+int jpeg_suspend_pd(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	pm_message_t state;
+
+	state.event = 0;
+	return jpeg_suspend(pdev, state);
+}
+
+/* Power-domain resume hook: forward to the platform resume routine. */
+int jpeg_resume_pd(struct device *dev)
+{
+	return jpeg_resume(to_platform_device(dev));
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime-PM suspend: drop the bus-frequency lock, detach the vb2
+ * allocator (e.g. IOMMU mapping) and gate the clock. */
+static int jpeg_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct jpeg_dev *jpeg_drv = platform_get_drvdata(pdev);
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+ /* lock bus frequency */
+ dev_unlock(jpeg_drv->bus_dev, dev);
+#endif
+ jpeg_drv->vb2->suspend(jpeg_drv->alloc_ctx);
+ /* clock disable */
+ clk_disable(jpeg_drv->clk);
+ return 0;
+}
+
+/* Runtime-PM resume: request the bus frequency, ungate the clock and
+ * re-attach the vb2 allocator. */
+static int jpeg_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct jpeg_dev *jpeg_drv = platform_get_drvdata(pdev);
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+ /* lock bus frequency */
+ dev_lock(jpeg_drv->bus_dev, &jpeg_drv->plat_dev->dev, BUSFREQ_400MHZ);
+#endif
+ clk_enable(jpeg_drv->clk);
+ jpeg_drv->vb2->resume(jpeg_drv->alloc_ctx);
+ return 0;
+}
+#endif
+
+/* dev_pm_ops used when the device hangs off a power domain */
+static const struct dev_pm_ops jpeg_pm_ops = {
+ .suspend = jpeg_suspend_pd,
+ .resume = jpeg_resume_pd,
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = jpeg_runtime_suspend,
+ .runtime_resume = jpeg_runtime_resume,
+#endif
+};
+/* Platform driver glue. EXYNOS5250 uses the legacy suspend/resume
+ * entry points directly (pm ops disabled), other SoCs use jpeg_pm_ops
+ * when runtime PM is enabled. */
+static struct platform_driver jpeg_driver = {
+ .probe = jpeg_probe,
+ .remove = jpeg_remove,
+#if defined (CONFIG_CPU_EXYNOS5250)
+ .suspend = jpeg_suspend,
+ .resume = jpeg_resume,
+#else
+#ifndef CONFIG_PM_RUNTIME
+ .suspend = jpeg_suspend,
+ .resume = jpeg_resume,
+#endif
+#endif
+ .driver = {
+  .owner = THIS_MODULE,
+  .name = JPEG_NAME,
+#ifdef CONFIG_PM_RUNTIME
+#if defined (CONFIG_CPU_EXYNOS5250)
+  .pm = NULL,
+#else
+  .pm = &jpeg_pm_ops,
+#endif
+#else
+  .pm = NULL,
+#endif
+ },
+};
+
+/*
+ * Module init: register the platform driver and propagate failure
+ * (the original always returned 0, hiding registration errors from
+ * the module loader). Routine startup logging is informational, not
+ * critical, so KERN_INFO is used.
+ */
+static int __init jpeg_init(void)
+{
+	printk(KERN_INFO "Initialize JPEG driver\n");
+
+	return platform_driver_register(&jpeg_driver);
+}
+
+/* Module exit: unregister the platform driver. */
+static void __exit jpeg_exit(void)
+{
+	platform_driver_unregister(&jpeg_driver);
+}
+
+module_init(jpeg_init);
+module_exit(jpeg_exit);
+
+/* stray trailing '>' removed from the author address */
+MODULE_AUTHOR("ym.song@samsung.com");
+MODULE_DESCRIPTION("JPEG v2.x H/W Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_dev.h b/drivers/media/video/samsung/jpeg_v2x/jpeg_dev.h
new file mode 100644
index 0000000..a708293
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_dev.h
@@ -0,0 +1,26 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_dev.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Header file for Samsung Jpeg v2.x Interface driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __JPEG_DEV_H__
+#define __JPEG_DEV_H__
+
+/* platform-device name and the encoder/decoder video node names */
+#define JPEG_NAME "s5p-jpeg"
+#define JPEG_ENC_NAME "video12"
+#define JPEG_DEC_NAME "video11"
+
+/* watchdog retry count and polling interval */
+#define JPEG_WATCHDOG_CNT 10
+#define JPEG_WATCHDOG_INTERVAL 1000
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+/* bus frequency requested while the JPEG IP is active */
+#define BUSFREQ_400MHZ 400266
+#endif
+
+#endif /*__JPEG_DEV_H__*/
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_enc.c b/drivers/media/video/samsung/jpeg_v2x/jpeg_enc.c
new file mode 100644
index 0000000..98dba01
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_enc.c
@@ -0,0 +1,572 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_enc.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Core file for Samsung Jpeg v2.x Interface driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/time.h>
+#include <linux/clk.h>
+#include <linux/semaphore.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <asm/page.h>
+
+#include <plat/regs_jpeg_v2_x.h>
+#include <mach/irqs.h>
+
+#include <media/v4l2-ioctl.h>
+
+#include "jpeg_core.h"
+#include "jpeg_dev.h"
+
+#include "jpeg_mem.h"
+#include "jpeg_regs.h"
+
+/*
+ * Capability table for the encoder node: compressed formats are
+ * exposed on the capture side (M2M_CAPTURE), raw pixel formats on the
+ * output side (M2M_OUTPUT). depth[] holds bits per pixel contributed
+ * by each memory plane. Scanned linearly by find_format().
+ */
+static struct jpeg_fmt formats[] = {
+ {
+ .name = "JPEG compressed format",
+ .fourcc = V4L2_PIX_FMT_JPEG_444,
+ .depth = {8},
+ .color = JPEG_444,
+ .memplanes = 1,
+ .types = M2M_CAPTURE,
+ }, {
+ .name = "JPEG compressed format",
+ .fourcc = V4L2_PIX_FMT_JPEG_422,
+ .depth = {8},
+ .color = JPEG_422,
+ .memplanes = 1,
+ .types = M2M_CAPTURE,
+ }, {
+ .name = "JPEG compressed format",
+ .fourcc = V4L2_PIX_FMT_JPEG_420,
+ .depth = {8},
+ .color = JPEG_420,
+ .memplanes = 1,
+ .types = M2M_CAPTURE,
+ }, {
+ .name = "JPEG compressed format",
+ .fourcc = V4L2_PIX_FMT_JPEG_GRAY,
+ .depth = {8},
+ .color = JPEG_GRAY,
+ .memplanes = 1,
+ .types = M2M_CAPTURE,
+ }, {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .depth = {16},
+ .color = RGB_565,
+ .memplanes = 1,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:4:4 packed, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_YUV444_2P,
+ .depth = {8, 16},
+ .color = YCBCR_444_2P,
+ .memplanes = 2,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:4:4 packed, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_YVU444_2P,
+ .depth = {8, 16},
+ .color = YCRCB_444_2P,
+ .memplanes = 2,
+ .types = M2M_OUTPUT,
+ }, {
+ /* NOTE(review): three depth entries but memplanes = 2 for a
+ * 3-planar color - confirm whether this should be 3. */
+ .name = "YUV 4:4:4 packed, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV444_3P,
+ .depth = {8, 8, 8},
+ .color = YCBCR_444_3P,
+ .memplanes = 2,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "XRGB-8-8-8-8, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = {32},
+ .color = RGB_888,
+ .memplanes = 1,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .depth = {16},
+ .color = YCRYCB_422_1P,
+ .memplanes = 1,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = {16},
+ .color = YCBYCR_422_1P,
+ .memplanes = 1,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .depth = {8, 8},
+ .color = YCRCB_422_2P,
+ .memplanes = 2,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:2:2 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .depth = {8, 8},
+ .color = YCBCR_422_2P,
+ .memplanes = 2,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CbCr",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = {8, 4},
+ .color = YCBCR_420_2P,
+ .memplanes = 2,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:2:0 planar, Y/CrCb",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .depth = {8, 4},
+ .color = YCRCB_420_2P,
+ .memplanes = 2,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:2:0 contiguous 3-planar, Y/Cb/Cr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = {8, 2, 2},
+ .color = YCBCR_420_3P,
+ .memplanes = 3,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "YUV 4:2:0 contiguous 3-planar, Y/Cr/Cb",
+ .fourcc = V4L2_PIX_FMT_YVU420,
+ .depth = {8, 2, 2},
+ .color = YCRCB_420_3P,
+ .memplanes = 3,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "Gray",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .depth = {8},
+ .color = GRAY,
+ .memplanes = 1,
+ .types = M2M_OUTPUT,
+ },
+#ifdef CONFIG_JPEG_V2_2
+ {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .depth = {16},
+ .color = CRYCBY_422_1P,
+ .memplanes = 1,
+ .types = M2M_OUTPUT,
+ }, {
+ /* NOTE(review): UYVY (CbYCrY ordering) maps to the same
+ * CRYCBY_422_1P value as VYUY above - looks like a
+ * copy/paste; verify against the hardware CbYCrY setting. */
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = {16},
+ .color = CRYCBY_422_1P,
+ .memplanes = 1,
+ .types = M2M_OUTPUT,
+ }, {
+ .name = "XBGR-8-8-8-8, 32 bpp",
+ .fourcc = V4L2_PIX_FMT_BGR32,
+ .depth = {32},
+ .color = BGR_888,
+ .memplanes = 1,
+ .types = M2M_OUTPUT,
+ },
+#endif
+};
+
+/* Map a V4L2 multiplanar pixelformat onto the matching entry of the
+ * formats[] table; returns NULL when the fourcc is not supported. */
+static struct jpeg_fmt *find_format(struct v4l2_format *f)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(formats); ++idx) {
+		if (formats[idx].fourcc == f->fmt.pix_mp.pixelformat)
+			return &formats[idx];
+	}
+
+	return NULL;
+}
+
+/* VIDIOC_QUERYCAP: identify the device by its platform name and
+ * advertise streaming m2m capture/output (incl. multiplanar).
+ * NOTE(review): strncpy with size-1 relies on the caller handing in a
+ * zeroed v4l2_capability for NUL termination - confirm. */
+static int jpeg_enc_vidioc_querycap(struct file *file, void *priv,
+    struct v4l2_capability *cap)
+{
+ struct jpeg_ctx *ctx = file->private_data;
+ struct jpeg_dev *dev = ctx->dev;
+
+ strncpy(cap->driver, dev->plat_dev->name, sizeof(cap->driver) - 1);
+ strncpy(cap->card, dev->plat_dev->name, sizeof(cap->card) - 1);
+ cap->bus_info[0] = 0;
+ cap->version = KERNEL_VERSION(1, 0, 0);
+ cap->capabilities = V4L2_CAP_STREAMING |
+  V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT |
+  V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ return 0;
+}
+
+/* VIDIOC_ENUM_FMT: report the f->index-th entry of the formats table
+ * (shared for capture and output enumeration). */
+int jpeg_enc_vidioc_enum_fmt(struct file *file, void *priv,
+     struct v4l2_fmtdesc *f)
+{
+ struct jpeg_fmt *fmt;
+
+ if (f->index >= ARRAY_SIZE(formats))
+  return -EINVAL;
+
+ fmt = &formats[f->index];
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+/*
+ * VIDIOC_G_FMT: report the currently negotiated geometry from the
+ * encode parameters (input side for OUTPUT, compressed side for
+ * CAPTURE).
+ * NOTE(review): this compares against the non-MPLANE buffer types
+ * while the rest of the driver uses *_MPLANE - confirm which types
+ * callers actually pass, otherwise both branches are skipped.
+ */
+int jpeg_enc_vidioc_g_fmt(struct file *file, void *priv,
+   struct v4l2_format *f)
+{
+ struct jpeg_ctx *ctx = priv;
+ struct v4l2_pix_format_mplane *pixm;
+ struct jpeg_enc_param *enc_param = &ctx->param.enc_param;
+
+ pixm = &f->fmt.pix_mp;
+
+ pixm->field = V4L2_FIELD_NONE;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+  pixm->pixelformat =
+   enc_param->in_fmt;
+  pixm->num_planes =
+   enc_param->in_plane;
+  pixm->width =
+   enc_param->in_width;
+  pixm->height =
+   enc_param->in_height;
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+  pixm->pixelformat =
+   enc_param->out_fmt;
+  pixm->num_planes =
+   enc_param->out_plane;
+  pixm->width =
+   enc_param->out_width;
+  pixm->height =
+   enc_param->out_height;
+ } else {
+  v4l2_err(&ctx->dev->v4l2_dev,
+   "Wrong buffer/video queue type (%d)\n", f->type);
+ }
+
+ return 0;
+}
+
+/*
+ * VIDIOC_TRY_FMT: validate the fourcc against the formats table, force
+ * progressive field order, and fill in per-plane bytesperline /
+ * sizeimage defaults from the format's bit depth. Dimensions are
+ * clamped to the hardware maximum.
+ */
+static int jpeg_enc_vidioc_try_fmt(struct file *file, void *priv,
+      struct v4l2_format *f)
+{
+ struct jpeg_fmt *fmt;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct jpeg_ctx *ctx = priv;
+ int i;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
+     f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+  return -EINVAL;
+
+ fmt = find_format(f);
+
+ if (!fmt) {
+  v4l2_err(&ctx->dev->v4l2_dev,
+    "Fourcc format (0x%08x) invalid.\n",
+    f->fmt.pix.pixelformat);
+  return -EINVAL;
+ }
+
+ /* only progressive frames are supported */
+ if (pix->field == V4L2_FIELD_ANY)
+  pix->field = V4L2_FIELD_NONE;
+ else if (V4L2_FIELD_NONE != pix->field)
+  return -EINVAL;
+
+
+ pix->num_planes = fmt->memplanes;
+
+ for (i = 0; i < pix->num_planes; ++i) {
+  int bpl = pix->plane_fmt[i].bytesperline;
+
+  jpeg_dbg("[%d] bpl: %d, depth: %d, w: %d, h: %d",
+    i, bpl, fmt->depth[i], pix->width, pix->height);
+
+  /* recompute bpl when unset or inconsistent with the depth */
+  if (!bpl || (bpl * 8 / fmt->depth[i]) > pix->width)
+   bpl = (pix->width * fmt->depth[i]) >> 3;
+
+  if (!pix->plane_fmt[i].sizeimage)
+   pix->plane_fmt[i].sizeimage = pix->height * bpl;
+
+  pix->plane_fmt[i].bytesperline = bpl;
+
+  jpeg_dbg("[%d]: bpl: %d, sizeimage: %d",
+    i, pix->plane_fmt[i].bytesperline,
+    pix->plane_fmt[i].sizeimage);
+ }
+
+ /* clamp to the hardware limits */
+ if (f->fmt.pix.height > MAX_JPEG_HEIGHT)
+  f->fmt.pix.height = MAX_JPEG_HEIGHT;
+
+ if (f->fmt.pix.width > MAX_JPEG_WIDTH)
+  f->fmt.pix.width = MAX_JPEG_WIDTH;
+
+ return 0;
+}
+
+/*
+ * VIDIOC_S_FMT on the capture (compressed) queue: validate via
+ * try_fmt, refuse while buffers are allocated, then record the
+ * negotiated geometry and payload sizes in the encode parameters.
+ *
+ * Fix: the original stored pix->height into out_width and pix->width
+ * into out_height (swapped); out_* now match the requested geometry,
+ * consistent with jpeg_enc_vidioc_s_fmt_out.
+ */
+static int jpeg_enc_vidioc_s_fmt_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct vb2_queue *vq;
+	struct v4l2_pix_format_mplane *pix;
+	struct jpeg_fmt *fmt;
+	int ret;
+	int i;
+
+	ret = jpeg_enc_vidioc_try_fmt(file, priv, f);
+	if (ret)
+		return ret;
+
+	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+	if (!vq)
+		return -EINVAL;
+
+	if (vb2_is_busy(vq)) {
+		v4l2_err(&ctx->dev->v4l2_dev, "queue (%d) busy\n", f->type);
+		return -EBUSY;
+	}
+
+	pix = &f->fmt.pix_mp;
+	/* try_fmt above guarantees the fourcc is in the table */
+	fmt = find_format(f);
+
+	for (i = 0; i < fmt->memplanes; i++)
+		ctx->payload[i] =
+			pix->plane_fmt[i].bytesperline * pix->height;
+
+	ctx->param.enc_param.out_width = pix->width;
+	ctx->param.enc_param.out_height = pix->height;
+	ctx->param.enc_param.out_plane = fmt->memplanes;
+	ctx->param.enc_param.out_depth = fmt->depth[0];
+	ctx->param.enc_param.out_fmt = fmt->color;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_S_FMT on the output (raw image) queue: validate via try_fmt,
+ * refuse while buffers are allocated, then record the input geometry,
+ * per-plane depth and payload sizes in the encode parameters.
+ */
+static int jpeg_enc_vidioc_s_fmt_out(struct file *file, void *priv,
+    struct v4l2_format *f)
+{
+ struct jpeg_ctx *ctx = priv;
+ struct vb2_queue *vq;
+ struct v4l2_pix_format_mplane *pix;
+ struct jpeg_fmt *fmt;
+ int ret;
+ int i;
+
+ ret = jpeg_enc_vidioc_try_fmt(file, priv, f);
+ if (ret)
+  return ret;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (!vq)
+  return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+  v4l2_err(&ctx->dev->v4l2_dev, "queue (%d) busy\n", f->type);
+  return -EBUSY;
+ }
+
+ /* TODO: width & height has to be multiple of two */
+ pix = &f->fmt.pix_mp;
+ /* try_fmt above guarantees the fourcc is in the table */
+ fmt = find_format(f);
+
+ for (i = 0; i < fmt->memplanes; i++) {
+  ctx->payload[i] =
+   pix->plane_fmt[i].bytesperline * pix->height;
+  ctx->param.enc_param.in_depth[i] = fmt->depth[i];
+ }
+ ctx->param.enc_param.in_width = pix->width;
+ ctx->param.enc_param.in_height = pix->height;
+ ctx->param.enc_param.in_plane = fmt->memplanes;
+ ctx->param.enc_param.in_fmt = fmt->color;
+
+ return 0;
+}
+
+/* VIDIOC_REQBUFS: push the per-queue cacheability choice (set via
+ * V4L2_CID_CACHEABLE) into the allocator, then defer to v4l2-m2m. */
+static int jpeg_enc_m2m_reqbufs(struct file *file, void *priv,
+     struct v4l2_requestbuffers *reqbufs)
+{
+ struct jpeg_ctx *ctx = priv;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, reqbufs->type);
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+  ctx->dev->vb2->set_cacheable(ctx->dev->alloc_ctx, ctx->input_cacheable);
+ else if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+  ctx->dev->vb2->set_cacheable(ctx->dev->alloc_ctx, ctx->output_cacheable);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
+}
+
+/* Thin wrappers forwarding the buffer/stream ioctls to the v4l2-m2m
+ * helpers with this context's m2m_ctx. */
+static int jpeg_enc_m2m_querybuf(struct file *file, void *priv,
+       struct v4l2_buffer *buf)
+{
+ struct jpeg_ctx *ctx = priv;
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+static int jpeg_enc_m2m_qbuf(struct file *file, void *priv,
+    struct v4l2_buffer *buf)
+{
+ struct jpeg_ctx *ctx = priv;
+
+ return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int jpeg_enc_m2m_dqbuf(struct file *file, void *priv,
+    struct v4l2_buffer *buf)
+{
+ struct jpeg_ctx *ctx = priv;
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+static int jpeg_enc_m2m_streamon(struct file *file, void *priv,
+    enum v4l2_buf_type type)
+{
+ struct jpeg_ctx *ctx = priv;
+ return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
+}
+
+static int jpeg_enc_m2m_streamoff(struct file *file, void *priv,
+    enum v4l2_buf_type type)
+{
+ struct jpeg_ctx *ctx = priv;
+ return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+}
+
+/*
+ * VIDIOC_G_CTRL: report the size of the generated JPEG stream through
+ * ctrl->value.
+ *
+ * Fix: a v4l2 ioctl handler must return 0 on success or a negative
+ * errno; the original returned ctrl->value itself, so the stream size
+ * leaked out as the ioctl return code and unknown controls returned
+ * whatever userspace had passed in. Unknown controls now get -EINVAL.
+ */
+static int jpeg_enc_vidioc_g_ctrl(struct file *file, void *priv,
+				struct v4l2_control *ctrl)
+{
+	struct jpeg_ctx *ctx = priv;
+	struct jpeg_dev *dev = ctx->dev;
+
+	switch (ctrl->id) {
+	case V4L2_CID_CAM_JPEG_ENCODEDSIZE:
+		ctrl->value = jpeg_get_stream_size(dev->reg_base);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* VIDIOC_S_JPEGCOMP: store the requested compression quality for the
+ * next encode run. */
+static int vidioc_enc_s_jpegcomp(struct file *file, void *priv,
+   struct v4l2_jpegcompression *jpegcomp)
+{
+ struct jpeg_ctx *ctx = priv;
+
+ ctx->param.enc_param.quality = jpegcomp->quality;
+ return 0;
+}
+
+/* VIDIOC_G_JPEGCOMP: report the currently configured quality. */
+static int vidioc_enc_g_jpegcomp(struct file *file, void *priv,
+   struct v4l2_jpegcompression *jpegcomp)
+{
+ struct jpeg_ctx *ctx = priv;
+
+ jpegcomp->quality = ctx->param.enc_param.quality;
+ return 0;
+}
+
+/*
+ * VIDIOC_S_CTRL: V4L2_CID_CACHEABLE selects per-queue cacheability,
+ * applied later in reqbufs.
+ * NOTE(review): an unknown control id logs an error but still returns
+ * 0 - confirm whether callers expect -EINVAL here.
+ */
+static int jpeg_enc_vidioc_s_ctrl(struct file *file, void *priv,
+   struct v4l2_control *ctrl)
+{
+ struct jpeg_ctx *ctx = priv;
+/*
+* 0 : input/output noncacheable
+* 1 : input/output cacheable
+* 2 : input cacheable / output noncacheable
+* 3 : input noncacheable / output cacheable
+*/
+ switch (ctrl->id) {
+ case V4L2_CID_CACHEABLE:
+  if (ctrl->value == 0) {
+   ctx->input_cacheable = 0;
+   ctx->output_cacheable = 0;
+  } else if (ctrl->value == 1) {
+   ctx->input_cacheable = 1;
+   ctx->output_cacheable = 1;
+  } else if (ctrl->value == 2) {
+   ctx->input_cacheable = 1;
+   ctx->output_cacheable = 0;
+  } else if (ctrl->value == 3) {
+   ctx->input_cacheable = 0;
+   ctx->output_cacheable = 1;
+  } else {
+   /* out-of-range values fall back to noncacheable */
+   ctx->input_cacheable = 0;
+   ctx->output_cacheable = 0;
+  }
+  break;
+ default:
+  v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
+  break;
+ }
+
+ return 0;
+}
+
+/* ioctl dispatch table for the encoder video node */
+static const struct v4l2_ioctl_ops jpeg_enc_ioctl_ops = {
+ .vidioc_querycap = jpeg_enc_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = jpeg_enc_vidioc_enum_fmt,
+ .vidioc_enum_fmt_vid_out_mplane = jpeg_enc_vidioc_enum_fmt,
+
+ .vidioc_g_fmt_vid_cap_mplane = jpeg_enc_vidioc_g_fmt,
+ .vidioc_g_fmt_vid_out_mplane = jpeg_enc_vidioc_g_fmt,
+
+ .vidioc_try_fmt_vid_cap_mplane = jpeg_enc_vidioc_try_fmt,
+ .vidioc_try_fmt_vid_out_mplane = jpeg_enc_vidioc_try_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = jpeg_enc_vidioc_s_fmt_cap,
+ .vidioc_s_fmt_vid_out_mplane = jpeg_enc_vidioc_s_fmt_out,
+
+ .vidioc_reqbufs = jpeg_enc_m2m_reqbufs,
+ .vidioc_querybuf = jpeg_enc_m2m_querybuf,
+ .vidioc_qbuf = jpeg_enc_m2m_qbuf,
+ .vidioc_dqbuf = jpeg_enc_m2m_dqbuf,
+ .vidioc_streamon = jpeg_enc_m2m_streamon,
+ .vidioc_streamoff = jpeg_enc_m2m_streamoff,
+ .vidioc_g_ctrl = jpeg_enc_vidioc_g_ctrl,
+ .vidioc_g_jpegcomp = vidioc_enc_g_jpegcomp,
+ .vidioc_s_jpegcomp = vidioc_enc_s_jpegcomp,
+ .vidioc_s_ctrl = jpeg_enc_vidioc_s_ctrl,
+};
+
+/* Accessor used by jpeg_probe() when wiring up the encoder node. */
+const struct v4l2_ioctl_ops *get_jpeg_enc_v4l2_ioctl_ops(void)
+{
+ return &jpeg_enc_ioctl_ops;
+}
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_mem.c b/drivers/media/video/samsung/jpeg_v2x/jpeg_mem.c
new file mode 100644
index 0000000..994da07
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_mem.c
@@ -0,0 +1,79 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_mem.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Manages memory of the jpeg driver for encoder/decoder.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+
+#include <linux/cma.h>
+
+#include "jpeg_mem.h"
+#include "jpeg_core.h"
+
+#if defined(CONFIG_VIDEOBUF2_ION)
+#define JPEG_ION_NAME "s5p-jpeg"
+#endif
+
+#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
+/* CMA (physically contiguous) backend for the jpeg_vb2 abstraction. */
+void *jpeg_cma_init(struct jpeg_dev *dev)
+{
+ return vb2_cma_phys_init(&dev->plat_dev->dev, NULL, SZ_8K, false);
+}
+
+/* CMA needs no suspend/resume work; resume reports success. */
+int jpeg_cma_resume(void *alloc_ctx)
+{
+ return 1;
+}
+void jpeg_cma_suspend(void *alloc_ctx) {}
+
+const struct jpeg_vb2 jpeg_vb2_cma = {
+ .ops = &vb2_cma_phys_memops,
+ .init = jpeg_cma_init,
+ .cleanup = vb2_cma_phys_cleanup,
+ .plane_addr = vb2_cma_phys_plane_paddr,
+ .resume = jpeg_cma_resume,
+ .suspend = jpeg_cma_suspend,
+ .cache_flush = vb2_cma_phys_cache_flush,
+ .set_cacheable = vb2_cma_phys_set_cacheable,
+};
+#elif defined(CONFIG_VIDEOBUF2_ION)
+/* ION backend: buffers go through the IOMMU, so suspend/resume map to
+ * detaching/attaching the IOMMU context. */
+static void *jpeg_ion_init(struct jpeg_dev *dev)
+{
+ return vb2_ion_create_context(&dev->plat_dev->dev, SZ_8K,
+  VB2ION_CTX_VMCONTIG | VB2ION_CTX_IOMMU);
+}
+
+/* Resolve a plane's device (DMA) address from its vb2 cookie. */
+static unsigned long jpeg_vb2_plane_addr(struct vb2_buffer *vb, u32 plane_no)
+{
+ void *cookie = vb2_plane_cookie(vb, plane_no);
+ dma_addr_t dva = 0;
+
+ WARN_ON(vb2_ion_dma_address(cookie, &dva) != 0);
+
+ return dva;
+}
+
+const struct jpeg_vb2 jpeg_vb2_ion = {
+ .ops = &vb2_ion_memops,
+ .init = jpeg_ion_init,
+ .cleanup = vb2_ion_destroy_context,
+ .plane_addr = jpeg_vb2_plane_addr,
+ .resume = vb2_ion_attach_iommu,
+ .suspend = vb2_ion_detach_iommu,
+ .cache_flush = vb2_ion_cache_flush,
+ .set_cacheable = vb2_ion_set_cached,
+};
+#endif
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_mem.h b/drivers/media/video/samsung/jpeg_v2x/jpeg_mem.h
new file mode 100644
index 0000000..d912628
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_mem.h
@@ -0,0 +1,39 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_mem.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Definition for Operation of Jpeg encoder/decoder with memory
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __JPEG_MEM_H__
+#define __JPEG_MEM_H__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/cma.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-memops.h>
+
+#include <asm/cacheflush.h>
+
+/* vb2 allocator backend selected at build time (see jpeg_mem.c) */
+#if defined(CONFIG_VIDEOBUF2_CMA_PHYS)
+extern const struct jpeg_vb2 jpeg_vb2_cma;
+#elif defined(CONFIG_VIDEOBUF2_ION)
+extern const struct jpeg_vb2 jpeg_vb2_ion;
+#endif
+
+/* maximum image geometry accepted by the JPEG hardware */
+#define MAX_JPEG_WIDTH 3264
+#define MAX_JPEG_HEIGHT 2448
+
+#endif /* __JPEG_MEM_H__ */
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_regs.c b/drivers/media/video/samsung/jpeg_v2x/jpeg_regs.c
new file mode 100644
index 0000000..e3300cc
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_regs.c
@@ -0,0 +1,629 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_regs.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register interface file for jpeg v2.x driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <plat/regs_jpeg_v2_x.h>
+#include <plat/cpu.h>
+
+#include "jpeg_regs.h"
+#include "jpeg_conf.h"
+#include "jpeg_core.h"
+
+/* Soft-reset the JPEG IP.
+ *
+ * On JPEG v2.2 the enc/dec mode field is cleared first so the block is
+ * idle before the reset pulse.  The reset line is then driven low, held
+ * for 100 us (ndelay(100000)), and released again.
+ */
+void jpeg_sw_reset(void __iomem *base)
+{
+	unsigned int reg;
+
+#ifdef CONFIG_JPEG_V2_2
+	reg = readl(base + S5P_JPEG_CNTL_REG);
+	writel((reg & S5P_JPEG_ENC_DEC_MODE_MASK),
+			base + S5P_JPEG_CNTL_REG);
+#endif
+	reg = readl(base + S5P_JPEG_CNTL_REG);
+	writel(reg & ~S5P_JPEG_SOFT_RESET_HI,
+			base + S5P_JPEG_CNTL_REG);
+
+	ndelay(100000);
+
+	writel(reg | S5P_JPEG_SOFT_RESET_HI,
+			base + S5P_JPEG_CNTL_REG);
+}
+
+/* Select encode or decode operation in the control register, leaving
+ * all other control bits untouched. */
+void jpeg_set_enc_dec_mode(void __iomem *base, enum jpeg_mode mode)
+{
+	unsigned int cntl;
+
+	/* Clear the current mode field, then OR in the requested mode. */
+	cntl = readl(base + S5P_JPEG_CNTL_REG) & S5P_JPEG_ENC_DEC_MODE_MASK;
+
+	if (mode == DECODING)
+		cntl |= S5P_JPEG_DEC_MODE;
+	else	/* encode */
+		cntl |= S5P_JPEG_ENC_MODE;
+
+	writel(cntl, base + S5P_JPEG_CNTL_REG);
+}
+
+void jpeg_set_dec_out_fmt(void __iomem *base,
+ enum jpeg_frame_format out_fmt)
+{
+ unsigned int reg = 0;
+
+ writel(0, base + S5P_JPEG_IMG_FMT_REG); /* clear */
+
+	/* set jpeg decode output format register */
+ switch (out_fmt) {
+ case GRAY:
+ reg = S5P_JPEG_DEC_GRAY_IMG |
+ S5P_JPEG_GRAY_IMG_IP;
+ break;
+
+ case RGB_565:
+ reg = S5P_JPEG_DEC_RGB_IMG |
+ S5P_JPEG_RGB_IP_RGB_16BIT_IMG;
+ break;
+
+ case YCRCB_444_2P:
+ reg = S5P_JPEG_DEC_YUV_444_IMG |
+ S5P_JPEG_YUV_444_IP_YUV_444_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CrCb;
+ break;
+
+ case YCBCR_444_2P:
+ reg = S5P_JPEG_DEC_YUV_444_IMG |
+ S5P_JPEG_YUV_444_IP_YUV_444_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CbCr;
+ break;
+
+ case YCBCR_444_3P:
+ reg = S5P_JPEG_DEC_YUV_444_IMG |
+ S5P_JPEG_YUV_444_IP_YUV_444_3P_IMG;
+ break;
+#if defined (CONFIG_JPEG_V2_2)
+ case RGB_888:
+ reg = S5P_JPEG_DEC_RGB_IMG |
+ S5P_JPEG_RGB_IP_RGB_32BIT_IMG
+ |S5P_JPEG_ENC_FMT_RGB;
+ break;
+ case BGR_888:
+ reg = S5P_JPEG_DEC_RGB_IMG |
+ S5P_JPEG_RGB_IP_RGB_32BIT_IMG
+ |S5P_JPEG_ENC_FMT_BGR;
+ break;
+
+ case CRYCBY_422_1P:
+ reg = S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_ENC_FMT_VYUY;
+ break;
+
+ case CBYCRY_422_1P:
+ reg = S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_ENC_FMT_UYVY;
+ break;
+ case YCRYCB_422_1P:
+ reg = S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_ENC_FMT_YVYU;
+ break;
+ case YCBYCR_422_1P:
+ reg = S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_ENC_FMT_YUYV;
+ break;
+
+#elif defined (CONFIG_JPEG_V2_1)
+ case RGB_888:
+ reg = S5P_JPEG_DEC_RGB_IMG |
+ S5P_JPEG_RGB_IP_RGB_32BIT_IMG;
+ break;
+ case YCRYCB_422_1P:
+ reg = S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CrCb;
+ break;
+ case YCBYCR_422_1P:
+ reg = S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CbCr;
+ break;
+#endif
+
+ case YCRCB_422_2P:
+ reg = S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CrCb;
+ break;
+
+ case YCBCR_422_2P:
+ reg = S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CbCr;
+ break;
+
+ case YCBYCR_422_3P:
+ reg = S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_3P_IMG;
+ break;
+
+ case YCRCB_420_2P:
+ reg = S5P_JPEG_DEC_YUV_420_IMG |
+ S5P_JPEG_YUV_420_IP_YUV_420_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CrCb;
+ break;
+
+ case YCBCR_420_2P:
+ reg = S5P_JPEG_DEC_YUV_420_IMG |
+ S5P_JPEG_YUV_420_IP_YUV_420_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CbCr;
+ break;
+
+ case YCBCR_420_3P:
+ case YCRCB_420_3P:
+ reg = S5P_JPEG_DEC_YUV_420_IMG |
+ S5P_JPEG_YUV_420_IP_YUV_420_3P_IMG;
+ break;
+
+ default:
+ break;
+ }
+
+ writel(reg, base + S5P_JPEG_IMG_FMT_REG);
+}
+
+void jpeg_set_enc_in_fmt(void __iomem *base,
+ enum jpeg_frame_format in_fmt)
+{
+ unsigned int reg;
+
+ reg = readl(base + S5P_JPEG_IMG_FMT_REG) &
+ S5P_JPEG_ENC_IN_FMT_MASK; /* clear except enc format */
+
+ switch (in_fmt) {
+ case GRAY:
+ reg = reg | S5P_JPEG_ENC_GRAY_IMG | S5P_JPEG_GRAY_IMG_IP;
+ break;
+
+ case RGB_565:
+ reg = reg | S5P_JPEG_ENC_RGB_IMG |
+ S5P_JPEG_RGB_IP_RGB_16BIT_IMG;
+ break;
+
+ case YCRCB_444_2P:
+ reg = reg | S5P_JPEG_ENC_YUV_444_IMG |
+ S5P_JPEG_YUV_444_IP_YUV_444_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CrCb;
+ break;
+
+ case YCBCR_444_2P:
+ reg = reg | S5P_JPEG_ENC_YUV_444_IMG |
+ S5P_JPEG_YUV_444_IP_YUV_444_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CbCr;
+ break;
+
+ case YCBCR_444_3P:
+ reg = reg | S5P_JPEG_ENC_YUV_444_IMG |
+ S5P_JPEG_YUV_444_IP_YUV_444_3P_IMG;
+ break;
+
+#if defined (CONFIG_JPEG_V2_2)
+ case RGB_888:
+ reg = reg | S5P_JPEG_DEC_RGB_IMG |
+ S5P_JPEG_RGB_IP_RGB_32BIT_IMG
+ |S5P_JPEG_ENC_FMT_RGB;
+ break;
+ case BGR_888:
+ reg = reg | S5P_JPEG_DEC_RGB_IMG |
+ S5P_JPEG_RGB_IP_RGB_32BIT_IMG
+ |S5P_JPEG_ENC_FMT_BGR;
+ break;
+ case CRYCBY_422_1P:
+ reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_ENC_FMT_VYUY;
+ break;
+ case CBYCRY_422_1P:
+ reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_ENC_FMT_UYVY;
+ break;
+
+ case YCRYCB_422_1P:
+ reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_ENC_FMT_YVYU;
+ break;
+ case YCBYCR_422_1P:
+ reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_ENC_FMT_YUYV;
+ break;
+
+#elif defined (CONFIG_JPEG_V2_1)
+ case RGB_888:
+ reg = reg | S5P_JPEG_ENC_RGB_IMG |
+ S5P_JPEG_RGB_IP_RGB_32BIT_IMG;
+ break;
+ case YCRYCB_422_1P:
+ reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CrCb;
+ break;
+ case YCBYCR_422_1P:
+ reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_1P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CbCr;
+ break;
+#endif
+
+ case YCRCB_422_2P:
+ reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CrCb;
+ break;
+
+ case YCBCR_422_2P:
+ reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CbCr;
+ break;
+
+ case YCBYCR_422_3P:
+ reg = reg | S5P_JPEG_DEC_YUV_422_IMG |
+ S5P_JPEG_YUV_422_IP_YUV_422_3P_IMG;
+ break;
+
+ case YCRCB_420_2P:
+ reg = reg | S5P_JPEG_DEC_YUV_420_IMG |
+ S5P_JPEG_YUV_420_IP_YUV_420_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CrCb;
+ break;
+
+ case YCBCR_420_2P:
+ reg = reg | S5P_JPEG_DEC_YUV_420_IMG |
+ S5P_JPEG_YUV_420_IP_YUV_420_2P_IMG |
+ S5P_JPEG_SWAP_CHROMA_CbCr;
+ break;
+
+ case YCBCR_420_3P:
+ case YCRCB_420_3P:
+ reg = reg | S5P_JPEG_DEC_YUV_420_IMG |
+ S5P_JPEG_YUV_420_IP_YUV_420_3P_IMG;
+ break;
+
+ default:
+ break;
+
+ }
+
+ writel(reg, base + S5P_JPEG_IMG_FMT_REG);
+
+}
+
+/* Select the JPEG output subsampling format for encoding.  Only the
+ * encode-format field of the image-format register is changed; the
+ * input-format bits programmed by jpeg_set_enc_in_fmt() are preserved. */
+void jpeg_set_enc_out_fmt(void __iomem *base,
+		enum jpeg_stream_format out_fmt)
+{
+	unsigned int reg;
+
+	reg = readl(base + S5P_JPEG_IMG_FMT_REG) &
+		~S5P_JPEG_ENC_FMT_MASK; /* clear enc format */
+
+	switch (out_fmt) {
+	case JPEG_GRAY:
+		reg = reg | S5P_JPEG_ENC_FMT_GRAY;
+		break;
+
+	case JPEG_444:
+		reg = reg | S5P_JPEG_ENC_FMT_YUV_444;
+		break;
+
+	case JPEG_422:
+		reg = reg | S5P_JPEG_ENC_FMT_YUV_422;
+		break;
+
+	case JPEG_420:
+		reg = reg | S5P_JPEG_ENC_FMT_YUV_420;
+		break;
+
+	default:
+		/* unknown formats leave the field cleared */
+		break;
+	}
+
+	writel(reg, base + S5P_JPEG_IMG_FMT_REG);
+}
+
+/* Load the standard ITU quantisation and Huffman tables into the
+ * hardware table RAM.
+ *
+ * Layout: four 16-word quantisation tables at offsets 0x00/0x40/0x80/0xc0
+ * from the quantisation table base; Huffman DC/AC length and value
+ * tables for luminance and chrominance at the offsets written below.
+ */
+void jpeg_set_enc_tbl(void __iomem *base)
+{
+	int i, t;
+
+	/* Four quantisation tables, 16 words each, spaced 0x40 apart. */
+	for (t = 0; t < 4; t++)
+		for (i = 0; i < 16; i++)
+			writel((unsigned int)ITU_Q_tbl[t][i],
+				base + S5P_JPEG_QUAN_TBL_ENTRY_REG +
+				(t * 0x40) + (i * 0x04));
+
+	for (i = 0; i < 4; i++)
+		writel((unsigned int)ITU_H_tbl_len_DC_luminance[i],
+			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + (i * 0x04));
+
+	for (i = 0; i < 3; i++)
+		writel((unsigned int)ITU_H_tbl_val_DC_luminance[i],
+			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x10 + (i * 0x04));
+
+	for (i = 0; i < 4; i++)
+		writel((unsigned int)ITU_H_tbl_len_DC_chrominance[i],
+			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x20 + (i * 0x04));
+
+	for (i = 0; i < 3; i++)
+		writel((unsigned int)ITU_H_tbl_val_DC_chrominance[i],
+			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x30 + (i * 0x04));
+
+	for (i = 0; i < 4; i++)
+		writel((unsigned int)ITU_H_tbl_len_AC_luminance[i],
+			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x40 + (i * 0x04));
+
+	for (i = 0; i < 41; i++)
+		writel((unsigned int)ITU_H_tbl_val_AC_luminance[i],
+			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x50 + (i * 0x04));
+
+	for (i = 0; i < 4; i++)
+		writel((unsigned int)ITU_H_tbl_len_AC_chrominance[i],
+			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x100 + (i * 0x04));
+
+	for (i = 0; i < 41; i++)
+		writel((unsigned int)ITU_H_tbl_val_AC_chrominance[i],
+			base + S5P_JPEG_HUFF_TBL_ENTRY_REG + 0x110 + (i * 0x04));
+}
+
+/* Enable all JPEG interrupt sources.
+ *
+ * Fix: the original read-modify value was computed and then discarded
+ * (dead store); the write clobbered the whole register.  Use the RMW
+ * value so bits outside the interrupt-enable field are preserved.
+ */
+void jpeg_set_interrupt(void __iomem *base)
+{
+	unsigned int reg;
+
+	reg = readl(base + S5P_JPEG_INT_EN_REG) & ~S5P_JPEG_INT_EN_MASK;
+	writel(reg | S5P_JPEG_INT_EN_ALL, base + S5P_JPEG_INT_EN_REG);
+}
+
+/* Disable all JPEG interrupt sources by clearing the enable register. */
+void jpeg_clean_interrupt(void __iomem *base)
+{
+	writel(0, base + S5P_JPEG_INT_EN_REG);
+}
+
+/* Read and return the raw interrupt status register. */
+unsigned int jpeg_get_int_status(void __iomem *base)
+{
+	return readl(base + S5P_JPEG_INT_STATUS_REG);
+}
+
+/* Enable (value == 1) or disable the hardware Huffman table.
+ *
+ * Bug fix: the disable path used to write "reg | ~S5P_JPEG_HUF_TBL_EN",
+ * which sets every bit in the control register except the enable bit.
+ * Since the enable bit is already cleared in "reg", writing "reg" back
+ * is the correct disable.
+ */
+void jpeg_set_huf_table_enable(void __iomem *base, int value)
+{
+	unsigned int reg;
+
+	reg = readl(base + S5P_JPEG_CNTL_REG) & ~S5P_JPEG_HUF_TBL_EN;
+
+	if (value == 1)
+		writel(reg | S5P_JPEG_HUF_TBL_EN, base + S5P_JPEG_CNTL_REG);
+	else
+		writel(reg, base + S5P_JPEG_CNTL_REG);
+}
+
+/* Set the decode down-scaling factors (horizontal and vertical set
+ * independently) in the control register; other control bits are kept. */
+void jpeg_set_dec_scaling(void __iomem *base,
+		enum jpeg_scale_value x_value, enum jpeg_scale_value y_value)
+{
+	unsigned int reg;
+
+	reg = readl(base + S5P_JPEG_CNTL_REG) &
+			~(S5P_JPEG_HOR_SCALING_MASK |
+				S5P_JPEG_VER_SCALING_MASK);
+
+	writel(reg | S5P_JPEG_HOR_SCALING(x_value) |
+			S5P_JPEG_VER_SCALING(y_value),
+			base + S5P_JPEG_CNTL_REG);
+}
+
+/* Enable (value == 1) or disable the system-level interrupt.
+ *
+ * Bug fix: the original wrote the bare bit mask (or its complement)
+ * directly, clobbering every other bit in the control register and
+ * ignoring the read-modify value it had just computed.  "reg" already
+ * has the bit cleared, so disable writes it back and enable ORs it in.
+ */
+void jpeg_set_sys_int_enable(void __iomem *base, int value)
+{
+	unsigned int reg;
+
+	reg = readl(base + S5P_JPEG_CNTL_REG) & ~(S5P_JPEG_SYS_INT_EN);
+
+	if (value == 1)
+		writel(reg | S5P_JPEG_SYS_INT_EN, base + S5P_JPEG_CNTL_REG);
+	else
+		writel(reg, base + S5P_JPEG_CNTL_REG);
+}
+
+/* Program the base address of the compressed (JPEG bitstream) buffer. */
+void jpeg_set_stream_buf_address(void __iomem *base, unsigned int address)
+{
+	writel(address, base + S5P_JPEG_OUT_MEM_BASE_REG);
+}
+
+/* Program the image dimensions (pixels) used for encode/decode.  The
+ * register is cleared first, then X and Y are packed into their fields. */
+void jpeg_set_stream_size(void __iomem *base,
+		unsigned int x_value, unsigned int y_value)
+{
+	writel(0x0, base + S5P_JPEG_IMG_SIZE_REG); /* clear */
+	writel(S5P_JPEG_X_SIZE(x_value) | S5P_JPEG_Y_SIZE(y_value),
+			base + S5P_JPEG_IMG_SIZE_REG);
+}
+
+/* Program the per-plane frame buffer base addresses for format @fmt.
+ * Unused plane registers are cleared.  For YCRCB_420_3P the chroma
+ * planes are swapped, so plane 2 receives @address_3p. */
+void jpeg_set_frame_buf_address(void __iomem *base,
+	enum jpeg_frame_format fmt, unsigned int address_1p,
+	unsigned int address_2p, unsigned int address_3p)
+{
+	switch (fmt) {
+	case GRAY:
+	case RGB_565:
+	case RGB_888:
+	case YCRYCB_422_1P:
+	case YCBYCR_422_1P:
+#if defined(CONFIG_JPEG_V2_2)
+	case BGR_888:
+	case CBYCRY_422_1P:
+	case CRYCBY_422_1P:
+#endif
+		/* single interleaved plane */
+		writel(address_1p, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
+		writel(0, base + S5P_JPEG_IMG_BA_PLANE_2_REG);
+		writel(0, base + S5P_JPEG_IMG_BA_PLANE_3_REG);
+		break;
+	case YCBCR_444_2P:
+	case YCRCB_444_2P:
+	case YCRCB_422_2P:
+	case YCBCR_422_2P:
+	case YCBCR_420_2P:
+	case YCRCB_420_2P:
+		/* luma plane + combined chroma plane */
+		writel(address_1p, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
+		writel(address_2p, base + S5P_JPEG_IMG_BA_PLANE_2_REG);
+		writel(0, base + S5P_JPEG_IMG_BA_PLANE_3_REG);
+		break;
+	case YCBCR_444_3P:
+	case YCBYCR_422_3P:
+	case YCBCR_420_3P:
+		/* three planes in natural order */
+		writel(address_1p, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
+		writel(address_2p, base + S5P_JPEG_IMG_BA_PLANE_2_REG);
+		writel(address_3p, base + S5P_JPEG_IMG_BA_PLANE_3_REG);
+		break;
+	case YCRCB_420_3P:
+		/* three planes with Cr/Cb swapped */
+		writel(address_1p, base + S5P_JPEG_IMG_BA_PLANE_1_REG);
+		writel(address_3p, base + S5P_JPEG_IMG_BA_PLANE_2_REG);
+		writel(address_2p, base + S5P_JPEG_IMG_BA_PLANE_3_REG);
+		break;
+	default:
+		break;
+	}
+}
+/* Select the quantisation and Huffman table assignment per component
+ * for the requested encode quality level.  Unknown levels use the
+ * QUALITY_LEVEL_1 mapping (the original default produced the same
+ * register value, so the two arms are merged). */
+void jpeg_set_encode_tbl_select(void __iomem *base,
+	enum jpeg_img_quality_level level)
+{
+	unsigned int reg;
+
+	switch (level) {
+	case QUALITY_LEVEL_2:
+		reg = S5P_JPEG_Q_TBL_COMP1_0 | S5P_JPEG_Q_TBL_COMP2_3 |
+			S5P_JPEG_Q_TBL_COMP3_3 |
+			S5P_JPEG_HUFF_TBL_COMP1_AC_0_DC_0 |
+			S5P_JPEG_HUFF_TBL_COMP2_AC_1_DC_1 |
+			S5P_JPEG_HUFF_TBL_COMP3_AC_1_DC_1;
+		break;
+	case QUALITY_LEVEL_3:
+		reg = S5P_JPEG_Q_TBL_COMP1_2 | S5P_JPEG_Q_TBL_COMP2_1 |
+			S5P_JPEG_Q_TBL_COMP3_1 |
+			S5P_JPEG_HUFF_TBL_COMP1_AC_0_DC_0 |
+			S5P_JPEG_HUFF_TBL_COMP2_AC_1_DC_1 |
+			S5P_JPEG_HUFF_TBL_COMP3_AC_1_DC_1;
+		break;
+	case QUALITY_LEVEL_4:
+		reg = S5P_JPEG_Q_TBL_COMP1_2 | S5P_JPEG_Q_TBL_COMP2_3 |
+			S5P_JPEG_Q_TBL_COMP3_3 |
+			S5P_JPEG_HUFF_TBL_COMP1_AC_0_DC_0 |
+			S5P_JPEG_HUFF_TBL_COMP2_AC_1_DC_1 |
+			S5P_JPEG_HUFF_TBL_COMP3_AC_1_DC_1;
+		break;
+	case QUALITY_LEVEL_1:
+	default:
+		reg = S5P_JPEG_Q_TBL_COMP1_0 | S5P_JPEG_Q_TBL_COMP2_1 |
+			S5P_JPEG_Q_TBL_COMP3_1 |
+			S5P_JPEG_HUFF_TBL_COMP1_AC_0_DC_0 |
+			S5P_JPEG_HUFF_TBL_COMP2_AC_1_DC_1 |
+			S5P_JPEG_HUFF_TBL_COMP3_AC_1_DC_1;
+		break;
+	}
+	writel(reg, base + S5P_JPEG_TBL_SEL_REG);
+}
+
+/* Program the Huffman-code offset count used during encoding: 0xd2 for
+ * greyscale streams, 0x1a2 otherwise.
+ * NOTE(review): the two constants presumably correspond to header sizes
+ * for 1-component vs 3-component streams -- confirm against the JPEG
+ * v2.x hardware manual. */
+void jpeg_set_encode_hoff_cnt(void __iomem *base, enum jpeg_stream_format fmt)
+{
+	if (fmt == JPEG_GRAY)
+		writel(0xd2, base + S5P_JPEG_HUFF_CNT_REG);
+	else
+		writel(0x1a2, base + S5P_JPEG_HUFF_CNT_REG);
+}
+
+/* Return the size in bytes of the generated JPEG bitstream. */
+unsigned int jpeg_get_stream_size(void __iomem *base)
+{
+	return readl(base + S5P_JPEG_BITSTREAM_SIZE_REG);
+}
+
+/* Tell the decoder the size in bytes of the input JPEG bitstream. */
+void jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
+{
+	writel(size, base + S5P_JPEG_BITSTREAM_SIZE_REG);
+}
+
+/* Program the interrupt timer count register.
+ * NOTE(review): the parameter is a tick count, not a size -- the name
+ * is kept for API compatibility with existing callers. */
+void jpeg_set_timer_count(void __iomem *base, unsigned int size)
+{
+	writel(size, base + S5P_JPEG_INT_TIMER_COUNT_REG);
+}
+
+/* Read the decoded frame dimensions in pixels.
+ *
+ * Fix: read the XY-size register once so *width and *height come from a
+ * single consistent snapshot instead of two separate MMIO reads (the
+ * original read the register twice).
+ */
+void jpeg_get_frame_size(void __iomem *base,
+		unsigned int *width, unsigned int *height)
+{
+	unsigned int reg = readl(base + S5P_JPEG_DECODE_XY_SIZE_REG);
+
+	*width = reg & S5P_JPEG_DECODED_SIZE_MASK;
+	*height = (reg >> 16) & S5P_JPEG_DECODED_SIZE_MASK;
+}
+
+/* Decode the subsampling format of the parsed JPEG stream from the
+ * hardware's decoded-image-format register (low two bits). */
+enum jpeg_stream_format jpeg_get_frame_fmt(void __iomem *base)
+{
+	unsigned int reg = readl(base + S5P_JPEG_DECODE_IMG_FMT_REG);
+
+	switch (reg & 0x03) {
+	case 0x00:
+		return JPEG_GRAY;
+	case 0x01:
+		return JPEG_444;
+	case 0x02:
+		return JPEG_422;
+	case 0x03:
+		return JPEG_420;
+	default:
+		return JPEG_RESERVED; /* unreachable with a 2-bit field */
+	}
+}
diff --git a/drivers/media/video/samsung/jpeg_v2x/jpeg_regs.h b/drivers/media/video/samsung/jpeg_v2x/jpeg_regs.h
new file mode 100644
index 0000000..535a3f9
--- /dev/null
+++ b/drivers/media/video/samsung/jpeg_v2x/jpeg_regs.h
@@ -0,0 +1,51 @@
+/* linux/drivers/media/video/samsung/jpeg_v2x/jpeg_regs.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Header file of the register interface for jpeg v2.x driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __JPEG_REGS_H__
+#define __JPEG_REGS_H__
+
+#include "jpeg_core.h"
+
+void jpeg_sw_reset(void __iomem *base);
+void jpeg_set_enc_dec_mode(void __iomem *base, enum jpeg_mode mode);
+void jpeg_set_dec_out_fmt(void __iomem *base,
+ enum jpeg_frame_format out_fmt);
+void jpeg_set_enc_in_fmt(void __iomem *base,
+ enum jpeg_frame_format in_fmt);
+void jpeg_set_enc_out_fmt(void __iomem *base,
+ enum jpeg_stream_format out_fmt);
+void jpeg_set_enc_tbl(void __iomem *base);
+void jpeg_set_interrupt(void __iomem *base);
+void jpeg_clean_interrupt(void __iomem *base);
+unsigned int jpeg_get_int_status(void __iomem *base);
+void jpeg_set_huf_table_enable(void __iomem *base, int value);
+void jpeg_set_dec_scaling(void __iomem *base,
+ enum jpeg_scale_value x_value, enum jpeg_scale_value y_value);
+void jpeg_set_sys_int_enable(void __iomem *base, int value);
+void jpeg_set_stream_buf_address(void __iomem *base, unsigned int address);
+void jpeg_set_stream_size(void __iomem *base,
+ unsigned int x_value, unsigned int y_value);
+/* Parameter renamed from "address" to "address_1p" to match the
+ * definition in jpeg_regs.c (consistency fix; no behavior change). */
+void jpeg_set_frame_buf_address(void __iomem *base,
+	enum jpeg_frame_format fmt, unsigned int address_1p,
+	unsigned int address_2p, unsigned int address_3p);
+void jpeg_set_encode_tbl_select(void __iomem *base,
+ enum jpeg_img_quality_level level);
+void jpeg_set_encode_hoff_cnt(void __iomem *base, enum jpeg_stream_format fmt);
+void jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size);
+void jpeg_set_timer_count(void __iomem *base, unsigned int size);
+unsigned int jpeg_get_stream_size(void __iomem *base);
+void jpeg_get_frame_size(void __iomem *base,
+ unsigned int *width, unsigned int *height);
+
+enum jpeg_stream_format jpeg_get_frame_fmt(void __iomem *base);
+
+#endif /* __JPEG_REGS_H__ */
diff --git a/drivers/media/video/samsung/mali/Kconfig b/drivers/media/video/samsung/mali/Kconfig
new file mode 100644
index 0000000..b93bccf
--- /dev/null
+++ b/drivers/media/video/samsung/mali/Kconfig
@@ -0,0 +1,63 @@
+#
+## S3C Multimedia Mali configuration
+##
+#
+# For Mali
+config VIDEO_MALI400MP
+ bool "Enable MALI integration"
+ depends on VIDEO_SAMSUNG
+ default n
+ ---help---
+ This enables MALI integration in the multimedia device driver
+
+choice
+depends on VIDEO_MALI400MP
+prompt "MALI MEMORY OPTION"
+default MALI_OSMEM_ONLY
+config MALI_DED_ONLY
+ bool "mali dedicated memory only"
+ ---help---
+ This enables MALI dedicated memory only option
+config MALI_DED_MMU
+ bool "mali dedicated memory with mmu enable"
+ ---help---
+ This enables MALI dedicated memory with mmu enable option
+config MALI_OSMEM_ONLY
+ bool "mali OS memory only"
+ ---help---
+ This enables MALI OS memory only option
+config MALI_DED_OSMEM
+ bool "mali dedicated memory and OS memory"
+ ---help---
+ This enables MALI dedicated memory and OS memory option
+
+endchoice
+config MALI_MEM_SIZE
+int "Dedicated Memory Size"
+ depends on VIDEO_MALI400MP && (MALI_DED_ONLY || MALI_DED_MMU || MALI_DED_OSMEM)
+ default "128"
+ ---help---
+ This value is dedicated memory size of Mali GPU(unit is MByte).
+
+
+# For DEBUG
+config VIDEO_MALI400MP_DEBUG
+ bool "Enables debug messages"
+ depends on VIDEO_MALI400MP
+ default n
+ help
+ This enables Mali driver debug messages.
+
+config VIDEO_MALI400MP_STREAMLINE_PROFILING
+ bool "Enables mali streamline profiling"
+ depends on VIDEO_MALI400MP
+ default n
+ help
+ This enables Mali streamline profiling.
+
+config VIDEO_MALI400MP_DVFS
+ bool "Enables DVFS"
+ depends on VIDEO_MALI400MP && PM
+ default y
+ help
+ This enables Mali driver DVFS.
diff --git a/drivers/media/video/samsung/mali/Makefile b/drivers/media/video/samsung/mali/Makefile
new file mode 100644
index 0000000..0ce60a3
--- /dev/null
+++ b/drivers/media/video/samsung/mali/Makefile
@@ -0,0 +1,282 @@
+#
+# Copyright (C) 2010 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+OSKOS :=linux
+FILES_PREFIX=
+MALI_FILE_PREFIX := drivers/media/video/samsung/mali
+KBUILDROOT =
+
+ifeq ($(CONFIG_MALI_DED_ONLY),y)
+USING_OS_MEMORY=0
+USING_MMU=0
+USING_DED=1
+endif
+
+ifeq ($(CONFIG_MALI_DED_MMU),y)
+USING_OS_MEMORY=0
+USING_MMU=1
+USING_DED=1
+endif
+
+ifeq ($(CONFIG_MALI_OSMEM_ONLY),y)
+USING_MMU=1
+USING_DED=0
+USING_OS_MEMORY=1
+endif
+
+ifeq ($(CONFIG_MALI_DED_OSMEM),y)
+USING_MMU=1
+USING_DED=1
+USING_OS_MEMORY=1
+endif
+
+ifeq ($(CONFIG_PM),y)
+USING_PMM=1
+endif
+
+ifeq ($(CONFIG_PM_RUNTIME),y)
+USING_MALI_RUN_TIME_PM=1
+endif
+
+ifeq ($(CONFIG_VIDEO_MALI400MP_DVFS),y)
+USING_GPU_UTILIZATION=1
+USING_MALI_DVFS_ENABLED=1
+endif
+
+ifeq ($(CONFIG_VIDEO_MALI400MP_DEBUG),y)
+BUILD=debug
+endif
+
+ifeq ($(CONFIG_VIDEO_MALI400MP_STREAMLINE_PROFILING),y)
+USING_PROFILING=1
+USING_TRACEPOINTS=1
+endif
+
+# set up defaults if not defined by the user
+PANIC_ON_WATCHDOG_TIMEOUT ?= 1
+USING_MALI400 ?= 1
+USING_MMU ?= 1
+USING_DED ?= 0
+USING_UMP ?= 0
+ONLY_ZBT ?= 0
+USING_ZBT ?= 0
+USING_OS_MEMORY ?= 1
+USING_PMM ?= 0
+USING_MALI_RUN_TIME_PM ?= 0
+USING_MALI_PMM_TESTSUITE ?= 0
+USING_MALI_PMU ?= 0
+USING_GPU_UTILIZATION ?= 0
+OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 6
+USING_PROFILING ?= 0
+USING_TRACEPOINTS ?= 0
+USING_MALI_MAJOR_PREDEFINE = 1
+USING_MALI_DVFS_ENABLED ?= 0
+TIMESTAMP ?= default
+BUILD ?= release
+USING_MALI_PMM_EARLYSUSPEND ?= 0
+#USING_KERNEL_WITH_DMA_ALLOC_PHYS_PAGE ?= 0
+CONFIG_MALI_MEM_SIZE ?= 64
+
+# config validation check
+ifeq ($(USING_OS_MEMORY),1)
+ USING_MMU = 1
+endif
+# Check if a Mali Core sub module should be enabled, true or false returned
+#submodule_enabled = $(shell gcc $(DEFINES) -E $(MALI_FILE_PREFIX)/arch/config.h | grep type | grep -c $(2))
+
+# Inside the kernel build system
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Set up our defines, which will be passed to gcc
+DEFINES += -DUSING_ZBT=$(USING_ZBT)
+DEFINES += -DUSING_OS_MEMORY=$(USING_OS_MEMORY)
+DEFINES += -DUSING_MMU=$(USING_MMU)
+DEFINES += -DUSING_DED=$(USING_DED)
+DEFINES += -DUSING_UMP=$(USING_UMP)
+DEFINES += -DONLY_ZBT=$(ONLY_ZBT)
+DEFINES += -D_MALI_OSK_SPECIFIC_INDIRECT_MMAP
+DEFINES += -DUSING_MALI_PMU=$(USING_MALI_PMU)
+DEFINES += -DMALI_PMM_RUNTIME_JOB_CONTROL_ON=$(USING_MALI_RUN_TIME_PM)
+DEFINES += -DUSING_MALI_PMM=$(USING_PMM)
+DEFINES += -DMALI_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
+DEFINES += -DCONFIG_MALI_MEM_SIZE=$(CONFIG_MALI_MEM_SIZE)
+DEFINES += -D_MALI_OSK_SPECIFIC_INDIRECT_MMAP
+DEFINES += -DMALI_TIMELINE_PROFILING_ENABLED=$(USING_PROFILING)
+DEFINES += -DMALI_POWER_MGMT_TEST_SUITE=$(USING_MALI_PMM_TESTSUITE)
+DEFINES += -DMALI_MAJOR_PREDEFINE=$(USING_MALI_MAJOR_PREDEFINE)
+DEFINES += -DMALI_DVFS_ENABLED=$(USING_MALI_DVFS_ENABLED)
+DEFINES += -DUSING_MALI_PMM_EARLYSUSPEND=$(USING_MALI_PMM_EARLYSUSPEND)
+DEFINES += -DMALI_STATE_TRACKING=1
+DEFINES += -DMALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB=$(OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB)
+DEFINES += -DMALI_TRACEPOINTS_ENABLED=$(USING_TRACEPOINTS)
+DEFINES += -DMALI_REBOOTNOTIFIER
+
+ifeq ($(BUILD),debug)
+DEFINES += -DDEBUG
+endif
+
+# Linux has its own mmap cleanup handlers (see mali_kernel_mem_mmu.o)
+DEFINES += -DMALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+
+# UMP
+ifeq ($(CONFIG_VIDEO_UMP),y)
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=1 -DHAVE_UNLOCKED_IOCTL
+ EXTRA_CFLAGS += -I$(MALI_FILE_PREFIX)/../ump/include
+else
+ DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER=0
+endif
+
+# Target build file
+obj-$(CONFIG_VIDEO_MALI400MP) += mali.o
+
+# Use our defines when compiling
+# MALI
+INCLUDES = \
+ -I$(MALI_FILE_PREFIX)\
+ -I$(MALI_FILE_PREFIX)/platform\
+ -I$(MALI_FILE_PREFIX)/common \
+ -I$(MALI_FILE_PREFIX)/linux
+
+EXTRA_CFLAGS += $(INCLUDES)\
+ $(DEFINES)
+
+
+EXTRA_CFLAGS += -I$(MALI_FILE_PREFIX)/linux/license/gpl
+EXTRA_CFLAGS += -I$(MALI_FILE_PREFIX)/common/pmm
+
+# Source files which always are included in a build
+ifeq ($(CONFIG_VIDEO_UMP),y)
+OSKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_irq.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_low_level_mem.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_mali.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_notification.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_time.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_timers.o
+else
+OSKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_atomics.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_irq.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_locks.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_low_level_mem.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_math.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_memory.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_misc.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_mali.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_notification.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_time.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_timers.o
+endif #($(CONFIG_VIDEO_UMP),y)
+
+ifeq ($(CONFIG_CPU_EXYNOS4210),y)
+ MALI_PLATFORM_FILE = platform/orion-m400/mali_platform.o
+else
+ MALI_PLATFORM_FILE = platform/pegasus-m400/mali_platform.o
+endif #($(CONFIG_CPU_EXYNOS4210),y)
+
+UKKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_mem.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_gp.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_pp.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_core.o \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_vsync.o
+
+mali-y := \
+ $(KBUILDROOT)common/mali_kernel_core.o \
+ $(KBUILDROOT)linux/mali_kernel_linux.o \
+ $(KBUILDROOT)linux/mali_osk_indir_mmap.o \
+ $(KBUILDROOT)common/mali_kernel_rendercore.o \
+ $(KBUILDROOT)common/mali_kernel_descriptor_mapping.o \
+ $(KBUILDROOT)common/mali_kernel_vsync.o \
+ $(KBUILDROOT)linux/mali_kernel_sysfs.o \
+ $(KBUILDROOT)$(MALI_PLATFORM_FILE) \
+ $(KBUILDROOT)$(OSKFILES) \
+ $(KBUILDROOT)$(UKKFILES)
+ #__malidrv_build_info.o
+
+ifeq ($(USING_PROFILING),1)
+EXTRA_CFLAGS += -I$(MALI_FILE_PREFIX)/timestamp-default
+EXTRA_CFLAGS += -I$(MALI_FILE_PREFIX)/profiling/include
+mali-y += \
+ common/mali_kernel_profiling.o \
+ timestamp-$(TIMESTAMP)/mali_timestamp.o \
+ linux/mali_ukk_profiling.o
+
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD)/timestamp-$(TIMESTAMP)
+endif
+
+ifeq ($(USING_TRACEPOINTS),1)
+mali-y += \
+ linux/mali_osk_profiling.o
+endif
+
+# Selecting files to compile by parsing the config file
+
+ifeq ($(USING_PMM),1)
+mali-y += \
+ common/pmm/mali_pmm.o \
+ common/pmm/mali_pmm_policy.o \
+ common/pmm/mali_pmm_policy_alwayson.o \
+ common/pmm/mali_pmm_policy_jobcontrol.o \
+ common/pmm/mali_pmm_state.o \
+ linux/mali_kernel_pm.o \
+ linux/mali_osk_pm.o \
+ linux/mali_device_pause_resume.o
+endif
+ifeq ($(USING_MALI_PMU),1)
+mali-y += \
+ common/pmm/mali_pmm_pmu.o
+endif
+
+ifeq ($(USING_GPU_UTILIZATION),1)
+mali-y += \
+ common/mali_kernel_utilization.o
+endif
+
+# Mali-400 PP in use
+EXTRA_CFLAGS += -DUSING_MALI400
+mali-y += common/mali_kernel_MALI200.o
+
+# Mali-400 GP in use
+mali-y += common/mali_kernel_GP2.o
+
+# Mali MMU in use
+mali-y += \
+ common/mali_kernel_mem_mmu.o \
+ common/mali_kernel_memory_engine.o \
+ common/mali_block_allocator.o \
+ common/mali_kernel_mem_os.o
+
+# Mali Level2 cache in use
+EXTRA_CFLAGS += -DUSING_MALI400_L2_CACHE
+mali-y += common/mali_kernel_l2_cache.o
+
+ifeq ($(USING_MALI_DVFS_ENABLED),1)
+ifeq ($(CONFIG_CPU_EXYNOS4210),y)
+mali-y += \
+ platform/orion-m400/mali_platform_dvfs.o
+else
+mali-y += \
+ platform/pegasus-m400/mali_platform_dvfs.o
+endif #($(CONFIG_CPU_EXYNOS4210),y)
+endif #($(USING_MALI_DVFS_ENABLED),1)
+
+ifeq ($(PANIC_ON_WATCHDOG_TIMEOUT),1)
+ EXTRA_CFLAGS += -DUSING_KERNEL_PANIC
+endif
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_REV:=$(shell ((svnversion | grep -qv exported && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+
+EXTRA_CFLAGS += -DSVN_REV=$(SVN_REV)
+EXTRA_CFLAGS += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
diff --git a/drivers/media/video/samsung/mali/Makefile.common b/drivers/media/video/samsung/mali/Makefile.common
new file mode 100644
index 0000000..53d4e5b
--- /dev/null
+++ b/drivers/media/video/samsung/mali/Makefile.common
@@ -0,0 +1,59 @@
+#
+# Copyright (C) 2010 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# Check if a Mali Core sub module should be enabled, true or false returned
+submodule_enabled = $(shell gcc $(DEFINES) -E $1/arch/config.h | grep type | grep -c $(2))
+
+OSKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_atomics.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_irq.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_locks.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_low_level_mem.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_math.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_memory.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_misc.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_mali.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_notification.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_time.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_osk_timers.c
+
+UKKFILES=\
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_mem.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_gp.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_pp.c \
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_core.c
+
+ifeq ($(USING_PROFILING),1)
+UKKFILES+=\
+ $(FILES_PREFIX)$(OSKOS)/mali_ukk_profiling.c
+endif
+
+ifeq ($(MALI_PLATFORM_FILE),)
+MALI_PLATFORM_FILE=platform/default/mali_platform.c
+endif
+
+# Get subversion revision number, fall back to only ${MALI_RELEASE_NAME} if no svn info is available
+SVN_REV := $(shell (cd $(DRIVER_DIR); (svnversion | grep -qv exported && svnversion) || git svn info | grep '^Revision: '| sed -e 's/^Revision: //' ) 2>/dev/null )
+ifeq ($(SVN_REV),)
+SVN_REV := $(MALI_RELEASE_NAME)
+else
+SVN_REV := $(MALI_RELEASE_NAME)-r$(SVN_REV)
+endif
+
+# Common version-string, will be extended by OS-specific sections
+VERSION_STRINGS :=
+VERSION_STRINGS += CONFIG=$(CONFIG)
+VERSION_STRINGS += USING_OS_MEMORY=$(USING_OS_MEMORY)
+VERSION_STRINGS += API_VERSION=$(shell cd $(DRIVER_DIR); grep "\#define _MALI_API_VERSION" $(FILES_PREFIX)common\/mali_uk_types.h | cut -d' ' -f 3 )
+VERSION_STRINGS += REPO_URL=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'URL: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^URL: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += REVISION=$(SVN_REV)
+VERSION_STRINGS += CHANGED_REVISION=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Rev: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Rev: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += CHANGE_DATE=$(shell cd $(DRIVER_DIR); (svn info || git svn info || echo 'Last Changed Date: $(MALI_RELEASE_NAME)') 2>/dev/null | grep '^Last Changed Date: ' | cut -d: -f2- | cut -b2-)
+VERSION_STRINGS += BUILD_DATE=$(shell date)
diff --git a/drivers/media/video/samsung/mali/arch b/drivers/media/video/samsung/mali/arch
new file mode 120000
index 0000000..6154ca4
--- /dev/null
+++ b/drivers/media/video/samsung/mali/arch
@@ -0,0 +1 @@
+arch-orion-m400 \ No newline at end of file
diff --git a/drivers/media/video/samsung/mali/arch-orion-m400/config.h b/drivers/media/video/samsung/mali/arch-orion-m400/config.h
new file mode 100644
index 0000000..5c4d79d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/arch-orion-m400/config.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the EB platform with ZBT memory enabled */
+/*zepplin added 2010.08.17 for orion configuration*/
+#define MALI_BASE_ADDR 0x13000000
+#define GP_ADDR MALI_BASE_ADDR
+#define L2_ADDR MALI_BASE_ADDR+0x1000
+#define PMU_ADDR MALI_BASE_ADDR+0x2000
+#define GP_MMU_ADDR MALI_BASE_ADDR+0x3000
+#define PP0_MMU_ADDR MALI_BASE_ADDR+0x4000
+#define PP1_MMU_ADDR MALI_BASE_ADDR+0x5000
+#define PP2_MMU_ADDR MALI_BASE_ADDR+0x6000
+#define PP3_MMU_ADDR MALI_BASE_ADDR+0x7000
+#define PP0_ADDR MALI_BASE_ADDR+0x8000
+#define PP1_ADDR MALI_BASE_ADDR+0xA000
+#define PP2_ADDR MALI_BASE_ADDR+0xC000
+#define PP3_ADDR MALI_BASE_ADDR+0xE000
+
+/*for mmu and os memory*/
+#define MEM_BASE_ADDR 0x40000000
+#define MEM_TOTAL_SIZE 0x40000000
+#define MEM_MALI_OS_SIZE 0x40000000
+
+/*for dedicated memory*/
+//#define MEM_MALI_BASE 0x58000000
+//#define MEM_MALI_SIZE 0x08000000
+#define MEM_MALI_SIZE CONFIG_MALI_MEM_SIZE*1024*1024
+#define MEM_MALI_BASE 0x80000000 - MEM_MALI_SIZE
+
+static _mali_osk_resource_t arch_configuration [] =
+{
+ {
+ .type = MALI400GP,
+ .description = "Mali-400 GP",
+ .base = GP_ADDR,
+ .irq = IRQ_GP_3D,
+ .mmu_id = 1
+ },
+ {
+ .type = MALI400PP,
+ .base = PP0_ADDR,
+ .irq = IRQ_PP0_3D,
+ .description = "Mali-400 PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MALI400PP,
+ .base = PP1_ADDR,
+ .irq = IRQ_PP1_3D,
+ .description = "Mali-400 PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MALI400PP,
+ .base = PP2_ADDR,
+ .irq = IRQ_PP2_3D,
+ .description = "Mali-400 PP 2",
+ .mmu_id = 4
+ },
+ {
+ .type = MALI400PP,
+ .base = PP3_ADDR,
+ .irq = IRQ_PP3_3D,
+ .description = "Mali-400 PP 3",
+ .mmu_id = 5
+ },
+#if USING_MMU
+ {
+ .type = MMU,
+ .base = GP_MMU_ADDR,
+ .irq = IRQ_GPMMU_3D,
+ .description = "Mali-400 MMU for GP",
+ .mmu_id = 1
+ },
+ {
+ .type = MMU,
+ .base = PP0_MMU_ADDR,
+ .irq = IRQ_PPMMU0_3D,
+ .description = "Mali-400 MMU for PP 0",
+ .mmu_id = 2
+ },
+ {
+ .type = MMU,
+ .base = PP1_MMU_ADDR,
+ .irq = IRQ_PPMMU1_3D,
+ .description = "Mali-400 MMU for PP 1",
+ .mmu_id = 3
+ },
+ {
+ .type = MMU,
+ .base = PP2_MMU_ADDR,
+ .irq = IRQ_PPMMU2_3D,
+ .description = "Mali-400 MMU for PP 2",
+ .mmu_id = 4
+ },
+ {
+ .type = MMU,
+ .base = PP3_MMU_ADDR,
+ .irq = IRQ_PPMMU3_3D,
+ .description = "Mali-400 MMU for PP 3",
+ .mmu_id = 5
+ },
+#if USING_OS_MEMORY
+ {
+ .type = OS_MEMORY,
+ .description = "System Memory",
+ .size = MEM_MALI_OS_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+ },
+#endif
+#if USING_DED /* Dedicated Memory */
+ {
+ .type = MEMORY,
+ .description = "Dedicated Memory",
+ .base = MEM_MALI_BASE,
+ .size = MEM_MALI_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE | _MALI_MMU_READABLE | _MALI_MMU_WRITEABLE
+ },
+#endif /* USING_DED */
+ {
+ .type = MEM_VALIDATION,
+ .description = "memory validation",
+ .base = MEM_BASE_ADDR,
+ .size = MEM_TOTAL_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE | _MALI_MMU_READABLE | _MALI_MMU_WRITEABLE
+ },
+#else /* Not using MMU */
+ {
+ .type = MEMORY,
+ .description = "Dedicated Memory",
+ .base = MEM_MALI_BASE,
+ .size = MEM_MALI_SIZE,
+ .flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE | _MALI_MMU_READABLE | _MALI_MMU_WRITEABLE
+ },
+#endif
+ {
+ .type = MALI400L2,
+ .base = L2_ADDR,
+ .description = "Mali-400 L2 cache"
+ },
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_block_allocator.c b/drivers/media/video/samsung/mali/common/mali_block_allocator.c
new file mode 100644
index 0000000..5f421f0
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_block_allocator.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_osk.h"
+
+#define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */
+
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+/* The structure used as the handle produced by block_allocator_allocate,
+ * and removed by block_allocator_release */
+typedef struct block_allocator_allocation
+{
+ /* The list will be released in reverse order */
+ block_info *last_allocated;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 start_offset;
+ u32 mapping_length;
+} block_allocator_allocation;
+
+
+typedef struct block_allocator
+{
+ _mali_osk_lock_t *mutex;
+ block_info * all_blocks;
+ block_info * first_free;
+ u32 base;
+ u32 cpu_usage_adjust;
+ u32 num_blocks;
+} block_allocator;
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block);
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static void block_allocator_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block );
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ block_allocator * info;
+ u32 usable_size;
+ u32 num_blocks;
+
+ usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+ MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+ MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+ num_blocks = usable_size / MALI_BLOCK_SIZE;
+ MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+ if (usable_size == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+ return NULL;
+ }
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(block_allocator));
+ if (NULL != info)
+ {
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, 105);
+ if (NULL != info->mutex)
+ {
+ info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+ if (NULL != info->all_blocks)
+ {
+ u32 i;
+ info->first_free = NULL;
+ info->num_blocks = num_blocks;
+
+ info->base = base_address;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ for ( i = 0; i < num_blocks; i++)
+ {
+ info->all_blocks[i].next = info->first_free;
+ info->first_free = &info->all_blocks[i];
+ }
+
+ allocator->allocate = block_allocator_allocate;
+ allocator->allocate_page_table_block = block_allocator_allocate_page_table_block;
+ allocator->destroy = block_allocator_destroy;
+ allocator->stat = block_allocator_stat;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_lock_term(info->mutex);
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ block_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (block_allocator*)allocator->ctx;
+
+ _mali_osk_free(info->all_blocks);
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block)
+{
+ return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+}
+
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ block_allocator * info;
+ u32 left;
+ block_info * last_allocated = NULL;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ block_allocator_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (block_allocator*)ctx;
+ left = descriptor->size - *offset;
+ MALI_DEBUG_ASSERT(0 != left);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ /* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return result;
+ }
+
+ ret_allocation->start_offset = *offset;
+ ret_allocation->mapping_length = 0;
+
+ while ((left > 0) && (info->first_free))
+ {
+ block_info * block;
+ u32 phys_addr;
+ u32 padding;
+ u32 current_mapping_size;
+
+ block = info->first_free;
+ info->first_free = info->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+
+ phys_addr = get_phys(info, block);
+
+ padding = *offset & (MALI_BLOCK_SIZE-1);
+
+ if (MALI_BLOCK_SIZE - padding < left)
+ {
+ current_mapping_size = MALI_BLOCK_SIZE - padding;
+ }
+ else
+ {
+ current_mapping_size = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ /* This relinks every block we've just allocated back into the free-list */
+ block = last_allocated->next;
+ last_allocated->next = info->first_free;
+ info->first_free = last_allocated;
+ last_allocated = block;
+ }
+
+ break;
+ }
+
+ *offset += current_mapping_size;
+ left -= current_mapping_size;
+ ret_allocation->mapping_length += current_mapping_size;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ if (last_allocated)
+ {
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Record all the information about this allocation */
+ ret_allocation->last_allocated = last_allocated;
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+
+ alloc_info->ctx = info;
+ alloc_info->handle = ret_allocation;
+ alloc_info->release = block_allocator_release;
+ }
+ else
+ {
+ /* Free the allocation information - nothing to be passed back */
+ _mali_osk_free( ret_allocation );
+ }
+
+ return result;
+}
+
+static void block_allocator_release(void * ctx, void * handle)
+{
+ block_allocator * info;
+ block_info * block, * next;
+ block_allocator_allocation *allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (block_allocator*)ctx;
+ allocation = (block_allocator_allocation*)handle;
+ block = allocation->last_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* unmap */
+ mali_allocation_engine_unmap_physical(allocation->engine, allocation->descriptor, allocation->start_offset, allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ while (block)
+ {
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ next = block->next;
+
+ /* relink into free-list */
+ block->next = info->first_free;
+ info->first_free = block;
+
+ /* advance the loop */
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free( allocation );
+}
+
+
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ block_allocator * info;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(block);
+ info = (block_allocator*)ctx;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ if (NULL != info->first_free)
+ {
+ void * virt;
+ u32 phys;
+ u32 size;
+ block_info * alloc;
+ alloc = info->first_free;
+
+ phys = get_phys(info, alloc); /* Does not modify info or alloc */
+ size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
+ virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );
+
+ /* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
+ * because it's unlikely another allocator will be able to map in. */
+
+ if ( NULL != virt )
+ {
+ block->ctx = info; /* same as incoming ctx */
+ block->handle = alloc;
+ block->phys_base = phys;
+ block->size = size;
+ block->release = block_allocator_release_page_table_block;
+ block->mapping = virt;
+
+ info->first_free = alloc->next;
+
+ alloc->next = NULL; /* Could potentially link many blocks together instead */
+
+ result = MALI_MEM_ALLOC_FINISHED;
+ }
+ }
+ else result = MALI_MEM_ALLOC_NONE;
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block )
+{
+ block_allocator * info;
+ block_info * block, * next;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (block_allocator*)page_table_block->ctx;
+ block = (block_info*)page_table_block->handle;
+
+ MALI_DEBUG_ASSERT_POINTER(info);
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* Unmap all the physical memory at once */
+ _mali_osk_mem_unmapioregion( page_table_block->phys_base, page_table_block->size, page_table_block->mapping );
+
+ /** @note This loop handles the case where more than one block_info was linked.
+ * Probably unnecessary for page table block releasing. */
+ while (block)
+ {
+ next = block->next;
+
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ block->next = info->first_free;
+ info->first_free = block;
+
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
+
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator)
+{
+ block_allocator * info;
+ block_info *block;
+ u32 free_blocks = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+
+ info = (block_allocator*)allocator->ctx;
+ block = info->first_free;
+
+ while(block)
+ {
+ free_blocks++;
+ block = block->next;
+ }
+ return (info->num_blocks - free_blocks) * MALI_BLOCK_SIZE;
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_block_allocator.h b/drivers/media/video/samsung/mali/common/mali_block_allocator.h
new file mode 100644
index 0000000..d3f0f9b
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_block_allocator.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_kernel_memory_engine.h"
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_GP2.c b/drivers/media/video/samsung/mali/common/mali_kernel_GP2.c
new file mode 100644
index 0000000..cfd70f4
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_GP2.c
@@ -0,0 +1,1493 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+//added for SPI
+#include <linux/kernel.h>
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_subsystem.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#if defined(USING_MALI400_L2_CACHE)
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h" /* Needed for mali_kernel_mmu_force_bus_reset() */
+#endif
+
+#if defined(USING_MALI200)
+#define MALI_GP_SUBSYSTEM_NAME "MaliGP2"
+#define MALI_GP_CORE_TYPE _MALI_GP2
+#elif defined(USING_MALI400)
+#define MALI_GP_SUBSYSTEM_NAME "Mali-400 GP"
+#define MALI_GP_CORE_TYPE _MALI_400_GP
+#else
+#error "No supported mali core defined"
+#endif
+
+#define GET_JOB_EMBEDDED_PTR(job) (&((job)->embedded_core_job))
+#define GET_JOBGP2_PTR(job_extern) _MALI_OSK_CONTAINER_OF(job_extern, maligp_job, embedded_core_job)
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_gp_id = -1;
+
+static mali_core_renderunit * last_gp_core_cookie = NULL;
+
+/* Describing a maligp job settings */
+typedef struct maligp_job
+{
+ /* The general job struct common for all mali cores */
+ mali_core_job embedded_core_job;
+ _mali_uk_gp_start_job_s user_input;
+
+ u32 irq_status;
+ u32 status_reg_on_stop;
+ u32 perf_counter0;
+ u32 perf_counter1;
+ u32 vscl_stop_addr;
+ u32 plbcl_stop_addr;
+ u32 heap_current_addr;
+
+ /* The data we will return back to the user */
+ _mali_osk_notification_t *notification_obj;
+
+ int is_stalled_waiting_for_more_memory;
+
+ u32 active_mask;
+ /* progress checking */
+ u32 last_vscl;
+ u32 last_plbcl;
+ /* extended progress checking, only enabled when we can use one of the performance counters */
+ u32 have_extended_progress_checking;
+ u32 vertices;
+
+#if defined(USING_MALI400_L2_CACHE)
+ u32 perf_counter_l2_src0;
+ u32 perf_counter_l2_src1;
+ u32 perf_counter_l2_val0;
+ u32 perf_counter_l2_val1;
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ u32 pid;
+ u32 tid;
+#endif
+} maligp_job;
+
+/*Functions Exposed to the General External System through
+ function pointers.*/
+
+static _mali_osk_errcode_t maligp_subsystem_startup(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static _mali_osk_errcode_t maligp_subsystem_mmu_connect(mali_kernel_subsystem_identifier id);
+#endif
+static void maligp_subsystem_terminate(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t maligp_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+static void maligp_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+static _mali_osk_errcode_t maligp_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t maligp_renderunit_create(_mali_osk_resource_t * resource);
+#if USING_MMU
+static void maligp_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+#if MALI_STATE_TRACKING
+u32 maligp_subsystem_dump_state(char *buf, u32 size);
+#endif
+
+/* Internal support functions */
+static _mali_osk_errcode_t maligp_core_version_legal( mali_core_renderunit *core );
+static void maligp_raw_reset( mali_core_renderunit *core);
+static void maligp_reset_hard(struct mali_core_renderunit * core);
+static void maligp_reset(mali_core_renderunit *core);
+static void maligp_initialize_registers_mgmt(mali_core_renderunit *core );
+
+#ifdef DEBUG
+static void maligp_print_regs(int debug_level, mali_core_renderunit *core);
+#endif
+
+/* Functions exposed to mali_core system through function pointers
+ in the subsystem struct. */
+static _mali_osk_errcode_t subsystem_maligp_start_job(mali_core_job * job, mali_core_renderunit * core);
+static u32 subsystem_maligp_irq_handler_upper_half(mali_core_renderunit * core);
+static int subsystem_maligp_irq_handler_bottom_half(mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_maligp_get_new_job_from_user(struct mali_core_session * session, void * argument);
+static _mali_osk_errcode_t subsystem_maligp_suspend_response(struct mali_core_session * session, void * argument);
+static void subsystem_maligp_return_job_to_user(mali_core_job * job, mali_subsystem_job_end_code end_status);
+static void subsystem_maligp_renderunit_delete(mali_core_renderunit * core);
+static void subsystem_maligp_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style );
+static void subsystem_maligp_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_maligp_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core);
+static void subsystem_maligp_renderunit_stop_bus(struct mali_core_renderunit* core);
+
+/* Variables */
+static register_address_and_value default_mgmt_regs[] =
+{
+ { MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED }
+};
+
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+
+struct mali_kernel_subsystem mali_subsystem_gp2=
+{
+ maligp_subsystem_startup, /* startup */
+ NULL, /*maligp_subsystem_terminate,*/ /* shutdown */
+#if USING_MMU
+ maligp_subsystem_mmu_connect, /* load_complete */
+#else
+ NULL,
+#endif
+ maligp_subsystem_core_system_info_fill, /* system_info_fill */
+ maligp_subsystem_session_begin, /* session_begin */
+ maligp_subsystem_session_end, /* session_end */
+#if USING_MMU
+ maligp_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ maligp_subsystem_dump_state, /* dump_state */
+#endif
+} ;
+
+static mali_core_subsystem subsystem_maligp ;
+
+static _mali_osk_errcode_t maligp_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem * subsystem;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_startup\n") ) ;
+
+ mali_subsystem_gp_id = id;
+
+ /* All values get 0 as default */
+ _mali_osk_memset(&subsystem_maligp, 0, sizeof(*subsystem));
+
+ subsystem = &subsystem_maligp;
+ subsystem->start_job = &subsystem_maligp_start_job;
+ subsystem->irq_handler_upper_half = &subsystem_maligp_irq_handler_upper_half;
+ subsystem->irq_handler_bottom_half = &subsystem_maligp_irq_handler_bottom_half;
+ subsystem->get_new_job_from_user = &subsystem_maligp_get_new_job_from_user;
+ subsystem->suspend_response = &subsystem_maligp_suspend_response;
+ subsystem->return_job_to_user = &subsystem_maligp_return_job_to_user;
+ subsystem->renderunit_delete = &subsystem_maligp_renderunit_delete;
+ subsystem->reset_core = &subsystem_maligp_renderunit_reset_core;
+ subsystem->stop_bus = &subsystem_maligp_renderunit_stop_bus;
+ subsystem->probe_core_irq_trigger = &subsystem_maligp_renderunit_probe_core_irq_trigger;
+ subsystem->probe_core_irq_acknowledge = &subsystem_maligp_renderunit_probe_core_irq_finished;
+
+ /* Setting variables in the general core part of the subsystem.*/
+ subsystem->name = MALI_GP_SUBSYSTEM_NAME;
+ subsystem->core_type = MALI_GP_CORE_TYPE;
+ subsystem->id = id;
+
+ /* Initiates the rest of the general core part of the subsystem */
+ MALI_CHECK_NO_ERROR(mali_core_subsystem_init( subsystem ));
+
+ /* This will register the function for adding MALIGP2 cores to the subsystem */
+#if defined(USING_MALI200)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALIGP2, maligp_renderunit_create));
+#endif
+#if defined(USING_MALI400)
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI400GP, maligp_renderunit_create));
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_startup\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+static _mali_osk_errcode_t maligp_subsystem_mmu_connect(mali_kernel_subsystem_identifier id)
+{
+ mali_core_subsystem_attach_mmu(&subsystem_maligp);
+ MALI_SUCCESS; /* OK */
+}
+#endif
+
+static void maligp_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_terminate\n") ) ;
+ mali_core_subsystem_cleanup(&subsystem_maligp);
+}
+
+static _mali_osk_errcode_t maligp_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ mali_core_session * session;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_session_begin\n") ) ;
+ MALI_CHECK_NON_NULL(session = _mali_osk_malloc( sizeof(*session) ), _MALI_OSK_ERR_FAULT);
+
+ _mali_osk_memset(session, 0, sizeof(*session) );
+ *slot = (mali_kernel_subsystem_session_slot)session;
+
+ session->subsystem = &subsystem_maligp;
+
+ session->notification_queue = queue;
+
+#if USING_MMU
+ session->mmu_session = mali_session_data;
+#endif
+
+ mali_core_session_begin(session);
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_session_begin\n") ) ;
+
+ MALI_SUCCESS;
+}
+
+static void maligp_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ mali_core_session * session;
+ /** @note mali_session_data not needed here */
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_subsystem_session_end\n") ) ;
+ if ( NULL==slot || NULL==*slot)
+ {
+ MALI_PRINT_ERROR(("Input slot==NULL"));
+ return;
+ }
+ session = (mali_core_session *)*slot;
+ mali_core_session_close(session);
+
+ _mali_osk_free(session);
+ *slot = NULL;
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_subsystem_session_end\n") ) ;
+}
+
+/**
+ * We fill in info about all the cores we have
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, or another _mali_osk_errcode_t for errors.
+ */
+static _mali_osk_errcode_t maligp_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ return mali_core_subsystem_system_info_fill(&subsystem_maligp, info);
+}
+
+static _mali_osk_errcode_t maligp_renderunit_create(_mali_osk_resource_t * resource)
+{
+ mali_core_renderunit *core;
+ int err;
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: maligp_renderunit_create\n") ) ;
+ /* Checking that the resource settings are correct */
+#if defined(USING_MALI200)
+ if(MALIGP2 != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_GP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#elif defined(USING_MALI400)
+ if(MALI400GP != resource->type)
+ {
+ MALI_PRINT_ERROR(("Can not register this resource as a " MALI_GP_SUBSYSTEM_NAME " core."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+ if ( 0 != resource->size )
+ {
+ MALI_PRINT_ERROR(("Memory size set to " MALI_GP_SUBSYSTEM_NAME " core should be zero."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ if ( NULL == resource->description )
+ {
+ MALI_PRINT_ERROR(("A " MALI_GP_SUBSYSTEM_NAME " core needs a unique description field"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Create a new core object */
+ core = (mali_core_renderunit*) _mali_osk_malloc(sizeof(*core));
+ if ( NULL == core )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Variables set to be able to open and register the core */
+ core->subsystem = &subsystem_maligp ;
+ core->registers_base_addr = resource->base ;
+ core->size = MALIGP2_REGISTER_ADDRESS_SPACE_SIZE ;
+ core->description = resource->description;
+ core->irq_nr = resource->irq ;
+#if USING_MMU
+ core->mmu_id = resource->mmu_id;
+ core->mmu = NULL;
+#endif
+#if USING_MALI_PMM
+ /* Set up core's PMM id */
+ core->pmm_id = MALI_PMM_CORE_GP;
+#endif
+
+ err = mali_core_renderunit_init( core );
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to initialize renderunit\n"));
+ goto exit_on_error0;
+ }
+
+ /* Map the new core object, setting: core->registers_mapped */
+ err = mali_core_renderunit_map_registers(core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error1;
+
+ /* Check that the register mapping of the core works.
+ Return 0 if maligp core is present and accessible. */
+ if (mali_benchmark) {
+ core->core_version = MALI_GP_PRODUCT_ID << 16;
+ } else {
+ core->core_version = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VERSION);
+ }
+
+ err = maligp_core_version_legal(core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error2;
+
+ /* Reset the core. Put the core into a state where it can start to render. */
+ maligp_reset(core);
+
+ /* Registering IRQ, init the work_queue_irq_handle */
+ /* Adding this core as an available renderunit in the subsystem. */
+ err = mali_core_subsystem_register_renderunit(&subsystem_maligp, core);
+ if (_MALI_OSK_ERR_OK != err) goto exit_on_error2;
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Initial Register settings:\n"));
+ maligp_print_regs(4, core);
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_renderunit_create\n") ) ;
+
+ MALI_SUCCESS;
+
+exit_on_error2:
+ mali_core_renderunit_unmap_registers(core);
+exit_on_error1:
+ mali_core_renderunit_term(core);
+exit_on_error0:
+ _mali_osk_free( core ) ;
+ MALI_PRINT_ERROR(("Renderunit NOT created."));
+ MALI_ERROR((_mali_osk_errcode_t)err);
+}
+
+#if USING_MMU
+/* Used currently only for signalling when MMU has a pagefault */
+static void maligp_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ mali_core_subsystem_broadcast_notification(&subsystem_maligp, message, data);
+}
+#endif
+
+#ifdef DEBUG
+/* Debug helper: dumps the GP management registers (command-list pointers,
+ * PLBU heap bounds, IRQ state, performance counters and AXI error status)
+ * at the given debug level. Reads hardware registers, so only call while
+ * the core's register mapping is valid. No-op when debug_level is above
+ * the global mali_debug_level. */
+static void maligp_print_regs(int debug_level, mali_core_renderunit *core)
+{
+ if (debug_level <= mali_debug_level)
+ {
+ /* Vertex-shader / PLBU command list windows and PLBU heap bounds. */
+ MALI_DEBUG_PRINT(1, (" VS 0x%08X 0x%08X, PLBU 0x%08X 0x%08X ALLOC 0x%08X 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+ /* Interrupt and core status snapshot. */
+ MALI_DEBUG_PRINT(1, (" IntRaw 0x%08X IntMask 0x%08X, Status 0x%02X Ver: 0x%08X \n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_MASK),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VERSION)));
+
+ /* Performance counter enable/source/value pairs. */
+ MALI_DEBUG_PRINT(1, (" PERF_CNT Enbl:%d %d Src: %02d %02d VAL: 0x%08X 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+
+ /* Read-back of current command-list positions and AXI bus error state. */
+ MALI_DEBUG_PRINT(1, (" VS_START 0x%08X PLBU_START 0x%08X AXI_ERR 0x%08X\n",
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ),
+ mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ),
+ mali_core_renderunit_register_read(core, MALIGP2_CONTR_AXI_BUS_ERROR_STAT)));
+ }
+}
+#endif
+
+/* Validates the product ID read from the core's VERSION register
+ * (core->core_version must already be populated by the caller).
+ * The product ID lives in the upper 16 bits. Returns MALI_SUCCESS
+ * (_MALI_OSK_ERR_OK) when the ID matches a supported GP
+ * (Mali-400/Mali-300 GP when built with USING_MALI400, otherwise the
+ * generic MALI_GP_PRODUCT_ID), or _MALI_OSK_ERR_FAULT otherwise. */
+static _mali_osk_errcode_t maligp_core_version_legal( mali_core_renderunit *core )
+{
+ u32 mali_type;
+
+ /* Product ID occupies the high half-word of the version register. */
+ mali_type = core->core_version >> 16;
+
+#if defined(USING_MALI400)
+ if ( MALI400_GP_PRODUCT_ID != mali_type && MALI300_GP_PRODUCT_ID != mali_type )
+#else
+ if ( MALI_GP_PRODUCT_ID != mali_type )
+#endif
+ {
+ MALI_PRINT_ERROR(("Error: reading this from maligp version register: 0x%x\n", core->core_version));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali GP: core_version_legal: Reads correct mali version: %d\n", core->core_version )) ;
+ MALI_SUCCESS;
+}
+
+/* Subsystem callback: asks the GP core to stop issuing new bus (AXI)
+ * transactions by writing STOP_BUS to the command register. Does not wait
+ * for the bus to actually stop; callers that need that poll the STATUS
+ * register themselves (see maligp_raw_reset). */
+static void subsystem_maligp_renderunit_stop_bus(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+/* Full soft reset of the GP core: performs the raw hardware reset and then
+ * reprograms the default management registers so the core is ready to start
+ * a new job. Skipped entirely in benchmark (no-hardware) mode. */
+static void maligp_reset( mali_core_renderunit *core )
+{
+ if (!mali_benchmark) {
+ maligp_raw_reset(core);
+ maligp_initialize_registers_mgmt(core);
+ }
+}
+
+
+/* Hard reset of the GP core. Issues the RESET command and then verifies the
+ * reset completed by writing a known value to a scratch register
+ * (WRITE_BOUND_LOW) and polling until it reads back — the register only
+ * becomes writable again once the reset has finished. Gives up after a
+ * bounded number of attempts (logging a debug message), restores the
+ * scratch register's default value and clears all pending interrupts. */
+static void maligp_reset_hard( mali_core_renderunit *core )
+{
+ const int reset_finished_loop_count = 15;
+ const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ /* Poison the scratch register so a stale read cannot look like success. */
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_invalid_value);
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+ /* Rewrite and read back the scratch value until the write sticks,
+ which indicates the reset has completed. */
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_core_renderunit_register_read(core, reset_wait_target_register))
+ {
+ MALI_DEBUG_PRINT(5, ("Reset loop exiting after %d iterations\n", i));
+ break;
+ }
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_DEBUG_PRINT(1, ("The reset loop didn't work\n"));
+ }
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+
+
+}
+
+/* Raw hardware reset of the GP core, without reprogramming the default
+ * management registers afterwards (maligp_reset does both).
+ *
+ * Mali-200 path: stop the bus, poll STATUS for BUS_STOPPED, then delegate
+ * to maligp_reset_hard. Mali-400 path: issue SOFT_RESET and poll RAWSTAT
+ * for the RESET_COMPLETED interrupt. On either path, if the core never
+ * acknowledges within the polling budget — typically because an outstanding
+ * bus request is stuck on an MMU page fault — escalate to a forced MMU bus
+ * reset (when an MMU is attached and we are not already inside error
+ * recovery), which unfortunately resets every core behind that MMU.
+ * No-op in benchmark mode. */
+static void maligp_raw_reset( mali_core_renderunit *core )
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: maligp_raw_reset: %s\n", core->description)) ;
+ if (mali_benchmark) return;
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+
+#if defined(USING_MALI200)
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+
+ /* Busy-wait (10us per iteration) for the core to report its bus stopped. */
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, request_loop_count == i, ("Mali GP: Bus was never stopped during core reset\n"));
+
+ if (request_loop_count==i)
+ {
+ /* Could not stop bus connections from core, probably because some of the already pending
+ bus request has had a page fault, and therefore can not complete before the MMU does PageFault
+ handling. This can be treated as a heavier reset function - which unfortunately reset all
+ the cores on this MMU in addition to the MMU itself */
+#if USING_MMU
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ MALI_DEBUG_PRINT(1, ("Mali GP: Forcing MMU bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow GP to stop its bus, system failure, unable to recover\n"));
+ return;
+ }
+
+ /* the bus was stopped OK, complete the reset */
+ /* use the hard reset routine to do the actual reset */
+ maligp_reset_hard(core);
+
+#elif defined(USING_MALI400)
+
+ /* Clear any stale RESET_COMPLETED bit before issuing the soft reset. */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+ /* Busy-wait (10us per iteration) for the reset-completed interrupt. */
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & /*Bitwise OR*/
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED) break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if ( request_loop_count==i )
+ {
+#if USING_MMU
+ /* Could not stop bus connections from core, probably because some of the already pending
+ bus request has had a page fault, and therefore can not complete before the MMU does PageFault
+ handling. This can be treated as a heavier reset function - which unfortunately reset all
+ the cores on this MMU in addition to the MMU itself */
+ if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+ {
+ MALI_DEBUG_PRINT(1, ("Mali GP: Forcing Bus reset\n"));
+ mali_kernel_mmu_force_bus_reset(core->mmu);
+ return;
+ }
+#endif
+ MALI_PRINT(("A MMU reset did not allow GP to stop its bus, system failure, unable to recover\n"));
+ }
+ else
+ {
+ /* Reset done: clear all interrupts left over from before the reset. */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ }
+
+#else
+#error "no supported mali core defined"
+#endif
+}
+
+/* Programs the GP management registers from the const default_mgmt_regs
+ * table (address/value pairs defined elsewhere in this file). Uses relaxed
+ * writes for the batch and a single memory barrier at the end so the
+ * ordering guarantee is paid only once. */
+static void maligp_initialize_registers_mgmt(mali_core_renderunit *core )
+{
+ int i;
+
+ MALI_DEBUG_PRINT(6, ("Mali GP: maligp_initialize_registers_mgmt: %s\n", core->description)) ;
+ for(i=0 ; i< (sizeof(default_mgmt_regs)/sizeof(*default_mgmt_regs)) ; ++i)
+ {
+ mali_core_renderunit_register_write_relaxed(core, default_mgmt_regs[i].address, default_mgmt_regs[i].value);
+ }
+ _mali_osk_write_mem_barrier();
+}
+
+
+/* Starts the given job on the given GP core.
+ Despite the historical comment ("Return MALI_TRUE"), this returns an
+ _mali_osk_errcode_t: MALI_SUCCESS once the start command has been written
+ to the hardware, or _MALI_OSK_ERR_FAULT when the job's frame registers
+ describe neither a VS nor a PLBU command list (nothing to start).
+ Programs the frame registers, optional performance counters (GP and,
+ on Mali-400, L2) and finally writes the start command. */
+static _mali_osk_errcode_t subsystem_maligp_start_job(mali_core_job * job, mali_core_renderunit * core)
+{
+ maligp_job *jobgp;
+ u32 startcmd;
+ /* The local extended version of the general structs */
+ jobgp = _MALI_OSK_CONTAINER_OF(job, maligp_job, embedded_core_job);
+
+ /* A non-empty command list (start != end) enables the matching unit. */
+ startcmd = 0;
+ if ( jobgp->user_input.frame_registers[0] != jobgp->user_input.frame_registers[1] )
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+ }
+
+ if ( jobgp->user_input.frame_registers[2] != jobgp->user_input.frame_registers[3] )
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+ }
+
+ if(0 == startcmd)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali GP: Job: 0x%08x WILL NOT START SINCE JOB HAS ILLEGAL ADDRESSES\n",
+ (u32)jobgp->user_input.user_job_ptr));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Registers Start\n"));
+ maligp_print_regs(4, core);
+#endif
+
+
+ /* Write the whole frame-register block starting at VSCL_START_ADDR. */
+ mali_core_renderunit_register_write_array(
+ core,
+ MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR,
+ &(jobgp->user_input.frame_registers[0]),
+ sizeof(jobgp->user_input.frame_registers)/sizeof(jobgp->user_input.frame_registers[0]));
+
+#if MALI_TRACEPOINTS_ENABLED
+ /* Tracepoint builds override userspace counter selection with the
+ global counter_table entries (0xFFFFFFFF means "not configured"). */
+ jobgp->user_input.perf_counter_flag = 0;
+
+ if( counter_table[7] != 0xFFFFFFFF ) {
+ jobgp->user_input.perf_counter_flag |= _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE;
+ jobgp->user_input.perf_counter_src0 = counter_table[7];
+ }
+ if( counter_table[8] != 0xFFFFFFFF ) {
+ jobgp->user_input.perf_counter_flag |= _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE;
+ jobgp->user_input.perf_counter_src1 = counter_table[8];
+ }
+#endif
+
+ /* This selects which performance counters we are reading */
+ if ( 0 != jobgp->user_input.perf_counter_flag )
+ {
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ mali_core_renderunit_register_write_relaxed(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC,
+ jobgp->user_input.perf_counter_src0);
+
+ mali_core_renderunit_register_write_relaxed(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ mali_core_renderunit_register_write_relaxed(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ jobgp->user_input.perf_counter_src1);
+
+ mali_core_renderunit_register_write_relaxed(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if ( jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ int force_reset = ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET ) ? 1 : 0;
+ u32 src0 = 0;
+ u32 src1 = 0;
+
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE )
+ {
+ src0 = jobgp->user_input.perf_counter_l2_src0;
+ }
+ if ( jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE )
+ {
+ src1 = jobgp->user_input.perf_counter_l2_src1;
+ }
+
+ mali_kernel_l2_cache_set_perf_counters(src0, src1, force_reset); /* will activate and possibly reset counters */
+
+ /* Now, retrieve the current values, so we can substract them when the job has completed */
+ mali_kernel_l2_cache_get_perf_counters(&jobgp->perf_counter_l2_src0,
+ &jobgp->perf_counter_l2_val0,
+ &jobgp->perf_counter_l2_src1,
+ &jobgp->perf_counter_l2_val1);
+ }
+#endif
+ }
+
+ /* When counter 1 is free, repurpose it to count processed vertices so the
+ hang detector can tell "hung" apart from "slow but progressing". */
+ if ( 0 == (jobgp->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE))
+ {
+ /* extended progress checking can be enabled */
+
+ jobgp->have_extended_progress_checking = 1;
+
+ mali_core_renderunit_register_write_relaxed(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+ MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED
+ );
+
+ mali_core_renderunit_register_write_relaxed(
+ core,
+ MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+ MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ subsystem_flush_mapped_mem_cache();
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: STARTING GP WITH CMD: 0x%x\n", startcmd));
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_inc(&job->session->jobs_started);
+#endif
+
+ /* This is the command that starts the Core */
+ mali_core_renderunit_register_write(core,
+ MALIGP2_REG_ADDR_MGMT_CMD,
+ startcmd);
+ _mali_osk_write_mem_barrier();
+
+ pr_debug("SPI_GPU_GP Start\n");
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ jobgp->user_input.frame_builder_id, jobgp->user_input.flush_id, 0, 0, 0);
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), jobgp->pid, jobgp->tid, 0, 0, 0);
+#endif
+
+ MALI_SUCCESS;
+}
+
+/* Upper-half (hard) IRQ handler for the GP core.
+ Returns non-zero when a bottom-half run is required, 0 otherwise.
+ On a pending interrupt it masks all further IRQs from this core until the
+ bottom half has handled the event. In benchmark mode no hardware is read;
+ an interrupt is simulated whenever a job is current on the core. */
+
+static u32 subsystem_maligp_irq_handler_upper_half(mali_core_renderunit * core)
+{
+ u32 irq_readout;
+
+ if (mali_benchmark) {
+ return (core->current_job ? 1 : 0); /* simulate irq is pending when a job is pending */
+ }
+
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+
+ MALI_DEBUG_PRINT(5, ("Mali GP: IRQ: %04x\n", irq_readout)) ;
+
+ if ( MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout )
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+#endif
+
+ /* We do need to handle this in a bottom half, return 1 */
+ return 1;
+ }
+ return 0;
+}
+
+/* Bottom-half IRQ handler for the GP core.
+Decodes the raw interrupt and status registers and classifies the state of
+the current job. Note: despite the original comment mentioning
+MALI_TRUE/MALI_FALSE, this function returns JOB_STATUS_* codes:
+ - JOB_STATUS_END_SUCCESS: core went idle without errors (job done);
+ - JOB_STATUS_END_OOM: job aborted, userspace would not extend the heap;
+ - JOB_STATUS_CONTINUE_RUN: job still running (IRQ mask re-enabled);
+ - JOB_STATUS_END_HANG / JOB_STATUS_END_UNKNOWN_ERR: failure paths.
+On the PLBU out-of-memory path it suspends the job and sends a
+_MALI_NOTIFICATION_GP_STALLED message to userspace, expecting a
+suspend-response (see subsystem_maligp_suspend_response) within 1 second.
+Called by the bottom half interrupt function. */
+static int subsystem_maligp_irq_handler_bottom_half(mali_core_renderunit* core)
+{
+ mali_core_job * job;
+ maligp_job * jobgp;
+ u32 irq_readout;
+ u32 core_status;
+ u32 vscl;
+ u32 plbcl;
+
+ job = core->current_job;
+
+ if (mali_benchmark) {
+ MALI_DEBUG_PRINT(3, ("MaliGP: Job: Benchmark\n") );
+ irq_readout = MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+ core_status = 0;
+ } else {
+ irq_readout = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+ core_status = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ }
+
+ /* Spurious interrupt with no job attached: report and bail out. */
+ if (NULL == job)
+ {
+ MALI_DEBUG_ASSERT(CORE_IDLE==core->state);
+ if ( 0 != irq_readout )
+ {
+ MALI_PRINT_ERROR(("Interrupt from a core not running a job. IRQ: 0x%04x Status: 0x%04x", irq_readout, core_status));
+ }
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ MALI_DEBUG_ASSERT(CORE_IDLE!=core->state);
+
+ jobgp = GET_JOBGP2_PTR(job);
+
+ jobgp->heap_current_addr = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR);
+
+ /* Current command-list positions, used below for progress checking. */
+ vscl = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR);
+ plbcl = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Job: 0x%08x IRQ RECEIVED Rawstat: 0x%x Status: 0x%x\n",
+ (u32)jobgp->user_input.user_job_ptr, irq_readout , core_status )) ;
+
+ jobgp->irq_status |= irq_readout;
+ jobgp->status_reg_on_stop = core_status;
+
+ /* Case 1: job was suspended waiting for more PLBU heap memory and we got
+ here anyway - userspace aborted; read back counters and end with OOM. */
+ if ( 0 != jobgp->is_stalled_waiting_for_more_memory )
+ {
+ /* Readback the performance counters */
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ jobgp->perf_counter0 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ jobgp->perf_counter1 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+
+#if MALI_TRACEPOINTS_ENABLED
+ //TODO magic numbers should come from mali_linux_trace.h instead
+ _mali_profiling_add_counter(7, jobgp->perf_counter0);
+ _mali_profiling_add_counter(8, jobgp->perf_counter1);
+#endif
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
+ /* L2 counters are shared; only the delta since job start is valid,
+ and only if no one re-programmed the source in the meantime. */
+ if (jobgp->perf_counter_l2_src0 == src0)
+ {
+ jobgp->perf_counter_l2_val0 = val0 - jobgp->perf_counter_l2_val0;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val0 = 0;
+ }
+
+ if (jobgp->perf_counter_l2_src1 == src1)
+ {
+ jobgp->perf_counter_l2_val1 = val1 - jobgp->perf_counter_l2_val1;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ MALI_DEBUG_PRINT(2, ("Mali GP: Job aborted - userspace would not provide more heap memory.\n"));
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_inc(&job->session->jobs_ended);
+#endif
+ return JOB_STATUS_END_OOM; /* Core is ready for more jobs.*/
+ }
+ /* finished ? */
+ else if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
+ {
+#ifdef DEBUG
+ MALI_DEBUG_PRINT(4, ("Mali GP: Registers On job end:\n"));
+ maligp_print_regs(4, core);
+#endif
+ /* NOTE(review): the (void*) casts below are printed with %x - works on
+ 32-bit ARM but is a format mismatch; confirm before reuse elsewhere. */
+ MALI_DEBUG_PRINT_IF(5, irq_readout & 0x04, ("OOM when done, ignoring (reg.current = 0x%x, reg.end = 0x%x)\n",
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+
+
+ if (0 != jobgp->user_input.perf_counter_flag )
+ {
+ /* Readback the performance counters */
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+ jobgp->perf_counter0 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ jobgp->perf_counter1 = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+
+#if MALI_TRACEPOINTS_ENABLED
+ //TODO magic numbers should come from mali_linux_trace.h instead
+ _mali_profiling_add_counter(7, jobgp->perf_counter0);
+ _mali_profiling_add_counter(8, jobgp->perf_counter1);
+#endif
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (jobgp->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
+ /* Same shared-L2-counter delta logic as in the OOM-abort path above. */
+ if (jobgp->perf_counter_l2_src0 == src0)
+ {
+ jobgp->perf_counter_l2_val0 = val0 - jobgp->perf_counter_l2_val0;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val0 = 0;
+ }
+
+ if (jobgp->perf_counter_l2_src1 == src1)
+ {
+ jobgp->perf_counter_l2_val1 = val1 - jobgp->perf_counter_l2_val1;
+ }
+ else
+ {
+ jobgp->perf_counter_l2_val1 = 0;
+ }
+ }
+#endif
+ }
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number),
+ jobgp->perf_counter0, jobgp->perf_counter1,
+ jobgp->user_input.perf_counter_src0 | (jobgp->user_input.perf_counter_src1 << 8)
+#if defined(USING_MALI400_L2_CACHE)
+ | (jobgp->user_input.perf_counter_l2_src0 << 16) | (jobgp->user_input.perf_counter_l2_src1 << 24),
+ jobgp->perf_counter_l2_val0,
+ jobgp->perf_counter_l2_val1
+#else
+ ,0, 0
+#endif
+ );
+#endif
+
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_inc(&job->session->jobs_ended);
+#endif
+ pr_debug("SPI_GPU_GP Idle\n");
+ return JOB_STATUS_END_SUCCESS; /* core idle */
+ }
+ /* sw watchdog timeout handling or time to do hang checking ? */
+ else if (
+ (CORE_WATCHDOG_TIMEOUT == core->state) ||
+ (
+ (CORE_HANG_CHECK_TIMEOUT == core->state) &&
+ (
+ /* "No progress" = vertex count unchanged AND each active unit's
+ command-list pointer unchanged since the last hang check. */
+ (jobgp->have_extended_progress_checking ? (mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE) == jobgp->vertices) : 1/*TRUE*/) &&
+ ((core_status & MALIGP2_REG_VAL_STATUS_VS_ACTIVE) ? (vscl == jobgp->last_vscl) : 1/*TRUE*/) &&
+ ((core_status & MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE) ? (plbcl == jobgp->last_plbcl) : 1/*TRUE*/)
+ )
+ )
+ )
+ {
+ /* no progress detected, killed by the watchdog */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ MALI_PRINT( ("Mali GP: SW-Timeout.\n"));
+ if (core_status & MALIGP2_REG_VAL_STATUS_VS_ACTIVE) MALI_DEBUG_PRINT(1, ("vscl current = 0x%x last = 0x%x\n", (void*)vscl, (void*)jobgp->last_vscl));
+ if (core_status & MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE) MALI_DEBUG_PRINT(1, ("plbcl current = 0x%x last = 0x%x\n", (void*)plbcl, (void*)jobgp->last_plbcl));
+ if (jobgp->have_extended_progress_checking) MALI_DEBUG_PRINT(1, ("vertices processed = %d, last = %d\n", mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE),
+ jobgp->vertices));
+#ifdef DEBUG
+ maligp_print_regs(2, core);
+#endif
+
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_inc(&job->session->jobs_ended);
+#endif
+ /* Panics the whole kernel on a GP hang; the return below is unreachable
+ unless MALI_PANIC is compiled to a no-op. */
+ MALI_PANIC("%s Watchdog timeout\n", MALI_GP_SUBSYSTEM_NAME);
+ return JOB_STATUS_END_HANG;
+ }
+ /* if hang timeout checking was enabled and we detected progress, will be fall down to this check */
+ /* check for PLBU OOM before the hang check to avoid the race condition of the hw wd trigging while waiting for us to handle the OOM interrupt */
+ else if ( 0 != (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM))
+ {
+ mali_core_session *session;
+ _mali_osk_notification_t *notific;
+ _mali_uk_gp_job_suspended_s * suspended_job;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ session = job->session;
+
+ MALI_DEBUG_PRINT(4, ("OOM, new heap requested by GP\n"));
+ MALI_DEBUG_PRINT(4, ("Status when OOM: current = 0x%x, end = 0x%x\n",
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR),
+ (void*)mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR))
+ );
+
+ notific = _mali_osk_notification_create(
+
+ _MALI_NOTIFICATION_GP_STALLED,
+ sizeof( _mali_uk_gp_job_suspended_s )
+ );
+ if ( NULL == notific)
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not get notification object\n")) ;
+ return JOB_STATUS_END_OOM; /* Core is ready for more jobs.*/
+ }
+
+ core->state = CORE_WORKING;
+ jobgp->is_stalled_waiting_for_more_memory = 1;
+ suspended_job = (_mali_uk_gp_job_suspended_s *)notific->result_buffer; /* this is ok - result_buffer was malloc'd */
+
+ /* The cookie lets the suspend-response handler find this core again;
+ last_gp_core_cookie guards against forged/stale cookies. */
+ suspended_job->user_job_ptr = jobgp->user_input.user_job_ptr;
+ suspended_job->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY ;
+ suspended_job->cookie = (u32) core;
+ last_gp_core_cookie = core;
+
+ _mali_osk_notification_queue_send( session->notification_queue, notific);
+
+#ifdef DEBUG
+ maligp_print_regs(4, core);
+#endif
+
+ /* stop all active timers */
+ _mali_osk_timer_del( core->timer);
+ _mali_osk_timer_del( core->timer_hang_detection);
+ MALI_DEBUG_PRINT(4, ("Mali GP: PLBU heap empty, sending memory request to userspace\n"));
+ /* save to watchdog_jiffies what was remaining WD timeout value when OOM was triggered */
+ job->watchdog_jiffies = (long)job->watchdog_jiffies - (long)_mali_osk_time_tickcount();
+ /* reuse core->timer as the userspace response timeout handler */
+ _mali_osk_timer_add( core->timer, _mali_osk_time_mstoticks(1000) ); /* wait max 1 sec for userspace to respond */
+ return JOB_STATUS_CONTINUE_RUN; /* The core is NOT available for new jobs. */
+ }
+ /* hw watchdog is reporting a new hang or an existing progress-during-hang check passed? */
+ else if ((CORE_HANG_CHECK_TIMEOUT == core->state) || (irq_readout & jobgp->active_mask & MALIGP2_REG_VAL_IRQ_HANG))
+ {
+ /* check interval in ms */
+ u32 timeout = mali_core_hang_check_timeout_get();
+ MALI_DEBUG_PRINT(3, ("Mali GP: HW/SW Watchdog triggered, checking for progress in %d ms\n", timeout));
+ core->state = CORE_WORKING;
+
+ /* save state for the progress checking */
+ jobgp->last_vscl = vscl;
+ jobgp->last_plbcl = plbcl;
+ if (jobgp->have_extended_progress_checking)
+ {
+ jobgp->vertices = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ }
+ /* hw watchdog triggered, set up a progress checker every HANGCHECK ms */
+ _mali_osk_timer_add( core->timer_hang_detection, _mali_osk_time_mstoticks(timeout));
+ jobgp->active_mask &= ~MALIGP2_REG_VAL_IRQ_HANG; /* ignore the hw watchdog from now on */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* not finished */ }
+ /* no errors, but still working */
+ else if ( ( 0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ERROR)) &&
+ ( 0 != (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE ))
+ )
+ {
+ /* Ack the interrupt and re-arm the core's IRQ mask (the upper half
+ masked everything), then let the job keep running. */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ return JOB_STATUS_CONTINUE_RUN;
+ }
+ /* Else there must be some error */
+ else
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status? */
+#endif
+
+ MALI_PRINT( ("Mali GP: Core crashed? *IRQ: 0x%x Status: 0x%x\n", irq_readout, core_status ));
+ #ifdef DEBUG
+ MALI_DEBUG_PRINT(1, ("Mali GP: Registers Before reset:\n"));
+ maligp_print_regs(1, core);
+ #endif
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_inc(&job->session->jobs_ended);
+#endif
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+}
+
+
+/* ioctl entry point: builds a maligp_job from the userspace
+_mali_uk_gp_start_job_s in 'argument' and queues it on the session.
+'argument' is both input and output: its 'status' field reports whether the
+job was started, requeued, or replaced a lower-priority job (whose user
+pointer is then handed back via 'returned_user_job_ptr').
+Ownership: jobgp is freed here on every error/requeue path (see
+function_exit); otherwise ownership passes to the session's job queue. */
+static _mali_osk_errcode_t subsystem_maligp_get_new_job_from_user(struct mali_core_session * session, void * argument)
+{
+ maligp_job *jobgp;
+ mali_core_job *job = NULL;
+ mali_core_job *previous_replaced_job;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_uk_gp_start_job_s * user_ptr_job_input;
+
+ user_ptr_job_input = (_mali_uk_gp_start_job_s *)argument;
+
+ MALI_CHECK_NON_NULL(jobgp = (maligp_job *) _mali_osk_calloc(1, sizeof(maligp_job)), _MALI_OSK_ERR_FAULT);
+
+ /* Copy the job data from the U/K interface */
+ if ( NULL == _mali_osk_memcpy(&jobgp->user_input, user_ptr_job_input, sizeof(_mali_uk_gp_start_job_s) ) )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not copy data from U/K interface.\n")) ;
+ err = _MALI_OSK_ERR_FAULT;
+ goto function_exit;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: subsystem_maligp_get_new_job_from_user 0x%x\n", (void*)jobgp->user_input.user_job_ptr));
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Job Regs: 0x%08X 0x%08X, 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ jobgp->user_input.frame_registers[0],
+ jobgp->user_input.frame_registers[1],
+ jobgp->user_input.frame_registers[2],
+ jobgp->user_input.frame_registers[3],
+ jobgp->user_input.frame_registers[4],
+ jobgp->user_input.frame_registers[5]) );
+
+
+ job = GET_JOB_EMBEDDED_PTR(jobgp);
+
+ job->session = session;
+ job->flags = MALI_UK_START_JOB_FLAG_DEFAULT; /* Current flags only make sense for PP jobs */
+ job_priority_set(job, jobgp->user_input.priority);
+ job_watchdog_set(job, jobgp->user_input.watchdog_msecs );
+ /* frame_registers[4] is the initial PLBU heap start address. */
+ jobgp->heap_current_addr = jobgp->user_input.frame_registers[4];
+
+ job->abort_id = jobgp->user_input.abort_id;
+
+ jobgp->is_stalled_waiting_for_more_memory = 0;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ jobgp->pid = _mali_osk_get_pid();
+ jobgp->tid = _mali_osk_get_tid();
+#endif
+
+ if (mali_job_queue_full(session))
+ {
+ /* Cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ goto function_exit;
+ }
+
+ /* We now know that we have a job, and a slot to put it in */
+
+ jobgp->active_mask = MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+ /* Allocating User Return Data */
+ jobgp->notification_obj = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_FINISHED,
+ sizeof(_mali_uk_gp_job_finished_s) );
+
+ if ( NULL == jobgp->notification_obj )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Could not get notification_obj.\n")) ;
+ err = _MALI_OSK_ERR_NOMEM;
+ goto function_exit;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD( &(job->list) ) ;
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Job: 0x%08x INPUT from user.\n", (u32)jobgp->user_input.user_job_ptr)) ;
+
+ /* This should not happen since we have the checking of priority above */
+ err = mali_core_session_add_job(session, job, &previous_replaced_job);
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_PRINT_ERROR( ("Mali GP: Internal error\n")) ;
+ /* Cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ _mali_osk_notification_delete( jobgp->notification_obj );
+ goto function_exit;
+ }
+
+ /* If MALI_TRUE: This session had a job with lower priority which were removed.
+ This replaced job is given back to userspace. */
+ if ( NULL != previous_replaced_job )
+ {
+ maligp_job *previous_replaced_jobgp;
+
+ previous_replaced_jobgp = GET_JOBGP2_PTR(previous_replaced_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP: Replacing job: 0x%08x\n", (u32)previous_replaced_jobgp->user_input.user_job_ptr)) ;
+
+ /* Copy to the input data (which also is output data) the
+ pointer to the job that were replaced, so that the userspace
+ driver can put this job in the front of its job-queue */
+ user_ptr_job_input->returned_user_job_ptr = previous_replaced_jobgp->user_input.user_job_ptr;
+
+ /** @note failure to 'copy to user' at this point must not free jobgp,
+ * and so no transaction rollback required in the U/K interface */
+
+ /* This does not cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED;
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_get_new_job_from_user: Job added, prev returned\n")) ;
+ }
+ else
+ {
+ /* This does not cause jobgp to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED;
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_get_new_job_from_user: Job added\n")) ;
+ }
+
+function_exit:
+ /* Single cleanup point: free jobgp on any requeue or error outcome. */
+ if ( _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE == user_ptr_job_input->status
+ || _MALI_OSK_ERR_OK != err )
+ {
+ _mali_osk_free(jobgp);
+ }
+#if MALI_STATE_TRACKING
+ if (_MALI_UK_START_JOB_STARTED==user_ptr_job_input->status)
+ {
+ if(job)
+ {
+ job->job_nr=_mali_osk_atomic_inc_return(&session->jobs_received);
+ }
+ }
+#endif
+
+ MALI_ERROR(err);
+}
+
+
+/* ioctl entry point: handles userspace's response to a GP_STALLED (PLBU
+ * out-of-memory) notification. 'argument' is a _mali_uk_gp_suspend_response_s
+ * whose cookie must match the single outstanding stall (last_gp_core_cookie,
+ * set by the bottom-half OOM path); a mismatch returns _MALI_OSK_ERR_FAULT.
+ * On RESUME_WITH_NEW_HEAP, reprograms the PLBU heap bounds from
+ * arguments[0..1], restores the watchdog timer and IRQ mask, and resumes
+ * the core. On ABORT, kicks the bottom half so the job is torn down. */
+static _mali_osk_errcode_t subsystem_maligp_suspend_response(struct mali_core_session * session, void * argument)
+{
+ mali_core_renderunit *core;
+ maligp_job *jobgp;
+ mali_core_job *job;
+
+ _mali_uk_gp_suspend_response_s * suspend_response;
+
+ MALI_DEBUG_PRINT(5, ("subsystem_maligp_suspend_response\n"));
+
+ suspend_response = (_mali_uk_gp_suspend_response_s *)argument;
+
+ /* We read job data from User */
+ /* On a single mali_gp system we can only have one Stalled GP,
+ and therefore one stalled request with a cookie. This checks
+ that we get the correct cookie */
+ if ( last_gp_core_cookie != (mali_core_renderunit *)suspend_response->cookie )
+ {
+ MALI_DEBUG_PRINT(2, ("Mali GP: Got an illegal cookie from Userspace.\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ core = (mali_core_renderunit *)suspend_response->cookie;
+ last_gp_core_cookie = NULL;
+ job = core->current_job;
+ jobgp = GET_JOBGP2_PTR(job);
+
+ switch( suspend_response->code )
+ {
+ case _MALIGP_JOB_RESUME_WITH_NEW_HEAP :
+ MALI_DEBUG_PRINT(5, ("MALIGP_JOB_RESUME_WITH_NEW_HEAP jiffies: %li\n", _mali_osk_time_tickcount()));
+ MALI_DEBUG_PRINT(4, ("New Heap addr 0x%08x - 0x%08x\n", suspend_response->arguments[0], suspend_response->arguments[1]));
+
+ jobgp->is_stalled_waiting_for_more_memory = 0;
+ job->watchdog_jiffies += _mali_osk_time_tickcount(); /* convert to absolute time again */
+ _mali_osk_timer_mod( core->timer, job->watchdog_jiffies); /* update the timer */
+
+
+ /* Ack the OOM/hang IRQs, restore the mask, program the new heap
+ window and tell the core to pick up the new PLBU allocation. */
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, jobgp->active_mask);
+ mali_core_renderunit_register_write_relaxed(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, suspend_response->arguments[0]);
+ mali_core_renderunit_register_write_relaxed(core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, suspend_response->arguments[1]);
+ mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+ _mali_osk_write_mem_barrier();
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(core->core_number), 0, 0, 0, 0, 0);
+#endif
+
+ MALI_DEBUG_PRINT(4, ("GP resumed with new heap\n"));
+
+ break;
+
+ case _MALIGP_JOB_ABORT:
+ MALI_DEBUG_PRINT(3, ("MALIGP_JOB_ABORT on heap extend request\n"));
+ /* Schedule the bottom half; it sees is_stalled_waiting_for_more_memory
+ still set and ends the job with JOB_STATUS_END_OOM. */
+ _mali_osk_irq_schedulework( core->irq );
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Wrong Suspend response from userspace\n"));
+ }
+ MALI_SUCCESS;
+}
+
+/**
+ * Return a finished GP job's results to userspace.
+ *
+ * Called from the ioctl path. Fills the notification object's preallocated
+ * result buffer with the job status, counters and debug info, posts it on
+ * the session's notification queue (which takes ownership of the object),
+ * and frees the kernel-side job object.
+ *
+ * @param job        the finished job (embedded inside a maligp_job)
+ * @param end_status why/how the job ended
+ */
+static void subsystem_maligp_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status )
+{
+	maligp_job *jobgp;
+	_mali_uk_gp_job_finished_s * job_out;
+	_mali_uk_gp_start_job_s* job_input;
+	mali_core_session *session;
+
+
+	jobgp = _MALI_OSK_CONTAINER_OF(job, maligp_job, embedded_core_job);
+	job_out = (_mali_uk_gp_job_finished_s *)jobgp->notification_obj->result_buffer; /* OK - this should've been malloc'd */
+	job_input= &(jobgp->user_input);
+	session = job->session;
+
+	MALI_DEBUG_PRINT(5, ("Mali GP: Job: 0x%08x OUTPUT to user. Runtime: %d us, irq readout %x\n",
+			(u32)jobgp->user_input.user_job_ptr,
+			job->render_time_usecs,
+			jobgp->irq_status)) ;
+
+	_mali_osk_memset(job_out, 0 , sizeof(_mali_uk_gp_job_finished_s));
+
+	job_out->user_job_ptr = job_input->user_job_ptr;
+
+	/* Pass known status codes through unchanged; anything unexpected is
+	 * reported to userspace as an unknown error. */
+	switch( end_status )
+	{
+		case JOB_STATUS_CONTINUE_RUN:
+		case JOB_STATUS_END_SUCCESS:
+		case JOB_STATUS_END_OOM:
+		case JOB_STATUS_END_ABORT:
+		case JOB_STATUS_END_TIMEOUT_SW:
+		case JOB_STATUS_END_HANG:
+		case JOB_STATUS_END_SEG_FAULT:
+		case JOB_STATUS_END_ILLEGAL_JOB:
+		case JOB_STATUS_END_UNKNOWN_ERR:
+		case JOB_STATUS_END_SHUTDOWN:
+		case JOB_STATUS_END_SYSTEM_UNUSABLE:
+			job_out->status = (mali_subsystem_job_end_code) end_status;
+			break;
+		default:
+			job_out->status = JOB_STATUS_END_UNKNOWN_ERR ;
+	}
+
+	/* Copy out the hardware readouts captured when the job stopped */
+	job_out->irq_status = jobgp->irq_status;
+	job_out->status_reg_on_stop = jobgp->status_reg_on_stop;
+	job_out->vscl_stop_addr = 0;
+	job_out->plbcl_stop_addr = 0;
+	job_out->heap_current_addr = jobgp->heap_current_addr;
+	job_out->perf_counter0 = jobgp->perf_counter0;
+	job_out->perf_counter1 = jobgp->perf_counter1;
+	job_out->perf_counter_src0 = jobgp->user_input.perf_counter_src0 ;
+	job_out->perf_counter_src1 = jobgp->user_input.perf_counter_src1 ;
+	job_out->render_time = job->render_time_usecs;
+#if defined(USING_MALI400_L2_CACHE)
+	job_out->perf_counter_l2_src0 = jobgp->perf_counter_l2_src0;
+	job_out->perf_counter_l2_src1 = jobgp->perf_counter_l2_src1;
+	job_out->perf_counter_l2_val0 = jobgp->perf_counter_l2_val0;
+	job_out->perf_counter_l2_val1 = jobgp->perf_counter_l2_val1;
+#endif
+
+#if MALI_STATE_TRACKING
+	_mali_osk_atomic_inc(&session->jobs_returned);
+#endif
+	/* Queue takes ownership of the notification object from here on */
+	_mali_osk_notification_queue_send( session->notification_queue, jobgp->notification_obj);
+	jobgp->notification_obj = NULL;
+
+	_mali_osk_free(jobgp);
+
+	last_gp_core_cookie = NULL;
+}
+
+/* Release the memory backing a GP renderunit object. */
+static void subsystem_maligp_renderunit_delete(mali_core_renderunit * core)
+{
+	MALI_DEBUG_PRINT(5, ("Mali GP: maligp_renderunit_delete\n"));
+	_mali_osk_free(core);
+}
+
+/* Reset the GP core according to the requested reset style:
+ * RUNABLE leaves the core ready to render, DISABLE additionally masks
+ * all IRQs, HARD performs the heavy reset and re-programs the
+ * management registers. */
+static void subsystem_maligp_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style)
+{
+	MALI_DEBUG_PRINT(5, ("Mali GP: renderunit_reset_core\n"));
+
+	if (MALI_CORE_RESET_STYLE_RUNABLE == style)
+	{
+		maligp_reset(core);
+	}
+	else if (MALI_CORE_RESET_STYLE_DISABLE == style)
+	{
+		maligp_raw_reset(core); /* do the raw reset */
+		mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* then disable the IRQs */
+	}
+	else if (MALI_CORE_RESET_STYLE_HARD == style)
+	{
+		maligp_reset_hard(core);
+		maligp_initialize_registers_mgmt(core);
+	}
+	else
+	{
+		MALI_DEBUG_PRINT(1, ("Unknown reset type %d\n", style));
+	}
+}
+
+/* Probe helper: raise a FORCE_HANG interrupt via RAWSTAT so the probe
+ * sequence can verify that the core's IRQ line is wired up. */
+static void subsystem_maligp_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core)
+{
+	mali_core_renderunit_register_write(core , MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+	mali_core_renderunit_register_write(core , MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG );
+	_mali_osk_mem_barrier();
+}
+
+/* Probe helper: acknowledge the FORCE_HANG interrupt raised by
+ * probe_core_irq_trigger. Returns _MALI_OSK_ERR_OK when the interrupt
+ * was seen and cleared, _MALI_OSK_ERR_FAULT otherwise. */
+static _mali_osk_errcode_t subsystem_maligp_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core)
+{
+	u32 int_stat;
+
+	int_stat = mali_core_renderunit_register_read(core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+
+	if ( 0 == (int_stat & MALIGP2_REG_VAL_IRQ_FORCE_HANG) )
+	{
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	mali_core_renderunit_register_write(core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+	_mali_osk_mem_barrier();
+	MALI_SUCCESS;
+}
+
+/* U/K entry point: queue/start a GP job for the calling session. */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args )
+{
+	mali_core_session * gp_session;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	gp_session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+	MALI_CHECK_NON_NULL(gp_session, _MALI_OSK_ERR_FAULT);
+
+	return mali_core_subsystem_ioctl_start_job(gp_session, args);
+}
+
+/* U/K entry point: report how many GP cores are registered. */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args )
+{
+	mali_core_session * gp_session;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	gp_session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+	MALI_CHECK_NON_NULL(gp_session, _MALI_OSK_ERR_FAULT);
+
+	return mali_core_subsystem_ioctl_number_of_cores_get(gp_session, &args->number_of_cores);
+}
+
+/* U/K entry point: report the GP core hardware version. */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args )
+{
+	mali_core_session * gp_session;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	gp_session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+	MALI_CHECK_NON_NULL(gp_session, _MALI_OSK_ERR_FAULT);
+
+	return mali_core_subsystem_ioctl_core_version_get(gp_session, &args->version);
+}
+
+/* U/K entry point: deliver userspace's response to a suspended GP job. */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args )
+{
+	mali_core_session * gp_session;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+	MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+	gp_session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+	MALI_CHECK_NON_NULL(gp_session, _MALI_OSK_ERR_FAULT);
+
+	return mali_core_subsystem_ioctl_suspend_response(gp_session, args);
+}
+
+/* U/K entry point: abort the GP job identified by args->abort_id.
+ * Returns silently if the context or session cannot be resolved. */
+void _mali_ukk_gp_abort_job( _mali_uk_gp_abort_job_s * args)
+{
+	mali_core_session * gp_session;
+
+	MALI_DEBUG_ASSERT_POINTER(args);
+
+	if (NULL == args->ctx)
+	{
+		return;
+	}
+
+	gp_session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_gp_id);
+	if (NULL == gp_session)
+	{
+		return;
+	}
+
+	mali_core_subsystem_ioctl_abort_job(gp_session, args->abort_id);
+}
+
+#if USING_MALI_PMM
+
+/* PMM hook: request power-up of the GP core (core index 0).
+ * queue_only selects deferred handling instead of an immediate request. */
+_mali_osk_errcode_t maligp_signal_power_up( mali_bool queue_only )
+{
+	MALI_DEBUG_PRINT(4, ("Mali GP: signal power up core - queue_only: %d\n", queue_only ));
+
+	return mali_core_subsystem_signal_power_up( &subsystem_maligp, 0, queue_only );
+}
+
+/* PMM hook: request power-down of the GP core (core index 0).
+ * immediate_only restricts the request to an immediate power-down. */
+_mali_osk_errcode_t maligp_signal_power_down( mali_bool immediate_only )
+{
+	MALI_DEBUG_PRINT(4, ("Mali GP: signal power down core - immediate_only: %d\n", immediate_only ));
+
+	return mali_core_subsystem_signal_power_down( &subsystem_maligp, 0, immediate_only );
+}
+
+#endif
+
+#if MALI_STATE_TRACKING
+/* Dump GP subsystem state into buf (at most size bytes); the return value
+ * is whatever mali_core_renderunit_dump_state reports — presumably the
+ * number of bytes written (TODO confirm against its definition). */
+u32 maligp_subsystem_dump_state(char *buf, u32 size)
+{
+	return mali_core_renderunit_dump_state(&subsystem_maligp, buf, size);
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_MALI200.c b/drivers/media/video/samsung/mali/common/mali_kernel_MALI200.c
new file mode 100644
index 0000000..0f5ebd0
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_MALI200.c
@@ -0,0 +1,1304 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+//added for SPI
+#include <linux/kernel.h>
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_core.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_rendercore.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#ifdef USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h" /* Needed for mali_kernel_mmu_force_bus_reset() */
+#endif
+
+#include "mali_osk_list.h"
+
+#if defined(USING_MALI200)
+#define MALI_PP_SUBSYSTEM_NAME "Mali200"
+#define MALI_PP_CORE_TYPE _MALI_200
+#elif defined(USING_MALI400)
+#define MALI_PP_SUBSYSTEM_NAME "Mali-400 PP"
+#define MALI_PP_CORE_TYPE _MALI_400_PP
+#else
+#error "No supported mali core defined"
+#endif
+
+#define GET_JOB_EMBEDDED_PTR(job) (&((job)->embedded_core_job))
+#define GET_JOB200_PTR(job_extern) _MALI_OSK_CONTAINER_OF(job_extern, mali200_job, embedded_core_job)
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_mali200_id = -1;
+
+/* Describing a mali200 job settings.
+ * Kernel-side wrapper around one PP job: the generic core job, the
+ * arguments copied from userspace, and the hardware readouts captured
+ * when the job stops. */
+typedef struct mali200_job
+{
+	/* The general job struct common for all mali cores */
+	mali_core_job embedded_core_job;
+	/* Job arguments as received from userspace via the U/K interface */
+	_mali_uk_pp_start_job_s user_input;
+
+	u32 irq_status;          /* IRQ readout when the job ended */
+	u32 perf_counter0;       /* HW performance counter 0 value */
+	u32 perf_counter1;       /* HW performance counter 1 value */
+	u32 last_tile_list_addr; /* Neccessary to continue a stopped job */
+
+	u32 active_mask;         /* IRQ mask active while this job runs */
+
+	/* The data we will return back to the user */
+	_mali_osk_notification_t *notification_obj;
+
+#if defined(USING_MALI400_L2_CACHE)
+	/* L2 cache counter sources and values sampled around the job */
+	u32 perf_counter_l2_src0;
+	u32 perf_counter_l2_src1;
+	u32 perf_counter_l2_val0;
+	u32 perf_counter_l2_val1;
+	u32 perf_counter_l2_val0_raw;
+	u32 perf_counter_l2_val1_raw;
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+	u32 pid;	/* submitting process id, for profiling events */
+	u32 tid;	/* submitting thread id, for profiling events */
+#endif
+} mali200_job;
+
+
+/*Functions Exposed to the General External System through
+ funciont pointers.*/
+
+static _mali_osk_errcode_t mali200_subsystem_startup(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static _mali_osk_errcode_t mali200_subsystem_mmu_connect(mali_kernel_subsystem_identifier id);
+#endif
+static void mali200_subsystem_terminate(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t mali200_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+static void mali200_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+static _mali_osk_errcode_t mali200_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t mali200_renderunit_create(_mali_osk_resource_t * resource);
+#if USING_MMU
+static void mali200_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+#if MALI_STATE_TRACKING
+u32 mali200_subsystem_dump_state(char *buf, u32 size);
+#endif
+
+/* Internal support functions */
+static _mali_osk_errcode_t mali200_core_version_legal( mali_core_renderunit *core );
+static void mali200_reset(mali_core_renderunit *core);
+static void mali200_reset_hard(struct mali_core_renderunit * core);
+static void mali200_raw_reset(mali_core_renderunit * core);
+static void mali200_initialize_registers_mgmt(mali_core_renderunit *core );
+
+/* Functions exposed to mali_core system through functionpointers
+ in the subsystem struct. */
+static _mali_osk_errcode_t subsystem_mali200_start_job(mali_core_job * job, mali_core_renderunit * core);
+static _mali_osk_errcode_t subsystem_mali200_get_new_job_from_user(struct mali_core_session * session, void * argument);
+static void subsystem_mali200_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status);
+static void subsystem_mali200_renderunit_delete(mali_core_renderunit * core);
+static void subsystem_mali200_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style);
+static void subsystem_mali200_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core);
+static _mali_osk_errcode_t subsystem_mali200_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core);
+
+static void subsystem_mali200_renderunit_stop_bus(struct mali_core_renderunit* core);
+static u32 subsystem_mali200_irq_handler_upper_half(struct mali_core_renderunit * core);
+static int subsystem_mali200_irq_handler_bottom_half(struct mali_core_renderunit* core);
+
+/* This will be one of the subsystems in the array of subsystems:
+   static struct mali_kernel_subsystem * subsystems[];
+   found in file: mali_kernel_core.c
+
+   Entries correspond, in order, to the function-pointer fields of
+   struct mali_kernel_subsystem. Note the shutdown slot is deliberately
+   NULL (mali200_subsystem_terminate is left commented out).
+*/
+
+struct mali_kernel_subsystem mali_subsystem_mali200=
+{
+	mali200_subsystem_startup,                  /* startup */
+	NULL, /*mali200_subsystem_terminate,*/      /* shutdown */
+#if USING_MMU
+	mali200_subsystem_mmu_connect,              /* load_complete */
+#else
+	NULL,
+#endif
+	mali200_subsystem_core_system_info_fill,    /* system_info_fill */
+	mali200_subsystem_session_begin,            /* session_begin */
+	mali200_subsystem_session_end,              /* session_end */
+#if USING_MMU
+	mali200_subsystem_broadcast_notification,   /* broadcast_notification */
+#else
+	NULL,
+#endif
+#if MALI_STATE_TRACKING
+	mali200_subsystem_dump_state,               /* dump_state */
+#endif
+} ;
+
+static mali_core_subsystem subsystem_mali200 ;
+
+/**
+ * Initialize the PP subsystem.
+ *
+ * Zeroes the subsystem state, wires up the core-framework callbacks,
+ * runs the generic subsystem init and registers the resource handler
+ * that creates PP renderunits when cores are discovered.
+ *
+ * @param id subsystem identifier assigned by the core (position in subsystems[])
+ * @return _MALI_OSK_ERR_OK on success, otherwise the first failing step's error
+ */
+static _mali_osk_errcode_t mali200_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+	mali_core_subsystem * subsystem;
+
+	MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_startup\n") ) ;
+
+	mali_subsystem_mali200_id = id;
+
+	/* All values get 0 as default */
+	_mali_osk_memset(&subsystem_mali200, 0, sizeof(subsystem_mali200));
+
+	/* Hook this subsystem's implementations into the generic core framework */
+	subsystem = &subsystem_mali200;
+	subsystem->start_job = &subsystem_mali200_start_job;
+	subsystem->irq_handler_upper_half = &subsystem_mali200_irq_handler_upper_half;
+	subsystem->irq_handler_bottom_half = &subsystem_mali200_irq_handler_bottom_half;
+	subsystem->get_new_job_from_user = &subsystem_mali200_get_new_job_from_user;
+	subsystem->return_job_to_user = &subsystem_mali200_return_job_to_user;
+	subsystem->renderunit_delete = &subsystem_mali200_renderunit_delete;
+	subsystem->reset_core = &subsystem_mali200_renderunit_reset_core;
+	subsystem->stop_bus = &subsystem_mali200_renderunit_stop_bus;
+	subsystem->probe_core_irq_trigger = &subsystem_mali200_renderunit_probe_core_irq_trigger;
+	subsystem->probe_core_irq_acknowledge = &subsystem_mali200_renderunit_probe_core_irq_finished;
+
+	/* Setting variables in the general core part of the subsystem.*/
+	subsystem->name = MALI_PP_SUBSYSTEM_NAME;
+	subsystem->core_type = MALI_PP_CORE_TYPE;
+	subsystem->id = id;
+
+	/* Initiates the rest of the general core part of the subsystem */
+	MALI_CHECK_NO_ERROR(mali_core_subsystem_init( subsystem ));
+
+	/* This will register the function for adding MALI200 cores to the subsystem */
+#if defined(USING_MALI200)
+	MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI200, mali200_renderunit_create));
+#endif
+#if defined(USING_MALI400)
+	MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MALI400PP, mali200_renderunit_create));
+#endif
+
+	MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_startup\n") ) ;
+
+	MALI_SUCCESS;
+}
+
+#if USING_MMU
+/* load_complete hook: attach the PP subsystem's cores to their MMUs once
+ * all subsystems have loaded. */
+static _mali_osk_errcode_t mali200_subsystem_mmu_connect(mali_kernel_subsystem_identifier id)
+{
+	mali_core_subsystem_attach_mmu(&subsystem_mali200);
+	MALI_SUCCESS; /* OK */
+}
+#endif
+
+/* Tear down the PP subsystem via the generic cleanup path.
+ * NOTE(review): the shutdown slot in mali_subsystem_mali200 is set to NULL
+ * with this function commented out, so nothing in this file calls it. */
+static void mali200_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+	MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_terminate\n") ) ;
+	mali_core_subsystem_cleanup(&subsystem_mali200);
+}
+
+/* session_begin hook: allocate and initialize a per-session PP object,
+ * publish it through *slot and register it with the core framework.
+ * Returns _MALI_OSK_ERR_NOMEM if the allocation fails. */
+static _mali_osk_errcode_t mali200_subsystem_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+	mali_core_session * new_session;
+
+	MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_session_begin\n") ) ;
+
+	new_session = _mali_osk_malloc( sizeof(mali_core_session) );
+	MALI_CHECK_NON_NULL(new_session, _MALI_OSK_ERR_NOMEM);
+
+	_mali_osk_memset(new_session, 0, sizeof(*new_session) );
+
+	new_session->subsystem = &subsystem_mali200;
+	new_session->notification_queue = queue;
+#if USING_MMU
+	new_session->mmu_session = mali_session_data;
+#endif
+
+	*slot = (mali_kernel_subsystem_session_slot)new_session;
+
+	mali_core_session_begin(new_session);
+
+	MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_session_begin\n") ) ;
+
+	MALI_SUCCESS;
+}
+
+/* session_end hook: close and free the per-session PP object and clear
+ * the caller's slot. Tolerates a NULL/empty slot with an error print. */
+static void mali200_subsystem_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+	mali_core_session * session;
+
+	MALI_DEBUG_PRINT(3, ("Mali PP: mali200_subsystem_session_end\n") ) ;
+	if ( NULL==slot || NULL==*slot)
+	{
+		MALI_PRINT_ERROR(("Input slot==NULL"));
+		return;
+	}
+	session = (mali_core_session*) *slot;
+	mali_core_session_close(session);
+
+	_mali_osk_free(session);
+	*slot = NULL;	/* prevent reuse of the freed session pointer */
+
+	MALI_DEBUG_PRINT(6, ("Mali PP: mali200_subsystem_session_end\n") ) ;
+}
+
+/**
+ * We fill in info about all the cores we have
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, another _mali_osk_errcode_t on error
+ */
+static _mali_osk_errcode_t mali200_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+	return mali_core_subsystem_system_info_fill(&subsystem_mali200, info);
+}
+
+
+/**
+ * Resource handler: create and register one PP renderunit.
+ *
+ * Validates the platform resource, allocates the core object, maps its
+ * registers, checks the hardware version, resets the core and registers
+ * it with the subsystem. On failure the goto-based cleanup chain undoes
+ * exactly the steps completed so far.
+ *
+ * @param resource platform resource describing the core (base, irq, ...)
+ * @return _MALI_OSK_ERR_OK on success, otherwise the first failing step's error
+ */
+static _mali_osk_errcode_t mali200_renderunit_create(_mali_osk_resource_t * resource)
+{
+	mali_core_renderunit *core;
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_PRINT(3, ("Mali PP: mali200_renderunit_create\n") ) ;
+	/* Checking that the resource settings are correct */
+#if defined(USING_MALI200)
+	if(MALI200 != resource->type)
+	{
+		MALI_PRINT_ERROR(("Can not register this resource as a " MALI_PP_SUBSYSTEM_NAME " core."));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+#elif defined(USING_MALI400)
+	if(MALI400PP != resource->type)
+	{
+		MALI_PRINT_ERROR(("Can not register this resource as a " MALI_PP_SUBSYSTEM_NAME " core."));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+#endif
+	if ( 0 != resource->size )
+	{
+		MALI_PRINT_ERROR(("Memory size set to " MALI_PP_SUBSYSTEM_NAME " core should be zero."));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	if ( NULL == resource->description )
+	{
+		MALI_PRINT_ERROR(("A " MALI_PP_SUBSYSTEM_NAME " core needs a unique description field"));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* Create a new core object */
+	core = (mali_core_renderunit*) _mali_osk_malloc(sizeof(*core));
+	if ( NULL == core )
+	{
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* Variables set to be able to open and register the core */
+	core->subsystem = &subsystem_mali200 ;
+	core->registers_base_addr = resource->base ;
+	core->size = MALI200_REG_SIZEOF_REGISTER_BANK ;
+	core->irq_nr = resource->irq ;
+	core->description = resource->description;
+#if USING_MMU
+	core->mmu_id = resource->mmu_id;
+	core->mmu = NULL;
+#endif
+#if USING_MALI_PMM
+	/* Set up core's PMM id: the PMM id follows the registration order
+	 * of the (at most four) PP cores */
+	switch( subsystem_mali200.number_of_cores )
+	{
+	case 0:
+		core->pmm_id = MALI_PMM_CORE_PP0;
+		break;
+	case 1:
+		core->pmm_id = MALI_PMM_CORE_PP1;
+		break;
+	case 2:
+		core->pmm_id = MALI_PMM_CORE_PP2;
+		break;
+	case 3:
+		core->pmm_id = MALI_PMM_CORE_PP3;
+		break;
+	default:
+		MALI_DEBUG_PRINT(1, ("Unknown supported core for PMM\n"));
+		err = _MALI_OSK_ERR_FAULT;
+		goto exit_on_error0;
+	}
+#endif
+
+	err = mali_core_renderunit_init( core );
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to initialize renderunit\n"));
+		goto exit_on_error0;
+	}
+
+	/* Map the new core object, setting: core->registers_mapped */
+	err = mali_core_renderunit_map_registers(core);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to map register\n"));
+		goto exit_on_error1;
+	}
+
+	/* Check that the register mapping of the core works.
+	Return 0 if Mali PP core is present and accessible. */
+	/* In benchmark mode the hardware is not touched; a fake version is used */
+	if (mali_benchmark) {
+#if defined(USING_MALI200)
+		core->core_version = (((u32)MALI_PP_PRODUCT_ID) << 16) | 5 /* Fake Mali200-r0p5 */;
+#elif defined(USING_MALI400)
+		core->core_version = (((u32)MALI_PP_PRODUCT_ID) << 16) | 0x0101 /* Fake Mali400-r1p1 */;
+#else
+#error "No supported mali core defined"
+#endif
+	} else {
+		core->core_version = mali_core_renderunit_register_read(
+		                     core,
+		                     MALI200_REG_ADDR_MGMT_VERSION);
+	}
+
+	err = mali200_core_version_legal(core);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		MALI_DEBUG_PRINT(1, ("Invalid core\n"));
+		goto exit_on_error2;
+	}
+
+	/* Reset the core. Put the core into a state where it can start to render. */
+	mali200_reset(core);
+
+	/* Registering IRQ, init the work_queue_irq_handle */
+	/* Adding this core as an available renderunit in the subsystem. */
+	err = mali_core_subsystem_register_renderunit(&subsystem_mali200, core);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to register with core\n"));
+		goto exit_on_error2;
+	}
+	MALI_DEBUG_PRINT(6, ("Mali PP: mali200_renderunit_create\n") ) ;
+
+	MALI_SUCCESS;
+
+/* Cleanup chain: each label undoes one more successfully completed step */
+exit_on_error2:
+	mali_core_renderunit_unmap_registers(core);
+exit_on_error1:
+	mali_core_renderunit_term(core);
+exit_on_error0:
+	_mali_osk_free( core ) ;
+	MALI_PRINT_ERROR(("Renderunit NOT created."));
+	MALI_ERROR(err);
+}
+
+#if USING_MMU
+/* Used currently only for signalling when MMU has a pagefault */
+static void mali200_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+	mali_core_subsystem_broadcast_notification(&subsystem_mali200, message, data);
+}
+#endif
+
+/* Validate the value read from the core's VERSION register: the product
+ * id in the top 16 bits must match a PP core this driver supports. */
+static _mali_osk_errcode_t mali200_core_version_legal( mali_core_renderunit *core )
+{
+	u32 mali_type;
+
+	mali_type = core->core_version >> 16;	/* product id is the upper halfword */
+#if defined(USING_MALI400)
+	/* Mali300 and Mali400 is compatible, accept either core. */
+	if (MALI400_PP_PRODUCT_ID != mali_type && MALI300_PP_PRODUCT_ID != mali_type)
+#else
+	if (MALI_PP_PRODUCT_ID != mali_type)
+#endif
+	{
+		MALI_PRINT_ERROR(("Error: reading this from " MALI_PP_SUBSYSTEM_NAME " version register: 0x%x\n", core->core_version));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+	MALI_DEBUG_PRINT(3, ("Mali PP: core_version_legal: Reads correct mali version: %d\n", mali_type) ) ;
+	MALI_SUCCESS;
+}
+
+/* Request the PP core to stop issuing new bus transactions. */
+static void subsystem_mali200_renderunit_stop_bus(struct mali_core_renderunit* core)
+{
+	mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+/**
+ * Raw reset of a PP core.
+ *
+ * Mali200: stop the bus, poll until it reports stopped, then hard-reset.
+ * Mali400: issue a soft reset and poll for the RESET_COMPLETED interrupt.
+ * If the core never quiesces and an MMU is attached (outside error
+ * recovery), fall back to forcing a bus reset through the MMU, which
+ * resets every core behind that MMU.
+ *
+ * No-op in benchmark mode.
+ */
+static void mali200_raw_reset( mali_core_renderunit *core )
+{
+	int i;
+	const int request_loop_count = 20;	/* ~200us max wait (20 * 10us) */
+
+	MALI_DEBUG_PRINT(4, ("Mali PP: mali200_raw_reset: %s\n", core->description));
+	if (mali_benchmark) return;
+
+	mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable IRQs */
+
+#if defined(USING_MALI200)
+
+	mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+
+	/* Busy-wait for the bus to report stopped */
+	for (i = 0; i < request_loop_count; i++)
+	{
+		if (mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED) break;
+		_mali_osk_time_ubusydelay(10);
+	}
+
+	MALI_DEBUG_PRINT_IF(1, request_loop_count == i, ("Mali PP: Bus was never stopped during core reset\n"));
+
+
+	if (request_loop_count==i)
+	{
+#if USING_MMU
+		if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+		{
+			/* Could not stop bus connections from core, probably because some of the already pending
+			   bus request has had a page fault, and therefore can not complete before the MMU does PageFault
+			   handling. This can be treated as a heavier reset function - which unfortunately reset all
+			   the cores on this MMU in addition to the MMU itself */
+			MALI_DEBUG_PRINT(1, ("Mali PP: Forcing Bus reset\n"));
+			mali_kernel_mmu_force_bus_reset(core->mmu);
+			return;
+		}
+#endif
+		MALI_PRINT(("A MMU reset did not allow PP to stop its bus, system failure, unable to recover\n"));
+		return;
+	}
+
+	/* use the hard reset routine to do the actual reset */
+	mali200_reset_hard(core);
+
+#elif defined(USING_MALI400)
+
+	mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI400PP_REG_VAL_IRQ_RESET_COMPLETED);
+	mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+
+	/* Busy-wait for the soft reset to complete */
+	for (i = 0; i < request_loop_count; i++)
+	{
+		if (mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) break;
+		_mali_osk_time_ubusydelay(10);
+	}
+
+	if (request_loop_count==i)
+	{
+#if USING_MMU
+		if ((NULL!=core->mmu) && (MALI_FALSE == core->error_recovery))
+		{
+			/* Could not stop bus connections from core, probably because some of the already pending
+			   bus request has had a page fault, and therefore can not complete before the MMU does PageFault
+			   handling. This can be treated as a heavier reset function - which unfortunately reset all
+			   the cores on this MMU in addition to the MMU itself */
+			MALI_DEBUG_PRINT(1, ("Mali PP: Forcing Bus reset\n"));
+			mali_kernel_mmu_force_bus_reset(core->mmu);
+			return;
+		}
+#endif
+		MALI_PRINT(("A MMU reset did not allow PP to stop its bus, system failure, unable to recover\n"));
+		return;
+	}
+	else
+		mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+
+#else
+#error "no supported mali core defined"
+#endif
+}
+
+/* Full PP core reset: raw reset followed by re-programming of the
+ * management registers. Entirely skipped in benchmark mode. */
+static void mali200_reset( mali_core_renderunit *core )
+{
+	if (mali_benchmark)
+	{
+		return;
+	}
+
+	mali200_raw_reset(core);
+	mali200_initialize_registers_mgmt(core);
+}
+
+/* Program the post-reset management registers: currently this only
+ * enables the interrupt sources the driver uses. */
+static void mali200_initialize_registers_mgmt(mali_core_renderunit *core )
+{
+	MALI_DEBUG_PRINT(6, ("Mali PP: mali200_initialize_registers_mgmt: %s\n", core->description)) ;
+	mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+}
+
+/* Start this job on this core. Returns _MALI_OSK_ERR_OK if the job was
+ * started, _MALI_OSK_ERR_FAULT if the job's frame registers are illegal.
+ *
+ * Programs the frame registers, any enabled write-back units and the
+ * requested performance counters, then issues START_RENDERING. The write
+ * barrier guarantees all setup is posted before the start command. */
+static _mali_osk_errcode_t subsystem_mali200_start_job(mali_core_job * job, mali_core_renderunit * core)
+{
+	mali200_job *job200;
+
+	/* The local extended version of the general structs */
+	job200 = _MALI_OSK_CONTAINER_OF(job, mali200_job, embedded_core_job);
+
+	/* Frame registers 0 (tile list) and 1 (RSW base) must be non-zero */
+	if ( (0 == job200->user_input.frame_registers[0]) ||
+	     (0 == job200->user_input.frame_registers[1]) )
+	{
+		MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x WILL NOT START SINCE JOB HAS ILLEGAL ADDRESSES\n",
+				(u32)job200->user_input.user_job_ptr));
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x START_RENDER Tile_list: 0x%08x\n",
+			(u32)job200->user_input.user_job_ptr,
+			job200->user_input.frame_registers[0]));
+	MALI_DEBUG_PRINT(6, ("Mali PP: RSW base addr: 0x%08x Vertex base addr: 0x%08x\n",
+			job200->user_input.frame_registers[1], job200->user_input.frame_registers[2]));
+
+	/* Frame registers. Copy from mem to physical registers */
+	mali_core_renderunit_register_write_array(
+			core,
+			MALI200_REG_ADDR_FRAME,
+			&(job200->user_input.frame_registers[0]),
+			MALI200_NUM_REGS_FRAME);
+
+	/* Write Back unit 0. Copy from mem to physical registers only if the WB unit will be used. */
+	if (job200->user_input.wb0_registers[0])
+	{
+		mali_core_renderunit_register_write_array(
+				core,
+				MALI200_REG_ADDR_WB0,
+				&(job200->user_input.wb0_registers[0]),
+				MALI200_NUM_REGS_WBx);
+	}
+
+	/* Write Back unit 1. Copy from mem to physical registers only if the WB unit will be used. */
+	if (job200->user_input.wb1_registers[0])
+	{
+		mali_core_renderunit_register_write_array(
+				core,
+				MALI200_REG_ADDR_WB1,
+				&(job200->user_input.wb1_registers[0]),
+				MALI200_NUM_REGS_WBx);
+	}
+
+	/* Write Back unit 2. Copy from mem to physical registers only if the WB unit will be used. */
+	if (job200->user_input.wb2_registers[0])
+	{
+		mali_core_renderunit_register_write_array(
+				core,
+				MALI200_REG_ADDR_WB2,
+				&(job200->user_input.wb2_registers[0]),
+				MALI200_NUM_REGS_WBx);
+	}
+
+#if MALI_TRACEPOINTS_ENABLED
+	{
+		int counter = ((core->core_number)*2)+9; /* magic numbers for FP0 are 9 & 10 */
+
+		//printk("FP core->number = %d\n", core->core_number);
+		//TODO we are using magic numbers again... these are from gator_events_mali.c
+		/* Override the userspace counter selection with gator's requests */
+		job200->user_input.perf_counter_flag = 0;
+
+		if( counter>=9 && counter<=16) {
+
+			if( counter_table[counter] != 0xFFFFFFFF ) {
+				job200->user_input.perf_counter_flag |= _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE;
+				job200->user_input.perf_counter_src0 = counter_table[counter];
+			}
+			if( counter_table[counter+1] != 0xFFFFFFFF ) {
+				job200->user_input.perf_counter_flag |= _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE;
+				job200->user_input.perf_counter_src1 = counter_table[counter+1];
+			}
+
+		} else {
+			MALI_DEBUG_PRINT(2, ("core->core_number out of the range (0-3) (%d)\n", core->core_number));
+		}
+	}
+#if defined(USING_MALI400_L2_CACHE)
+	/* L2 counter slots 5 and 6 in gator's table; 0xFFFFFFFF means disabled */
+	if( counter_table[5] != 0xFFFFFFFF ) {
+		job200->user_input.perf_counter_flag |= _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE | _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET;
+		job200->user_input.perf_counter_l2_src0 = counter_table[5];
+	}
+	if( counter_table[6] != 0xFFFFFFFF ) {
+		job200->user_input.perf_counter_flag |= _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE | _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET;
+		job200->user_input.perf_counter_l2_src1 = counter_table[6];
+	}
+#endif
+#endif
+
+	/* This selects which performance counters we are reading */
+	if ( 0 != job200->user_input.perf_counter_flag )
+	{
+		if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+		{
+			mali_core_renderunit_register_write_relaxed(
+					core,
+					MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE,
+					MALI200_REG_VAL_PERF_CNT_ENABLE);
+			mali_core_renderunit_register_write_relaxed(
+					core,
+					MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC,
+					job200->user_input.perf_counter_src0);
+
+		}
+
+		if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+		{
+			mali_core_renderunit_register_write_relaxed(
+					core,
+					MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE,
+					MALI200_REG_VAL_PERF_CNT_ENABLE);
+			mali_core_renderunit_register_write_relaxed(
+					core,
+					MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC,
+					job200->user_input.perf_counter_src1);
+
+		}
+
+#if defined(USING_MALI400_L2_CACHE)
+		if ( job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+		{
+			int force_reset = ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET ) ? 1 : 0;
+			u32 src0 = 0;
+			u32 src1 = 0;
+
+			if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE )
+			{
+				src0 = job200->user_input.perf_counter_l2_src0;
+			}
+			if ( job200->user_input.perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE )
+			{
+				src1 = job200->user_input.perf_counter_l2_src1;
+			}
+
+			mali_kernel_l2_cache_set_perf_counters(src0, src1, force_reset); /* will activate and possibly reset counters */
+
+			/* Now, retrieve the current values, so we can substract them when the job has completed */
+			mali_kernel_l2_cache_get_perf_counters(&job200->perf_counter_l2_src0,
+			                                       &job200->perf_counter_l2_val0,
+			                                       &job200->perf_counter_l2_src1,
+			                                       &job200->perf_counter_l2_val1);
+		}
+#endif
+	}
+
+	subsystem_flush_mapped_mem_cache();
+
+#if MALI_STATE_TRACKING
+	_mali_osk_atomic_inc(&job->session->jobs_started);
+#endif
+
+	/* This is the command that starts the Core */
+	mali_core_renderunit_register_write(
+			core,
+			MALI200_REG_ADDR_MGMT_CTRL_MGMT,
+			MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+	_mali_osk_write_mem_barrier();
+
+
+	pr_debug("SPI_GPU_PP%u Start\n", core->core_number);
+#if MALI_TIMELINE_PROFILING_ENABLED
+	_mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, job200->user_input.frame_builder_id, job200->user_input.flush_id, 0, 0, 0);
+	_mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), job200->pid, job200->tid,
+#if defined(USING_MALI400_L2_CACHE)
+	                          (job200->user_input.perf_counter_l2_src0 << 16) | (job200->user_input.perf_counter_l2_src1 << 24),
+	                          job200->perf_counter_l2_val0, job200->perf_counter_l2_val1
+#else
+	                          0, 0, 0
+#endif
+	                          );
+#endif
+
+	MALI_SUCCESS;
+}
+
+/* Upper-half (hard) IRQ handler for a PP core: checks whether this core
+ * raised an interrupt and, if so, masks further IRQs from it until the
+ * bottom half runs. Returns 1 when an IRQ is pending, 0 otherwise. */
+static u32 subsystem_mali200_irq_handler_upper_half(mali_core_renderunit * core)
+{
+ u32 irq_readout;
+
+ /* Benchmark mode bypasses the hardware entirely. */
+ if (mali_benchmark) {
+ return (core->current_job ? 1 : 0); /* simulate irq is pending when a job is pending */
+ }
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: subsystem_mali200_irq_handler_upper_half: %s\n", core->description)) ;
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+
+ if ( MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout )
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+#endif
+
+ return 1;
+ }
+ return 0;
+}
+
+/* Bottom-half IRQ handler for a PP core. Reads the raw IRQ status, current
+ * tile-list address and core status, then classifies the event:
+ * end-of-frame (success, harvest perf counters), SW watchdog timeout /
+ * confirmed hang, HW watchdog (arm a progress checker), still-busy (resume),
+ * or an error (bus error etc.). Returns a JOB_STATUS_* code that tells the
+ * core scheduler whether the job finished, failed, or should keep running. */
+static int subsystem_mali200_irq_handler_bottom_half(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+ u32 current_tile_addr;
+ u32 core_status;
+ mali_core_job * job;
+ mali200_job * job200;
+
+ job = core->current_job;
+ job200 = GET_JOB200_PTR(job);
+
+
+ /* In benchmark mode fake a successful end-of-frame instead of touching HW. */
+ if (mali_benchmark) {
+ irq_readout = MALI200_REG_VAL_IRQ_END_OF_FRAME;
+ current_tile_addr = 0;
+ core_status = 0;
+ } else {
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+ current_tile_addr = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR);
+ core_status = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_STATUS);
+ }
+
+ /* Spurious interrupt: no job is attached to this core. */
+ if (NULL == job)
+ {
+ MALI_DEBUG_ASSERT(CORE_IDLE==core->state);
+ if ( 0 != irq_readout )
+ {
+ MALI_PRINT_ERROR(("Interrupt from a core not running a job. IRQ: 0x%04x Status: 0x%04x", irq_readout, core_status));
+ }
+ return JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ MALI_DEBUG_ASSERT(CORE_IDLE!=core->state);
+
+ /* Accumulate IRQ bits seen over the whole life of the job. */
+ job200->irq_status |= irq_readout;
+
+ MALI_DEBUG_PRINT_IF( 3, ( 0 != irq_readout ),
+ ("Mali PP: Job: 0x%08x IRQ RECEIVED Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x\n",
+ (u32)job200->user_input.user_job_ptr, irq_readout ,current_tile_addr ,core_status));
+
+ /* Normal completion path. */
+ if ( MALI200_REG_VAL_IRQ_END_OF_FRAME & irq_readout)
+ {
+#if defined(USING_MALI200)
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES);
+#endif
+
+ /* Harvest any performance counters that were enabled when the job started. */
+ if (0 != job200->user_input.perf_counter_flag )
+ {
+ if (job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) )
+ {
+#if MALI_TRACEPOINTS_ENABLED
+ //TODO magic numbers should come from mali_linux_trace.h instead
+ unsigned int counter = (core->core_number * 2) + 9;
+#endif
+
+ job200->perf_counter0 = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ job200->perf_counter1 = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+
+#if MALI_TRACEPOINTS_ENABLED
+ _mali_profiling_add_counter(counter, job200->perf_counter0);
+ _mali_profiling_add_counter(counter + 1, job200->perf_counter1);
+#endif
+
+ }
+
+#if defined(USING_MALI400_L2_CACHE)
+ if (job200->user_input.perf_counter_flag & (_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE|_MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE) )
+ {
+ u32 src0;
+ u32 val0;
+ u32 src1;
+ u32 val1;
+ mali_kernel_l2_cache_get_perf_counters(&src0, &val0, &src1, &val1);
+
+ /* The L2 counters are shared; only trust the delta if the source
+ * selected at job start is still the one being counted. */
+ if (job200->perf_counter_l2_src0 == src0)
+ {
+ job200->perf_counter_l2_val0_raw = val0;
+ job200->perf_counter_l2_val0 = val0 - job200->perf_counter_l2_val0;
+ }
+ else
+ {
+ job200->perf_counter_l2_val0_raw = 0;
+ job200->perf_counter_l2_val0 = 0;
+ }
+
+ if (job200->perf_counter_l2_src1 == src1)
+ {
+ job200->perf_counter_l2_val1_raw = val1;
+ job200->perf_counter_l2_val1 = val1 - job200->perf_counter_l2_val1;
+ }
+ else
+ {
+ job200->perf_counter_l2_val1_raw = 0;
+ job200->perf_counter_l2_val1 = 0;
+ }
+
+#if MALI_TRACEPOINTS_ENABLED
+ //TODO magic numbers should come from mali_linux_trace.h instead
+ _mali_profiling_add_counter(5, val0);
+ _mali_profiling_add_counter(6, val1);
+#endif
+ }
+#endif
+
+ }
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number),
+ job200->perf_counter0, job200->perf_counter1,
+ job200->user_input.perf_counter_src0 | (job200->user_input.perf_counter_src1 << 8)
+#if defined(USING_MALI400_L2_CACHE)
+ | (job200->user_input.perf_counter_l2_src0 << 16) | (job200->user_input.perf_counter_l2_src1 << 24),
+ job200->perf_counter_l2_val0, job200->perf_counter_l2_val1
+#else
+ , 0, 0
+#endif
+ );
+#endif
+
+
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_inc(&job->session->jobs_ended);
+#endif
+
+ pr_debug("SPI_GPU_PP%u Idle\n", core->core_number);
+
+ return JOB_STATUS_END_SUCCESS; /* reschedule */
+ }
+ /* Overall SW watchdog timeout or (time to do hang checking and progress detected)? */
+ else if (
+ (CORE_WATCHDOG_TIMEOUT == core->state) ||
+ ((CORE_HANG_CHECK_TIMEOUT == core->state) && (current_tile_addr == job200->last_tile_list_addr))
+ )
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+ /* no progress detected, killed by the watchdog */
+ MALI_PRINT( ("M200: SW-Timeout Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x.\n", irq_readout ,current_tile_addr ,core_status) );
+ /* In this case will the system outside cleanup and reset the core */
+
+ /* NOTE(review): MALI_PANIC is a no-op unless USING_KERNEL_PANIC is
+ * defined; when it is defined, the code below it is unreachable. */
+ MALI_PANIC("%s Watchdog timeout (rawstat: 0x%x tile_addr: 0x%x status: 0x%x)\n", MALI_PP_SUBSYSTEM_NAME, irq_readout, current_tile_addr, core_status);
+
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_inc(&job->session->jobs_ended);
+#endif
+
+ return JOB_STATUS_END_HANG;
+ }
+ /* HW watchdog triggered or an existing hang check passed? */
+ else if ((CORE_HANG_CHECK_TIMEOUT == core->state) || (irq_readout & job200->active_mask & MALI200_REG_VAL_IRQ_HANG))
+ {
+ /* check interval in ms */
+ u32 timeout = mali_core_hang_check_timeout_get();
+ MALI_PRINT( ("M200: HW/SW Watchdog triggered, checking for progress in %d ms\n", timeout));
+ job200->last_tile_list_addr = current_tile_addr;
+ /* hw watchdog triggered, set up a progress checker every HANGCHECK ms */
+ _mali_osk_timer_add(core->timer_hang_detection, _mali_osk_time_mstoticks(timeout));
+ job200->active_mask &= ~MALI200_REG_VAL_IRQ_HANG; /* ignore the hw watchdog from now on */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, irq_readout & ~MALI200_REG_VAL_IRQ_HANG);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, job200->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* not finished */
+ }
+ /* No irq pending, core still busy */
+ else if ((0 == (irq_readout & MALI200_REG_VAL_IRQ_MASK_USED)) && ( 0 != (core_status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)))
+ {
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, irq_readout);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, job200->active_mask);
+ return JOB_STATUS_CONTINUE_RUN; /* Not finished */
+ }
+ /* Anything else is treated as an error; decode bus errors for the log. */
+ else
+ {
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_number), 0, 0, 0, 0, 0); /* add GP and L2 counters and return status */
+#endif
+
+ MALI_PRINT( ("Mali PP: Job: 0x%08x CRASH? Rawstat: 0x%x Tile_addr: 0x%x Status: 0x%x\n",
+ (u32)job200->user_input.user_job_ptr, irq_readout ,current_tile_addr ,core_status) ) ;
+
+ if (irq_readout & MALI200_REG_VAL_IRQ_BUS_ERROR)
+ {
+ u32 bus_error = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS);
+
+ MALI_PRINT(("Bus error status: 0x%08X\n", bus_error));
+ MALI_DEBUG_PRINT_IF(1, (bus_error & 0x01), ("Bus write error from id 0x%02x\n", (bus_error>>2) & 0x0F));
+ MALI_DEBUG_PRINT_IF(1, (bus_error & 0x02), ("Bus read error from id 0x%02x\n", (bus_error>>6) & 0x0F));
+ MALI_DEBUG_PRINT_IF(1, (0 == (bus_error & 0x03)), ("Bus error but neither read or write was set as the error reason\n"));
+ (void)bus_error;
+ }
+
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_inc(&job->session->jobs_ended);
+#endif
+ return JOB_STATUS_END_UNKNOWN_ERR; /* reschedule */
+ }
+}
+
+
+/* This function is called from the ioctl function and should return a mali_core_job pointer
+to a created mali_core_job object with the data given from userspace */
+/* Builds a mali200_job from the userspace _mali_uk_pp_start_job_s argument,
+ * allocates the completion notification, and queues the job on the session.
+ * On requeue/error paths the partially built job is freed before returning.
+ * The argument struct doubles as the output: its status (and possibly
+ * returned_user_job_ptr) is filled in for userspace. */
+static _mali_osk_errcode_t subsystem_mali200_get_new_job_from_user(struct mali_core_session * session, void * argument)
+{
+ mali200_job *job200;
+ mali_core_job *job = NULL;
+ mali_core_job *previous_replaced_job;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_uk_pp_start_job_s * user_ptr_job_input;
+
+ user_ptr_job_input = (_mali_uk_pp_start_job_s *)argument;
+
+ MALI_CHECK_NON_NULL(job200 = (mali200_job *) _mali_osk_malloc(sizeof(mali200_job)), _MALI_OSK_ERR_NOMEM);
+ _mali_osk_memset(job200, 0 , sizeof(mali200_job) );
+
+ /* We read job data from Userspace pointer */
+ if ( NULL == _mali_osk_memcpy((void*)&job200->user_input, user_ptr_job_input, sizeof(job200->user_input)) )
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Could not copy data from U/K interface.\n")) ;
+ err = _MALI_OSK_ERR_FAULT;
+ goto function_exit;
+ }
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: subsystem_mali200_get_new_job_from_user 0x%x\n", (void*)job200->user_input.user_job_ptr));
+
+ MALI_DEBUG_PRINT(5, ("Mali PP: Frameregs: 0x%x 0x%x 0x%x Writeback[1] 0x%x, Pri:%d; Watchd:%d\n",
+ job200->user_input.frame_registers[0], job200->user_input.frame_registers[1], job200->user_input.frame_registers[2],
+ job200->user_input.wb0_registers[1], job200->user_input.priority,
+ job200->user_input.watchdog_msecs));
+
+ if ( job200->user_input.perf_counter_flag)
+ {
+#if defined(USING_MALI400_L2_CACHE)
+ MALI_DEBUG_PRINT(5, ("Mali PP: Performance counters: flag:0x%x src0:0x%x src1:0x%x l2_src0:0x%x l2_src1:0x%x\n",
+ job200->user_input.perf_counter_flag,
+ job200->user_input.perf_counter_src0,
+ job200->user_input.perf_counter_src1,
+ job200->user_input.perf_counter_l2_src0,
+ job200->user_input.perf_counter_l2_src1));
+#else
+ MALI_DEBUG_PRINT(5, ("Mali PP: Performance counters: flag:0x%x src0:0x%x src1:0x%x\n",
+ job200->user_input.perf_counter_flag,
+ job200->user_input.perf_counter_src0,
+ job200->user_input.perf_counter_src1));
+#endif
+ }
+
+ job = GET_JOB_EMBEDDED_PTR(job200);
+
+ job->session = session;
+ job->flags = user_ptr_job_input->flags;
+ job_priority_set(job, job200->user_input.priority);
+ job_watchdog_set(job, job200->user_input.watchdog_msecs );
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ job200->pid = _mali_osk_get_pid();
+ job200->tid = _mali_osk_get_tid();
+#endif
+
+ job->abort_id = job200->user_input.abort_id;
+ /* Session queue full: ask userspace to requeue this job later. */
+ if (mali_job_queue_full(session))
+ {
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ goto function_exit;
+ }
+
+ /* We now know that we have a job, and an empty session slot to put it in */
+
+ job200->active_mask = MALI200_REG_VAL_IRQ_MASK_USED;
+
+ /* Allocating User Return Data */
+ job200->notification_obj = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_PP_FINISHED,
+ sizeof(_mali_uk_pp_job_finished_s) );
+
+ if ( NULL == job200->notification_obj )
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Could not get notification_obj.\n")) ;
+ err = _MALI_OSK_ERR_NOMEM;
+ goto function_exit;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD( &(job->list) ) ;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x INPUT from user.\n", (u32)job200->user_input.user_job_ptr)) ;
+
+ /* This should not happen since we have the checking of priority above */
+ if ( _MALI_OSK_ERR_OK != mali_core_session_add_job(session, job, &previous_replaced_job))
+ {
+ MALI_PRINT_ERROR( ("Mali PP: Internal error\n")) ;
+ user_ptr_job_input->status = _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE;
+ _mali_osk_notification_delete( job200->notification_obj );
+ goto function_exit;
+ }
+
+ /* If MALI_TRUE: This session had a job with lower priority which were removed.
+ This replaced job is given back to userspace. */
+ if ( NULL != previous_replaced_job )
+ {
+ mali200_job *previous_replaced_job200;
+
+ previous_replaced_job200 = GET_JOB200_PTR(previous_replaced_job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Replacing job: 0x%08x\n", (u32)previous_replaced_job200->user_input.user_job_ptr)) ;
+
+ /* Copy to the input data (which also is output data) the
+ pointer to the job that were replaced, so that the userspace
+ driver can put this job in the front of its job-queue */
+
+ user_ptr_job_input->returned_user_job_ptr = previous_replaced_job200->user_input.user_job_ptr;
+
+ /** @note failure to 'copy to user' at this point must not free job200,
+ * and so no transaction rollback required in the U/K interface */
+
+ /* This does not cause job200 to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED;
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_get_new_job_from_user: Job added, prev returned\n")) ;
+ }
+ else
+ {
+ /* This does not cause job200 to free: */
+ user_ptr_job_input->status = _MALI_UK_START_JOB_STARTED;
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_get_new_job_from_user: Job added\n")) ;
+ }
+
+function_exit:
+ /* Job never made it into the queue: release it here. */
+ if (_MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE == user_ptr_job_input->status
+ || _MALI_OSK_ERR_OK != err )
+ {
+ _mali_osk_free(job200);
+ }
+#if MALI_STATE_TRACKING
+ if (_MALI_UK_START_JOB_STARTED==user_ptr_job_input->status)
+ {
+ if(job)
+ {
+ job->job_nr=_mali_osk_atomic_inc_return(&session->jobs_received);
+ }
+ }
+#endif
+
+ MALI_ERROR(err);
+}
+
+/* This function is called from the ioctl function and should write the necessary data
+to userspace telling which job was finished and the status and debuginfo for this job.
+The function must also free and cleanup the input job object. */
+/* Fills the job's pre-allocated notification buffer with the end status,
+ * IRQ bits, perf counters and render time, sends it to the session's
+ * notification queue for userspace to collect, then frees the job object. */
+static void subsystem_mali200_return_job_to_user( mali_core_job * job, mali_subsystem_job_end_code end_status)
+{
+ mali200_job *job200;
+ _mali_uk_pp_job_finished_s * job_out;
+ _mali_uk_pp_start_job_s * job_input;
+ mali_core_session *session;
+
+ if (NULL == job)
+ {
+ MALI_DEBUG_PRINT(1, ("subsystem_mali200_return_job_to_user received a NULL ptr\n"));
+ return;
+ }
+
+ job200 = _MALI_OSK_CONTAINER_OF(job, mali200_job, embedded_core_job);
+
+ /* NOTE(review): returning here leaks job200 — presumably this path is
+ * unreachable after a successful job start; confirm with callers. */
+ if (NULL == job200->notification_obj)
+ {
+ MALI_DEBUG_PRINT(1, ("Found job200 with NULL notification object, abandoning userspace sending\n"));
+ return;
+ }
+
+ job_out = job200->notification_obj->result_buffer;
+ job_input= &(job200->user_input);
+ session = job->session;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Job: 0x%08x OUTPUT to user. Runtime: %dus\n",
+ (u32)job200->user_input.user_job_ptr,
+ job->render_time_usecs)) ;
+
+ _mali_osk_memset(job_out, 0 , sizeof(_mali_uk_pp_job_finished_s));
+
+ job_out->user_job_ptr = job_input->user_job_ptr;
+
+ /* Pass known status codes through; anything else is mapped to UNKNOWN_ERR. */
+ switch( end_status )
+ {
+ case JOB_STATUS_CONTINUE_RUN:
+ case JOB_STATUS_END_SUCCESS:
+ case JOB_STATUS_END_OOM:
+ case JOB_STATUS_END_ABORT:
+ case JOB_STATUS_END_TIMEOUT_SW:
+ case JOB_STATUS_END_HANG:
+ case JOB_STATUS_END_SEG_FAULT:
+ case JOB_STATUS_END_ILLEGAL_JOB:
+ case JOB_STATUS_END_UNKNOWN_ERR:
+ case JOB_STATUS_END_SHUTDOWN:
+ case JOB_STATUS_END_SYSTEM_UNUSABLE:
+ job_out->status = (mali_subsystem_job_end_code) end_status;
+ break;
+
+ default:
+ job_out->status = JOB_STATUS_END_UNKNOWN_ERR ;
+ }
+ job_out->irq_status = job200->irq_status;
+ job_out->perf_counter0 = job200->perf_counter0;
+ job_out->perf_counter1 = job200->perf_counter1;
+ job_out->render_time = job->render_time_usecs;
+
+#if defined(USING_MALI400_L2_CACHE)
+ job_out->perf_counter_l2_src0 = job200->perf_counter_l2_src0;
+ job_out->perf_counter_l2_src1 = job200->perf_counter_l2_src1;
+ job_out->perf_counter_l2_val0 = job200->perf_counter_l2_val0;
+ job_out->perf_counter_l2_val1 = job200->perf_counter_l2_val1;
+ job_out->perf_counter_l2_val0_raw = job200->perf_counter_l2_val0_raw;
+ job_out->perf_counter_l2_val1_raw = job200->perf_counter_l2_val1_raw;
+#endif
+
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_inc(&session->jobs_returned);
+#endif
+ /* Queue takes ownership of the notification object. */
+ _mali_osk_notification_queue_send( session->notification_queue, job200->notification_obj);
+ job200->notification_obj = NULL;
+
+ _mali_osk_free(job200);
+}
+
+/* Releases the memory backing a PP render-unit descriptor. */
+static void subsystem_mali200_renderunit_delete(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP: mali200_renderunit_delete\n"));
+ _mali_osk_free(core);
+}
+
+/* Hard reset of a PP core: issues FORCE_RESET and then polls a scratch
+ * register (WRITE_BOUNDARY_LOW) until a written test value reads back,
+ * proving the core has come out of reset. Bounded by a fixed loop count
+ * with a 10us busy-wait between attempts. */
+static void mali200_reset_hard(struct mali_core_renderunit * core)
+{
+ const int reset_finished_loop_count = 15;
+ const u32 reset_wait_target_register = MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ MALI_DEBUG_PRINT(5, ("subsystem_mali200_renderunit_reset_core_hard called for core %s\n", core->description));
+
+ /* Seed the scratch register with a value the post-reset write must replace. */
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_invalid_value);
+
+ mali_core_renderunit_register_write(
+ core,
+ MALI200_REG_ADDR_MGMT_CTRL_MGMT,
+ MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_core_renderunit_register_read(core, reset_wait_target_register))
+ {
+ MALI_DEBUG_PRINT(5, ("Reset loop exiting after %d iterations\n", i));
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_DEBUG_PRINT(1, ("The reset loop didn't work\n"));
+ }
+
+ mali_core_renderunit_register_write(core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+}
+
+/* Dispatches the requested reset style to the matching reset routine:
+ * RUNABLE = normal reset, DISABLE = raw reset + mask all IRQs,
+ * HARD = forced reset with readback polling. */
+static void subsystem_mali200_renderunit_reset_core(struct mali_core_renderunit * core, mali_core_reset_style style)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP: renderunit_reset_core\n"));
+
+ switch (style)
+ {
+ case MALI_CORE_RESET_STYLE_RUNABLE:
+ mali200_reset(core);
+ break;
+ case MALI_CORE_RESET_STYLE_DISABLE:
+ mali200_raw_reset(core); /* do the raw reset */
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* then disable the IRQs */
+ break;
+ case MALI_CORE_RESET_STYLE_HARD:
+ mali200_reset_hard(core);
+ break;
+ default:
+ MALI_DEBUG_PRINT(1, ("Unknown reset type %d\n", style));
+ }
+}
+
+/* IRQ-line probe, step 1: unmask the used IRQs and force a HANG interrupt
+ * by writing RAWSTAT, so the probe can verify the IRQ line is wired up. */
+static void subsystem_mali200_renderunit_probe_core_irq_trigger(struct mali_core_renderunit* core)
+{
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+}
+
+/* IRQ-line probe, step 2: confirm the forced HANG interrupt is visible in
+ * INT_STATUS, clear it, and report success; otherwise report a fault. */
+static _mali_osk_errcode_t subsystem_mali200_renderunit_probe_core_irq_finished(struct mali_core_renderunit* core)
+{
+ u32 irq_readout;
+
+ irq_readout = mali_core_renderunit_register_read(core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+
+ if ( MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout )
+ {
+ mali_core_renderunit_register_write(core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+/* U/K entry point: resolve the caller's PP session from its context and
+ * forward the start-job request to the core subsystem. */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_start_job(session, args);
+}
+
+/* U/K entry point: report the number of PP cores for the caller's session. */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_number_of_cores_get(session, &args->number_of_cores);
+}
+
+/* U/K entry point: report the PP core hardware version for the caller's session. */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args )
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ MALI_CHECK_NON_NULL(session, _MALI_OSK_ERR_FAULT);
+ return mali_core_subsystem_ioctl_core_version_get(session, &args->version);
+}
+
+/* U/K entry point: abort a queued/running PP job identified by abort_id.
+ * Silently returns on a missing context or session (void API, no errors). */
+void _mali_ukk_pp_abort_job( _mali_uk_pp_abort_job_s * args)
+{
+ mali_core_session * session;
+ MALI_DEBUG_ASSERT_POINTER(args);
+ if (NULL == args->ctx) return;
+ session = (mali_core_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_mali200_id);
+ if (NULL == session) return;
+ mali_core_subsystem_ioctl_abort_job(session, args->abort_id);
+
+}
+
+#if USING_MALI_PMM
+
+/* PMM hook: signal power-up for a PP core; queue_only defers the actual
+ * power transition to the subsystem's queue handling. */
+_mali_osk_errcode_t malipp_signal_power_up( u32 core_num, mali_bool queue_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali PP: signal power up core: %d - queue_only: %d\n", core_num, queue_only ));
+
+ return( mali_core_subsystem_signal_power_up( &subsystem_mali200, core_num, queue_only ) );
+}
+
+/* PMM hook: signal power-down for a PP core; immediate_only requests the
+ * transition only if it can be done right away. */
+_mali_osk_errcode_t malipp_signal_power_down( u32 core_num, mali_bool immediate_only )
+{
+ MALI_DEBUG_PRINT(4, ("Mali PP: signal power down core: %d - immediate_only: %d\n", core_num, immediate_only ));
+
+ return( mali_core_subsystem_signal_power_down( &subsystem_mali200, core_num, immediate_only ) );
+}
+
+#endif
+
+#if MALI_STATE_TRACKING
+/* Debug helper: dump PP subsystem state into buf (size bytes); returns bytes written. */
+u32 mali200_subsystem_dump_state(char *buf, u32 size)
+{
+ return mali_core_renderunit_dump_state(&subsystem_mali200, buf, size);
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_common.h b/drivers/media/video/samsung/mali/common/mali_kernel_common.h
new file mode 100644
index 0000000..ab6f143
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_common.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+ #if defined(_DEBUG)
+ #define DEBUG
+ #endif
+#endif
+
+/* Macro for generating a kernel panic.
+ * Turned on/off by compile-time Makefile settings
+ */
+#if defined(USING_KERNEL_PANIC)
+#include <linux/kernel.h>
+ /* NOTE(review): the expansion ends in a semicolon, so using MALI_PANIC()
+  * as the body of an un-braced if/else can mis-parse — callers should brace. */
+ #define MALI_PANIC(fmt, args...) panic( fmt, ## args );
+#else
+ #define MALI_PANIC(fmt, args...)
+#endif
+
+
+/* The file include several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...) Do not use this function: Will be included in Release builds.
+ * - MALI_DEBUG_PRINT(nr, (X) ) Prints the second argument if nr<=MALI_DEBUG_LEVEL.
+ * - MALI_DEBUG_ERROR( (X) ) Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_ASSERT(exp,(X)) If the asserted expr is false, the program will exit.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is a zero pointer.
+ * - MALI_DEBUG_CODE( X ) The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra parenthesis around the argument list.
+ *
+ * The printf function: MALI_PRINTF(...) is routed to _mali_osk_debugmsg
+ *
+ * Suggested range for the DEBUG-LEVEL is [1:6] where
+ * [1:2] Is messages with highest priority, indicate possible errors.
+ * [3:4] Is messages with medium priority, output important variables.
+ * [5:6] Is messages with low priority, used during extensive debugging.
+ */
+
+ /**
+ * Fundamental error macro. Reports an error code. This is abstracted to allow us to
+ * easily switch to a different error reporting method if we want, and also to allow
+ * us to search for error returns easily.
+ *
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+ */
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given is anything other than _MALI_OSK_NO_ERROR,
+ * then the value is returned from the enclosing function as an error code. This effectively
+ * acts as a guard clause, and propagates error values up the call stack. This uses a
+ * temporary value to ensure that the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error, this error will be
+ * returned without evaluating the expression in MALI_CHECK_NO_ERROR
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+/* Runtime-tunable verbosity threshold for MALI_DEBUG_PRINT*. */
+extern int mali_debug_level;
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ if((level) <= mali_debug_level)\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+/* NOTE(review): MALI_DEBUG_PRINT_ELSE takes (level,args) in the DEBUG build
+ * but (level,condition,args) here — any caller compiled in both
+ * configurations would break on the arity mismatch; verify call sites. */
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(string,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_core.c b/drivers/media/video/samsung/mali/common/mali_kernel_core.c
new file mode 100644
index 0000000..be1889d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_core.c
@@ -0,0 +1,911 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_gp.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_rendercore.h"
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+#if USING_MALI_PMM
+#include "mali_pmm.h"
+#endif /* USING_MALI_PMM */
+
+/* platform specific set up */
+#include "mali_platform.h"
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_core_id = -1;
+
+/** Pointer to table of resource definitions available to the Mali driver.
+ * _mali_osk_resources_init() sets up the pointer to this table.
+ */
+static _mali_osk_resource_t *arch_configuration = NULL;
+
+/** Number of resources initialized by _mali_osk_resources_init() */
+static u32 num_resources;
+
+/* NOTE(review): forward-declared static here, but the definition below omits
+ * the 'static' keyword; linkage is still internal (first declaration wins),
+ * but the two should agree for consistency. */
+static _mali_osk_errcode_t register_resources( _mali_osk_resource_t **arch_configuration, u32 num_resources );
+
+static _mali_osk_errcode_t initialize_subsystems(void);
+static void terminate_subsystems(void);
+
+/* Callbacks installed into mali_subsystem_core below. */
+static _mali_osk_errcode_t mali_kernel_subsystem_core_setup(mali_kernel_subsystem_identifier id);
+static void mali_kernel_subsystem_core_cleanup(mali_kernel_subsystem_identifier id);
+static _mali_osk_errcode_t mali_kernel_subsystem_core_system_info_fill(_mali_system_info* info);
+static _mali_osk_errcode_t mali_kernel_subsystem_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+static _mali_osk_errcode_t build_system_info(void);
+static void cleanup_system_info(_mali_system_info *cleanup);
+
+/**
+ * @brief handler for MEM_VALIDATION resources
+ *
+ * This resource handler is common to all memory systems. It provides a default
+ * means for validating requests to map in external memory via
+ * _mali_ukk_map_external_mem. In addition, if _mali_ukk_va_to_pa is
+ * implemented, then _mali_ukk_va_to_pa can make use of this MEM_VALIDATION
+ * resource.
+ *
+ * MEM_VALIDATION also provide a CPU physical to Mali physical address
+ * translation, for use by _mali_ukk_map_external_mem.
+ *
+ * @note MEM_VALIDATION resources are only to handle simple cases where a
+ * certain physical address range is allowed to be mapped in by any process,
+ * e.g. a framebuffer at a fixed location. If the implementor has more complex
+ * mapping requirements, then they must either:
+ * - implement their own memory validation function
+ * - or, integrate with UMP.
+ *
+ * @param resource The resource to handle (type MEM_VALIDATION)
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+static _mali_osk_errcode_t mali_kernel_core_resource_mem_validation(_mali_osk_resource_t * resource);
+
+/* MEM_VALIDATION handler state */
+typedef struct
+{
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+ s32 cpu_usage_adjust; /**< Offset to add to Mali Physical address to obtain CPU physical address */
+} _mali_mem_validation_t;
+
+#define INVALID_MEM 0xffffffff
+
+/* phys_base == INVALID_MEM means "no MEM_VALIDATION resource registered yet";
+ * mali_kernel_core_resource_mem_validation() checks this to enforce at most
+ * one such resource. */
+static _mali_mem_validation_t mem_validator = { INVALID_MEM, INVALID_MEM, -1 };
+
+/* The core's own subsystem descriptor; registered last in subsystems[] so it
+ * can parse/dispatch the arch resources after every other subsystem has
+ * installed its resource handlers. */
+static struct mali_kernel_subsystem mali_subsystem_core =
+{
+ mali_kernel_subsystem_core_setup, /* startup */
+ mali_kernel_subsystem_core_cleanup, /* shutdown */
+ NULL, /* load_complete */
+ mali_kernel_subsystem_core_system_info_fill, /* system_info_fill */
+ mali_kernel_subsystem_core_session_begin, /* session_begin */
+ NULL, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+/* Startup/shutdown order is significant: entries are started front-to-back
+ * and shut down back-to-front (see initialize_subsystems/terminate_subsystems). */
+static struct mali_kernel_subsystem * subsystems[] =
+{
+
+#if USING_MALI_PMM
+ /* The PMM must be initialized before any cores - including L2 cache */
+ &mali_subsystem_pmm,
+#endif
+
+ /* always included */
+ &mali_subsystem_memory,
+
+ /* The rendercore subsystem must be initialized before any subsystem based on the
+ * rendercores is started e.g. mali_subsystem_mali200 and mali_subsystem_gp2 */
+ &mali_subsystem_rendercore,
+
+ /* add reference to the subsystem */
+ &mali_subsystem_mali200,
+
+ /* add reference to the subsystem */
+ &mali_subsystem_gp2,
+
+#if defined USING_MALI400_L2_CACHE
+ &mali_subsystem_l2_cache,
+#endif
+
+ /* always included */
+ /* NOTE Keep the core entry at the tail of the list */
+ &mali_subsystem_core
+};
+
+#define SUBSYSTEMS_COUNT ( sizeof(subsystems) / sizeof(subsystems[0]) )
+
+/* Pointers to this type available as incomplete struct in mali_kernel_session_manager.h */
+struct mali_session_data
+{
+ void * subsystem_data[SUBSYSTEMS_COUNT]; /* one private slot per subsystem, indexed by subsystem id */
+ _mali_osk_notification_queue_t * ioctl_queue; /* per-session queue drained by _mali_ukk_wait_for_notification */
+};
+
+/* Resource dispatch table, populated via _mali_kernel_core_register_resource_handler(). */
+static mali_kernel_resource_registrator resource_handler[RESOURCE_TYPE_COUNT] = { NULL, };
+
+/* system info variables */
+/* system_info/system_info_size are guarded by system_info_lock. */
+static _mali_osk_lock_t *system_info_lock = NULL;
+static _mali_system_info * system_info = NULL;
+static u32 system_info_size = 0;
+
+/* is called from OS specific driver entry point */
+_mali_osk_errcode_t mali_kernel_constructor( void )
+{
+ _mali_osk_errcode_t err;
+
+ err = mali_platform_init();
+ if (_MALI_OSK_ERR_OK != err) goto error1;
+
+ err = _mali_osk_init();
+ if (_MALI_OSK_ERR_OK != err) goto error2;
+
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n",_MALI_API_VERSION));
+ MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
+ MALI_DEBUG_PRINT(2, ("Svn revision: %s\n", SVN_REV_STRING));
+
+ err = initialize_subsystems();
+ if (_MALI_OSK_ERR_OK != err) goto error3;
+
+ MALI_PRINT(("Mali device driver %s loaded\n", SVN_REV_STRING));
+
+ MALI_SUCCESS;
+
+error3:
+ MALI_PRINT(("Mali subsystems failed\n"));
+ _mali_osk_term();
+error2:
+ MALI_PRINT(("Mali device driver init failed\n"));
+ if (_MALI_OSK_ERR_OK != mali_platform_deinit())
+ {
+ MALI_PRINT(("Failed to deinit platform\n"));
+ }
+error1:
+ MALI_PRINT(("Failed to init platform\n"));
+ MALI_ERROR(err);
+}
+
+/* is called from OS specific driver exit point */
+void mali_kernel_destructor( void )
+{
+ MALI_DEBUG_PRINT(2, ("\n"));
+ MALI_DEBUG_PRINT(2, ("Unloading Mali v%d device driver.\n",_MALI_API_VERSION));
+#if USING_MALI_PMM
+ malipmm_force_powerup();
+#endif
+ terminate_subsystems(); /* subsystems are responsible for their registered resources */
+ _mali_osk_term();
+
+ if (_MALI_OSK_ERR_OK != mali_platform_deinit())
+ {
+ MALI_PRINT(("Failed to deinit platform\n"));
+ }
+ MALI_DEBUG_PRINT(2, ("Module unloaded.\n"));
+}
+
+/* Walk the arch resource table and dispatch each entry to the handler that a
+ * subsystem registered for its type. Fails hard (INVALID_ARGS) on any
+ * resource without a handler.
+ * NOTE(review): forward-declared 'static' at the top of this file but defined
+ * here without the keyword - linkage is still internal, but the mismatch
+ * should be fixed for consistency. */
+_mali_osk_errcode_t register_resources( _mali_osk_resource_t **arch_configuration, u32 num_resources )
+{
+ _mali_osk_resource_t *arch_resource = *arch_configuration;
+ u32 i;
+#if USING_MALI_PMM
+ u32 is_pmu_first_resource = 1;
+#endif /* USING_MALI_PMM */
+
+ /* loop over arch configuration */
+ for (i = 0; i < num_resources; ++i, arch_resource++)
+ {
+ if ( (arch_resource->type >= RESOURCE_TYPE_FIRST) &&
+ (arch_resource->type < RESOURCE_TYPE_COUNT) &&
+ (NULL != resource_handler[arch_resource->type])
+ )
+ {
+#if USING_MALI_PMM
+ /* If the platform did not list a PMU as its first resource, synthesize
+ * a zeroed "Virtual PMU" so the PMM subsystem is always configured. */
+ if((arch_resource->type != PMU) && (is_pmu_first_resource == 1))
+ {
+ _mali_osk_resource_t mali_pmu_virtual_resource;
+ mali_pmu_virtual_resource.type = PMU;
+ mali_pmu_virtual_resource.description = "Virtual PMU";
+ mali_pmu_virtual_resource.base = 0x00000000;
+ mali_pmu_virtual_resource.cpu_usage_adjust = 0;
+ mali_pmu_virtual_resource.size = 0;
+ mali_pmu_virtual_resource.irq = 0;
+ mali_pmu_virtual_resource.flags = 0;
+ mali_pmu_virtual_resource.mmu_id = 0;
+ mali_pmu_virtual_resource.alloc_order = 0;
+ MALI_CHECK_NO_ERROR(resource_handler[mali_pmu_virtual_resource.type](&mali_pmu_virtual_resource));
+ }
+ is_pmu_first_resource = 0;
+#endif /* USING_MALI_PMM */
+
+ MALI_CHECK_NO_ERROR(resource_handler[arch_resource->type](arch_resource));
+ /* the subsystem shutdown process will release all the resources already registered */
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("No handler installed for resource %s, type %d\n", arch_resource->description, arch_resource->type));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Two-phase subsystem bring-up: startup() for every entry in order, then
+ * load_complete() for every entry, then build the system info snapshot.
+ * On any failure, subsystems started so far are shut down in reverse. */
+static _mali_osk_errcode_t initialize_subsystems(void)
+{
+ int i, j;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT; /* default error code */
+
+ MALI_CHECK_NON_NULL(system_info_lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0 ), _MALI_OSK_ERR_FAULT);
+
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->startup)
+ {
+ /* the subsystem has a startup function defined */
+ err = subsystems[i]->startup(i); /* the subsystem identifier is the offset in our subsystems array */
+ if (_MALI_OSK_ERR_OK != err) goto cleanup;
+ }
+ }
+
+ for (j = 0; j < (int)SUBSYSTEMS_COUNT; ++j)
+ {
+ if (NULL != subsystems[j]->load_complete)
+ {
+ /* the subsystem has a load_complete function defined */
+ err = subsystems[j]->load_complete(j);
+ if (_MALI_OSK_ERR_OK != err) goto cleanup;
+ }
+ }
+
+ /* All systems loaded and resources registered */
+ /* Build system info */
+ if (_MALI_OSK_ERR_OK != build_system_info()) goto cleanup;
+
+ MALI_SUCCESS; /* all ok */
+
+cleanup:
+ /* i is index of subsystem which failed to start, all indices before that has to be shut down */
+ /* If the failure happened in the load_complete loop (or in build_system_info),
+ * i already equals SUBSYSTEMS_COUNT here, so ALL started subsystems are shut
+ * down - the loop below relies on that. */
+ for (i = i - 1; i >= 0; --i)
+ {
+ /* the subsystem identifier is the offset in our subsystems array */
+ /* Call possible shutdown notficiation functions */
+ if (NULL != subsystems[i]->shutdown) subsystems[i]->shutdown(i);
+ }
+
+ _mali_osk_lock_term( system_info_lock );
+ MALI_ERROR(err); /* err is what the module which failed its startup returned, or the default */
+}
+
+/* Shut down all subsystems in reverse start order, then free the cached
+ * system info snapshot.
+ * NOTE(review): cleanup_system_info() dereferences its argument without a
+ * NULL check; if this runs when build_system_info() never succeeded,
+ * system_info is NULL here - confirm that path cannot be reached. */
+static void terminate_subsystems(void)
+{
+ int i;
+ /* shut down subsystems in reverse order from startup */
+ for (i = SUBSYSTEMS_COUNT - 1; i >= 0; --i)
+ {
+ /* the subsystem identifier is the offset in our subsystems array */
+ if (NULL != subsystems[i]->shutdown) subsystems[i]->shutdown(i);
+ }
+ if (system_info_lock) _mali_osk_lock_term( system_info_lock );
+
+ /* Free _mali_system_info struct */
+ cleanup_system_info(system_info);
+}
+
+/* Fan a notification message out to every subsystem that installed a
+ * broadcast_notification callback. */
+void _mali_kernel_core_broadcast_subsystem_message(mali_core_notification_message message, u32 data)
+{
+ int i;
+
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->broadcast_notification)
+ {
+ subsystems[i]->broadcast_notification(message, data);
+ }
+ }
+}
+
+/* startup callback for the core subsystem itself: install the MEM_VALIDATION
+ * handler, then parse the arch resource table and dispatch it to all
+ * subsystems' handlers (which is why core must be last in subsystems[]). */
+static _mali_osk_errcode_t mali_kernel_subsystem_core_setup(mali_kernel_subsystem_identifier id)
+{
+ mali_subsystem_core_id = id;
+
+ /* Register our own resources */
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MEM_VALIDATION, mali_kernel_core_resource_mem_validation));
+
+ /* parse the arch resource definition and tell all the subsystems */
+ /* this is why the core subsystem has to be specified last in the subsystem array */
+ MALI_CHECK_NO_ERROR(_mali_osk_resources_init(&arch_configuration, &num_resources));
+
+ MALI_CHECK_NO_ERROR(register_resources(&arch_configuration, num_resources));
+
+ /* resource parsing succeeded and the subsystem have corretly accepted their resources */
+ MALI_SUCCESS;
+}
+
+/* shutdown callback: release the arch resource table acquired in setup. */
+static void mali_kernel_subsystem_core_cleanup(mali_kernel_subsystem_identifier id)
+{
+ _mali_osk_resources_term(&arch_configuration, num_resources);
+}
+
+/* Free a _mali_system_info and its linked core/mem info lists.
+ * NOTE(review): no NULL guard on 'cleanup' - callers must not pass NULL
+ * (terminate_subsystems() passes system_info unconditionally; see note there). */
+static void cleanup_system_info(_mali_system_info *cleanup)
+{
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+
+ /* delete all the core info structs */
+ while (NULL != cleanup->core_info)
+ {
+ current_core = cleanup->core_info;
+ cleanup->core_info = cleanup->core_info->next;
+ _mali_osk_free(current_core);
+ }
+
+ /* delete all the mem info struct */
+ while (NULL != cleanup->mem_info)
+ {
+ current_mem = cleanup->mem_info;
+ cleanup->mem_info = cleanup->mem_info->next;
+ _mali_osk_free(current_mem);
+ }
+
+ /* delete the system info struct itself */
+ _mali_osk_free(cleanup);
+}
+
+/* Build a fresh system info snapshot by asking each subsystem to append its
+ * info, compute the flattened byte size, then atomically (under
+ * system_info_lock) swap it in for the old snapshot. The variable 'cleanup'
+ * points at whichever struct must be freed on exit: the new one on error,
+ * the old one on success. */
+static _mali_osk_errcode_t build_system_info(void)
+{
+ unsigned int i;
+ int err = _MALI_OSK_ERR_FAULT;
+ _mali_system_info * new_info, * cleanup;
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ u32 new_size = 0;
+
+ /* create a new system info struct */
+ MALI_CHECK_NON_NULL(new_info = (_mali_system_info *)_mali_osk_malloc(sizeof(_mali_system_info)), _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(new_info, 0, sizeof(_mali_system_info));
+
+ /* if an error happens during any of the system_info_fill calls cleanup the new info structs */
+ cleanup = new_info;
+
+ /* ask each subsystems to fill in their info */
+ for (i = 0; i < SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->system_info_fill)
+ {
+ err = subsystems[i]->system_info_fill(new_info);
+ if (_MALI_OSK_ERR_OK != err) goto error_exit;
+ }
+ }
+
+ /* building succeeded, calculate the size */
+
+ /* size needed of the system info struct itself */
+ new_size = sizeof(_mali_system_info);
+
+ /* size needed for the cores */
+ for (current_core = new_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+ new_size += sizeof(_mali_core_info);
+ }
+
+ /* size needed for the memory banks */
+ for (current_mem = new_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+ new_size += sizeof(_mali_mem_info);
+ }
+
+ /* lock system info access so a user wont't get a corrupted version */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* cleanup the old one */
+ cleanup = system_info;
+ /* set new info */
+ system_info = new_info;
+ system_info_size = new_size;
+
+ /* we're safe */
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* ok result */
+ err = _MALI_OSK_ERR_OK;
+
+ /* we share the cleanup routine with the error case */
+error_exit:
+ /* 'cleanup' is NULL on first successful build (old system_info was NULL) */
+ if (NULL == cleanup) MALI_ERROR((_mali_osk_errcode_t)err); /* no cleanup needed, return what err contains */
+
+ /* cleanup */
+ cleanup_system_info(cleanup);
+
+ /* return whatever err is, we could end up here in both the error and success cases */
+ MALI_ERROR((_mali_osk_errcode_t)err);
+}
+
+/* u/k ioctl: report the kernel-side API version and whether it matches the
+ * version user space was built against. Always succeeds; compatibility is
+ * reported via args->compatible. */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check compatability */
+ if ( args->version == _MALI_UK_API_VERSION )
+ {
+ args->compatible = 1;
+ }
+ else
+ {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+/* u/k ioctl: report the byte size of the flattened system info snapshot so
+ * user space can allocate a buffer for _mali_ukk_get_system_info().
+ * NOTE(review): reads system_info_size without taking system_info_lock -
+ * presumably a benign race; verify against the locking convention. */
+_mali_osk_errcode_t _mali_ukk_get_system_info_size(_mali_uk_get_system_info_size_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ args->size = system_info_size;
+ MALI_SUCCESS;
+}
+
+/* u/k ioctl: serialize the current system info snapshot into the caller's
+ * buffer, flattening the linked core/mem lists and patching the 'next'
+ * pointers so they are valid at the caller's address base (ukk_private).
+ * NOTE(review): the (u32) pointer arithmetic throughout assumes 32-bit
+ * pointers - not portable to 64-bit builds. */
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args )
+{
+ _mali_core_info * current_core;
+ _mali_mem_info * current_mem;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ void * current_write_pos, ** current_patch_pos;
+ u32 adjust_ptr_base;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->system_info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* lock the system info */
+ _mali_osk_lock_wait( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /* first check size */
+ if (args->size < system_info_size) goto exit_when_locked;
+
+ /* we build a copy of system_info in the user space buffer specified by the user and
+ * patch up the pointers. The ukk_private members of _mali_uk_get_system_info_s may
+ * indicate a different base address for patching the pointers (normally the
+ * address of the provided system_info buffer would be used). This is helpful when
+ * the system_info buffer needs to get copied to user space and the pointers need
+ * to be in user space.
+ */
+ if (0 == args->ukk_private)
+ {
+ adjust_ptr_base = (u32)args->system_info;
+ }
+ else
+ {
+ adjust_ptr_base = args->ukk_private;
+ }
+
+ /* copy each struct into the buffer, and update its pointers */
+ current_write_pos = (void *)args->system_info;
+
+ /* first, the master struct */
+ _mali_osk_memcpy(current_write_pos, system_info, sizeof(_mali_system_info));
+
+ /* advance write pointer */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_system_info));
+
+ /* first we write the core info structs, patch starts at master's core_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, core_info));
+
+ for (current_core = system_info->core_info; NULL != current_core; current_core = current_core->next)
+ {
+
+ /* patch the pointer pointing to this core */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+ /* copy the core info */
+ _mali_osk_memcpy(current_write_pos, current_core, sizeof(_mali_core_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_core_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_core_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ /* then we write the mem info structs, patch starts at master's mem_info pointer */
+ current_patch_pos = (void **)((u32)args->system_info + offsetof(_mali_system_info, mem_info));
+
+ for (current_mem = system_info->mem_info; NULL != current_mem; current_mem = current_mem->next)
+ {
+ /* patch the pointer pointing to this core */
+ *current_patch_pos = (void*)(adjust_ptr_base + ((u32)current_write_pos - (u32)args->system_info));
+
+ /* copy the core info */
+ _mali_osk_memcpy(current_write_pos, current_mem, sizeof(_mali_mem_info));
+
+ /* update patch pos */
+ current_patch_pos = (void **)((u32)current_write_pos + offsetof(_mali_mem_info, next));
+
+ /* advance write pos in memory */
+ current_write_pos = (void *)((u32)current_write_pos + sizeof(_mali_mem_info));
+ }
+ /* patching of last patch pos is not needed, since we wrote NULL there in the first place */
+
+ err = _MALI_OSK_ERR_OK;
+exit_when_locked:
+ _mali_osk_lock_signal( system_info_lock, _MALI_OSK_LOCKMODE_RW );
+ MALI_ERROR(err);
+}
+
+/* u/k ioctl: block until a notification arrives on this session's queue,
+ * then copy it into args and free it. If the session queue is already gone
+ * (shutdown in progress), tell user space to stop polling instead of failing. */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_notification_t * notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = (_mali_osk_notification_queue_t *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_core_id);
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+ MALI_SUCCESS;
+ }
+
+ /* receive a notification, might sleep */
+ err = _mali_osk_notification_queue_receive(queue, &notification);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_ERROR(err); /* errcode returned, pass on to caller */
+ }
+
+ /* copy the buffer to the user */
+ args->type = (_mali_uk_notification_type)notification->notification_type;
+ _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+ /* finished with the notification */
+ _mali_osk_notification_delete( notification );
+
+ MALI_SUCCESS; /* all ok */
+}
+
+/* u/k ioctl: post an empty notification of the given type onto this
+ * session's own queue (e.g. to wake a blocked wait_for_notification).
+ * Silently succeeds if the queue is already gone during shutdown. */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+{
+ _mali_osk_notification_t * notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = (_mali_osk_notification_queue_t *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_core_id);
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ MALI_SUCCESS;
+ }
+
+ notification = _mali_osk_notification_create(args->type, 0);
+ if ( NULL == notification)
+ {
+ MALI_PRINT_ERROR( ("Failed to create notification object\n")) ;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _mali_osk_notification_queue_send(queue, notification);
+
+ MALI_SUCCESS; /* all ok */
+}
+
+/* system_info_fill callback: the core subsystem only contributes the driver
+ * mode field to the shared system info snapshot. */
+static _mali_osk_errcode_t mali_kernel_subsystem_core_system_info_fill(_mali_system_info* info)
+{
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ info->drivermode = _MALI_DRIVER_MODE_NORMAL;
+
+ MALI_SUCCESS;
+}
+
+/* session_begin callback: the core's per-session slot simply stores the
+ * session's ioctl notification queue (retrieved later via slot_get in the
+ * notification ioctls above). */
+static _mali_osk_errcode_t mali_kernel_subsystem_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+ MALI_CHECK_NON_NULL(slot, _MALI_OSK_ERR_INVALID_ARGS);
+ *slot = queue;
+ MALI_SUCCESS;
+}
+
+/* MEM_VALIDATION resource handler */
+/* Accepts exactly one MEM_VALIDATION resource per driver lifetime; base,
+ * size and cpu_usage_adjust must all be page aligned. */
+static _mali_osk_errcode_t mali_kernel_core_resource_mem_validation(_mali_osk_resource_t * resource)
+{
+ /* Check that no other MEM_VALIDATION resources exist */
+ MALI_CHECK( ((u32)-1) == mem_validator.phys_base, _MALI_OSK_ERR_FAULT );
+
+ /* Check restrictions on page alignment */
+ MALI_CHECK( 0 == (resource->base & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == (resource->size & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == (resource->cpu_usage_adjust & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+
+ mem_validator.phys_base = resource->base;
+ mem_validator.size = resource->size;
+ mem_validator.cpu_usage_adjust = resource->cpu_usage_adjust;
+ MALI_DEBUG_PRINT( 2, ("Memory Validator '%s' installed for Mali physical address base==0x%08X, size==0x%08X, cpu_adjust==0x%08X\n",
+ resource->description, mem_validator.phys_base, mem_validator.size, mem_validator.cpu_usage_adjust ));
+ MALI_SUCCESS;
+}
+
+/* Translate a CPU-physical range to Mali-physical by subtracting the static
+ * cpu_usage_adjust from the MEM_VALIDATION resource, then validate the
+ * result. On success *phys_base is updated in place. */
+_mali_osk_errcode_t mali_kernel_core_translate_cpu_to_mali_phys_range( u32 *phys_base, u32 size )
+{
+ u32 mali_phys_base;
+
+ mali_phys_base = *phys_base - mem_validator.cpu_usage_adjust;
+
+ MALI_CHECK( 0 == ( mali_phys_base & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+ MALI_CHECK( 0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)), _MALI_OSK_ERR_FAULT );
+
+ MALI_CHECK_NO_ERROR( mali_kernel_core_validate_mali_phys_range( mali_phys_base, size ) );
+
+ *phys_base = mali_phys_base;
+ MALI_SUCCESS;
+}
+
+/* Check that [phys_base, phys_base + size] lies entirely within the range
+ * registered by the MEM_VALIDATION resource; logs a loud diagnostic on
+ * failure since this usually means Direct Rendering setup is missing.
+ * NOTE(review): 'phys_base + size' can wrap for ranges near 4GB; u32
+ * overflow is not checked before the comparisons below - confirm inputs
+ * are pre-constrained. */
+_mali_osk_errcode_t mali_kernel_core_validate_mali_phys_range( u32 phys_base, u32 size )
+{
+ MALI_CHECK_GOTO( 0 == ( phys_base & (~_MALI_OSK_CPU_PAGE_MASK)), failure );
+ MALI_CHECK_GOTO( 0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)), failure );
+
+ if ( phys_base >= mem_validator.phys_base
+ && (phys_base + size) >= mem_validator.phys_base
+ && phys_base <= (mem_validator.phys_base + mem_validator.size)
+ && (phys_base + size) <= (mem_validator.phys_base + mem_validator.size) )
+ {
+ MALI_SUCCESS;
+ }
+
+ failure:
+ MALI_PRINTF( ("*******************************************************************************\n") );
+ MALI_PRINTF( ("MALI PHYSICAL RANGE VALIDATION ERROR!\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("We failed to validate a Mali-Physical range that the user-side wished to map in\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("It is likely that the user-side wished to do Direct Rendering, but a suitable\n") );
+ MALI_PRINTF( ("address range validation mechanism has not been correctly setup\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_base, size) );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("Please refer to the ARM Mali Software Integration Guide for more information.\n") );
+ MALI_PRINTF( ("\n") );
+ MALI_PRINTF( ("*******************************************************************************\n") );
+
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+}
+
+
+/* Install a resource handler for a resource type; each type may be claimed
+ * only once (debug assert fires on double registration). */
+_mali_osk_errcode_t _mali_kernel_core_register_resource_handler(_mali_osk_resource_type_t type, mali_kernel_resource_registrator handler)
+{
+ MALI_CHECK(type < RESOURCE_TYPE_COUNT, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT(NULL == resource_handler[type]); /* A handler for resource already exists */
+ resource_handler[type] = handler;
+ MALI_SUCCESS;
+}
+
+/* Fetch a subsystem's private per-session slot by subsystem id.
+ * NOTE(review): only 'id >= SUBSYSTEMS_COUNT' is rejected; a negative id
+ * would index out of bounds - confirm callers never pass one. */
+void * mali_kernel_session_manager_slot_get(struct mali_session_data * session_data, int id)
+{
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ if(id >= SUBSYSTEMS_COUNT) { MALI_DEBUG_PRINT(3, ("mali_kernel_session_manager_slot_get: id %d out of range\n", id)); return NULL; }
+
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3, ("mali_kernel_session_manager_slot_get: got NULL session data\n")); return NULL; }
+ return session_data->subsystem_data[id];
+}
+
+/* Open a driver session: allocate the session struct, create its ioctl
+ * notification queue, and run session_begin on every subsystem in order.
+ * On failure, sessions already begun are ended in reverse and everything
+ * is freed. On success *context receives the session pointer. */
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+ int i;
+ _mali_osk_errcode_t err;
+ struct mali_session_data * session_data;
+
+ /* allocated struct to track this session */
+ session_data = (struct mali_session_data *)_mali_osk_malloc(sizeof(struct mali_session_data));
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_NOMEM);
+
+ _mali_osk_memset(session_data->subsystem_data, 0, sizeof(session_data->subsystem_data));
+
+ /* create a response queue for this session */
+ session_data->ioctl_queue = _mali_osk_notification_queue_init();
+ if (NULL == session_data->ioctl_queue)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Session starting\n"));
+
+ /* call session_begin on all subsystems */
+ for (i = 0; i < (int)SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->session_begin)
+ {
+ /* subsystem has a session_begin */
+ err = subsystems[i]->session_begin(session_data, &session_data->subsystem_data[i], session_data->ioctl_queue);
+ MALI_CHECK_GOTO(err == _MALI_OSK_ERR_OK, cleanup);
+ }
+ }
+
+ *context = (void*)session_data;
+
+ MALI_DEBUG_PRINT(3, ("Session started\n"));
+ MALI_SUCCESS;
+
+cleanup:
+ MALI_DEBUG_PRINT(2, ("Session startup failed\n"));
+ /* i is index of subsystem which failed session begin, all indices before that has to be ended */
+ /* end subsystem sessions in the reverse order they where started in */
+ for (i = i - 1; i >= 0; --i)
+ {
+ if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
+ }
+
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+
+ /* return what the subsystem which failed session start returned */
+ MALI_ERROR(err);
+}
+
+/* Close a driver session opened by _mali_ukk_open(): end all subsystem
+ * sessions in reverse start order, free the queue and session struct, and
+ * NULL out the caller's context pointer.
+ * NOTE(review): only 'context' is validated, not '*context' - a NULL
+ * *context would be dereferenced below; confirm callers guarantee it. */
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+ int i;
+ struct mali_session_data * session_data;
+
+ MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)*context;
+
+ MALI_DEBUG_PRINT(2, ("Session ending\n"));
+
+ /* end subsystem sessions in the reverse order they where started in */
+ for (i = SUBSYSTEMS_COUNT - 1; i >= 0; --i)
+ {
+ if (NULL != subsystems[i]->session_end) subsystems[i]->session_end(session_data, &session_data->subsystem_data[i]);
+ }
+
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+
+ *context = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+ MALI_SUCCESS;
+}
+
+#if USING_MALI_PMM
+
+/* PMM callback: route a power-up signal to the matching core driver (GP,
+ * PP0-3, or L2). For L2 with queue_only==MALI_FALSE the cache is re-enabled
+ * and fully invalidated, since its contents are lost across power-down. */
+_mali_osk_errcode_t mali_core_signal_power_up( mali_pmm_core_id core, mali_bool queue_only )
+{
+ switch( core )
+ {
+ case MALI_PMM_CORE_GP:
+ MALI_CHECK_NO_ERROR(maligp_signal_power_up(queue_only));
+ break;
+#if defined USING_MALI400_L2_CACHE
+ case MALI_PMM_CORE_L2:
+ if( !queue_only )
+ {
+ /* Enable L2 cache due to power up */
+ mali_kernel_l2_cache_do_enable();
+
+ /* Invalidate the cache on power up */
+ MALI_DEBUG_PRINT(5, ("L2 Cache: Invalidate all\n"));
+ MALI_CHECK_NO_ERROR(mali_kernel_l2_cache_invalidate_all());
+ }
+ break;
+#endif
+ case MALI_PMM_CORE_PP0:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(0, queue_only));
+ break;
+ case MALI_PMM_CORE_PP1:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(1, queue_only));
+ break;
+ case MALI_PMM_CORE_PP2:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(2, queue_only));
+ break;
+ case MALI_PMM_CORE_PP3:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_up(3, queue_only));
+ break;
+ default:
+ /* Unknown core */
+ MALI_DEBUG_PRINT_ERROR( ("Unknown core signalled with power up: %d\n", core) );
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+ }
+
+ MALI_SUCCESS;
+}
+
+/* PMM callback: route a power-down signal to the matching core driver.
+ * L2 needs no action on power-down (it is re-initialized on power-up above). */
+_mali_osk_errcode_t mali_core_signal_power_down( mali_pmm_core_id core, mali_bool immediate_only )
+{
+ switch( core )
+ {
+ case MALI_PMM_CORE_GP:
+ MALI_CHECK_NO_ERROR(maligp_signal_power_down(immediate_only));
+ break;
+#if defined USING_MALI400_L2_CACHE
+ case MALI_PMM_CORE_L2:
+ /* Nothing to do */
+ break;
+#endif
+ case MALI_PMM_CORE_PP0:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(0, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP1:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(1, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP2:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(2, immediate_only));
+ break;
+ case MALI_PMM_CORE_PP3:
+ MALI_CHECK_NO_ERROR(malipp_signal_power_down(3, immediate_only));
+ break;
+ default:
+ /* Unknown core */
+ MALI_DEBUG_PRINT_ERROR( ("Unknown core signalled with power down: %d\n", core) );
+ MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+ }
+
+ MALI_SUCCESS;
+}
+
+#endif
+
+
+#if MALI_STATE_TRACKING
+/* Concatenate each subsystem's (and optionally PMM's) state dump into buf,
+ * returning the number of bytes written.
+ * NOTE(review): 'size -= n' is not checked against underflow - if any
+ * dump_state writes more than the remaining size, the u32 wraps; confirm
+ * the callbacks respect the size argument. */
+u32 _mali_kernel_core_dump_state(char* buf, u32 size)
+{
+ int i, n;
+ char *original_buf = buf;
+ for (i = 0; i < SUBSYSTEMS_COUNT; ++i)
+ {
+ if (NULL != subsystems[i]->dump_state)
+ {
+ n = subsystems[i]->dump_state(buf, size);
+ size -= n;
+ buf += n;
+ }
+ }
+#if USING_MALI_PMM
+ n = mali_pmm_dump_os_thread_state(buf, size);
+ size -= n;
+ buf += n;
+#endif
+ /* Return number of bytes written to buf */
+ return (u32)(buf - original_buf);
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_core.h b/drivers/media/video/samsung/mali/common/mali_kernel_core.h
new file mode 100644
index 0000000..715c1cd
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_core.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+#if USING_MALI_PMM
+#include "mali_ukk.h"
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#endif
+
+_mali_osk_errcode_t mali_kernel_constructor( void );
+void mali_kernel_destructor( void );
+
+/**
+ * @brief Translate CPU physical to Mali physical addresses.
+ *
+ * This function is used to convert CPU physical addresses to Mali Physical
+ * addresses, such that _mali_ukk_map_external_mem may be used to map them
+ * into Mali. This will be used by _mali_ukk_va_to_mali_pa.
+ *
+ * This function only supports physically contiguous regions.
+ *
+ * A default implementation is provided, which uses a registered MEM_VALIDATION
+ * resource to do a static translation. Only an address range which will lie
+ * in the range specified by MEM_VALIDATION will be successfully translated.
+ *
+ * If a more complex, or non-static translation is required, then the
+ * implementor has the following options:
+ * - Rewrite this function to provide such a translation
+ * - Integrate the provider of the memory with UMP.
+ *
+ * @param[in,out] phys_base pointer to the page-aligned base address of the
+ * physical range to be translated
+ *
+ * @param[in] size size of the address range to be translated, which must be a
+ * multiple of the physical page size.
+ *
+ * @return on success, _MALI_OSK_ERR_OK and *phys_base is translated. If the
+ * cpu physical address range is not in the valid range, then a suitable
+ * _mali_osk_errcode_t error.
+ *
+ */
+_mali_osk_errcode_t mali_kernel_core_translate_cpu_to_mali_phys_range( u32 *phys_base, u32 size );
+
+
+/**
+ * @brief Validate a Mali physical address range.
+ *
+ * This function is used to ensure that an address range passed to
+ * _mali_ukk_map_external_mem is allowed to be mapped into Mali.
+ *
+ * This function only supports physically contiguous regions.
+ *
+ * A default implementation is provided, which uses a registered MEM_VALIDATION
+ * resource to do a static translation. Only an address range which will lie
+ * in the range specified by MEM_VALIDATION will be successfully validated.
+ *
+ * If a more complex, or non-static validation is required, then the
+ * implementor has the following options:
+ * - Rewrite this function to provide such a validation
+ * - Integrate the provider of the memory with UMP.
+ *
+ * @param phys_base page-aligned base address of the Mali physical range to be
+ * validated.
+ *
+ * @param size size of the address range to be validated, which must be a
+ * multiple of the physical page size.
+ *
+ * @return _MALI_OSK_ERR_OK if the Mali physical range is valid. Otherwise, a
+ * suitable _mali_osk_errcode_t error.
+ *
+ */
+_mali_osk_errcode_t mali_kernel_core_validate_mali_phys_range( u32 phys_base, u32 size );
+
+#if USING_MALI_PMM
+/**
+ * @brief Signal a power up on a Mali core.
+ *
+ * This function flags a core as powered up.
+ * For PP and GP cores it calls functions that move the core from a power off
+ * queue into the idle queue ready to run jobs. It also tries to schedule any
+ * pending jobs to run on it.
+ *
+ * This function will fail if the core is not powered off - either running or
+ * already idle.
+ *
+ * @param core The PMM core id to power up.
+ * @param queue_only When MALI_TRUE only re-queue the core - do not reset.
+ *
+ * @return _MALI_OSK_ERR_OK if the core has been powered up. Otherwise a
+ * suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_core_signal_power_up( mali_pmm_core_id core, mali_bool queue_only );
+
+/**
+ * @brief Signal a power down on a Mali core.
+ *
+ * This function flags a core as powered down.
+ * For PP and GP cores it calls functions that move the core from an idle
+ * queue into the power off queue.
+ *
+ * This function will fail if the core is not idle - either running or
+ * already powered down.
+ *
+ * @param core The PMM core id to power down.
+ * @param immediate_only Do not set the core to pending power down if it can't
+ * power down immediately
+ *
+ * @return _MALI_OSK_ERR_OK if the core has been powered down. Otherwise a
+ * suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_core_signal_power_down( mali_pmm_core_id core, mali_bool immediate_only );
+
+#endif
+
+/**
+ * Flag to indicate whether or not mali_benchmark is turned on.
+ */
+extern int mali_benchmark;
+
+
+#endif /* __MALI_KERNEL_CORE_H__ */
+
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.c b/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.c
new file mode 100644
index 0000000..8b2a97d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table * table);
+
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+#if !USING_MMU
+ map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 20);
+#else
+ map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 116);
+#endif
+ if (NULL != map->lock)
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term(map->lock);
+ _mali_osk_free(map);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ int new_descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (new_descriptor == map->current_nr_mappings)
+ {
+ /* no free descriptor, try to expand the table */
+ mali_descriptor_table * new_table, * old_table;
+ if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+ map->current_nr_mappings += BITS_PER_LONG;
+ new_table = descriptor_table_alloc(map->current_nr_mappings);
+ if (NULL == new_table) goto unlock_and_exit;
+
+ old_table = map->table;
+ _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+ map->table = new_table;
+ descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+ map->table->mappings[new_descriptor] = target;
+ *odescriptor = new_descriptor;
+ err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(err);
+}
+
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(callback);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ /* id 0 is skipped as it's a reserved ID not mapping to anything */
+ for (i = 1; i < map->current_nr_mappings; ++i)
+ {
+ if (_mali_osk_test_bit(i, map->table->usage))
+ {
+ callback(i, map->table->mappings[i]);
+ }
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = _MALI_OSK_ERR_OK;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = _MALI_OSK_ERR_OK;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+void mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
+{
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static mali_descriptor_table * descriptor_table_alloc(int count)
+{
+ mali_descriptor_table * table;
+
+ table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count));
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(mali_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(mali_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.h b/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.h
new file mode 100644
index 0000000..745be92
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ mali_descriptor_table * table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return _MALI_OSK_ERR_OK on success (allocated ID stored in *descriptor), otherwise an error code
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_gp.h b/drivers/media/video/samsung/mali/common/mali_kernel_gp.h
new file mode 100644
index 0000000..efd3b43
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_gp.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_GP2_H__
+#define __MALI_KERNEL_GP2_H__
+
+extern struct mali_kernel_subsystem mali_subsystem_gp2;
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t maligp_signal_power_up( mali_bool queue_only );
+_mali_osk_errcode_t maligp_signal_power_down( mali_bool immediate_only );
+#endif
+
+#endif /* __MALI_KERNEL_GP2_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_l2_cache.c b/drivers/media/video/samsung/mali/common/mali_kernel_l2_cache.c
new file mode 100644
index 0000000..e4d4ab1
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_l2_cache.c
@@ -0,0 +1,517 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+#include "mali_kernel_core.h"
+#include "mali_kernel_pp.h"
+#include "mali_kernel_subsystem.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_kernel_l2_cache.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0002,
+ /*unused = 0x0003 */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0004, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0005,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0006, /**< Limit of outstanding read requests */
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x0007, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0008,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0009,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x000A,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x000B,
+} mali_l2_cache_register;
+
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command
+{
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+ /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_enable
+{
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status
+{
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+typedef struct mali_kernel_l2_cache_core
+{
+ unsigned long base; /**< Physical address of the registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ u32 mapping_size; /**< Size of registers in bytes */
+ _mali_osk_list_t list; /**< Used to link multiple cache cores into a list */
+ _mali_osk_lock_t *lock; /**< Serialize all L2 cache commands */
+} mali_kernel_l2_cache_core;
+
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+
+/**
+ * Mali L2 cache subsystem startup function
+ * Called by the driver core when the driver is loaded.
+ *
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * Mali L2 cache subsystem shutdown function
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ */
+static void mali_l2_cache_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * L2 cache subsystem complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources has been registered
+ * @param id Identifier assigned by the core to the L2 cache subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_load_complete(mali_kernel_subsystem_identifier id);
+
+/**
+ * Mali L2 cache subsystem's notification handler for a Mali L2 cache resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each Mali L2 cache described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MALI400L2)
+ * @return 0 if the Mali L2 cache was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_l2_cache_core_create(_mali_osk_resource_t * resource);
+
+/**
+ * Write to a L2 cache register
+ * Writes the given value to the specified register
+ * @param unit The L2 cache to write to
+ * @param reg The register to write to
+ * @param val The value to write to the register
+ */
+static void mali_l2_cache_register_write(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg, u32 val);
+
+
+
+/**
+ * Invalidate specified L2 cache
+ * @param cache The L2 cache to invalidate
+ * @return 0 if Mali L2 cache was successfully invalidated, otherwise error
+ */
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all_cache(mali_kernel_l2_cache_core *cache);
+
+
+/*
+ The fixed Mali L2 cache system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_l2_cache =
+{
+ mali_l2_cache_initialize, /**< startup */
+ NULL, /*mali_l2_cache_terminate,*/ /**< shutdown */
+ mali_l2_cache_load_complete, /**< load_complete */
+ NULL, /**< system_info_fill */
+ NULL, /**< session_begin */
+ NULL, /**< session_end */
+ NULL, /**< broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /**< dump_state */
+#endif
+};
+
+
+
+static _MALI_OSK_LIST_HEAD(caches_head);
+
+
+
+
+/* called during module init */
+static _mali_osk_errcode_t mali_l2_cache_initialize(mali_kernel_subsystem_identifier id)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_IGNORE( id );
+
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system initializing\n"));
+
+ _MALI_OSK_INIT_LIST_HEAD(&caches_head);
+
+ /* This will register the function for adding Mali L2 cache cores to the subsystem */
+ err = _mali_kernel_core_register_resource_handler(MALI400L2, mali_l2_cache_core_create);
+
+ MALI_ERROR(err);
+}
+
+
+
+/* called if/when our module is unloaded */
+static void mali_l2_cache_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system terminating\n"));
+
+ /* loop over all L2 cache units and shut them down */
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list )
+ {
+ /* reset to defaults */
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+ /* remove from the list of caches on the system */
+ _mali_osk_list_del( &cache->list );
+
+ /* release resources */
+ _mali_osk_mem_unmapioregion( cache->base, cache->mapping_size, cache->mapped_registers );
+ _mali_osk_mem_unreqregion( cache->base, cache->mapping_size );
+ _mali_osk_lock_term( cache->lock );
+ _mali_osk_free( cache );
+
+ #if USING_MALI_PMM
+ /* Unregister the L2 cache with the PMM */
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+ #endif
+ }
+}
+
+static _mali_osk_errcode_t mali_l2_cache_core_create(_mali_osk_resource_t * resource)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT ;
+ mali_kernel_l2_cache_core * cache = NULL;
+
+ MALI_DEBUG_PRINT(2, ( "Creating Mali L2 cache: %s\n", resource->description));
+
+#if USING_MALI_PMM
+ /* Register the L2 cache with the PMM */
+ err = malipmm_core_register( MALI_PMM_CORE_L2 );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to register L2 cache unit with PMM"));
+ return err;
+ }
+#endif
+
+ err = _mali_osk_mem_reqregion( resource->base, MALI400_L2_CACHE_REGISTERS_SIZE, resource->description);
+
+ MALI_CHECK_GOTO( _MALI_OSK_ERR_OK == err, err_cleanup_requestmem_failed);
+
+ /* Reset error that might be passed out */
+ err = _MALI_OSK_ERR_FAULT;
+
+ cache = _mali_osk_malloc(sizeof(mali_kernel_l2_cache_core));
+
+ MALI_CHECK_GOTO( NULL != cache, err_cleanup);
+
+ cache->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 104 );
+
+ MALI_CHECK_GOTO( NULL != cache->lock, err_cleanup);
+
+ /* basic setup */
+ _MALI_OSK_INIT_LIST_HEAD(&cache->list);
+
+ cache->base = resource->base;
+ cache->mapping_size = MALI400_L2_CACHE_REGISTERS_SIZE;
+
+ /* map the registers */
+ cache->mapped_registers = _mali_osk_mem_mapioregion( cache->base, cache->mapping_size, resource->description );
+
+ MALI_CHECK_GOTO( NULL != cache->mapped_registers, err_cleanup);
+
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ err = mali_kernel_l2_cache_invalidate_all_cache(cache);
+
+ MALI_CHECK_GOTO( _MALI_OSK_ERR_OK == err, err_cleanup);
+
+ /* add to our list of L2 caches */
+ _mali_osk_list_add( &cache->list, &caches_head );
+
+ MALI_SUCCESS;
+
+err_cleanup:
+ /* This cleanup used when resources have been requested successfully */
+
+ if ( NULL != cache )
+ {
+ if (NULL != cache->mapped_registers)
+ {
+ _mali_osk_mem_unmapioregion( cache->base, cache->mapping_size, cache->mapped_registers);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to map Mali L2 cache registers at 0x%08lX\n", cache->base));
+ }
+
+ if( NULL != cache->lock )
+ {
+ _mali_osk_lock_term( cache->lock );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to allocate a lock for handling a L2 cache unit"));
+ }
+
+ _mali_osk_free( cache );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ( "Failed to allocate memory for handling a L2 cache unit"));
+ }
+
+ /* A call is to request region, so this must always be reversed */
+ _mali_osk_mem_unreqregion( resource->base, MALI400_L2_CACHE_REGISTERS_SIZE);
+#if USING_MALI_PMM
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+#endif
+ return err;
+
+err_cleanup_requestmem_failed:
+ MALI_DEBUG_PRINT(1, ("Failed to request Mali L2 cache '%s' register address space at (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + MALI400_L2_CACHE_REGISTERS_SIZE - 1) );
+#if USING_MALI_PMM
+ malipmm_core_unregister( MALI_PMM_CORE_L2 );
+#endif
+ return err;
+
+}
+
+
+static void mali_l2_cache_register_write(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg, u32 val)
+{
+ _mali_osk_mem_iowrite32(unit->mapped_registers, (u32)reg * sizeof(u32), val);
+}
+
+
+static u32 mali_l2_cache_register_read(mali_kernel_l2_cache_core * unit, mali_l2_cache_register reg)
+{
+ return _mali_osk_mem_ioread32(unit->mapped_registers, (u32)reg * sizeof(u32));
+}
+
+void mali_kernel_l2_cache_do_enable(void)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and enable them*/
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+ }
+}
+
+
+static _mali_osk_errcode_t mali_l2_cache_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_kernel_l2_cache_do_enable();
+ MALI_DEBUG_PRINT(2, ( "Mali L2 cache system load complete\n"));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_send_command(mali_kernel_l2_cache_core *cache, u32 reg, u32 val)
+{
+ int i = 0;
+ const int loop_count = 100000;
+
+ /*
+ * Grab lock in order to send commands to the L2 cache in a serialized fashion.
+ * The L2 cache will ignore commands if it is busy.
+ */
+ _mali_osk_lock_wait(cache->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* First, wait for L2 cache command handler to go idle */
+
+ for (i = 0; i < loop_count; i++)
+ {
+ if (!(_mali_osk_mem_ioread32(cache->mapped_registers , (u32)MALI400_L2_CACHE_REGISTER_STATUS * sizeof(u32)) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
+ {
+ break;
+ }
+ }
+
+ if (i == loop_count)
+ {
+ _mali_osk_lock_signal(cache->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+ }
+
+ /* then issue the command */
+ mali_l2_cache_register_write(cache, reg, val);
+
+ _mali_osk_lock_signal(cache->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_SUCCESS;
+}
+
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all_cache(mali_kernel_l2_cache_core *cache)
+{
+ return mali_kernel_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+}
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all(void)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and invalidate them */
+
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ MALI_CHECK_NO_ERROR( mali_kernel_l2_cache_invalidate_all_cache(cache) );
+ }
+
+ MALI_SUCCESS;
+}
+
+
+static _mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page_cache(mali_kernel_l2_cache_core *cache, u32 page)
+{
+ return mali_kernel_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, page);
+}
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page(u32 page)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+
+ /* loop over all L2 cache units and invalidate them */
+
+ _MALI_OSK_LIST_FOREACHENTRY( cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ MALI_CHECK_NO_ERROR( mali_kernel_l2_cache_invalidate_page_cache(cache, page) );
+ }
+
+ MALI_SUCCESS;
+}
+
+
+void mali_kernel_l2_cache_set_perf_counters(u32 src0, u32 src1, int force_reset)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+ int reset0 = force_reset;
+ int reset1 = force_reset;
+ MALI_DEBUG_CODE(
+ int changed0 = 0;
+ int changed1 = 0;
+ )
+
+ /* loop over all L2 cache units and activate the counters on them */
+ _MALI_OSK_LIST_FOREACHENTRY(cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ u32 cur_src0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0);
+ u32 cur_src1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1);
+
+ if (src0 != cur_src0)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, src0);
+ MALI_DEBUG_CODE(changed0 = 1;)
+ reset0 = 1;
+ }
+
+ if (src1 != cur_src1)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, src1);
+ MALI_DEBUG_CODE(changed1 = 1;)
+ reset1 = 1;
+ }
+
+ if (reset0)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
+ }
+
+ if (reset1)
+ {
+ mali_l2_cache_register_write(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
+ }
+
+ MALI_DEBUG_PRINT(5, ("L2 cache counters set: SRC0=%u, CHANGED0=%d, RESET0=%d, SRC1=%u, CHANGED1=%d, RESET1=%d\n",
+ src0, changed0, reset0,
+ src1, changed1, reset1));
+ }
+}
+
+
+void mali_kernel_l2_cache_get_perf_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1)
+{
+ mali_kernel_l2_cache_core * cache, *temp_cache;
+ int first_time = 1;
+ *src0 = 0;
+ *src1 = 0;
+ *val0 = 0;
+ *val1 = 0;
+
+ /* loop over all L2 cache units and read the counters */
+ _MALI_OSK_LIST_FOREACHENTRY(cache, temp_cache, &caches_head, mali_kernel_l2_cache_core, list)
+ {
+ u32 cur_src0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0);
+ u32 cur_src1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1);
+ u32 cur_val0 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ u32 cur_val1 = mali_l2_cache_register_read(cache, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+
+ MALI_DEBUG_PRINT(5, ("L2 cache counters get: SRC0=%u, VAL0=%u, SRC1=%u, VAL1=%u\n", cur_src0, cur_val0, cur_src1, cur_val1));
+
+ /* Only update the counter source once, with the value from the first L2 cache unit. */
+ if (first_time)
+ {
+ *src0 = cur_src0;
+ *src1 = cur_src1;
+ first_time = 0;
+ }
+
+ /* Accumulate values only when this unit uses the same counter sources; otherwise warn and skip it. */
+ if (*src0 == cur_src0 && *src1 == cur_src1)
+ {
+ *val0 += cur_val0;
+ *val1 += cur_val1;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("Warning: Mali L2 caches has different performance counters set, not retrieving data\n"));
+ }
+ }
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_l2_cache.h b/drivers/media/video/samsung/mali/common/mali_kernel_l2_cache.h
new file mode 100644
index 0000000..8c12b50
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_l2_cache.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_subsystem.h"
+extern struct mali_kernel_subsystem mali_subsystem_l2_cache;
+
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_all(void);
+_mali_osk_errcode_t mali_kernel_l2_cache_invalidate_page(u32 page);
+
+void mali_kernel_l2_cache_do_enable(void);
+void mali_kernel_l2_cache_set_perf_counters(u32 src0, u32 src1, int force_reset);
+void mali_kernel_l2_cache_get_perf_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_mem.h b/drivers/media/video/samsung/mali/common/mali_kernel_mem.h
new file mode 100644
index 0000000..8caafe3
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_mem.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_H__
+#define __MALI_KERNEL_MEM_H__
+
+#include "mali_kernel_subsystem.h"
+extern struct mali_kernel_subsystem mali_subsystem_memory;
+
+#endif /* __MALI_KERNEL_MEM_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_mem_buddy.c b/drivers/media/video/samsung/mali/common/mali_kernel_mem_buddy.c
new file mode 100644
index 0000000..e378f03
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_mem_buddy.c
@@ -0,0 +1,1427 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_core.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_kernel_session_manager.h"
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#ifdef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+#include "mali_osk_indir_mmap.h"
+#endif
+
+#error Support for non-MMU builds is no longer supported and is planned for removal.
+
+/**
+ * Minimum memory allocation size
+ */
+#define MIN_BLOCK_SIZE (1024*1024UL)
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 4096
+
+/**
+ * Enum used to store multiple fields in one u32 to keep the memory block struct small
+ */
+enum MISC_SHIFT { MISC_SHIFT_FREE = 0, MISC_SHIFT_ORDER = 1, MISC_SHIFT_TOPLEVEL = 6 };
+enum MISC_MASK { MISC_MASK_FREE = 0x01, MISC_MASK_ORDER = 0x1F, MISC_MASK_TOPLEVEL = 0x1F };
+
+/* forward declaration of the block struct */
+struct mali_memory_block;
+
+/**
+ * Definition of memory bank type.
+ * Represents a memory bank (separate address space)
+ * Each bank keeps track of its block usage.
+ * A buddy system used to track the usage
+*/
+typedef struct mali_memory_bank
+{
+ _mali_osk_list_t list; /* links multiple banks together */
+ _mali_osk_lock_t *lock;
+ u32 base_addr; /* Mali seen address of bank */
+ u32 cpu_usage_adjust; /* Adjustment factor for what the CPU sees */
+ u32 size; /* the effective size */
+ u32 real_size; /* the real size of the bank, as given by to the subsystem */
+ int min_order;
+ int max_order;
+ struct mali_memory_block * blocklist;
+ _mali_osk_list_t *freelist;
+ _mali_osk_atomic_t num_active_allocations;
+ u32 used_for_flags;
+ u32 alloc_order; /**< Order in which the bank will be used for allocations */
+ const char *name; /**< Descriptive name of the bank */
+} mali_memory_bank;
+
+/**
+ * Definition of the memory block type
+ * Represents a memory block, which is the smallest memory unit operated on.
+ * A block keeps info about its mapping, if in use by a user process
+ */
+typedef struct mali_memory_block
+{
+ _mali_osk_list_t link; /* used for freelist and process usage list*/
+ mali_memory_bank * bank; /* the bank it belongs to */
+ void __user * mapping; /* possible user space mapping of this block */
+	u32 misc; /* used while a block is free to track the number of blocks it represents */
+ int descriptor;
+ u32 mmap_cookie; /**< necessary for interaction with _mali_ukk_mem_mmap/munmap */
+} mali_memory_block;
+
+/**
+ * Definition of the type used to represent memory used by a session.
+ * Contains the head of the list of memory currently in use by a session.
+ */
+typedef struct memory_session
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t memory_head; /* List of the memory blocks used by this session. */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+} memory_session;
+
+/*
+ Subsystem interface implementation
+*/
+/**
+ * Buddy block memory subsystem startup function
+ * Called by the driver core when the driver is loaded.
+ * Registers the memory systems ioctl handler, resource handlers and memory map function with the core.
+ *
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * Buddy block memory subsystem shutdown function
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up
+ * @param id Identifier assigned by the core to the memory subsystem
+ */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * Buddy block memory load complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * Reports on the memory resources registered
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id);
+
+
+/**
+ * Buddy block memory subsystem session begin notification
+ * Called by the core when a new session to the driver is started.
+ * Creates a memory session object and sets it as the subsystem slot data for this session
+ * @param slot Pointer to the slot to use for storing per-session data
+ * @param queue The user space event sink
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+/**
+ * Buddy block memory subsystem session end notification
+ * Called by the core when a session to the driver has ended.
+ * Cleans up per session data, which includes checking and fixing memory leaks
+ *
+ * @param slot Pointer to the slot to use for storing per-session data
+ */
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+/**
+ * Buddy block memory subsystem system info filler
+ * Called by the core when a system info update is needed
+ * We fill in info about all the memory types we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info);
+
+/* our registered resource handlers */
+/**
+ * Buddy block memory subsystem's notification handler for MEMORY resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each memory bank described in the active architecture's config.h file.
+ * Requests memory region ownership and calls backend.
+ * @param resource The resource to handle (type MEMORY)
+ * @return 0 if the memory was claimed and accepted, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_memory(_mali_osk_resource_t * resource);
+
+/**
+ * Buddy block memory subsystem's notification handler for MMU resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each mmu described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MMU)
+ * @return 0 if the MMU was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource);
+
+/**
+ * Buddy block memory subsystem's notification handler for FPGA_FRAMEWORK resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each fpga framework described in the active architecture's config.h file.
+ * @param resource The resource to handle (type FPGA_FRAMEWORK)
+ * @return 0 if the FPGA framework was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource);
+
+/* ioctl command implementations */
+/**
+ * Buddy block memory subsystem's handler for MALI_IOC_MEM_GET_BIG_BLOCK ioctl
+ * Called by the generic ioctl handler when the MALI_IOC_MEM_GET_BIG_BLOCK command is received.
+ * Finds an available memory block and maps into the current process' address space.
+ * @param ukk_private private word for use by the User/Kernel interface
+ * @param session_data Pointer to the per-session object which will track the memory usage
+ * @param argument The argument from the user. A pointer to a struct mali_dd_get_big_block in user space
+ * @return Zero if successful, a standard Linux error value on error (a negative value)
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args );
+
+/**
+ * Buddy block memory subsystem's handler for MALI_IOC_MEM_FREE_BIG_BLOCK ioctl
+ * Called by the generic ioctl handler when the MALI_IOC_MEM_FREE_BIG_BLOCK command is received.
+ * Unmaps the memory from the process' address space and marks the block as free.
+ * @param session_data Pointer to the per-session object which tracks the memory usage
+ * @param argument The argument from the user. A pointer to a struct mali_dd_get_big_block in user space
+ * @return Zero if successful, a standard Linux error value on error (a negative value)
+ */
+
+/* this static version allows us to make use of it while holding the memory_session lock.
+ * This is required for the session_end code */
+static _mali_osk_errcode_t _mali_ukk_free_big_block_internal( struct mali_session_data * mali_session_data, memory_session * session_data, _mali_uk_free_big_block_s *args);
+
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args );
+
+/**
+ * Buddy block memory subsystem's memory bank registration routine
+ * Called when a MEMORY resource has been found.
+ * The memory region has already been reserved for use by this driver.
+ * Create a bank object to represent this region and initialize its slots.
+ * @note Can only be called in a module atomic scope, i.e. during module init since no locking is performed
+ * @param phys_base Physical base address of this bank
+ * @param cpu_usage_adjust Adjustment factor for CPU seen address
+ * @param size Size of the bank in bytes
+ * @param flags Memory type bits
+ * @param alloc_order Order in which the bank will be used for allocations
+ * @param name descriptive name of the bank
+ * @return Zero on success, negative on error
+ */
+static int mali_memory_bank_register(u32 phys_base, u32 cpu_usage_adjust, u32 size, u32 flags, u32 alloc_order, const char *name);
+
+/**
+ * Get a block of mali memory of at least the given size and of the given type
+ * This is the backend for get_big_block.
+ * @param type_id The type id of memory requested.
+ * @param minimum_size The size requested
+ * @return Pointer to a block on success, NULL on failure
+ */
+static mali_memory_block * mali_memory_block_get(u32 type_id, u32 minimum_size);
+
+/**
+ * Get the mali seen address of the memory described by the block
+ * @param block The memory block to return the address of
+ * @return The mali seen address of the memory block
+ */
+MALI_STATIC_INLINE u32 block_mali_addr_get(mali_memory_block * block);
+
+/**
+ * Get the cpu seen address of the memory described by the block
+ * The cpu_usage_adjust will be used to change the mali seen phys address
+ * @param block The memory block to return the address of
+ * @return The mali seen address of the memory block
+ */
+MALI_STATIC_INLINE u32 block_cpu_addr_get(mali_memory_block * block);
+
+/**
+ * Get the size of the memory described by the given block
+ * @param block The memory block to return the size of
+ * @return The size of the memory block described by the object
+ */
+MALI_STATIC_INLINE u32 block_size_get(mali_memory_block * block);
+
+/**
+ * Get the user space accessible mapping the memory described by the given memory block
+ * Returns a pointer in user space to the memory, if one has been created.
+ * @param block The memory block to return the mapping of
+ * @return User space pointer to cpu accessible memory or NULL if not mapped
+ */
+MALI_STATIC_INLINE void __user * block_mapping_get(mali_memory_block * block);
+
+/**
+ * Set the user space accessible mapping the memory described by the given memory block.
+ * Sets the stored pointer to user space for the memory described by this block.
+ * @param block The memory block to set mapping info for
+ * @param ptr User space pointer to cpu accessible memory or NULL if not mapped
+ */
+MALI_STATIC_INLINE void block_mapping_set(mali_memory_block * block, void __user * ptr);
+
+/**
+ * Get the cookie for use with _mali_ukk_mem_munmap().
+ * @param block The memory block to get the cookie from
+ * @return the cookie. A return of 0 is still a valid cookie.
+ */
+MALI_STATIC_INLINE u32 block_mmap_cookie_get(mali_memory_block * block);
+
+/**
+ * Set the cookie returned via _mali_ukk_mem_mmap().
+ * @param block The memory block to set the cookie for
+ * @param cookie the cookie
+ */
+MALI_STATIC_INLINE void block_mmap_cookie_set(mali_memory_block * block, u32 cookie);
+
+
+/**
+ * Get a memory block's free status
+ * @param block The block to get the state of
+ */
+MALI_STATIC_INLINE u32 get_block_free(mali_memory_block * block);
+
+/**
+ * Set a memory block's free status
+ * @param block The block to set the state for
+ * @param state The state to set
+ */
+MALI_STATIC_INLINE void set_block_free(mali_memory_block * block, int state);
+
+/**
+ * Set a memory block's order
+ * @param block The block to set the order for
+ * @param order The order to set
+ */
+MALI_STATIC_INLINE void set_block_order(mali_memory_block * block, u32 order);
+
+/**
+ * Get a memory block's order
+ * @param block The block to get the order for
+ * @return The order this block exists on
+ */
+MALI_STATIC_INLINE u32 get_block_order(mali_memory_block * block);
+
+/**
+ * Tag a block as being a toplevel block.
+ * A toplevel block has no buddy and no parent
+ * @param block The block to tag as being toplevel
+ */
+MALI_STATIC_INLINE void set_block_toplevel(mali_memory_block * block, u32 level);
+
+/**
+ * Check if a block is a toplevel block
+ * @param block The block to check
+ * @return 1 if toplevel, 0 else
+ */
+MALI_STATIC_INLINE u32 get_block_toplevel(mali_memory_block * block);
+
+/**
+ * Checks if the given block is a buddy at the given order and that it's free
+ * @param block The block to check
+ * @param order The order to check against
+ * @return 0 if not valid, else 1
+ */
+MALI_STATIC_INLINE int block_is_valid_buddy(mali_memory_block * block, int order);
+
+/*
+ The buddy system uses the following rules to quickly find a blocks buddy
+ and parent (block representing this block at a higher order level):
+ - Given a block with index i the blocks buddy is at index i ^ ( 1 << order)
+ - Given a block with index i the blocks parent is at i & ~(1 << order)
+*/
+
+/**
+ * Get a blocks buddy
+ * @param block The block to find the buddy for
+ * @param order The order to operate on
+ * @return Pointer to the buddy block
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_buddy(mali_memory_block * block, u32 order);
+
+/**
+ * Get a blocks parent
+ * @param block The block to find the parent for
+ * @param order The order to operate on
+ * @return Pointer to the parent block
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_parent(mali_memory_block * block, u32 order);
+
+/**
+ * Release mali memory
+ * Backend for free_big_block.
+ * Will release the mali memory described by the given block struct.
+ * @param block Memory block to free
+ */
+static void block_release(mali_memory_block * block);
+
+/* end interface implementation */
+
+/**
+ * List of all the memory banks registered with the subsystem.
+ * Access to this list is NOT synchronized since it's only
+ * written to during module init and termination.
+ */
+static _MALI_OSK_LIST_HEAD(memory_banks_list);
+
+/*
+ The buddy memory system's mali subsystem interface implementation.
+ We currently handle module and session life-time management.
+*/
+struct mali_kernel_subsystem mali_subsystem_memory =
+{
+ mali_memory_core_initialize, /* startup */
+ NULL, /*mali_memory_core_terminate,*/ /* shutdown */
+ mali_memory_core_load_complete, /* load_complete */
+ mali_memory_core_system_info_fill, /* system_info_fill */
+ mali_memory_core_session_begin, /* session_begin */
+ mali_memory_core_session_end, /* session_end */
+ NULL, /* broadcast_notification */
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+};
+
+/* Initialized when this subsystem is initialized. This is determined by the
+ * position in subsystems[], and so the value used to initialize this is
+ * determined at compile time */
+static mali_kernel_subsystem_identifier mali_subsystem_memory_id = -1;
+
+/* called during module init */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&memory_banks_list);
+
+ mali_subsystem_memory_id = id;
+
+ /* register our handlers */
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MEMORY, mali_memory_core_resource_memory));
+
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(MMU, mali_memory_core_resource_mmu));
+
+ MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(FPGA_FRAMEWORK, mali_memory_core_resource_fpga));
+
+ MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id)
+{
+ mali_memory_bank * bank, *temp;
+
+ /* loop over all memory banks to free them */
+ /* we use the safe version since we delete the current bank in the body */
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ MALI_DEBUG_CODE(int usage_count = _mali_osk_atomic_read(&bank->num_active_allocations));
+ /*
+ Report leaked memory
+ If this happens we have a bug in our session cleanup code.
+ */
+ MALI_DEBUG_PRINT_IF(1, 0 != usage_count, ("%d allocation(s) from memory bank at 0x%X still in use\n", usage_count, bank->base_addr));
+
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+
+ _mali_osk_lock_term(bank->lock);
+
+ /* unlink from bank list */
+ _mali_osk_list_del(&bank->list);
+
+ /* release kernel resources used by the bank */
+ _mali_osk_mem_unreqregion(bank->base_addr, bank->real_size);
+
+ /* remove all resources used to represent this bank*/
+ _mali_osk_free(bank->freelist);
+ _mali_osk_free(bank->blocklist);
+
+ /* destroy the bank object itself */
+ _mali_osk_free(bank);
+ }
+
+ /* No need to de-initialize mali_subsystem_memory_id - it could only be
+ * re-initialized to the same value */
+}
+
+/* load_complete handler */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id)
+{
+ mali_memory_bank * bank, *temp;
+
+ MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest number first) :\n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ if ( NULL != bank->name )
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", bank->alloc_order, bank->name) );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", bank->alloc_order ) );
+ }
+ }
+ MALI_SUCCESS;
+}
+
+MALI_STATIC_INLINE u32 order_needed_for_size(u32 size, struct mali_memory_bank * bank)
+{
+ u32 order = 0;
+
+ if (0 < size)
+ {
+ for ( order = sizeof(u32)*8 - 1; ((1UL<<order) & size) == 0; --order)
+ /* nothing */;
+
+ /* check if size is pow2, if not we need increment order by one */
+ if (0 != (size & ((1UL<<order)-1))) ++order;
+ }
+
+ if ((NULL != bank) && (order < bank->min_order)) order = bank->min_order;
+ /* Not capped to max order, that doesn't make sense */
+
+ return order;
+}
+
+MALI_STATIC_INLINE u32 maximum_order_which_fits(u32 size)
+{
+ u32 order = 0;
+ u32 powsize = 1;
+ while (powsize < size)
+ {
+ powsize <<= 1;
+ if (powsize > size) break;
+ order++;
+ }
+
+ return order;
+}
+
+/* called for new MEMORY resources */
+static _mali_osk_errcode_t mali_memory_bank_register(u32 phys_base, u32 cpu_usage_adjust, u32 size, u32 flags, u32 alloc_order, const char *name)
+{
+ /* no locking performed due to function contract */
+ int i;
+ u32 left, offset;
+ mali_memory_bank * bank;
+ mali_memory_bank * bank_enum, *temp;
+
+ _mali_osk_errcode_t err;
+
+ /* Only a multiple of MIN_BLOCK_SIZE is usable */
+ u32 usable_size = size & ~(MIN_BLOCK_SIZE - 1);
+
+ /* handle zero sized banks and bank smaller than the fixed block size */
+ if (0 == usable_size)
+ {
+ MALI_PRINT(("Usable size == 0\n"));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+	/* warn for banks not a multiple of the block size */
+ MALI_DEBUG_PRINT_IF(1, usable_size != size, ("Memory bank @ 0x%X not a multiple of minimum block size. %d bytes wasted\n", phys_base, size - usable_size));
+
+ /* check against previous registrations */
+ MALI_DEBUG_CODE(
+ {
+ _MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+ {
+ /* duplicate ? */
+ if (bank->base_addr == phys_base)
+ {
+ MALI_PRINT(("Duplicate registration of a memory bank at 0x%X detected\n", phys_base));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ /* overlapping ? */
+ else if (
+ ( (phys_base > bank->base_addr) && (phys_base < (bank->base_addr + bank->real_size)) ) ||
+ ( (phys_base + size) > bank->base_addr && ((phys_base + size) < (bank->base_addr + bank->real_size)) )
+ )
+ {
+ MALI_PRINT(("Overlapping memory blocks found. Memory at 0x%X overlaps with memory at 0x%X size 0x%X\n", bank->base_addr, phys_base, size));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ }
+ }
+ );
+
+ /* create an object to represent this memory bank */
+ MALI_CHECK_NON_NULL(bank = (mali_memory_bank*)_mali_osk_malloc(sizeof(mali_memory_bank)), _MALI_OSK_ERR_NOMEM);
+
+ /* init the fields */
+ _MALI_OSK_INIT_LIST_HEAD(&bank->list);
+ bank->base_addr = phys_base;
+ bank->cpu_usage_adjust = cpu_usage_adjust;
+ bank->size = usable_size;
+ bank->real_size = size;
+ bank->alloc_order = alloc_order;
+ bank->name = name;
+
+ err = _mali_osk_atomic_init(&bank->num_active_allocations, 0);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ _mali_osk_free(bank);
+ MALI_ERROR(err);
+ }
+
+ bank->used_for_flags = flags;
+ bank->min_order = order_needed_for_size(MIN_BLOCK_SIZE, NULL);
+ bank->max_order = maximum_order_which_fits(usable_size);
+ bank->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0);
+ if (NULL == bank->lock)
+ {
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ bank->blocklist = _mali_osk_calloc(1, sizeof(struct mali_memory_block) * (usable_size / MIN_BLOCK_SIZE));
+ if (NULL == bank->blocklist)
+ {
+ _mali_osk_lock_term(bank->lock);
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ for (i = 0; i < (usable_size / MIN_BLOCK_SIZE); i++)
+ {
+ bank->blocklist[i].bank = bank;
+ }
+
+ bank->freelist = _mali_osk_calloc(1, sizeof(_mali_osk_list_t) * (bank->max_order - bank->min_order + 1));
+ if (NULL == bank->freelist)
+ {
+ _mali_osk_lock_term(bank->lock);
+ _mali_osk_free(bank->blocklist);
+ _mali_osk_atomic_term(&bank->num_active_allocations);
+ _mali_osk_free(bank);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ for (i = 0; i < (bank->max_order - bank->min_order + 1); i++) _MALI_OSK_INIT_LIST_HEAD(&bank->freelist[i]);
+
+ /* init slot info */
+ for (offset = 0, left = usable_size; offset < (usable_size / MIN_BLOCK_SIZE); /* updated inside the body */)
+ {
+ u32 block_order;
+ mali_memory_block * block;
+
+ /* the maximum order which fits in the remaining area */
+ block_order = maximum_order_which_fits(left);
+
+ /* find the block pointer */
+ block = &bank->blocklist[offset];
+
+ /* tag the block as being toplevel */
+ set_block_toplevel(block, block_order);
+
+ /* tag it as being free */
+ set_block_free(block, 1);
+
+ /* set the order */
+ set_block_order(block, block_order);
+
+ _mali_osk_list_addtail(&block->link, bank->freelist + (block_order - bank->min_order));
+
+ left -= (1 << block_order);
+ offset += ((1 << block_order) / MIN_BLOCK_SIZE);
+ }
+
+ /* add bank to list of banks on the system */
+ _MALI_OSK_LIST_FOREACHENTRY( bank_enum, temp, &memory_banks_list, mali_memory_bank, list )
+ {
+ if ( bank_enum->alloc_order >= alloc_order )
+ {
+ /* Found insertion point - our item must go before this one */
+ break;
+ }
+ }
+ _mali_osk_list_addtail(&bank->list, &bank_enum->list);
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_memory_mmu_register(u32 type, u32 phys_base)
+{
+ /* not supported */
+ return _MALI_OSK_ERR_INVALID_FUNC;
+}
+
+void mali_memory_mmu_unregister(u32 phys_base)
+{
+ /* not supported */
+ return;
+}
+
+static mali_memory_block * mali_memory_block_get(u32 type_id, u32 minimum_size)
+{
+ mali_memory_bank * bank;
+ mali_memory_block * block = NULL;
+ u32 requested_order, current_order;
+
+ /* input validation */
+ if (0 == minimum_size)
+ {
+ /* bad size */
+ MALI_DEBUG_PRINT(2, ("Zero size block requested by mali_memory_block_get\n"));
+ return NULL;
+ }
+
+ bank = (mali_memory_bank*)type_id;
+
+ requested_order = order_needed_for_size(minimum_size, bank);
+
+ MALI_DEBUG_PRINT(4, ("For size %d we need order %d (%d)\n", minimum_size, requested_order, 1 << requested_order));
+
+ _mali_osk_lock_wait(bank->lock, _MALI_OSK_LOCKMODE_RW);
+ /* ! critical section begin */
+
+ MALI_DEBUG_PRINT(7, ("Bank 0x%x locked\n", bank));
+
+ for (current_order = requested_order; current_order <= bank->max_order; ++current_order)
+ {
+ _mali_osk_list_t * list = bank->freelist + (current_order - bank->min_order);
+ MALI_DEBUG_PRINT(7, ("Checking freelist 0x%x for order %d\n", list, current_order));
+ if (0 != _mali_osk_list_empty(list)) continue; /* empty list */
+
+ MALI_DEBUG_PRINT(7, ("Found an entry on the freelist for order %d\n", current_order));
+
+
+ block = _MALI_OSK_LIST_ENTRY(list->next, mali_memory_block, link);
+ _mali_osk_list_delinit(&block->link);
+
+ while (current_order > requested_order)
+ {
+ mali_memory_block * buddy_block;
+ MALI_DEBUG_PRINT(7, ("Splitting block 0x%x\n", block));
+ current_order--;
+ list--;
+ buddy_block = block_get_buddy(block, current_order - bank->min_order);
+ set_block_order(buddy_block, current_order);
+ set_block_free(buddy_block, 1);
+ _mali_osk_list_add(&buddy_block->link, list);
+ }
+
+ set_block_order(block, current_order);
+ set_block_free(block, 0);
+
+ /* update usage count */
+ _mali_osk_atomic_inc(&bank->num_active_allocations);
+
+ break;
+ }
+
+ /* ! critical section end */
+ _mali_osk_lock_signal(bank->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT(7, ("Lock released for bank 0x%x\n", bank));
+
+ MALI_DEBUG_PRINT_IF(7, NULL != block, ("Block 0x%x allocated\n", block));
+
+ return block;
+}
+
+
+static void block_release(mali_memory_block * block)
+{
+ mali_memory_bank * bank;
+ u32 current_order;
+
+ if (NULL == block) return;
+
+ bank = block->bank;
+
+ /* we're manipulating the free list, so we need to lock it */
+ _mali_osk_lock_wait(bank->lock, _MALI_OSK_LOCKMODE_RW);
+ /* ! critical section begin */
+
+ set_block_free(block, 1);
+ current_order = get_block_order(block);
+
+ while (current_order <= bank->max_order)
+ {
+ mali_memory_block * buddy_block;
+ buddy_block = block_get_buddy(block, current_order - bank->min_order);
+ if (!block_is_valid_buddy(buddy_block, current_order)) break;
+ _mali_osk_list_delinit(&buddy_block->link); /* remove from free list */
+ /* clear tracked data in both blocks */
+ set_block_order(block, 0);
+ set_block_free(block, 0);
+ set_block_order(buddy_block, 0);
+ set_block_free(buddy_block, 0);
+ /* make the parent control the new state */
+ block = block_get_parent(block, current_order - bank->min_order);
+ set_block_order(block, current_order + 1); /* merged has a higher order */
+ set_block_free(block, 1); /* mark it as free */
+ current_order++;
+ if (get_block_toplevel(block) == current_order) break; /* stop the merge if we've arrived at a toplevel block */
+ }
+
+ _mali_osk_list_add(&block->link, &bank->freelist[current_order - bank->min_order]);
+
+ /* update bank usage statistics */
+ _mali_osk_atomic_dec(&block->bank->num_active_allocations);
+
+ /* !critical section end */
+ _mali_osk_lock_signal(bank->lock, _MALI_OSK_LOCKMODE_RW);
+
+ return;
+}
+
+MALI_STATIC_INLINE u32 block_get_offset(mali_memory_block * block)
+{
+ return block - block->bank->blocklist;
+}
+
+MALI_STATIC_INLINE u32 block_mali_addr_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->bank->base_addr + MIN_BLOCK_SIZE * block_get_offset(block);
+ else return 0;
+}
+
+MALI_STATIC_INLINE u32 block_cpu_addr_get(mali_memory_block * block)
+{
+ if (NULL != block) return (block->bank->base_addr + MIN_BLOCK_SIZE * block_get_offset(block)) + block->bank->cpu_usage_adjust;
+ else return 0;
+}
+
+MALI_STATIC_INLINE u32 block_size_get(mali_memory_block * block)
+{
+ if (NULL != block) return 1 << get_block_order(block);
+ else return 0;
+}
+
+MALI_STATIC_INLINE void __user * block_mapping_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->mapping;
+ else return NULL;
+}
+
+MALI_STATIC_INLINE void block_mapping_set(mali_memory_block * block, void __user * ptr)
+{
+ if (NULL != block) block->mapping = ptr;
+}
+
+MALI_STATIC_INLINE u32 block_mmap_cookie_get(mali_memory_block * block)
+{
+ if (NULL != block) return block->mmap_cookie;
+ else return 0;
+}
+
+/**
+ * Set the cookie returned via _mali_ukk_mem_mmap().
+ * @param block The memory block to set the cookie for
+ * @param cookie the cookie
+ */
+MALI_STATIC_INLINE void block_mmap_cookie_set(mali_memory_block * block, u32 cookie)
+{
+ if (NULL != block) block->mmap_cookie = cookie;
+}
+
+
+/**
+ * Memory subsystem session-begin handler (non-MMU build).
+ * Allocates the per-session bookkeeping object (descriptor mapping,
+ * in-use memory list and lock) and stores it in @a slot.
+ *
+ * @param mali_session_data Core session object (unused here)
+ * @param slot Per-session subsystem slot; must be non-NULL and empty
+ * @param queue Notification queue for the session (unused here)
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable error code
+ */
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
+{
+	memory_session * session_data;
+
+	/* validate input */
+	if (NULL == slot)
+	{
+		MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+		MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+	}
+
+	if (NULL != *slot)
+	{
+		MALI_DEBUG_PRINT(1, ("The slot given to memory session begin already contains data"));
+		MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+	}
+
+	/* create the session data object */
+	MALI_CHECK_NON_NULL(session_data = _mali_osk_malloc(sizeof(memory_session)), _MALI_OSK_ERR_NOMEM);
+
+	/* create descriptor mapping table */
+	session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+	if (NULL == session_data->descriptor_mapping)
+	{
+		_mali_osk_free(session_data);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	_MALI_OSK_INIT_LIST_HEAD(&session_data->memory_head); /* no memory in use */
+	session_data->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, 0);
+	if (NULL == session_data->lock)
+	{
+		/* BUGFIX: the descriptor mapping created above was leaked on this
+		 * error path; tear it down before freeing the session object. */
+		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+		_mali_osk_free(session_data);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	*slot = session_data; /* slot will point to our data object */
+
+	MALI_SUCCESS;
+}
+
+/**
+ * Memory subsystem session-end handler (non-MMU build).
+ * Frees any Mali memory the session still holds (leaked by the caller or
+ * left behind when an application crash forces an auto-session end), then
+ * tears down the descriptor mapping, the lock and the session object.
+ */
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+	memory_session * session_data;
+
+	/* validate input */
+	if (NULL == slot)
+	{
+		MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+		return;
+	}
+
+	if (NULL == *slot)
+	{
+		MALI_DEBUG_PRINT(1, ("NULL memory_session found in current session object"));
+		return;
+	}
+
+	/* take the session lock before clearing the slot, so no new allocation can race us */
+	_mali_osk_lock_wait(((memory_session*)*slot)->lock, _MALI_OSK_LOCKMODE_RW);
+	session_data = (memory_session *)*slot;
+	/* clear our slot */
+	*slot = NULL;
+
+	/*
+	First free all memory still being used.
+	This can happen if the caller has leaked memory or
+	the application has crashed forcing an auto-session end.
+	*/
+	if (0 == _mali_osk_list_empty(&session_data->memory_head))
+	{
+		mali_memory_block * block, * temp;
+		MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+		/* use the _safe version since free_big_block removes the active block from the list we're iterating */
+		_MALI_OSK_LIST_FOREACHENTRY(block, temp, &session_data->memory_head, mali_memory_block, link)
+		{
+			_mali_osk_errcode_t err;
+			_mali_uk_free_big_block_s uk_args;
+
+			MALI_DEBUG_PRINT(4, ("Freeing block 0x%x with mali address 0x%x size %d mapped in user space at 0x%x\n",
+						block,
+						(void*)block_mali_addr_get(block),
+						block_size_get(block),
+						block_mapping_get(block))
+					);
+
+			/* free the block */
+			/** @note manual type safety check-point */
+			uk_args.ctx = mali_session_data;
+			uk_args.cookie = (u32)block->descriptor;
+			err = _mali_ukk_free_big_block_internal( mali_session_data, session_data, &uk_args );
+
+			if ( _MALI_OSK_ERR_OK != err )
+			{
+				MALI_DEBUG_PRINT_ERROR(("_mali_ukk_free_big_block_internal() failed during session termination on block with cookie==0x%X\n",
+						uk_args.cookie)
+						);
+			}
+		}
+	}
+
+	if (NULL != session_data->descriptor_mapping)
+	{
+		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+		session_data->descriptor_mapping = NULL;
+	}
+
+	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+	_mali_osk_lock_term(session_data->lock);
+
+	/* free the session data object */
+	_mali_osk_free(session_data);
+
+	return;
+}
+
+/**
+ * System-info filler for the memory subsystem.
+ * Appends one _mali_mem_info record per registered memory bank to
+ * info->mem_info, building a singly linked list in bank order.
+ * On allocation failure, records already linked in are freed by the caller.
+ */
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info)
+{
+	mali_memory_bank * bank, *temp;
+	_mali_mem_info **mem_info_tail;
+
+	/* check input */
+	MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+	/* make sure we won't leak any memory. It could also be that it's an uninitialized variable, but that would be a bug in the caller */
+	MALI_DEBUG_ASSERT(NULL == info->mem_info);
+
+	mem_info_tail = &info->mem_info;
+
+	_MALI_OSK_LIST_FOREACHENTRY(bank, temp, &memory_banks_list, mali_memory_bank, list)
+	{
+		_mali_mem_info * mem_info;
+
+		mem_info = (_mali_mem_info *)_mali_osk_calloc(1, sizeof(_mali_mem_info));
+		if (NULL == mem_info) return _MALI_OSK_ERR_NOMEM; /* memory already allocated will be freed by the caller */
+
+		/* set info */
+		mem_info->size = bank->size;
+		mem_info->flags = (_mali_bus_usage)bank->used_for_flags;
+		mem_info->maximum_order_supported = bank->max_order;
+		mem_info->identifier = (u32)bank;
+
+		/* add to system info linked list */
+		(*mem_info_tail) = mem_info;
+		mem_info_tail = &mem_info->next;
+	}
+
+	/* all OK */
+	MALI_SUCCESS;
+}
+
+/**
+ * Resource handler for MEMORY resources.
+ * Requests ownership of the physical region and hands it to the bank
+ * allocator backend; releases the region again if the backend refuses it.
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_memory(_mali_osk_resource_t * resource)
+{
+	_mali_osk_errcode_t err;
+
+	/* Request ownership of the memory */
+	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	/* call backend */
+	err = mali_memory_bank_register(resource->base, resource->cpu_usage_adjust, resource->size, resource->flags, resource->alloc_order, resource->description);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		/* if backend refused the memory we have to release the region again */
+		MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+		_mali_osk_mem_unreqregion(resource->base, resource->size);
+		MALI_ERROR(err);
+	}
+
+	MALI_SUCCESS;
+}
+
+/** MMU resources cannot be handled by the fixed-block (non-MMU) memory system. */
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource)
+{
+	MALI_IGNORE(resource);
+	MALI_DEBUG_PRINT(1, ("MMU resource not supported by non-MMU driver!\n"));
+	MALI_ERROR(_MALI_OSK_ERR_INVALID_FUNC);
+}
+
+/**
+ * Resource handler for FPGA_FRAMEWORK resources.
+ * Maps two framework ID registers (at resource->base + 0x1000 — note the
+ * debug prints report resource->base itself) and logs the bitfile date and
+ * SCCS revision encoded in them. Failure to map is logged, not fatal.
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource)
+{
+	mali_io_address mapping;
+
+	MALI_DEBUG_PRINT(5, ("FPGA framework '%s' @ (0x%08X - 0x%08X)\n",
+				resource->description, resource->base, resource->base + sizeof(u32) * 2 - 1
+				));
+
+	mapping = _mali_osk_mem_mapioregion(resource->base + 0x1000, sizeof(u32) * 2, "fpga framework");
+	if (mapping)
+	{
+		u32 data;
+		data = _mali_osk_mem_ioread32(mapping, 0);
+		/* BUGFIX: log messages misspelled "framework" as "framwork" */
+		MALI_DEBUG_PRINT(2, ("FPGA framework '%s' @ 0x%08X:\n", resource->description, resource->base));
+		/* first word packs the bitfile build date: YYYY|MM|DD|hh|mm */
+		MALI_DEBUG_PRINT(2, ("\tBitfile date: %d%02d%02d_%02d%02d\n",
+				(data >> 20),
+				(data >> 16) & 0xF,
+				(data >> 11) & 0x1F,
+				(data >> 6)  & 0x1F,
+				(data >> 0)  & 0x3F));
+		data = _mali_osk_mem_ioread32(mapping, sizeof(u32));
+		MALI_DEBUG_PRINT(2, ("\tBitfile SCCS rev: %d\n", data));
+
+		_mali_osk_mem_unmapioregion(resource->base + 0x1000, sizeof(u32) * 2, mapping);
+	}
+	else MALI_DEBUG_PRINT(1, ("Failed to access FPGA framework '%s' @ 0x%08X\n", resource->description, resource->base));
+
+	MALI_SUCCESS;
+}
+
+/* static _mali_osk_errcode_t get_big_block(void * ukk_private, struct mali_session_data * mali_session_data, void __user * argument) */
+/**
+ * User-kernel entry point: allocate a big block of Mali memory.
+ * Allocates a block of at least args->minimum_size_requested bytes from the
+ * bank matching args->type_id, registers it in the session's descriptor
+ * mapping, maps it into the calling process and links it into the session's
+ * in-use list. All steps are undone on failure. Runs under the session lock.
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args )
+{
+	_mali_uk_mem_mmap_s args_mmap = {0, };
+	int md;
+	mali_memory_block * block;
+	_mali_osk_errcode_t err;
+	memory_session * session_data;
+
+	MALI_DEBUG_ASSERT_POINTER( args );
+
+	MALI_DEBUG_ASSERT_POINTER( args->ctx );
+
+	/** @note manual type safety check-point */
+	session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+	MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (!args->type_id)
+	{
+		_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	/* at least min block size */
+	if (MIN_BLOCK_SIZE > args->minimum_size_requested) args->minimum_size_requested = MIN_BLOCK_SIZE;
+
+	/* perform the actual allocation */
+	block = mali_memory_block_get(args->type_id, args->minimum_size_requested);
+	if ( NULL == block )
+	{
+		/* no memory available with requested type_id */
+		_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+
+	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, block, &md))
+	{
+		block_release(block);
+		_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+	}
+	block->descriptor = md;
+
+
+	/* fill in response */
+	args->mali_address = block_mali_addr_get(block);
+	args->block_size = block_size_get(block);
+	args->cookie = (u32)md;
+	args->flags = block->bank->used_for_flags;
+
+	/* map the block into the process' address space */
+
+	/** @note manual type safety check-point */
+	args_mmap.ukk_private = (void *)args->ukk_private;
+	args_mmap.ctx = args->ctx;
+	args_mmap.size = args->block_size;
+	args_mmap.phys_addr = block_cpu_addr_get(block);
+
+#ifndef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+	err = _mali_ukk_mem_mmap( &args_mmap );
+#else
+	err = _mali_osk_specific_indirect_mmap( &args_mmap );
+#endif
+
+	/* check if the mapping failed */
+	if ( _MALI_OSK_ERR_OK != err )
+	{
+		/* BUGFIX: the old message printed args->cpuptr, which is an output
+		 * field that has not been assigned yet on this path; report the
+		 * error code instead. */
+		MALI_DEBUG_PRINT(1, ("Memory mapping failed with error 0x%x\n", err));
+		/* mapping failed */
+
+		/* remove descriptor entry */
+		mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+
+		/* free the mali memory */
+		block_release(block);
+
+		_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+		return err;
+	}
+
+	args->cpuptr = args_mmap.mapping;
+	block_mmap_cookie_set(block, args_mmap.cookie);
+	block_mapping_set(block, args->cpuptr);
+
+	MALI_DEBUG_PRINT(2, ("Mali memory 0x%x (size %d) mapped in process memory space at 0x%x\n", (void*)args->mali_address, args->block_size, args->cpuptr));
+
+	/* track memory in use for the session */
+	_mali_osk_list_addtail(&block->link, &session_data->memory_head);
+
+	/* memory assigned to the session, memory mapped into the process' view */
+	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+	MALI_SUCCESS;
+}
+
+/* Internal code that assumes the memory session lock is held */
+/**
+ * Free one big block identified by args->cookie: unmap it from the process,
+ * unlink it from the session's in-use list, release its descriptor and hand
+ * the block back to the bank allocator. Called both from the ioctl wrapper
+ * _mali_ukk_free_big_block() and from session termination.
+ */
+static _mali_osk_errcode_t _mali_ukk_free_big_block_internal( struct mali_session_data * mali_session_data, memory_session * session_data, _mali_uk_free_big_block_s *args)
+{
+	mali_memory_block * block = NULL;
+	_mali_osk_errcode_t err;
+	_mali_uk_mem_munmap_s args_munmap = {0,};
+
+	MALI_DEBUG_ASSERT_POINTER( mali_session_data );
+	MALI_DEBUG_ASSERT_POINTER( session_data );
+	MALI_DEBUG_ASSERT_POINTER( args );
+
+	/* cookie is a user-supplied descriptor index; validate via the mapping */
+	err = mali_descriptor_mapping_get(session_data->descriptor_mapping, (int)args->cookie, (void**)&block);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release memory pages\n", (int)args->cookie));
+		MALI_ERROR(err);
+	}
+
+	MALI_DEBUG_ASSERT_POINTER(block);
+
+	MALI_DEBUG_PRINT(4, ("Asked to free block 0x%x with mali address 0x%x size %d mapped in user space at 0x%x\n",
+				block,
+				(void*)block_mali_addr_get(block),
+				block_size_get(block),
+				block_mapping_get(block))
+			);
+
+	/** @note manual type safety check-point */
+	args_munmap.ctx = (void*)mali_session_data;
+	args_munmap.mapping = block_mapping_get( block );
+	args_munmap.size = block_size_get( block );
+	args_munmap.cookie = block_mmap_cookie_get( block );
+
+#ifndef _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+	_mali_ukk_mem_munmap( &args_munmap );
+#else
+	_mali_osk_specific_indirect_munmap( &args_munmap );
+#endif
+
+	MALI_DEBUG_PRINT(6, ("Session data 0x%x, lock 0x%x\n", session_data, &session_data->lock));
+
+	/* unlink from session usage list */
+	MALI_DEBUG_PRINT(5, ("unlink from session usage list\n"));
+	_mali_osk_list_delinit(&block->link);
+
+	/* remove descriptor entry */
+	mali_descriptor_mapping_free(session_data->descriptor_mapping, (int)args->cookie);
+
+	/* free the mali memory */
+	block_release(block);
+	MALI_DEBUG_PRINT(5, ("Block freed\n"));
+
+	MALI_SUCCESS;
+}
+
+/* static _mali_osk_errcode_t free_big_block( struct mali_session_data * mali_session_data, void __user * argument) */
+/**
+ * User-kernel entry point: free a big block previously returned by
+ * _mali_ukk_get_big_block(). Validates the session, takes the memory
+ * session lock and delegates to _mali_ukk_free_big_block_internal().
+ */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args )
+{
+	struct mali_session_data * mali_session_data;
+	memory_session * session_data;
+	_mali_osk_errcode_t ret;
+
+	MALI_DEBUG_ASSERT_POINTER( args );
+
+	MALI_DEBUG_ASSERT_POINTER( args->ctx );
+
+	/** @note manual type safety check-point */
+	mali_session_data = (struct mali_session_data *)args->ctx;
+
+	/* Must always verify this, since these are provided by the user */
+	MALI_CHECK_NON_NULL(mali_session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+	session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+
+	MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+	/** @note this has been separated out so that the session_end handler can call this while it has the memory_session lock held */
+	ret = _mali_ukk_free_big_block_internal( mali_session_data, session_data, args );
+
+	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+	return ret;
+}
+
+/** Extract the "free" flag from the block's packed misc field. */
+MALI_STATIC_INLINE u32 get_block_free(mali_memory_block * block)
+{
+	u32 shifted = block->misc >> MISC_SHIFT_FREE;
+	return shifted & MISC_MASK_FREE;
+}
+
+/** Set or clear the "free" flag in the block's packed misc field. */
+MALI_STATIC_INLINE void set_block_free(mali_memory_block * block, int state)
+{
+	const u32 free_bits = MISC_MASK_FREE << MISC_SHIFT_FREE;
+	if (state) block->misc |= free_bits;
+	else block->misc &= ~free_bits;
+}
+
+/** Store the buddy order in the block's packed misc field. */
+MALI_STATIC_INLINE void set_block_order(mali_memory_block * block, u32 order)
+{
+	u32 misc = block->misc;
+	misc &= ~(MISC_MASK_ORDER << MISC_SHIFT_ORDER);
+	misc |= (order & MISC_MASK_ORDER) << MISC_SHIFT_ORDER;
+	block->misc = misc;
+}
+
+/** Extract the buddy order from the block's packed misc field. */
+MALI_STATIC_INLINE u32 get_block_order(mali_memory_block * block)
+{
+	u32 shifted = block->misc >> MISC_SHIFT_ORDER;
+	return shifted & MISC_MASK_ORDER;
+}
+
+/** OR the toplevel marker into the block's packed misc field (never cleared here). */
+MALI_STATIC_INLINE void set_block_toplevel(mali_memory_block * block, u32 level)
+{
+	u32 toplevel_bits = (level & MISC_MASK_TOPLEVEL) << MISC_SHIFT_TOPLEVEL;
+	block->misc |= toplevel_bits;
+}
+
+/** Extract the toplevel marker from the block's packed misc field. */
+MALI_STATIC_INLINE u32 get_block_toplevel(mali_memory_block * block)
+{
+	u32 shifted = block->misc >> MISC_SHIFT_TOPLEVEL;
+	return shifted & MISC_MASK_TOPLEVEL;
+}
+
+/** A block is a mergeable buddy only if it is free AND still at the given order. */
+MALI_STATIC_INLINE int block_is_valid_buddy(mali_memory_block * block, int order)
+{
+	if (!get_block_free(block)) return 0;
+	if (get_block_order(block) != order) return 0;
+	return 1;
+}
+
+/**
+ * Return the buddy of @a block at the given order: the buddy's index is the
+ * block's index with bit @a order flipped (classic buddy-allocator pairing).
+ * Written as block + (new_offset - old_offset) so the delta is applied with
+ * plain pointer arithmetic; NOTE(review): the subtraction is done in u32 and
+ * relies on unsigned wraparound matching pointer arithmetic when the buddy
+ * lies below the block — confirm for 64-bit targets.
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_buddy(mali_memory_block * block, u32 order)
+{
+	return block + ( (block_get_offset(block) ^ (1 << order)) - block_get_offset(block));
+}
+
+/**
+ * Return the parent of @a block at the given order: the parent's index is the
+ * block's index with bit @a order cleared, i.e. the lower block of the buddy
+ * pair. Same offset-delta pointer idiom (and the same unsigned-wraparound
+ * caveat) as block_get_buddy() above.
+ */
+MALI_STATIC_INLINE mali_memory_block * block_get_parent(mali_memory_block * block, u32 order)
+{
+	return block + ((block_get_offset(block) & ~(1 << order)) - block_get_offset(block));
+}
+
+/* This handler registered to mali_mmap for non-MMU builds */
+/**
+ * Map a physical Mali memory range into the calling process.
+ * Allocates a mali_memory_allocation descriptor, initializes and performs
+ * the OS-level region mapping, and returns the user-space mapping in
+ * args->mapping plus the descriptor pointer (cast to u32) in args->cookie.
+ * The descriptor is freed again by _mali_ukk_mem_munmap() via that cookie.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+	_mali_osk_errcode_t ret;
+	struct mali_session_data * mali_session_data;
+	mali_memory_allocation * descriptor;
+	memory_session * session_data;
+
+	/* validate input */
+	if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+	/* Unpack arguments */
+	mali_session_data = (struct mali_session_data *)args->ctx;
+
+	if (NULL == mali_session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: mali_session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+	MALI_DEBUG_ASSERT( mali_subsystem_memory_id >= 0 );
+
+	session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
+	/* validate input */
+	if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+	descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+	if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+	descriptor->size = args->size;
+	descriptor->mali_address = args->phys_addr;
+	descriptor->mali_addr_mapping_info = (void*)session_data;
+	descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+	descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+
+	ret = _mali_osk_mem_mapregion_init( descriptor );
+	if ( _MALI_OSK_ERR_OK != ret )
+	{
+		MALI_DEBUG_PRINT(3, ("_mali_osk_mem_mapregion_init() failed\n"));
+		_mali_osk_free(descriptor);
+		MALI_ERROR(ret);
+	}
+
+	ret = _mali_osk_mem_mapregion_map( descriptor, 0, &descriptor->mali_address, descriptor->size );
+	if ( _MALI_OSK_ERR_OK != ret )
+	{
+		MALI_DEBUG_PRINT(3, ("_mali_osk_mem_mapregion_map() failed\n"));
+		_mali_osk_mem_mapregion_term( descriptor );
+		_mali_osk_free(descriptor);
+		MALI_ERROR(ret);
+	}
+
+	args->mapping = descriptor->mapping;
+
+	/**
+	 * @note we do not require use of mali_descriptor_mapping here:
+	 * the cookie gets stored in the mali_memory_block struct, which itself is
+	 * protected by mali_descriptor_mapping, and so this cookie never leaves
+	 * kernel space (on any OS).
+	 *
+	 * In the MMU case, we must use a mali_descriptor_mapping, since on _some_
+	 * OSs, the cookie leaves kernel space.
+	 */
+	args->cookie = (u32)descriptor;
+	MALI_SUCCESS;
+}
+
+/* This handler registered to mali_munmap for non-MMU builds */
+/**
+ * Undo _mali_ukk_mem_mmap(): args->cookie is the descriptor pointer stored
+ * by the mmap handler; unmap the region, terminate the OS mapping state and
+ * free the descriptor.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+	mali_memory_allocation * descriptor;
+
+	/** see note in _mali_ukk_mem_mmap() - no need to use descriptor mapping */
+	descriptor = (mali_memory_allocation *)args->cookie;
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+	/* args->mapping and args->size are also discarded. They are only necessary for certain do_munmap implementations. However, they could be used to check the descriptor at this point. */
+	_mali_osk_mem_mapregion_unmap( descriptor, 0, descriptor->size, (_mali_osk_mem_mapregion_flags_t)0 );
+
+	_mali_osk_mem_mapregion_term( descriptor );
+
+	_mali_osk_free(descriptor);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+	/* MMU-only entry point: not implemented in the non-MMU build */
+	MALI_IGNORE(args);
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+	/* MMU-only entry point: not implemented in the non-MMU build */
+	MALI_IGNORE(args);
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+	/* MMU-only entry point: not implemented in the non-MMU build */
+	MALI_IGNORE(args);
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+	/* MMU-only entry point: not implemented in the non-MMU build */
+	MALI_IGNORE(args);
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+	/* MMU-only entry point: not implemented in the non-MMU build */
+	MALI_IGNORE(args);
+	return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+	/* MMU-only entry point: not implemented in the non-MMU build */
+	MALI_IGNORE(args);
+	return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_mem_mmu.c b/drivers/media/video/samsung/mali/common/mali_kernel_mem_mmu.c
new file mode 100644
index 0000000..c993ad5
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_mem_mmu.c
@@ -0,0 +1,3157 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_mem.h"
+#include "mali_kernel_ioctl.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_kernel_mem_mmu.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_kernel_mem_os.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_rendercore.h"
+
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+#include "ump_kernel_interface.h"
+#endif
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+/**
+ * Calculate memory address from PDE and PTE
+ */
+#define MALI_MMU_ADDRESS(pde, pte) (((pde)<<22) | ((pte)<<12))
+
+/**
+ * Linux kernel version has marked SA_SHIRQ as deprecated, IRQF_SHARED should be used.
+ * This is to handle older kernels which haven't done this swap.
+ */
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF_SHARED */
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+/**
+ * Used to disallow more than one core to run a MMU at the same time
+ *
+ * @note This value is hardwired into some systems' configuration files,
+ * which \em might not be a header file (e.g. some external data configuration
+ * file). Therefore, if this value is modified, its occurance must be
+ * \b manually checked for in the entire driver source tree.
+ */
+#define MALI_MMU_DISALLOW_PARALLELL_WORK_OF_MALI_CORES 1
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
+/**
+ * Access-control flags carried in the low bits of a PDE/PTE entry.
+ */
+typedef enum mali_mmu_entry_flags
+{
+	MALI_MMU_FLAGS_PRESENT = 0x01,          /**< Entry is valid / backed by a page */
+	MALI_MMU_FLAGS_READ_PERMISSION = 0x02,  /**< Read access allowed */
+	MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04, /**< Write access allowed */
+	MALI_MMU_FLAGS_MASK = 0x07              /**< All flag bits */
+} mali_mmu_entry_flags;
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register.
+ * NOTE(review): values are sequential (0..8), which suggests word offsets
+ * rather than byte offsets — confirm against the register access helpers.
+ */
+typedef enum mali_mmu_register {
+	MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+	MALI_MMU_REGISTER_STATUS = 0x0001, /**< Status of the MMU */
+	MALI_MMU_REGISTER_COMMAND = 0x0002, /**< Command register, used to control the MMU */
+	MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x0003, /**< Logical address of the last page fault */
+	MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x004, /**< Used to invalidate the mapping of a single page from the MMU */
+	MALI_MMU_REGISTER_INT_RAWSTAT = 0x0005, /**< Raw interrupt status, all interrupts visible */
+	MALI_MMU_REGISTER_INT_CLEAR = 0x0006, /**< Indicate to the MMU that the interrupt has been received */
+	MALI_MMU_REGISTER_INT_MASK = 0x0007, /**< Enable/disable types of interrupts */
+	MALI_MMU_REGISTER_INT_STATUS = 0x0008 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt
+{
+	MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+	MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit (presumably written to
+ * MALI_MMU_REGISTER_COMMAND — confirm against the issue routine).
+ */
+typedef enum mali_mmu_command
+{
+	MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+	MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+	MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**<  Enable stall on page fault */
+	MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+	MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+	MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+	MALI_MMU_COMMAND_SOFT_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+/** Bitfield values reported by the MMU STATUS register. */
+typedef enum mali_mmu_status_bits
+{
+	MALI_MMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0, /**< Address translation is active */
+	MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1, /**< A page fault is outstanding */
+	MALI_MMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2, /**< The MMU is stalled */
+	MALI_MMU_STATUS_BIT_IDLE                = 1 << 3, /**< No outstanding work */
+	MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4, /**< Replay buffer has drained */
+	MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5, /**< Last page fault was a write access */
+} mali_mmu_status_bits;
+
+/**
+ * Definition of the type used to represent memory used by a session.
+ * Contains the pointer to the huge user space virtual memory area
+ * used to access the Mali memory.
+ */
+typedef struct memory_session
+{
+	_mali_osk_lock_t *lock; /**< Lock protecting the vm manipulation */
+
+	u32 mali_base_address; /**< Mali virtual memory area used by this session */
+	mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+
+	u32 page_directory; /**< Physical address of the memory session's page directory */
+
+	mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+	mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exist in the page directory mapped into the kernel's address space */
+	u32 page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released on the last reference */
+
+	_mali_osk_list_t active_mmus; /**< The MMUs in this session, in increasing order of ID (so we can lock them in the correct order when necessary) */
+	_mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+} memory_session;
+
+/**
+ * One registered MMU-idle notification: linked into an MMU's callbacks
+ * list; callback(callback_argument) is invoked when the MMU becomes idle.
+ */
+typedef struct mali_kernel_memory_mmu_idle_callback
+{
+	_mali_osk_list_t link;       /**< Link into mali_kernel_memory_mmu::callbacks */
+	void (*callback)(void*);     /**< Function to invoke on idle */
+	void * callback_argument;    /**< Opaque argument passed to callback */
+} mali_kernel_memory_mmu_idle_callback;
+
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+typedef struct mali_kernel_memory_mmu
+{
+	int id; /**< ID of the MMU, no duplicate IDs may exist on the system */
+	const char * description; /**< Description text received from the resource manager to help identify the resource for people */
+	int irq_nr; /**< IRQ number */
+	u32 base; /**< Physical address of the registers */
+	mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+	u32 mapping_size; /**< Size of registers in bytes */
+	_mali_osk_list_t list; /**< Used to link multiple MMU's into a list */
+	_mali_osk_irq_t *irq; /**< IRQ handling object for irq_nr */
+	u32 flags; /**< Used to store if there is something special with this mmu. */
+
+	_mali_osk_lock_t *lock; /**< Lock protecting access to the usage fields */
+	/* usage fields */
+	memory_session * active_session; /**< Active session, NULL if no session is active */
+	u32 usage_count; /**< Number of nested activations of the active session */
+	_mali_osk_list_t callbacks; /**< Callback registered for MMU idle notification */
+	void *core; /**< Core this MMU belongs to — NOTE(review): opaque here, confirm concrete type at the usage sites */
+
+	int in_page_fault_handler; /**< Presumably non-zero while the page fault handler runs — confirm at usage sites */
+
+	_mali_osk_list_t session_link; /**< Link into the owning session's active_mmus list */
+} mali_kernel_memory_mmu;
+
+/** One dedicated memory region (base/size), kept in a singly linked list. */
+typedef struct dedicated_memory_info
+{
+	u32 base;  /**< Physical base address of the region */
+	u32 size;  /**< Size of the region in bytes */
+	struct dedicated_memory_info * next; /**< Next region, or NULL */
+} dedicated_memory_info;
+
+/* types used for external_memory and ump_memory physical memory allocators, which are using the mali_allocation_engine */
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/** Tracking record for a mapping of UMP-provided memory into Mali. */
+typedef struct ump_mem_allocation
+{
+	mali_allocation_engine * engine;      /**< Engine that performed the mapping */
+	mali_memory_allocation * descriptor;  /**< Descriptor of the mapped range */
+	u32 initial_offset;                   /**< Offset into the mapping at which this allocation started */
+	u32 size_allocated;                   /**< Number of bytes actually allocated */
+	ump_dd_handle ump_mem;                /**< UMP handle backing the memory */
+} ump_mem_allocation ;
+#endif
+
+/** Tracking record for a mapping of externally provided physical memory into Mali. */
+typedef struct external_mem_allocation
+{
+	mali_allocation_engine * engine;      /**< Engine that performed the mapping */
+	mali_memory_allocation * descriptor;  /**< Descriptor of the mapped range */
+	u32 initial_offset;                   /**< Offset into the mapping at which this allocation started */
+	u32 size;                             /**< Size of the allocation in bytes */
+} external_mem_allocation;
+
+/*
+ Subsystem interface implementation
+*/
+/**
+ * Fixed block memory subsystem startup function.
+ * Called by the driver core when the driver is loaded.
+ * Registers the memory systems ioctl handler, resource handlers and memory map function with the core.
+ *
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id);
+
+/**
+ * Fixed block memory subsystem shutdown function.
+ * Called by the driver core when the driver is unloaded.
+ * Cleans up
+ * @param id Identifier assigned by the core to the memory subsystem
+ */
+static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id);
+
+/**
+ * MMU Memory load complete notification function.
+ * Called by the driver core when all drivers have loaded and all resources have been registered
+ * Builds the overall memory list
+ * @param id Identifier assigned by the core to the memory subsystem
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id);
+
+/**
+ * Fixed block memory subsystem session begin notification
+ * Called by the core when a new session to the driver is started.
+ * Creates a memory session object and sets it as the subsystem slot data for this session
+ * @param slot Pointer to the slot to use for storing per-session data
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+
+/**
+ * Fixed block memory subsystem session end notification
+ * Called by the core when a session to the driver has ended.
+ * Cleans up per session data, which includes checking and fixing memory leaks
+ *
+ * @param slot Pointer to the slot to use for storing per-session data
+ */
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+/**
+ * Fixed block memory subsystem system info filler
+ * Called by the core when a system info update is needed
+ * We fill in info about all the memory types we have
+ * @param info Pointer to system info struct to update
+ * @return 0 on success, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info);
+
+/* our registered resource handlers */
+
+/**
+ * Fixed block memory subsystem's notification handler for MMU resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each mmu described in the active architecture's config.h file.
+ * @param resource The resource to handle (type MMU)
+ * @return 0 if the MMU was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource);
+
+/**
+ * Fixed block memory subsystem's notification handler for FPGA_FRAMEWORK resource instances.
+ * Registered with the core during startup.
+ * Called by the core for each fpga framework described in the active architecture's config.h file.
+ * @param resource The resource to handle (type FPGA_FRAMEWORK)
+ * @return 0 if the FPGA framework was found and initialized, negative on error
+ */
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource);
+
+
+static _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource);
+static _mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource);
+
+/**
+ * @brief Internal function for unmapping memory
+ *
+ * Worker function for unmapping memory from a user-process. We assume that the
+ * session/descriptor's lock was obtained before entry. For example, the
+ * wrapper _mali_ukk_mem_munmap() will lock the descriptor, then call this
+ * function to do the actual unmapping. mali_memory_core_session_end() could
+ * also call this directly (depending on compilation options), having locked
+ * the descriptor.
+ *
+ * This function will fail if it is unable to put the MMU in stall mode (which
+ * might be the case if a page fault is also being processed).
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args );
+
+/**
+ * The MMU interrupt handler
+ * Upper half of the MMU interrupt processing.
+ * Called by the kernel when the MMU has triggered an interrupt.
+ * The interrupt function supports IRQ sharing. So it'll probe the MMU in question
+ * @param irq The irq number (not used)
+ * @param dev_id Points to the MMU object being handled
+ * @param regs Registers of interrupted process (not used)
+ * @return Standard Linux interrupt result.
+ * Subset used by the driver is IRQ_HANDLED processed
+ * IRQ_NONE Not processed
+ */
+static _mali_osk_errcode_t mali_kernel_memory_mmu_interrupt_handler_upper_half(void * data);
+
+/**
+ * The MMU reset hander
+ * Bottom half of the MMU interrupt processing for page faults and bus errors
+ * @param work The item to operate on, NULL in our case
+ */
+static void mali_kernel_memory_mmu_interrupt_handler_bottom_half ( void *data );
+
+/**
+ * Read MMU register value
+ * Reads the contents of the specified register.
+ * @param unit The MMU to read from
+ * @param reg The register to read
+ * @return The contents of the register
+ */
+static u32 mali_mmu_register_read(mali_kernel_memory_mmu * unit, mali_mmu_register reg);
+
+/**
+ * Write to a MMU register
+ * Writes the given value to the specified register
+ * @param unit The MMU to write to
+ * @param reg The register to write to
+ * @param val The value to write to the register
+ */
+static void mali_mmu_register_write(mali_kernel_memory_mmu * unit, mali_mmu_register reg, u32 val);
+
+/**
+ * Issues the reset command to the MMU and waits for HW to be ready again
+ * @param mmu The MMU to reset
+ */
+static void mali_mmu_raw_reset(mali_kernel_memory_mmu * mmu);
+
+/**
+ * Issues the enable paging command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable paging for
+ */
+static void mali_mmu_enable_paging(mali_kernel_memory_mmu * mmu);
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable paging for
+ * @return MALI_TRUE if HW stall was successfully engaged, otherwise MALI_FALSE (req timed out)
+ */
+static mali_bool mali_mmu_enable_stall(mali_kernel_memory_mmu * mmu);
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to enable paging for
+ */
+static void mali_mmu_disable_stall(mali_kernel_memory_mmu * mmu);
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static void ump_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0*/
+
+
+static void external_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+
+
+
+
+/* nop functions */
+
+/* mali address manager needs to allocate page tables on allocate, write to page table(s) on map, write to page table(s) and release page tables on release */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor); /* validates the range, allocates memory for the page tables if needed */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+static void mali_address_manager_release(mali_memory_allocation * descriptor);
+
+static void mali_mmu_activate_address_space(mali_kernel_memory_mmu * mmu, u32 page_directory);
+
+_mali_osk_errcode_t mali_mmu_page_table_cache_create(void);
+void mali_mmu_page_table_cache_destroy(void);
+
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping);
+void mali_mmu_release_table_page(u32 pa);
+
+static _mali_osk_errcode_t mali_allocate_empty_page_directory(void);
+
+static void mali_free_empty_page_directory(void);
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+static _mali_osk_errcode_t mali_allocate_fault_flush_pages(void);
+
+static void mali_free_fault_flush_pages(void);
+
+static void mali_mmu_probe_irq_trigger(mali_kernel_memory_mmu * mmu);
+static _mali_osk_errcode_t mali_mmu_probe_irq_acknowledge(mali_kernel_memory_mmu * mmu);
+
/* MMU variables */

/* One chunk of pages handed out by the page table cache.
 * NOTE(review): field semantics inferred from names; the cache implementation
 * lives outside this view — confirm against the page table cache code. */
typedef struct mali_mmu_page_table_allocation
{
	_mali_osk_list_t list;          /* node in page_table_cache.partial or .full */
	u32 * usage_map;                /* presumably a per-page in-use bitmap */
	u32 usage_count;                /* number of pages currently handed out */
	u32 num_pages;                  /* total pages in this allocation */
	mali_page_table_block pages;    /* the underlying physical block */
} mali_mmu_page_table_allocation;

typedef struct mali_mmu_page_table_allocations
{
	_mali_osk_lock_t *lock;         /* protects both lists */
	_mali_osk_list_t partial;       /* allocations that still have free pages */
	_mali_osk_list_t full;          /* allocations with every page in use */
	/* we never hold on to a empty allocation */
} mali_mmu_page_table_allocations;

/* Head of the list of MMUs */
static _MALI_OSK_LIST_HEAD(mmu_head);

/* the mmu page table cache */
static struct mali_mmu_page_table_allocations page_table_cache;

/* page fault queue flush helper pages
 * note that the mapping pointers are currently unused outside of the initialization functions */
static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_page_directory_mapping = NULL;
static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_page_table_mapping = NULL;
static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
static mali_io_address mali_page_fault_flush_data_page_mapping = NULL;

/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
static u32 mali_empty_page_directory = MALI_INVALID_PAGE;
+
/*
  The MMU memory system's mali subsystem interface implementation.
  We currently handle module and session life-time management.
*/
struct mali_kernel_subsystem mali_subsystem_memory =
{
	mali_memory_core_initialize, /* startup */
	NULL, /*mali_memory_core_terminate,*/ /* shutdown */
	mali_memory_core_load_complete, /* load_complete */
	mali_memory_core_system_info_fill, /* system_info_fill */
	mali_memory_core_session_begin, /* session_begin */
	mali_memory_core_session_end, /* session_end */
	NULL, /* broadcast_notification */
#if MALI_STATE_TRACKING
	NULL, /* dump_state */
#endif
};

/* Address manager for the Mali-side (GPU virtual address) mappings. */
static mali_kernel_mem_address_manager mali_address_manager =
{
	mali_address_manager_allocate, /* allocate */
	mali_address_manager_release, /* release */
	mali_address_manager_map, /* map_physical */
	NULL /* unmap_physical not present*/
};

/* Address manager for the CPU-side (user process) mappings, backed by the OSK layer. */
static mali_kernel_mem_address_manager process_address_manager =
{
	_mali_osk_mem_mapregion_init, /* allocate */
	_mali_osk_mem_mapregion_term, /* release */
	_mali_osk_mem_mapregion_map, /* map_physical */
	_mali_osk_mem_mapregion_unmap /* unmap_physical */
};

/* Allocation engine pairing the two address managers; created during subsystem init. */
static mali_allocation_engine memory_engine = NULL;
/* Singly-linked list of physical memory providers, ordered by alloc_order. */
static mali_physical_memory_allocator * physical_memory_allocators = NULL;

/* Cleanup records for dedicated memory banks (regions to unrequest at shutdown). */
static dedicated_memory_info * mem_region_registrations = NULL;

/* Initialized when this subsystem is initialized. This is determined by the
 * position in subsystems[], and so the value used to initialize this is
 * determined at compile time */
static mali_kernel_subsystem_identifier mali_subsystem_memory_id = (mali_kernel_subsystem_identifier)-1;
+
/* called during module init */
static _mali_osk_errcode_t mali_memory_core_initialize(mali_kernel_subsystem_identifier id)
{
	MALI_DEBUG_PRINT(2, ("MMU memory system initializing\n"));

	/* save our subsystem id for later for use in slot lookup during session activation */
	mali_subsystem_memory_id = id;

	_MALI_OSK_INIT_LIST_HEAD(&mmu_head);

	/* create the page table cache before registering any resource handlers */
	MALI_CHECK_NO_ERROR( mali_mmu_page_table_cache_create() );

	/* register our handlers; NOTE(review): MALI_CHECK_NO_ERROR is assumed to
	 * propagate the error out of this function on failure */
	MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(MMU, mali_memory_core_resource_mmu) );

	MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(FPGA_FRAMEWORK, mali_memory_core_resource_fpga) );

	MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(MEMORY, mali_memory_core_resource_dedicated_memory) );

	MALI_CHECK_NO_ERROR( _mali_kernel_core_register_resource_handler(OS_MEMORY, mali_memory_core_resource_os_memory) );

	/* the engine combines the Mali-side and process-side address managers */
	memory_engine = mali_allocation_engine_create(&mali_address_manager, &process_address_manager);
	MALI_CHECK_NON_NULL( memory_engine, _MALI_OSK_ERR_FAULT);

	MALI_SUCCESS;
}
+
/* called if/when our module is unloaded */
static void mali_memory_core_terminate(mali_kernel_subsystem_identifier id)
{
	mali_kernel_memory_mmu * mmu, *temp_mmu;

	MALI_DEBUG_PRINT(2, ("MMU memory system terminating\n"));

	/* loop over all MMU units and shut them down */
	_MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
	{
		/* reset to defaults */
		mali_mmu_raw_reset(mmu);

		/* unregister the irq */
		_mali_osk_irq_term(mmu->irq);

		/* remove from the list of MMU's on the system */
		_mali_osk_list_del(&mmu->list);

		/* release resources, in reverse order of acquisition in mali_memory_core_resource_mmu */
		_mali_osk_mem_unmapioregion(mmu->base, mmu->mapping_size, mmu->mapped_registers);
		_mali_osk_mem_unreqregion(mmu->base, mmu->mapping_size);
		_mali_osk_lock_term(mmu->lock);
		_mali_osk_free(mmu);
	}

	/* free global helper pages */
	mali_free_empty_page_directory();
	mali_free_fault_flush_pages();

	/* destroy the page table cache before shutting down backends in case we have a page table leak to report */
	mali_mmu_page_table_cache_destroy();

	/* unrequest the memory regions claimed for dedicated memory banks */
	while ( NULL != mem_region_registrations)
	{
		dedicated_memory_info * m;
		m = mem_region_registrations;
		mem_region_registrations = m->next;
		_mali_osk_mem_unreqregion(m->base, m->size);
		_mali_osk_free(m);
	}

	/* destroy every registered physical memory provider */
	while ( NULL != physical_memory_allocators)
	{
		mali_physical_memory_allocator * m;
		m = physical_memory_allocators;
		physical_memory_allocators = m->next;
		m->destroy(m);
	}

	if (NULL != memory_engine)
	{
		mali_allocation_engine_destroy(memory_engine);
		memory_engine = NULL;
	}

}
+
/* Session begin handler: allocates the per-session memory bookkeeping
 * (descriptor mapping, page directory, lock, allocation list) and stores it
 * in the subsystem slot. On any failure everything acquired so far is undone. */
static _mali_osk_errcode_t mali_memory_core_session_begin(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue)
{
	memory_session * session_data;
	_mali_osk_errcode_t err;
	int i;
	mali_io_address pd_mapped;

	/* validate input */
	if (NULL == slot)
	{
		MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
		MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	}

	if (NULL != *slot)
	{
		MALI_DEBUG_PRINT(1, ("The slot given to memory session begin already contains data"));
		MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
	}

	MALI_DEBUG_PRINT(2, ("MMU session begin\n"));

	/* create the session data object (zeroed by calloc) */
	session_data = _mali_osk_calloc(1, sizeof(memory_session));
	MALI_CHECK_NON_NULL( session_data, _MALI_OSK_ERR_NOMEM );

	/* create descriptor mapping table */
	session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);

	if (NULL == session_data->descriptor_mapping)
	{
		_mali_osk_free(session_data);
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	/* allocate this session's page directory */
	err = mali_mmu_get_table_page(&session_data->page_directory, &pd_mapped);

	session_data->page_directory_mapped = pd_mapped;
	if (_MALI_OSK_ERR_OK != err)
	{
		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
		_mali_osk_free(session_data);
		MALI_ERROR(err);
	}
	MALI_DEBUG_ASSERT_POINTER( session_data->page_directory_mapped );

	MALI_DEBUG_PRINT(2, ("Page directory for session 0x%x placed at physical address 0x%08X\n", mali_session_data, session_data->page_directory));

	for (i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
	{
		/* mark each page table as not present */
		_mali_osk_mem_iowrite32_relaxed(session_data->page_directory_mapped, sizeof(u32) * i, 0);
	}
	/* make the directory contents visible before the MMU can observe it */
	_mali_osk_write_mem_barrier();

	/* page_table_mapped[] is already set to NULL by _mali_osk_calloc call */

	_MALI_OSK_INIT_LIST_HEAD(&session_data->active_mmus);
	session_data->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 128);
	if (NULL == session_data->lock)
	{
		/* undo everything acquired so far */
		mali_mmu_release_table_page(session_data->page_directory);
		mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
		_mali_osk_free(session_data);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* Init the session's memory allocation list */
	_MALI_OSK_INIT_LIST_HEAD( &session_data->memory_head );

	*slot = session_data; /* slot will point to our data object */
	MALI_DEBUG_PRINT(2, ("MMU session begin: success\n"));
	MALI_SUCCESS;
}
+
+static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
+{
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation*)map_target;
+
+ MALI_DEBUG_PRINT(1, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
+ MALI_DEBUG_ASSERT(descriptor);
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+}
+
+static void mali_memory_core_session_end(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot)
+{
+ memory_session * session_data;
+ int i;
+ const int num_page_table_entries = sizeof(session_data->page_entries_mapped) / sizeof(session_data->page_entries_mapped[0]);
+
+ MALI_DEBUG_PRINT(2, ("MMU session end\n"));
+
+ /* validate input */
+ if (NULL == slot)
+ {
+ MALI_DEBUG_PRINT(1, ("NULL slot given to memory session begin\n"));
+ return;
+ }
+
+ session_data = (memory_session *)*slot;
+
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+ return;
+ }
+ /* Lock the session so we can modify the memory list */
+ _mali_osk_lock_wait( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+ /* Noninterruptable spinlock type, so must always have locked. Checking should've been done in OSK function. */
+
+#ifndef MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP
+#if _MALI_OSK_SPECIFIC_INDIRECT_MMAP
+#error Indirect MMAP specified, but UKK does not have implicit MMAP cleanup. Current implementation does not handle this.
+#else
+
+ /* Free all memory engine allocations */
+ if (0 == _mali_osk_list_empty(&session_data->memory_head))
+ {
+ mali_memory_allocation *descriptor;
+ mali_memory_allocation *temp;
+ _mali_uk_mem_munmap_s unmap_args;
+
+ MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+ unmap_args.ctx = mali_session_data;
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->memory_head, mali_memory_allocation, list)
+ {
+ MALI_DEBUG_PRINT(4, ("Freeing block with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ descriptor->mali_address, descriptor->size, descriptor->size, descriptor->mapping)
+ );
+ /* ASSERT that the descriptor's lock references the correct thing */
+ MALI_DEBUG_ASSERT( descriptor->lock == session_data->lock );
+ /* Therefore, we have already locked the descriptor */
+
+ unmap_args.size = descriptor->size;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.cookie = (u32)descriptor;
+
+ /*
+ * This removes the descriptor from the list, and frees the descriptor
+ *
+ * Does not handle the _MALI_OSK_SPECIFIC_INDIRECT_MMAP case, since
+ * the only OS we are aware of that requires indirect MMAP also has
+ * implicit mmap cleanup.
+ */
+ _mali_ukk_mem_munmap_internal( &unmap_args );
+ }
+ }
+
+ /* Assert that we really did free everything */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty(&session_data->memory_head) );
+#endif /* _MALI_OSK_SPECIFIC_INDIRECT_MMAP */
+#endif /* MALI_UKK_HAS_IMPLICIT_MMAP_CLEANUP */
+
+ if (NULL != session_data->descriptor_mapping)
+ {
+ mali_descriptor_mapping_call_for_each(session_data->descriptor_mapping, descriptor_table_cleanup_callback);
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ session_data->descriptor_mapping = NULL;
+ }
+
+ for (i = 0; i < num_page_table_entries; i++)
+ {
+ /* free PTE memory */
+ if (session_data->page_directory_mapped && (_mali_osk_mem_ioread32(session_data->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT))
+ {
+ mali_mmu_release_table_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ _mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0);
+ }
+ }
+
+ if (MALI_INVALID_PAGE != session_data->page_directory)
+ {
+ mali_mmu_release_table_page(session_data->page_directory);
+ session_data->page_directory = MALI_INVALID_PAGE;
+ }
+
+ _mali_osk_lock_signal( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+
+ /**
+ * @note Could the VMA close handler mean that we use the session data after it was freed?
+ * In which case, would need to refcount the session data, and free on VMA close
+ */
+
+ /* Free the lock */
+ _mali_osk_lock_term( session_data->lock );
+ /* free the session data object */
+ _mali_osk_free(session_data);
+
+ /* clear our slot */
+ *slot = NULL;
+
+ return;
+}
+
/* Allocate the shared empty page directory and zero-fill it (no entry is
 * marked present). On fill failure the page is released again and the
 * global reverts to MALI_INVALID_PAGE. */
static _mali_osk_errcode_t mali_allocate_empty_page_directory(void)
{
	_mali_osk_errcode_t err;
	mali_io_address mapping;

	MALI_CHECK_NO_ERROR(mali_mmu_get_table_page(&mali_empty_page_directory, &mapping));

	MALI_DEBUG_ASSERT_POINTER( mapping );

	/* zero: no page table entries present */
	err = fill_page(mapping, 0);
	if (_MALI_OSK_ERR_OK != err)
	{
		mali_mmu_release_table_page(mali_empty_page_directory);
		mali_empty_page_directory = MALI_INVALID_PAGE;
	}
	return err;
}
+
+static void mali_free_empty_page_directory(void)
+{
+ if (MALI_INVALID_PAGE != mali_empty_page_directory)
+ {
+ mali_mmu_release_table_page(mali_empty_page_directory);
+ mali_empty_page_directory = MALI_INVALID_PAGE;
+ }
+}
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+ int i;
+ MALI_DEBUG_ASSERT_POINTER( mapping );
+
+ for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+ {
+ _mali_osk_mem_iowrite32_relaxed( mapping, i * sizeof(u32), data);
+ }
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+}
+
/* Allocate the three helper pages used to flush the page fault queue:
 * a data page, a page table whose entries all point at the data page
 * (present, read+write), and a page directory whose entries all point at
 * that page table. On any failure, the pages acquired so far are released
 * in reverse order and the globals reset. */
static _mali_osk_errcode_t mali_allocate_fault_flush_pages(void)
{
	_mali_osk_errcode_t err;

	err = mali_mmu_get_table_page(&mali_page_fault_flush_data_page, &mali_page_fault_flush_data_page_mapping);
	if (_MALI_OSK_ERR_OK == err)
	{
		err = mali_mmu_get_table_page(&mali_page_fault_flush_page_table, &mali_page_fault_flush_page_table_mapping);
		if (_MALI_OSK_ERR_OK == err)
		{
			err = mali_mmu_get_table_page(&mali_page_fault_flush_page_directory, &mali_page_fault_flush_page_directory_mapping);
			if (_MALI_OSK_ERR_OK == err)
			{
				/* wire up: directory -> table -> data page */
				fill_page(mali_page_fault_flush_data_page_mapping, 0);
				fill_page(mali_page_fault_flush_page_table_mapping, mali_page_fault_flush_data_page | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
				fill_page(mali_page_fault_flush_page_directory_mapping, mali_page_fault_flush_page_table | MALI_MMU_FLAGS_PRESENT);
				MALI_SUCCESS;
			}
			/* directory allocation failed: drop the page table */
			mali_mmu_release_table_page(mali_page_fault_flush_page_table);
			mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
			mali_page_fault_flush_page_table_mapping = NULL;
		}
		/* table (or directory) allocation failed: drop the data page */
		mali_mmu_release_table_page(mali_page_fault_flush_data_page);
		mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
		mali_page_fault_flush_data_page_mapping = NULL;
	}
	MALI_ERROR(err);
}
+
/* Release the page fault flush helper pages (directory, then table, then
 * data page). Each guard makes this safe to call whether or not
 * mali_allocate_fault_flush_pages succeeded. */
static void mali_free_fault_flush_pages(void)
{
	if (MALI_INVALID_PAGE != mali_page_fault_flush_page_directory)
	{
		mali_mmu_release_table_page(mali_page_fault_flush_page_directory);
		mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
	}

	if (MALI_INVALID_PAGE != mali_page_fault_flush_page_table)
	{
		mali_mmu_release_table_page(mali_page_fault_flush_page_table);
		mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
	}

	if (MALI_INVALID_PAGE != mali_page_fault_flush_data_page)
	{
		mali_mmu_release_table_page(mali_page_fault_flush_data_page);
		mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
	}
}
+
/* Called once every driver has loaded and all resources are registered:
 * reports the physical allocators to the engine, allocates the global helper
 * pages, then points every MMU at the empty page directory and enables
 * paging, bringing the MMU system live. */
static _mali_osk_errcode_t mali_memory_core_load_complete(mali_kernel_subsystem_identifier id)
{
	mali_kernel_memory_mmu * mmu, * temp_mmu;

	/* Report the allocators */
	mali_allocation_engine_report_allocators( physical_memory_allocators );

	/* allocate the helper pages */
	MALI_CHECK_NO_ERROR( mali_allocate_empty_page_directory() );
	if (_MALI_OSK_ERR_OK != mali_allocate_fault_flush_pages())
	{
		mali_free_empty_page_directory();
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* activate the empty page directory on all MMU's */
	_MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
	{
		mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
		mali_mmu_enable_paging(mmu);
	}

	MALI_DEBUG_PRINT(4, ("MMUs activated\n"));
	/* the MMU system is now active */

	MALI_SUCCESS;
}
+
/* System info filler: with an MMU present we advertise a single virtual
 * memory bank (2 GB, readable/writable by CPU, PP and GP). The allocated
 * _mali_mem_info is handed over to the caller via info->mem_info. */
static _mali_osk_errcode_t mali_memory_core_system_info_fill(_mali_system_info* info)
{
	_mali_mem_info * mem_info;

	/* Make sure we won't leak any memory. It could also be that it's an
	 * uninitialized variable, but the caller should have zeroed the
	 * variable. */
	MALI_DEBUG_ASSERT(NULL == info->mem_info);

	info->has_mmu = 1;

	mem_info = _mali_osk_calloc(1,sizeof(_mali_mem_info));
	MALI_CHECK_NON_NULL( mem_info, _MALI_OSK_ERR_NOMEM );

	/* 2 GB bank; maximum_order_supported = 30 allows allocations up to 2^30 bytes */
	mem_info->size = 2048UL * 1024UL * 1024UL;
	mem_info->maximum_order_supported = 30;
	mem_info->flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE |_MALI_GP_READABLE | _MALI_GP_WRITEABLE;
	mem_info->identifier = 0;

	/* ownership of mem_info passes to the caller */
	info->mem_info = mem_info;

	/* all OK */
	MALI_SUCCESS;
}
+
/* MMU resource handler: claims the register region, allocates and initializes
 * a mali_kernel_memory_mmu object (lock, register mapping, IRQ), resets the
 * hardware and adds the unit to the global mmu_head list. Each failure path
 * unwinds exactly what was acquired before it. */
static _mali_osk_errcode_t mali_memory_core_resource_mmu(_mali_osk_resource_t * resource)
{
	mali_kernel_memory_mmu * mmu;

	MALI_DEBUG_PRINT(4, ("MMU '%s' @ (0x%08X - 0x%08X)\n",
		resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1
	));

	/* reject duplicate registrations of the same MMU id */
	if (NULL != mali_memory_core_mmu_lookup(resource->mmu_id))
	{
		MALI_DEBUG_PRINT(1, ("Duplicate MMU ids found. The id %d is already in use\n", resource->mmu_id));
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, MALI_MMU_REGISTERS_SIZE, resource->description))
	{
		/* specified addresses are already in used by another driver / the kernel */
		MALI_DEBUG_PRINT(
			1, ("Failed to request MMU '%s' register address space at (0x%08X - 0x%08X)\n",
			resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1
		));
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	mmu = _mali_osk_calloc(1, sizeof(mali_kernel_memory_mmu));

	if (NULL == mmu)
	{
		MALI_DEBUG_PRINT(1, ("Failed to allocate memory for handling a MMU unit"));
		_mali_osk_mem_unreqregion(resource->base, MALI_MMU_REGISTERS_SIZE);
		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

	/* basic setup */
	_MALI_OSK_INIT_LIST_HEAD(&mmu->list);

	mmu->id = resource->mmu_id;
	mmu->irq_nr = resource->irq;
	mmu->flags = resource->flags;
	mmu->base = resource->base;
	mmu->mapping_size = MALI_MMU_REGISTERS_SIZE;
	mmu->description = resource->description; /* no need to copy */
	_MALI_OSK_INIT_LIST_HEAD(&mmu->callbacks);
	_MALI_OSK_INIT_LIST_HEAD(&mmu->session_link);
	mmu->in_page_fault_handler = 0;

	/* lock order value 127-mmu->id gives each MMU a distinct ordering slot */
	mmu->lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 127-mmu->id);
	if (NULL == mmu->lock)
	{
		MALI_DEBUG_PRINT(1, ("Failed to create mmu lock\n"));
		_mali_osk_mem_unreqregion(mmu->base, mmu->mapping_size);
		_mali_osk_free(mmu);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* map the registers */
	mmu->mapped_registers = _mali_osk_mem_mapioregion( mmu->base, mmu->mapping_size, mmu->description );
	if (NULL == mmu->mapped_registers)
	{
		/* failed to map the registers */
		MALI_DEBUG_PRINT(1, ("Failed to map MMU registers at 0x%08X\n", mmu->base));
		_mali_osk_lock_term(mmu->lock);
		_mali_osk_mem_unreqregion(mmu->base, MALI_MMU_REGISTERS_SIZE);
		_mali_osk_free(mmu);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	MALI_DEBUG_PRINT(4, ("MMU '%s' @ (0x%08X - 0x%08X) mapped to 0x%08X\n",
		resource->description, resource->base, resource->base + MALI_MMU_REGISTERS_SIZE - 1, mmu->mapped_registers
	));

	/* setup MMU interrupt mask */
	/* set all values to known defaults */
	mali_mmu_raw_reset(mmu);
	mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
	/* setup MMU page directory pointer */
	/* The mali_page_directory pointer is guaranteed to be 4kb aligned because we've used get_zeroed_page to accquire it */
	/* convert the kernel virtual address into a physical address and set */

	/* add to our list of MMU's */
	_mali_osk_list_addtail(&mmu->list, &mmu_head);

	/* hook up the IRQ, including the probe trigger/acknowledge helpers */
	mmu->irq = _mali_osk_irq_init(
		mmu->irq_nr,
		mali_kernel_memory_mmu_interrupt_handler_upper_half,
		mali_kernel_memory_mmu_interrupt_handler_bottom_half,
		(_mali_osk_irq_trigger_t)mali_mmu_probe_irq_trigger,
		(_mali_osk_irq_ack_t)mali_mmu_probe_irq_acknowledge,
		mmu,
		"mali_mmu_irq_handlers"
	);
	if (NULL == mmu->irq)
	{
		_mali_osk_list_del(&mmu->list);
		_mali_osk_lock_term(mmu->lock);
		_mali_osk_mem_unmapioregion( mmu->base, mmu->mapping_size, mmu->mapped_registers );
		_mali_osk_mem_unreqregion(resource->base, MALI_MMU_REGISTERS_SIZE);
		_mali_osk_free(mmu);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* set to a known state (again, after IRQ probing may have touched it) */
	mali_mmu_raw_reset(mmu);
	mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);

	MALI_DEBUG_PRINT(2, ("MMU registered\n"));

	MALI_SUCCESS;
}
+
+static _mali_osk_errcode_t mali_memory_core_resource_fpga(_mali_osk_resource_t * resource)
+{
+ mali_io_address mapping;
+
+ MALI_DEBUG_PRINT(5, ("FPGA framework '%s' @ (0x%08X - 0x%08X)\n",
+ resource->description, resource->base, resource->base + sizeof(u32) * 2 - 1
+ ));
+
+ mapping = _mali_osk_mem_mapioregion(resource->base + 0x1000, sizeof(u32) * 2, "fpga framework");
+ if (mapping)
+ {
+ MALI_DEBUG_CODE(u32 data = )
+ _mali_osk_mem_ioread32(mapping, 0);
+ MALI_DEBUG_PRINT(2, ("FPGA framwork '%s' @ 0x%08X:\n", resource->description, resource->base));
+ MALI_DEBUG_PRINT(2, ("\tBitfile date: %d%02d%02d_%02d%02d\n",
+ (data >> 20),
+ (data >> 16) & 0xF,
+ (data >> 11) & 0x1F,
+ (data >> 6) & 0x1F,
+ (data >> 0) & 0x3F));
+ MALI_DEBUG_CODE(data = )
+ _mali_osk_mem_ioread32(mapping, sizeof(u32));
+ MALI_DEBUG_PRINT(2, ("\tBitfile SCCS rev: %d\n", data));
+
+ _mali_osk_mem_unmapioregion(resource->base + 0x1000, sizeof(u32) *2, mapping);
+ }
+ else MALI_DEBUG_PRINT(1, ("Failed to access FPGA framwork '%s' @ 0x%08X\n", resource->description, resource->base));
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+
+ u32 alloc_order = resource->alloc_order;
+
+ allocator = mali_os_allocator_create(resource->size, resource->cpu_usage_adjust, resource->description);
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create OS memory allocator\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ allocator->alloc_order = alloc_order;
+
+ /* link in the allocator: insertion into ordered list
+ * resources of the same alloc_order will be Last-in-first */
+ next_allocator_list = &physical_memory_allocators;
+
+ while ( NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
+
/* MEMORY resource handler: claims ownership of a dedicated (physically
 * contiguous) memory bank, wraps it in a block allocator, records cleanup
 * info for module unload, and inserts the allocator into the global list
 * ordered by alloc_order (same insertion scheme as the OS memory handler). */
static _mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource)
{
	mali_physical_memory_allocator * allocator;
	mali_physical_memory_allocator ** next_allocator_list;
	dedicated_memory_info * cleanup_data;

	u32 alloc_order = resource->alloc_order;

	/* do the lowlevel linux operation first */

	/* Request ownership of the memory */
	if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
	{
		MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* create generic block allocator object to handle it */
	allocator = mali_block_allocator_create(resource->base, resource->cpu_usage_adjust, resource->size, resource->description );

	if (NULL == allocator)
	{
		MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
		_mali_osk_mem_unreqregion(resource->base, resource->size);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	/* save lowlevel cleanup info */
	allocator->alloc_order = alloc_order;

	cleanup_data = _mali_osk_malloc(sizeof(dedicated_memory_info));

	if (NULL == cleanup_data)
	{
		/* undo region request and allocator creation */
		_mali_osk_mem_unreqregion(resource->base, resource->size);
		allocator->destroy(allocator);
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}

	cleanup_data->base = resource->base;
	cleanup_data->size = resource->size;

	/* push onto the registration list consumed by mali_memory_core_terminate */
	cleanup_data->next = mem_region_registrations;
	mem_region_registrations = cleanup_data;

	/* link in the allocator: insertion into ordered list
	 * resources of the same alloc_order will be Last-in-first */
	next_allocator_list = &physical_memory_allocators;

	while ( NULL != *next_allocator_list &&
			(*next_allocator_list)->alloc_order < alloc_order )
	{
		next_allocator_list = &((*next_allocator_list)->next);
	}

	allocator->next = (*next_allocator_list);
	(*next_allocator_list) = allocator;

	MALI_SUCCESS;
}
+
/* Upper half of the MMU interrupt processing. Checks whether this MMU raised
 * the interrupt (the IRQ line may be shared), masks further MMU interrupts,
 * and defers page fault handling to the bottom half; read bus errors are
 * acknowledged and re-enabled immediately. Returns an error when the
 * interrupt was not ours so a shared line can be passed on. */
static _mali_osk_errcode_t mali_kernel_memory_mmu_interrupt_handler_upper_half(void * data)
{
	mali_kernel_memory_mmu * mmu;
	u32 int_stat;
	mali_core_renderunit *core;

	/* in benchmark mode interrupts are ignored entirely */
	if (mali_benchmark) MALI_SUCCESS;

	mmu = (mali_kernel_memory_mmu *)data;

	MALI_DEBUG_ASSERT_POINTER(mmu);

	/* Pointer to core holding this MMU */
	core = (mali_core_renderunit *)mmu->core;
	if(core && (CORE_OFF == core->state))
	{
		/* the owning core is powered off: nothing to handle */
		MALI_SUCCESS;
	}


	/* check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
	int_stat = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_STATUS);
	if (0 == int_stat)
	{
		MALI_ERROR(_MALI_OSK_ERR_FAULT); /* no bits set, we are sharing the IRQ line and someone else caused the interrupt */
	}


	/* mask all MMU interrupts until the bottom half has run */
	mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, 0);

	/* read back status (value discarded; presumably flushes the write — confirm against HW spec) */
	mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS);

	if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
	{
		/* page faults are handled in the bottom half work item */
		_mali_osk_irq_schedulework(mmu->irq);
	}
	if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
	{
		/* clear interrupt flag */
		mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
		/* reenable it */
		mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_MASK) | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
	}

	MALI_SUCCESS;
}
+
+
+/* Full bus-reset recovery sequence after a page fault: stop the bus on all
+ * cores, (Mali200 only) drain the MMU replay buffer through a special flush
+ * page directory, reset and reprogram the MMU, then resume job handling.
+ * Broadcast messages drive the per-core steps; the mmu->lock protects the
+ * register sequences and the usage_count reference. */
+static void mali_kernel_mmu_bus_reset(mali_kernel_memory_mmu * mmu)
+{
+
+#if defined(USING_MALI200)
+ int i;
+ const int replay_buffer_check_interval = 10; /* must be below 1000 */
+ const int replay_buffer_max_number_of_checks = 100;
+#endif
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ /* add an extra reference while handling the page fault */
+ mmu->usage_count++;
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT(4, ("Sending stop bus request to cores\n"));
+ /* request to stop the bus, but don't wait for it to actually stop */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, (u32)mmu);
+
+#if defined(USING_MALI200)
+ /* no new request will come from any of the connected cores from now
+ * we must now flush the playback buffer for any requests queued already
+ */
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_DEBUG_PRINT(4, ("Switching to the special page fault flush page directory\n"));
+ /* don't use the mali_mmu_activate_address_space function here as we can't stall the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_page_fault_flush_page_directory);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+ /* resume the MMU */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+ /* the MMU will now play back all the requests, all going to our special page fault flush data page */
+
+ /* just to be safe, check that the playback buffer is empty before continuing */
+ if (!mali_benchmark) {
+ for (i = 0; i < replay_buffer_max_number_of_checks; i++)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY) break;
+ _mali_osk_time_ubusydelay(replay_buffer_check_interval);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, i == replay_buffer_max_number_of_checks, ("MMU: %s: Failed to flush replay buffer on page fault\n", mmu->description));
+ /* NOTE(review): i is an int but the format uses %ld -- harmless on ILP32, verify on other ABIs */
+ MALI_DEBUG_PRINT(1, ("Replay playback took %ld usec\n", i * replay_buffer_check_interval));
+ }
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+#endif
+ /* notify all subsystems that the core should be reset once the bus is actually stopped */
+ MALI_DEBUG_PRINT(4,("Sending job abort command to subsystems\n"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, (u32)mmu);
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* reprogram the MMU */
+ mali_mmu_raw_reset(mmu);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory); /* no session is active, so just activate the empty page directory */
+ mali_mmu_enable_paging(mmu);
+
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* release the extra address space reference, will schedule */
+ mali_memory_core_mmu_release_address_space_reference(mmu);
+
+ /* resume normal operation */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, (u32)mmu);
+ MALI_DEBUG_PRINT(4, ("Page fault handling complete\n"));
+}
+
+/* Issue a soft reset to the MMU and busy-wait (up to 100us) for completion.
+ * A magic value is first written to DTE_ADDR; the hardware clears it to 0
+ * when the reset has taken effect, which is what the poll loop checks.
+ * Caller must hold mmu->lock. */
+static void mali_mmu_raw_reset(mali_kernel_memory_mmu * mmu)
+{
+ const int max_loop_count = 100;
+ const int delay_in_usecs = 1;
+
+ /* sentinel value: reset completion is detected when HW clears this */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_SOFT_RESET);
+
+ if (!mali_benchmark)
+ {
+ int i;
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_DTE_ADDR) == 0)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ MALI_DEBUG_PRINT_IF(1, (max_loop_count == i), ("Reset request failed, MMU status is 0x%08X\n", mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
+/* Enable address translation on the MMU and busy-wait (up to 100us) until
+ * the status register reports paging enabled. Timeout is only logged, not
+ * propagated. Caller must hold mmu->lock. */
+static void mali_mmu_enable_paging(mali_kernel_memory_mmu * mmu)
+{
+ const int max_loop_count = 100;
+ const int delay_in_usecs = 1;
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+ if (!mali_benchmark)
+ {
+ int i;
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ MALI_DEBUG_PRINT_IF(1, (max_loop_count == i), ("Enable paging request failed, MMU status is 0x%08X\n", mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
+/* Request the MMU to stall and wait for the stall to become active.
+ * Returns MALI_TRUE on success (or unconditionally in benchmark mode),
+ * MALI_FALSE if the stall did not activate within the timeout. */
+static mali_bool mali_mmu_enable_stall(mali_kernel_memory_mmu * mmu)
+{
+ const int max_loop_count = 100;
+ /* NOTE(review): 999us per check (up to ~100ms total) vs 1us in the other
+ * poll loops here -- presumably intentional since a stall can take long
+ * while outstanding transactions drain; confirm */
+ const int delay_in_usecs = 999;
+ int i;
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ if (!mali_benchmark)
+ {
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ if (mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ MALI_DEBUG_PRINT_IF(1, (max_loop_count == i), ("Enable stall request failed, MMU status is 0x%08X\n", mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS)));
+ if (max_loop_count == i)
+ {
+ return MALI_FALSE;
+ }
+ }
+
+ return MALI_TRUE;
+}
+
+/* Release an MMU stall and wait (up to 100us) for the stall-active bit to
+ * clear. Timeout is only logged, not propagated. */
+static void mali_mmu_disable_stall(mali_kernel_memory_mmu * mmu)
+{
+ const int max_loop_count = 100;
+ const int delay_in_usecs = 1;
+ int i;
+
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ if (!mali_benchmark)
+ {
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ if ((mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_STALL_ACTIVE) == 0)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ MALI_DEBUG_PRINT_IF(1, (max_loop_count == i), ("Disable stall request failed, MMU status is 0x%08X\n", mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
+/* Reset a single MMU and reprogram it with the empty page directory.
+ * Refuses to run while the page fault handler is active for this MMU
+ * (in_page_fault_handler set), since that path owns the bus-stop sequence. */
+void mali_kernel_mmu_reset(void * input_mmu)
+{
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(input_mmu);
+ mmu = (mali_kernel_memory_mmu *)input_mmu;
+
+ MALI_DEBUG_PRINT(4, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->description));
+
+ if ( 0 != mmu->in_page_fault_handler)
+ {
+ /* This is possible if the bus can never be stopped for some reason */
+ MALI_PRINT_ERROR(("Stopping the Memory bus not possible. Mali reset could not be performed."));
+ return;
+ }
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ mali_mmu_raw_reset(mmu);
+ /* re-arm fault and bus-error interrupts, then restart translation */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory); /* no session is active, so just activate the empty page directory */
+ mali_mmu_enable_paging(mmu);
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+/* Force a full bus-reset recovery on an MMU (e.g. after a hang), unless a
+ * page fault handler is already mid-flight for it. Masks MMU interrupts
+ * first so the reset sequence runs without interference. */
+void mali_kernel_mmu_force_bus_reset(void * input_mmu)
+{
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(input_mmu);
+ mmu = (mali_kernel_memory_mmu *)input_mmu;
+ if ( 0 != mmu->in_page_fault_handler)
+ {
+ /* This is possible if the bus can never be stopped for some reason */
+ MALI_PRINT_ERROR(("Stopping the Memory bus not possible. Mali reset could not be performed."));
+ return;
+ }
+ MALI_DEBUG_PRINT(1, ("Mali MMU: Force_bus_reset.\n"));
+ /* mask all MMU interrupts during the reset sequence */
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_MASK, 0);
+ mali_kernel_mmu_bus_reset(mmu);
+}
+
+
+/* Bottom half (work queue) for MMU page fault handling. Locks all
+ * subsystems, verifies a fault is actually pending, dumps diagnostic info
+ * about the faulting address from the active session's page tables, then
+ * runs the full bus-reset recovery and unlocks the subsystems.
+ * in_page_fault_handler is set around the recovery so the reset entry
+ * points refuse to re-enter. */
+static void mali_kernel_memory_mmu_interrupt_handler_bottom_half(void * data)
+{
+ mali_kernel_memory_mmu * mmu;
+ u32 raw, fault_address, status;
+ mali_core_renderunit *core;
+
+ MALI_DEBUG_PRINT(1, ("mali_kernel_memory_mmu_interrupt_handler_bottom_half\n"));
+ if (NULL == data)
+ {
+ MALI_PRINT_ERROR(("MMU IRQ work queue: NULL argument"));
+ return; /* Error */
+ }
+ mmu = (mali_kernel_memory_mmu*)data;
+
+ MALI_DEBUG_PRINT(4, ("Locking subsystems\n"));
+ /* lock all subsystems */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP0_LOCK_SUBSYSTEM, (u32)mmu);
+
+ /* Pointer to core holding this MMU */
+ core = (mali_core_renderunit *)mmu->core;
+
+ /* fix: guard against NULL core before dereferencing, matching the
+ * upper-half handler which checks (core && CORE_OFF == core->state) */
+ if(core && (CORE_OFF == core->state))
+ {
+ /* core powered off: nothing to recover, just release the locks */
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+ return;
+ }
+
+ raw = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_RAWSTAT);
+ status = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS);
+
+ if ( (0==(raw & MALI_MMU_INTERRUPT_PAGE_FAULT)) && (0==(status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) )
+ {
+ /* spurious wakeup: no fault pending, unlock and leave */
+ MALI_DEBUG_PRINT(1, ("MMU: Page fault bottom half: No Irq found.\n"));
+ MALI_DEBUG_PRINT(4, ("Unlocking subsystems"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+ return;
+ }
+
+ mmu->in_page_fault_handler = 1;
+
+ /* status bit 5 = access type (write/read), bits 6..10 = bus id of the
+ * faulting requester -- per the register layout used below */
+ fault_address = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+ MALI_PRINT(("Page fault detected at 0x%x from bus id %d of type %s on %s\n",
+ (void*)fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ mmu->description)
+ );
+
+ if (NULL == mmu->active_session)
+ {
+ MALI_PRINT(("Spurious memory access detected from MMU %s\n", mmu->description));
+ }
+ else
+ {
+ /* walk the two-level page table to report how the faulting VA maps */
+ MALI_PRINT(("Active page directory at 0x%08X\n", mmu->active_session->page_directory));
+ MALI_PRINT(("Info from page table for VA 0x%x:\n", (void*)fault_address));
+ MALI_PRINT(("DTE entry: PTE at 0x%x marked as %s\n",
+ (void*)(_mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped,
+ MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK),
+ _mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped,
+ MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT ? "present" : "not present"
+ ));
+
+ if (_mali_osk_mem_ioread32(mmu->active_session->page_directory_mapped, MALI_MMU_PDE_ENTRY(fault_address) * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT)
+ {
+ mali_io_address pte;
+ u32 data;
+ pte = mmu->active_session->page_entries_mapped[MALI_MMU_PDE_ENTRY(fault_address)];
+ data = _mali_osk_mem_ioread32(pte, MALI_MMU_PTE_ENTRY(fault_address) * sizeof(u32));
+ MALI_PRINT(("PTE entry: Page at 0x%x, %s %s %s\n",
+ (void*)(data & ~MALI_MMU_FLAGS_MASK),
+ data & MALI_MMU_FLAGS_PRESENT ? "present" : "not present",
+ data & MALI_MMU_FLAGS_READ_PERMISSION ? "readable" : "",
+ data & MALI_MMU_FLAGS_WRITE_PERMISSION ? "writable" : ""
+ ));
+ }
+ else
+ {
+ MALI_PRINT(("PTE entry: Not present\n"));
+ }
+ }
+
+
+ /* the actual recovery: stop bus, reset cores, reprogram MMU */
+ mali_kernel_mmu_bus_reset(mmu);
+
+ mmu->in_page_fault_handler = 0;
+
+ /* unlock all subsystems */
+ MALI_DEBUG_PRINT(4, ("Unlocking subsystems"));
+ _mali_kernel_core_broadcast_subsystem_message(MMU_KILL_STEP4_UNLOCK_SUBSYSTEM, (u32)mmu);
+
+}
+
+
+/* Read one 32-bit MMU register via the mapped register window.
+ * In benchmark mode no hardware access is performed and 0 is returned. */
+static u32 mali_mmu_register_read(mali_kernel_memory_mmu * unit, mali_mmu_register reg)
+{
+ u32 reg_val;
+
+ if (mali_benchmark) return 0;
+
+ /* register enum is an index; byte offset is index * 4 */
+ reg_val = _mali_osk_mem_ioread32(unit->mapped_registers, (u32)reg * sizeof(u32));
+
+ MALI_DEBUG_PRINT(6, ("mali_mmu_register_read addr:0x%04X val:0x%08x\n", (u32)reg * sizeof(u32), reg_val));
+
+ return reg_val;
+}
+
+/* Write one 32-bit MMU register via the mapped register window.
+ * In benchmark mode the write is silently skipped. */
+static void mali_mmu_register_write(mali_kernel_memory_mmu * unit, mali_mmu_register reg, u32 val)
+{
+ if (mali_benchmark) return;
+
+ /* log before touching hardware so the trace shows the intent */
+ MALI_DEBUG_PRINT(6, ("mali_mmu_register_write addr:0x%04X val:0x%08x\n", (u32)reg * sizeof(u32), val));
+
+ /* register enum is an index; byte offset is index * 4 */
+ _mali_osk_mem_iowrite32(unit->mapped_registers, (u32)reg * sizeof(u32), val);
+}
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/* Allocation-engine commit callback for UMP-backed memory: map every
+ * physical block of the UMP allocation (handle passed via @ctx) into the
+ * Mali address space described by @descriptor, starting at *offset, plus an
+ * optional guard page. On success *offset is advanced past everything
+ * mapped and alloc_info is filled in with ump_memory_release as the
+ * release callback. Any failure unwinds all mappings made so far. */
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ ump_dd_handle ump_mem;
+ u32 nr_blocks;
+ u32 i;
+ ump_dd_physical_block * ump_blocks;
+ ump_mem_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ /* bookkeeping record handed back via alloc_info->handle for release */
+ ret_allocation = _mali_osk_malloc( sizeof( ump_mem_allocation ) );
+ if ( NULL==ret_allocation ) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ump_mem = (ump_dd_handle)ctx;
+
+ MALI_DEBUG_PRINT(4, ("In ump_memory_commit\n"));
+
+ nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+
+ MALI_DEBUG_PRINT(4, ("Have %d blocks\n", nr_blocks));
+
+ if (nr_blocks == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("No block count\n"));
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ /* temporary array holding the physical block list for the mapping loop */
+ ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks );
+ if ( NULL==ump_blocks )
+ {
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks))
+ {
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ /* Store away the initial offset for unmapping purposes */
+ ret_allocation->initial_offset = *offset;
+
+ for(i=0; i<nr_blocks; ++i)
+ {
+ MALI_DEBUG_PRINT(4, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[i].addr , 0, ump_blocks[i].size ))
+ {
+ /* *offset has only advanced past successfully mapped blocks,
+ * so this is exactly the span to unmap */
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += ump_blocks[i].size;
+ }
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA;
+ * it aliases the first physical block's start address */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[0].addr , 0, _MALI_OSK_MALI_PAGE_SIZE ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ _mali_osk_free( ump_blocks );
+
+ /* record everything release needs to undo the mapping later */
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->ump_mem = ump_mem;
+ ret_allocation->size_allocated = *offset - ret_allocation->initial_offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = ump_memory_release;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+/* Release callback for UMP-backed allocations: unmap the Mali-side range
+ * recorded at commit time, free the bookkeeping record, and drop the UMP
+ * reference taken when the memory was attached. @ctx is unused. */
+static void ump_memory_release(void * ctx, void * handle)
+{
+ ump_mem_allocation *alloc = (ump_mem_allocation *)handle;
+ ump_dd_handle mem;
+
+ MALI_DEBUG_ASSERT_POINTER( alloc );
+
+ mem = alloc->ump_mem;
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID!=mem);
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+ mali_allocation_engine_unmap_physical( alloc->engine,
+ alloc->descriptor,
+ alloc->initial_offset,
+ alloc->size_allocated,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+
+ _mali_osk_free( alloc );
+
+ /* drop the reference; UMP frees the memory when the count hits zero */
+ ump_dd_reference_release(mem);
+}
+
+/* User-kernel entry point: attach a UMP allocation (identified by
+ * args->secure_id) into the calling session's Mali address space at
+ * args->mali_address, optionally with a trailing guard page. On success
+ * args->cookie holds the descriptor id used later by release. Takes a UMP
+ * reference which is dropped on every failure path; on success it is held
+ * until ump_memory_release runs. */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args )
+{
+ ump_dd_handle ump_mem;
+ mali_physical_memory_allocator external_memory_allocator;
+ memory_session * session_data;
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+ args->secure_id, args->mali_address, args->size));
+
+ /* takes a UMP reference; must be released on all failure paths below */
+ ump_mem = ump_dd_handle_create_from_secure_id( (int)args->secure_id ) ;
+
+ if ( UMP_DD_HANDLE_INVALID==ump_mem ) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ /* calloc zeroes the descriptor, so unset fields (e.g. flags) start at 0 */
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor)
+ {
+ ump_dd_reference_release(ump_mem);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->lock = session_data->lock;
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ /* register the descriptor so user space can refer to it via args->cookie */
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* one-shot allocator whose commit callback maps the UMP blocks */
+ external_memory_allocator.allocate = ump_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = ump_mem;
+ external_memory_allocator.name = "UMP Memory";
+ external_memory_allocator.next = NULL;
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+/* User-kernel entry point: detach a previously attached UMP allocation,
+ * identified by the cookie returned from _mali_ukk_attach_ump_mem. Looks up
+ * and frees the descriptor mapping, releases the engine-side memory (which
+ * invokes ump_memory_release and drops the UMP reference), then frees the
+ * descriptor itself. */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* invalidate the cookie before tearing the allocation down */
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+
+ _mali_osk_lock_wait( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+
+ _mali_osk_lock_signal( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+
+ _mali_osk_free(descriptor);
+
+ MALI_SUCCESS;
+
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0 */
+
+
+/* Allocation-engine commit callback for externally provided physical
+ * memory. @ctx points at a two-element u32 array: [0] = physical base
+ * address, [1] = size in bytes. Maps that range (plus an optional guard
+ * page aliasing the base) into the Mali address space at *offset, advancing
+ * *offset, and fills alloc_info with external_memory_release. */
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ u32 * data;
+ external_mem_allocation * ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ /* bookkeeping record handed back via alloc_info->handle for release */
+ ret_allocation = _mali_osk_malloc( sizeof(external_mem_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ data = (u32*)ctx;
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->initial_offset = *offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = external_memory_release;
+
+ MALI_DEBUG_PRINT(3, ("External map: mapping phys 0x%08X at mali virtual address 0x%08X staring at offset 0x%08X length 0x%08X\n", data[0], descriptor->mali_address, *offset, data[1]));
+
+ /* data[0] = physical base, data[1] = size */
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, data[1]))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += data[1];
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA,
+ * aliasing the start of the physical range */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, _MALI_OSK_MALI_PAGE_SIZE))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap what we previously mapped */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ ret_allocation->size = *offset - ret_allocation->initial_offset;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+/* Release callback for externally mapped memory: unmap the Mali-side range
+ * recorded at commit time and free the bookkeeping record. @ctx is unused;
+ * the external physical memory itself is not owned by this driver. */
+static void external_memory_release(void * ctx, void * handle)
+{
+ external_mem_allocation * alloc = (external_mem_allocation *) handle;
+
+ MALI_DEBUG_ASSERT_POINTER( alloc );
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+ mali_allocation_engine_unmap_physical( alloc->engine,
+ alloc->descriptor,
+ alloc->initial_offset,
+ alloc->size,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+
+ _mali_osk_free( alloc );
+}
+
+/* User-kernel entry point: map an arbitrary physical range
+ * (args->phys_addr, args->size) into the calling session's Mali address
+ * space at args->mali_address, optionally with a trailing guard page. The
+ * range is validated against the platform's legal Mali physical window
+ * first. On success args->cookie identifies the mapping for unmap. */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ mali_physical_memory_allocator external_memory_allocator;
+ memory_session * session_data;
+ u32 info[2];
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* one-shot allocator; ctx points at the stack array info[] below.
+ * NOTE(review): this appears to rely on the commit callback running
+ * synchronously inside mali_allocation_engine_allocate_memory while
+ * info[] is still live -- verify */
+ external_memory_allocator.allocate = external_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = &info[0];
+ external_memory_allocator.name = "External Memory";
+ external_memory_allocator.next = NULL;
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+ (void*)args->phys_addr,
+ (void*)(args->phys_addr + args->size -1),
+ (void*)args->mali_address)
+ );
+
+ /* Validate the mali physical range */
+ MALI_CHECK_NO_ERROR( mali_kernel_core_validate_mali_phys_range( args->phys_addr, args->size ) );
+
+ info[0] = args->phys_addr;
+ info[1] = args->size;
+
+ /* calloc zeroes the descriptor, so unset fields (e.g. flags) start at 0 */
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->lock = session_data->lock;
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ /* register the descriptor so user space can refer to it via args->cookie */
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from range_map_external_memory\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+/* User-kernel entry point: unmap an external physical range previously
+ * mapped by _mali_ukk_map_external_mem, identified by args->cookie. Frees
+ * the descriptor mapping, releases the engine-side memory (which unmaps the
+ * range via external_memory_release), then frees the descriptor. */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* invalidate the cookie before tearing the mapping down */
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+
+ _mali_osk_lock_wait( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+
+ _mali_osk_lock_signal( session_data->lock, _MALI_OSK_LOCKMODE_RW );
+
+ _mali_osk_free(descriptor);
+
+ MALI_SUCCESS;
+}
+
+/* User-kernel entry point: report the Mali virtual address space layout to
+ * user space (base and size of the region user space may allocate from). */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ args->memory_size = 2 * 1024 * 1024 * 1024UL; /* 2GB address space */
+ args->mali_address_base = 1 * 1024 * 1024 * 1024UL; /* starting at 1GB, causing this layout: (0-1GB unused)(1GB-3G usage by Mali)(3G-4G unused) */
+ MALI_SUCCESS;
+}
+
+/* User-kernel entry point: per-session memory termination. Currently a
+ * no-op beyond argument validation; session teardown happens elsewhere. */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_SUCCESS;
+}
+
+/* Initialise the global MMU page table cache: create its lock and the two
+ * lists tracking allocations with free pages ("partial") and fully used
+ * allocations ("full"). Returns _MALI_OSK_ERR_FAULT if the lock cannot be
+ * created. */
+_mali_osk_errcode_t mali_mmu_page_table_cache_create(void)
+{
+ page_table_cache.lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 110);
+ MALI_CHECK_NON_NULL( page_table_cache.lock, _MALI_OSK_ERR_FAULT );
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.partial);
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.full);
+ MALI_SUCCESS;
+}
+
+/* Tear down the global MMU page table cache: release every allocation on
+ * the partial and full lists (logging any that are still marked in use,
+ * which indicates a leak) and destroy the cache lock. */
+void mali_mmu_page_table_cache_destroy(void)
+{
+ mali_mmu_page_table_allocation * alloc, *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT_IF(1, 0 != alloc->usage_count, ("Destroying page table cache while pages are tagged as in use. %d allocations still marked as in use.\n", alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ /* anything on the full list is by definition still fully in use */
+ MALI_DEBUG_PRINT_IF(1, 0 == _mali_osk_list_empty(&page_table_cache.full), ("Page table cache full list contains one or more elements \n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT(1, ("Destroy alloc 0x%08X with usage count %d\n", (u32)alloc, alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ _mali_osk_lock_term(page_table_cache.lock);
+}
+
+/* Hand out one MMU page table page from the cache. Reuses a free page from
+ * a partially used allocation when available; otherwise grows the cache by
+ * allocating a fresh multi-page block, zeroing it and creating a per-page
+ * usage bitmap. On success *table_page receives the Mali physical address
+ * and *mapping the CPU-side mapping of the page. Returns
+ * _MALI_OSK_ERR_NOMEM when memory cannot be obtained. */
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == _mali_osk_list_empty(&page_table_cache.partial))
+ {
+ /* reuse a free page from the first partially used allocation */
+ mali_mmu_page_table_allocation * alloc = _MALI_OSK_LIST_ENTRY(page_table_cache.partial.next, mali_mmu_page_table_allocation, list);
+ int page_number = _mali_osk_find_first_zero_bit(alloc->usage_map, alloc->num_pages);
+ MALI_DEBUG_PRINT(6, ("Partial page table allocation found, using page offset %d\n", page_number));
+ _mali_osk_set_nonatomic_bit(page_number, alloc->usage_map);
+ alloc->usage_count++;
+ if (alloc->num_pages == alloc->usage_count)
+ {
+ /* full, move alloc to full list*/
+ _mali_osk_list_move(&alloc->list, &page_table_cache.full);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* NOTE(review): alloc->pages is read after the lock is dropped;
+ * appears safe because allocations are only freed at cache destroy,
+ * but verify */
+ *table_page = (MALI_MMU_PAGE_SIZE * page_number) + alloc->pages.phys_base;
+ *mapping = (mali_io_address)((MALI_MMU_PAGE_SIZE * page_number) + (u32)alloc->pages.mapping);
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ mali_mmu_page_table_allocation * alloc;
+ /* no free pages, allocate a new one */
+
+ alloc = (mali_mmu_page_table_allocation *)_mali_osk_calloc(1, sizeof(mali_mmu_page_table_allocation));
+ if (NULL == alloc)
+ {
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&alloc->list);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_page_tables(memory_engine, &alloc->pages, physical_memory_allocators))
+ {
+ MALI_DEBUG_PRINT(1, ("No more memory for page tables\n"));
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* create the usage map */
+ alloc->num_pages = alloc->pages.size / MALI_MMU_PAGE_SIZE;
+ alloc->usage_count = 1;
+ MALI_DEBUG_PRINT(3, ("New page table cache expansion, %d pages in new cache allocation\n", alloc->num_pages));
+ /* fix: the original expression
+ * (num_pages + BITS_PER_LONG - 1) & ~(BITS_PER_LONG-1) / BITS_PER_LONG
+ * evaluated the division first (/ binds tighter than &), yielding
+ * ~(BPL-1)/BPL == -1 and so allocating num_pages+BPL-1 longs -- one
+ * long per PAGE instead of one long per BITS_PER_LONG pages. Allocate
+ * the correctly rounded-up number of longs for the bitmap instead. */
+ alloc->usage_map = _mali_osk_calloc(1, ((alloc->num_pages + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(unsigned long));
+ if (NULL == alloc->usage_map)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory to describe MMU page table cache usage\n"));
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* clear memory allocation */
+ fill_page(alloc->pages.mapping, 0);
+
+ /* hand out page 0 of the new allocation */
+ _mali_osk_set_nonatomic_bit(0, alloc->usage_map);
+
+ if (alloc->num_pages > 1)
+ {
+ _mali_osk_list_add(&alloc->list, &page_table_cache.partial);
+ }
+ else
+ {
+ /* single-page allocation is immediately full */
+ _mali_osk_list_add(&alloc->list, &page_table_cache.full);
+ }
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = alloc->pages.phys_base; /* return the first page */
+ *mapping = alloc->pages.mapping; /* Mapping for first page */
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+}
+
/**
 * Return a page table page (physical address @pa) back to the page table cache.
 *
 * Locates the cache allocation block containing @pa by scanning the partial
 * list first, then the full list. The page's usage bit is cleared, its
 * contents zeroed (so a future user starts from an empty table), and the
 * containing block is either freed entirely when its usage count drops to
 * zero, or (when found on the full list) moved back to the partial list.
 * All list/bitmap manipulation is serialized by page_table_cache.lock.
 */
void mali_mmu_release_table_page(u32 pa)
{
	mali_mmu_page_table_allocation * alloc, * temp_alloc;

	/* page table pages are MALI_MMU_PAGE_SIZE (4kB) aligned; low bits set indicate a bogus address */
	/* NOTE(review): "%x" is paired with a (void*) cast; "%p" (or dropping the cast) would match -- confirm before changing */
	MALI_DEBUG_PRINT_IF(1, pa & 4095, ("Bad page address 0x%x given to mali_mmu_release_table_page\n", (void*)pa));

	MALI_DEBUG_PRINT(4, ("Releasing table page 0x%08X to the cache\n", pa));

	_mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);

	/* find the entry this address belongs to */
	/* first check the partial list */
	_MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
	{
		u32 start = alloc->pages.phys_base;
		u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
		if (pa >= start && pa <= last)
		{
			/* the page must currently be marked in use */
			MALI_DEBUG_ASSERT(0 != _mali_osk_test_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map));
			_mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
			alloc->usage_count--;

			/* scrub the page so stale PTEs cannot leak to the next user */
			_mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);

			if (0 == alloc->usage_count)
			{
				/* empty, release whole page alloc */
				_mali_osk_list_del(&alloc->list);
				alloc->pages.release(&alloc->pages);
				_mali_osk_free(alloc->usage_map);
				_mali_osk_free(alloc);
			}
			_mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
			MALI_DEBUG_PRINT(4, ("(partial list)Released table page 0x%08X to the cache\n", pa));
			return;
		}
	}

	/* the check the full list */
	_MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.full, mali_mmu_page_table_allocation, list)
	{
		u32 start = alloc->pages.phys_base;
		u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
		if (pa >= start && pa <= last)
		{
			/* NOTE(review): unlike the partial-list branch, there is no
			 * usage-bit assert here -- presumably intentional since every page
			 * of a full block is in use, but worth confirming */
			_mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
			alloc->usage_count--;

			_mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);


			if (0 == alloc->usage_count)
			{
				/* empty, release whole page alloc */
				_mali_osk_list_del(&alloc->list);
				alloc->pages.release(&alloc->pages);
				_mali_osk_free(alloc->usage_map);
				_mali_osk_free(alloc);
			}
			else
			{
				/* transfer to partial list */
				_mali_osk_list_move(&alloc->list, &page_table_cache.partial);
			}

			_mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
			MALI_DEBUG_PRINT(4, ("(full list)Released table page 0x%08X to the cache\n", pa));
			return;
		}
	}

	/* @pa was not found in any cache block -- likely a double release or a corrupted address */
	MALI_DEBUG_PRINT(1, ("pa 0x%x not found in the page table cache\n", (void*)pa));

	_mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
}
+
+void* mali_memory_core_mmu_lookup(u32 id)
+{
+ mali_kernel_memory_mmu * mmu, * temp_mmu;
+
+ /* find an MMU with a matching id */
+ _MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &mmu_head, mali_kernel_memory_mmu, list)
+ {
+ if (id == mmu->id) return mmu;
+ }
+
+ /* not found */
+ return NULL;
+}
+
+void mali_memory_core_mmu_owner(void *core, void *mmu_ptr)
+{
+ mali_kernel_memory_mmu *mmu;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+ mmu->core = core;
+}
+
/**
 * Switch the MMU to a new address space: point DTE_ADDR at @page_directory
 * and flush the MMU's TLB with a ZAP_CACHE command.
 *
 * The MMU is stalled around the update. Enabling the stall may fail (e.g.
 * while the MMU is in page fault mode), but the register write and ZAP are
 * expected to take effect regardless, so the result is deliberately ignored.
 */
void mali_mmu_activate_address_space(mali_kernel_memory_mmu * mmu, u32 page_directory)
{
	mali_mmu_enable_stall(mmu); /* this might fail, but changing the DTE address and ZAP should work anyway... */
	mali_mmu_register_write(mmu, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
	mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
	mali_mmu_disable_stall(mmu);
}
+
/**
 * Try to activate a session's page table on an MMU.
 *
 * Three outcomes:
 *  - MMU idle: the session's page directory is activated, the MMU is linked
 *    into the session's active_mmus list (kept sorted by MMU id to preserve
 *    lock ordering elsewhere), and _MALI_OSK_ERR_OK is returned.
 *  - MMU already running this session (and parallel work is allowed): the
 *    usage count is bumped and _MALI_OSK_ERR_OK is returned.
 *  - MMU busy with another session: if @callback is non-NULL it is registered
 *    to fire when the MMU goes idle and _MALI_OSK_ERR_BUSY is returned.
 *
 * Any other case returns _MALI_OSK_ERR_FAULT (the initial value of err);
 * note that this includes failure to allocate the callback object.
 * Locks are taken session-lock first, then mmu-lock.
 */
_mali_osk_errcode_t mali_memory_core_mmu_activate_page_table(void* mmu_ptr, struct mali_session_data * mali_session_data, void(*callback)(void*), void * callback_argument)
{
	memory_session * requested_memory_session;
	_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
	mali_kernel_memory_mmu * mmu;

	MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
	MALI_DEBUG_ASSERT_POINTER(mali_session_data);

	mmu = (mali_kernel_memory_mmu *)mmu_ptr;

	MALI_DEBUG_PRINT(4, ("Asked to activate page table for session 0x%x on MMU %s\n", mali_session_data, mmu->description));
	requested_memory_session = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
	MALI_DEBUG_PRINT(5, ("Session 0x%x looked up as using memory session 0x%x\n", mali_session_data, requested_memory_session));

	MALI_DEBUG_ASSERT_POINTER(requested_memory_session);

	MALI_DEBUG_PRINT(7, ("Taking locks\n"));

	/* lock order: session lock before mmu lock (matches the rest of this file) */
	_mali_osk_lock_wait(requested_memory_session->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
	if (0 == mmu->usage_count)
	{
		/* no session currently active, activate the requested session */
		MALI_DEBUG_ASSERT(NULL == mmu->active_session);
		mmu->active_session = requested_memory_session;
		mmu->usage_count = 1;
		MALI_DEBUG_PRINT(4, ("MMU idle, activating page directory 0x%08X on MMU %s\n", requested_memory_session->page_directory, mmu->description));
		mali_mmu_activate_address_space(mmu, requested_memory_session->page_directory);
		{
			/* Insert mmu into the right place in the active_mmus list so that
			 * it is still sorted. The list must be sorted by ID so we can get
			 * the mutexes in the right order in
			 * _mali_ukk_mem_munmap_internal().
			 */
			_mali_osk_list_t *entry;
			for (entry = requested_memory_session->active_mmus.next;
			     entry != &requested_memory_session->active_mmus;
			     entry = entry->next)
			{
				mali_kernel_memory_mmu *temp = _MALI_OSK_LIST_ENTRY(entry, mali_kernel_memory_mmu, session_link);
				if (mmu->id < temp->id)
					break;
			}
			/* If we broke out, then 'entry' points to the list node of the
			 * first mmu with a greater ID; otherwise, it points to
			 * active_mmus. We want to add *before* this node.
			 */
			_mali_osk_list_addtail(&mmu->session_link, entry);
		}
		err = _MALI_OSK_ERR_OK;
	}

	/* Allow two cores to run in parallel if they come from the same session */
	else if (
	         (mmu->in_page_fault_handler == 0) &&
	         (requested_memory_session == mmu->active_session ) &&
	         (0==(MALI_MMU_DISALLOW_PARALLELL_WORK_OF_MALI_CORES & mmu->flags))
	        )
	{
		/* nested activation detected, just update the reference count */
		MALI_DEBUG_PRINT(4, ("Nested activation detected, %d previous activations found\n", mmu->usage_count));
		mmu->usage_count++;
		err = _MALI_OSK_ERR_OK;
	}

	else if (NULL != callback)
	{
		/* can't activate right now, notify caller on idle via callback */
		mali_kernel_memory_mmu_idle_callback * callback_object, * temp_callback_object;
		int found = 0;

		MALI_DEBUG_PRINT(3, ("The MMU is busy and is using a different address space, callback given\n"));
		/* check for existing registration -- matched on the function pointer only, the argument is not compared */
		_MALI_OSK_LIST_FOREACHENTRY(callback_object, temp_callback_object, &mmu->callbacks, mali_kernel_memory_mmu_idle_callback, link)
		{
			if (callback_object->callback == callback)
			{
				found = 1;
				break;
			}
		}

		if (found)
		{
			MALI_DEBUG_PRINT(5, ("Duplicate callback registration found, ignoring\n"));
			/* callback already registered */
			err = _MALI_OSK_ERR_BUSY;
		}
		else
		{
			MALI_DEBUG_PRINT(5,("New callback, registering\n"));
			/* register the new callback */
			/* NOTE(review): if this malloc fails, err stays _MALI_OSK_ERR_FAULT and no
			 * callback is registered -- the caller sees a hard error rather than BUSY */
			callback_object = _mali_osk_malloc(sizeof(mali_kernel_memory_mmu_idle_callback));
			if (NULL != callback_object)
			{
				MALI_DEBUG_PRINT(7,("Callback struct setup\n"));
				callback_object->callback = callback;
				callback_object->callback_argument = callback_argument;
				_mali_osk_list_addtail(&callback_object->link, &mmu->callbacks);
				err = _MALI_OSK_ERR_BUSY;
			}
		}
	}

	_mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_lock_signal(requested_memory_session->lock, _MALI_OSK_LOCKMODE_RW);

	MALI_ERROR(err);
}
+
/**
 * Drop one reference to the MMU's currently active address space.
 *
 * When the last reference is released, the MMU is switched to the empty page
 * directory, unlinked from the session's active_mmus list, and all registered
 * idle callbacks are invoked. The callbacks are invoked with no locks held:
 * the callback list is spliced onto a local list and re-initialized first so
 * callbacks may safely re-register themselves.
 *
 * A NULL active_session is tolerated (spurious page fault handling).
 */
void mali_memory_core_mmu_release_address_space_reference(void* mmu_ptr)
{
	mali_kernel_memory_mmu_idle_callback * callback_object, * temp;
	mali_kernel_memory_mmu * mmu;
	memory_session * session;

	_MALI_OSK_LIST_HEAD(callbacks);

	MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
	mmu = (mali_kernel_memory_mmu *)mmu_ptr;

	/* NOTE(review): active_session is read before taking mmu->lock -- presumably safe
	 * because only the reference holder clears it, but worth confirming */
	session = mmu->active_session;

	/* support that we handle spurious page faults */
	if (NULL != session)
	{
		_mali_osk_lock_wait(session->lock, _MALI_OSK_LOCKMODE_RW);
	}

	_mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
	MALI_DEBUG_PRINT(4, ("Deactivation of address space on MMU %s, %d references exists\n", mmu->description, mmu->usage_count));
	MALI_DEBUG_ASSERT(0 != mmu->usage_count);
	mmu->usage_count--;
	if (0 != mmu->usage_count)
	{
		/* other references remain: nothing further to do, just unwind the locks */
		MALI_DEBUG_PRINT(4, ("MMU still in use by this address space, %d references still exists\n", mmu->usage_count));
		_mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
		/* support that we handle spurious page faults */
		if (NULL != session)
		{
			_mali_osk_lock_signal(session->lock, _MALI_OSK_LOCKMODE_RW);
		}
		return;
	}

	MALI_DEBUG_PRINT(4, ("Activating the empty page directory on %s\n", mmu->description));

	/* last reference gone, deactivate current address space */
	mali_mmu_activate_address_space(mmu, mali_empty_page_directory);

	/* unlink from session */
	_mali_osk_list_delinit(&mmu->session_link);
	/* remove the active session pointer */
	mmu->active_session = NULL;

	/* Notify all registered callbacks.
	 * We have to be clever here:
	 * We must call the callbacks with the spinlock unlocked and
	 * the callback list emptied to allow them to re-register.
	 * So we make a copy of the list, clears the list and then later call the callbacks on the local copy
	 */
	/* copy list */
	_MALI_OSK_INIT_LIST_HEAD(&callbacks);
	_mali_osk_list_splice(&mmu->callbacks, &callbacks);
	/* clear the original, allowing new registrations during the callback */
	_MALI_OSK_INIT_LIST_HEAD(&mmu->callbacks);

	/* end of mmu manipulation, so safe to unlock */
	_mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);

	/* then finally remove the (possible) session lock, supporting that no session was active (spurious page fault handling) */
	if (NULL != session)
	{
		_mali_osk_lock_signal(session->lock, _MALI_OSK_LOCKMODE_RW);
	}

	/* invoke and free each queued callback; the local list owns these nodes now */
	_MALI_OSK_LIST_FOREACHENTRY(callback_object, temp, &callbacks, mali_kernel_memory_mmu_idle_callback, link)
	{
		MALI_DEBUG_ASSERT_POINTER(callback_object->callback);
		(callback_object->callback)(callback_object->callback_argument);
		_mali_osk_list_del(&callback_object->link);
		_mali_osk_free(callback_object);
	}
}
+
+void mali_memory_core_mmu_unregister_callback(void* mmu_ptr, void(*callback)(void*))
+{
+ mali_kernel_memory_mmu_idle_callback * callback_object, * temp_callback_object;
+ mali_kernel_memory_mmu * mmu;
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+
+ MALI_DEBUG_ASSERT_POINTER(callback);
+ MALI_DEBUG_ASSERT_POINTER(mmu_ptr);
+
+ mmu = (mali_kernel_memory_mmu *)mmu_ptr;
+
+ _mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+ _MALI_OSK_LIST_FOREACHENTRY(callback_object, temp_callback_object, &mmu->callbacks, mali_kernel_memory_mmu_idle_callback, link)
+ {
+ MALI_DEBUG_ASSERT_POINTER(callback_object->callback);
+ if (callback_object->callback == callback)
+ {
+ _mali_osk_list_del(&callback_object->link);
+ _mali_osk_free(callback_object);
+ break;
+ }
+ }
+ _mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
/**
 * Ensure page tables exist for the Mali virtual range described by
 * @descriptor, creating and reference-counting one page table per page
 * directory entry (PDE) spanned by the range.
 *
 * On partial failure, every reference taken by this call is rolled back and
 * any page table whose count drops to zero is released; returns
 * _MALI_OSK_ERR_NOMEM in that case, otherwise success.
 *
 * With USING_MALI400_L2_CACHE: if the session has active MMUs and the page
 * directory gained new entries, the directory page is invalidated in the
 * Mali L2 cache (the L2 caches page directory/tables from Mali-400 r1p0 on).
 */
static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor)
{
	/* allocate page tables, if needed */
	int i;
	const int first_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address);
	int last_pde_idx;
	memory_session * session_data;
#if defined USING_MALI400_L2_CACHE
	int has_active_mmus = 0;
	int page_dir_updated = 0;
#endif


	if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
	{
		/* one extra Mali page is reserved for the guard page at the end of the range */
		last_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + _MALI_OSK_MALI_PAGE_SIZE + descriptor->size - 1);
	}
	else
	{
		last_pde_idx = MALI_MMU_PDE_ENTRY(descriptor->mali_address + descriptor->size - 1);
	}

	session_data = (memory_session*)descriptor->mali_addr_mapping_info;
	MALI_DEBUG_ASSERT_POINTER(session_data);

	MALI_DEBUG_PRINT(4, ("allocating page tables for Mali virtual address space 0x%08X to 0x%08X\n", descriptor->mali_address, descriptor->mali_address + descriptor->size - 1));

#if defined USING_MALI400_L2_CACHE
	if (0 == _mali_osk_list_empty(&session_data->active_mmus))
	{
		/*
		 * We have active MMUs, so we are probably in the process of alocating more memory for a suspended GP job (PLBU heap)
		 * From Mali-400 MP r1p0, MMU page directory/tables are also cached by the Mali L2 cache, thus we need to invalidate the page directory
		 * from the L2 cache if we add new page directory entries (PDEs) to the page directory.
		 * We only need to do this when we have an active MMU, because we otherwise invalidate the entire Mali L2 cache before at job start
		 */
		has_active_mmus = 1;
	}
#endif

	/* walk every PDE covered by the range, creating page tables where absent */
	for (i = first_pde_idx; i <= last_pde_idx; i++)
	{
		if ( 0 == (_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)) & MALI_MMU_FLAGS_PRESENT) )
		{
			u32 pte_phys;
			mali_io_address pte_mapped;
			_mali_osk_errcode_t err;

			/* allocate a new page table */
			MALI_DEBUG_ASSERT(0 == session_data->page_entries_usage_count[i]);
			MALI_DEBUG_ASSERT(NULL == session_data->page_entries_mapped[i]);

			err = mali_mmu_get_table_page(&pte_phys, &pte_mapped);
			if (_MALI_OSK_ERR_OK == err)
			{
				session_data->page_entries_mapped[i] = pte_mapped;
				MALI_DEBUG_ASSERT_POINTER( session_data->page_entries_mapped[i] );

				_mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), pte_phys | MALI_MMU_FLAGS_PRESENT); /* mark page table as present */

				/* update usage count */
				session_data->page_entries_usage_count[i]++;
#if defined USING_MALI400_L2_CACHE
				page_dir_updated = 1;
#endif
				continue; /* continue loop */
			}

			MALI_DEBUG_PRINT(1, ("Page table alloc failed\n"));
			break; /* abort loop, failed to allocate one or more page tables */
		}
		else
		{
			/* page table already present: just take a reference */
			session_data->page_entries_usage_count[i]++;
		}
	}

	if (i <= last_pde_idx)
	{
		/* one or more pages could not be allocated, release reference count for the ones we added one for */
		/* adjust for the one which caused the for loop to be aborted */
		i--;

		/* roll back from the failing PDE down to the first one touched */
		while (i >= first_pde_idx)
		{
			MALI_DEBUG_ASSERT(0 != session_data->page_entries_usage_count[i]);
			session_data->page_entries_usage_count[i]--;
			if (0 == session_data->page_entries_usage_count[i])
			{
				/* last reference removed */
				mali_mmu_release_table_page(MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32))));
				session_data->page_entries_mapped[i] = NULL;
				_mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0); /* mark as not present in the page directory */
			}
			i--;
		}

		MALI_ERROR(_MALI_OSK_ERR_NOMEM);
	}

#if defined USING_MALI400_L2_CACHE
	if (1 == has_active_mmus && 1 == page_dir_updated)
	{
		/*
		 * We have updated the page directory and have an active MMU using it, so invalidate it in the Mali L2 cache.
		 */
		mali_kernel_l2_cache_invalidate_page(session_data->page_directory);
	}
#endif

	/* all OK */
	MALI_SUCCESS;
}
+
/**
 * Unmap the Mali virtual range described by @descriptor and drop one
 * reference on every page table (PDE) it spans.
 *
 * For each PDE: if this was the last reference, the whole page table is
 * released and the directory entry cleared (no need to zero individual
 * PTEs); otherwise only the PTEs inside the range are zeroed.
 *
 * With USING_MALI400_L2_CACHE and active MMUs, each modified page table --
 * and the page directory itself if it changed -- is invalidated in the
 * Mali L2 cache.
 */
static void mali_address_manager_release(mali_memory_allocation * descriptor)
{
	int first_pde_idx;
	int last_pde_idx;
	memory_session * session_data;
	u32 mali_address;
	u32 mali_address_end;
	u32 left;
	int i;
#if defined USING_MALI400_L2_CACHE
	int has_active_mmus = 0;
	int page_dir_updated = 0;
#endif

	MALI_DEBUG_ASSERT_POINTER(descriptor);
	session_data = (memory_session*)descriptor->mali_addr_mapping_info;
	MALI_DEBUG_ASSERT_POINTER(session_data);
	MALI_DEBUG_ASSERT_POINTER(session_data->page_directory_mapped);

	mali_address = descriptor->mali_address;
	mali_address_end = descriptor->mali_address + descriptor->size;
	left = descriptor->size;

	first_pde_idx = MALI_MMU_PDE_ENTRY(mali_address);
	last_pde_idx = MALI_MMU_PDE_ENTRY(mali_address_end - 1);

	MALI_DEBUG_PRINT(3, ("Zapping Mali MMU table for address 0x%08X size 0x%08X\n", mali_address, left));
	MALI_DEBUG_PRINT(4, ("Zapping PDE %d through %d\n", first_pde_idx, last_pde_idx));

#if defined USING_MALI400_L2_CACHE
	if (0 == _mali_osk_list_empty(&session_data->active_mmus))
	{
		/*
		 * From Mali-400 MP r1p0, MMU page directory/tables are also cached by the Mali L2 cache, thus we need to invalidate the page tables
		 * from the L2 cache to ensure that the memory is unmapped.
		 * We only need to do this when we have an active MMU, because we otherwise invalidate the entire Mali L2 cache before at job start
		 */
		has_active_mmus = 1;
	}
#endif


	for (i = first_pde_idx; i <= last_pde_idx; i++)
	{
		/* each PDE covers 4MB (0x400000) of Mali virtual address space */
		int size_inside_pte = left < 0x400000 ? left : 0x400000;
		const int first_pte_idx = MALI_MMU_PTE_ENTRY(mali_address);
		int last_pte_idx = MALI_MMU_PTE_ENTRY(mali_address + size_inside_pte - 1);

		if (last_pte_idx < first_pte_idx)
		{
			/* The last_pte_idx is into the next PTE, crop it to fit into this */
			last_pte_idx = 1023; /* 1024 PTE entries, so 1023 is the last one */
			size_inside_pte = MALI_MMU_ADDRESS(i + 1, 0) - mali_address;
		}

		MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped[i]);
		MALI_DEBUG_ASSERT(0 != session_data->page_entries_usage_count[i]);
		MALI_DEBUG_PRINT(4, ("PDE %d: zapping entries %d through %d, address 0x%08X, size 0x%08X, left 0x%08X (page table at 0x%08X)\n",
		                     i, first_pte_idx, last_pte_idx, mali_address, size_inside_pte, left,
		                     MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)))));

		session_data->page_entries_usage_count[i]--;

		if (0 == session_data->page_entries_usage_count[i])
		{
			MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
			/* last reference removed, no need to zero out each PTE */
			mali_mmu_release_table_page(MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32))));
			session_data->page_entries_mapped[i] = NULL;
			_mali_osk_mem_iowrite32(session_data->page_directory_mapped, i * sizeof(u32), 0); /* mark as not present in the page directory */
#if defined USING_MALI400_L2_CACHE
			page_dir_updated = 1;
#endif
		}
		else
		{
			int j;

			/* other mappings still use this page table: zero only our PTEs */
			for (j = first_pte_idx; j <= last_pte_idx; j++)
			{
				_mali_osk_mem_iowrite32(session_data->page_entries_mapped[i], j * sizeof(u32), 0);
			}

#if defined USING_MALI400_L2_CACHE
			if (1 == has_active_mmus)
			{
				/* Invalidate the page we've just modified */
				mali_kernel_l2_cache_invalidate_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
			}
#endif
		}
		left -= size_inside_pte;
		mali_address += size_inside_pte;
	}

#if defined USING_MALI400_L2_CACHE
	if ((1 == page_dir_updated) && (1== has_active_mmus))
	{
		/* The page directory was also updated */
		mali_kernel_l2_cache_invalidate_page(session_data->page_directory);
	}
#endif
}
+
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size)
+{
+ memory_session * session_data;
+ u32 mali_address;
+ u32 mali_address_end;
+ u32 current_phys_addr;
+#if defined USING_MALI400_L2_CACHE
+ int has_active_mmus = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+ current_phys_addr = *phys_addr;
+
+ session_data = (memory_session*)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ mali_address = descriptor->mali_address + offset;
+ mali_address_end = descriptor->mali_address + offset + size;
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == _mali_osk_list_empty(&session_data->active_mmus))
+ {
+ /*
+ * We have active MMUs, so we are probably in the process of alocating more memory for a suspended GP job (PLBU heap)
+ * From Mali-400 MP r1p0, MMU page directory/tables are also cached by the Mali L2 cache, thus we need to invalidate the page tables
+ * from the L2 cache when we have allocated more heap memory.
+ * We only need to do this when we have an active MMU, because we otherwise invalidate the entire Mali L2 cache before at job start
+ */
+ has_active_mmus = 1;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(6, ("Mali map: mapping 0x%08X to Mali address 0x%08X length 0x%08X\n", current_phys_addr, mali_address, size));
+
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped);
+
+ for ( ; mali_address < mali_address_end; mali_address += MALI_MMU_PAGE_SIZE, current_phys_addr += MALI_MMU_PAGE_SIZE)
+ {
+ MALI_DEBUG_ASSERT_POINTER(session_data->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+ _mali_osk_mem_iowrite32_relaxed(session_data->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)], MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32), current_phys_addr | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+ }
+ _mali_osk_write_mem_barrier();
+
+#if defined USING_MALI400_L2_CACHE
+ if (1 == has_active_mmus)
+ {
+ int i;
+ const int first_pde_idx = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde_idx = MALI_MMU_PDE_ENTRY(mali_address_end - 1);
+
+ /*
+ * Invalidate the updated page table(s), incase they have been used for something
+ * else since last job start (invalidation of entire Mali L2 cache)
+ */
+ for (i = first_pde_idx; i <= last_pde_idx; i++)
+ {
+ mali_kernel_l2_cache_invalidate_page( _mali_osk_mem_ioread32(session_data->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ }
+ }
+#endif
+
+ MALI_SUCCESS;
+}
+
/* This handler registered to mali_mmap for MMU builds */
/**
 * mmap handler for MMU builds: allocate Mali memory for a user mapping.
 *
 * Builds a mali_memory_allocation descriptor from @args, allocates backing
 * memory through the allocation engine, and on success zaps the TLB of every
 * MMU currently active on the session (their page tables just changed).
 * The descriptor pointer is handed back to userspace via args->cookie and is
 * later used by _mali_ukk_mem_munmap().
 *
 * Returns _MALI_OSK_ERR_INVALID_ARGS / _MALI_OSK_ERR_FAULT on bad input,
 * _MALI_OSK_ERR_NOMEM if the descriptor cannot be allocated, and
 * _MALI_OSK_ERR_FAULT on allocation-engine OOM (non-fatal for the caller).
 */
_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
{
	struct mali_session_data * mali_session_data;
	mali_memory_allocation * descriptor;
	memory_session * session_data;

	/* validate input */
	if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }

	/* Unpack arguments */
	mali_session_data = (struct mali_session_data *)args->ctx;

	if (NULL == mali_session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: mali_session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }

	MALI_DEBUG_ASSERT( mali_subsystem_memory_id >= 0 );

	session_data = mali_kernel_session_manager_slot_get(mali_session_data, mali_subsystem_memory_id);
	/* validate input */
	if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }

	descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
	if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }

	/* args->phys_addr carries the requested Mali virtual address for this mapping */
	descriptor->size = args->size;
	descriptor->mali_address = args->phys_addr;
	descriptor->mali_addr_mapping_info = (void*)session_data;

	descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
	descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
	descriptor->lock = session_data->lock;
	_mali_osk_list_init( &descriptor->list );

	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	if (0 == mali_allocation_engine_allocate_memory(memory_engine, descriptor, physical_memory_allocators, &session_data->memory_head))
	{
		mali_kernel_memory_mmu * mmu, * temp_mmu;

		_MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
		{
			/* no need to lock the MMU as we own it already */
			MALI_DEBUG_PRINT(5, ("Zapping the cache of mmu %s as it's using the page table we have updated\n", mmu->description));

			_mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);

			mali_mmu_enable_stall(mmu); /* this might fail, but ZAP should work anyway... */
			mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
			mali_mmu_disable_stall(mmu);

			_mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
		}

		_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

		/* All ok, write out any information generated from this call */
		args->mapping = descriptor->mapping;
		args->cookie = (u32)descriptor;

		MALI_DEBUG_PRINT(7, ("MMAP OK\n"));
		/* All done */
		MALI_SUCCESS;
	}
	else
	{
		_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
		/* OOM, but not a fatal error */
		MALI_DEBUG_PRINT(4, ("Memory allocation failure, OOM\n"));
		_mali_osk_free(descriptor);
		/* Linux will free the CPU address allocation, userspace client the Mali address allocation */
		MALI_ERROR(_MALI_OSK_ERR_FAULT);
	}
}
+
/**
 * Core of munmap: release the Mali memory behind @args->cookie.
 *
 * Protocol (caller holds the session lock):
 *  1. First pass over the session's active MMUs: try to stall each one.
 *     MMUs in page fault mode are skipped; if a stall fails, return
 *     _MALI_OSK_ERR_BUSY so the caller can drop the session lock, sleep,
 *     and retry (the page fault handler needs the session lock to finish).
 *  2. Second pass: take every MMU lock. active_mmus is kept sorted by MMU
 *     id (see mali_memory_core_mmu_activate_page_table()) so the locks are
 *     always acquired in a consistent order.
 *  3. Release the memory, then ZAP each MMU's TLB, un-stall, and unlock.
 */
static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args )
{
	memory_session * session_data;
	mali_kernel_memory_mmu * mmu, * temp_mmu;
	mali_memory_allocation * descriptor;

	descriptor = (mali_memory_allocation *)args->cookie;
	MALI_DEBUG_ASSERT_POINTER(descriptor);

	/** @note args->context unused; we use the memory_session from the cookie */
	/* args->mapping and args->size are also discarded. They are only necessary
	for certain do_munmap implementations. However, they could be used to check the
	descriptor at this point. */

	session_data = (memory_session*)descriptor->mali_addr_mapping_info;
	MALI_DEBUG_ASSERT_POINTER(session_data);

	/* Stall the MMU(s) which is using the address space we're operating on.
	 * Note that active_mmus must be sorted in order of ID to avoid a mutex
	 * ordering violation.
	 */
	_MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
	{
		u32 status;
		status = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_STATUS);
		if ( MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE == (status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) ) {
			MALI_DEBUG_PRINT(2, ("Stopped stall attempt for mmu with id %d since it is in page fault mode.\n", mmu->id));
			continue;
		}
		_mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);

		/*
		 * If we're unable to stall, then make sure we tell our caller that,
		 * the caller should then release the session lock for a while,
		 * then this function again.
		 * This function will fail if we're in page fault mode, and to get
		 * out of page fault mode, the page fault handler must be able to
		 * take the session lock.
		 */
		if (!mali_mmu_enable_stall(mmu))
		{
			_mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
			return _MALI_OSK_ERR_BUSY;
		}

		/* NOTE(review): the MMU lock is dropped here and re-taken in the next
		 * loop; the stall itself stays in effect across that window */
		_mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
	}

	_MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
	{
		_mali_osk_lock_wait(mmu->lock, _MALI_OSK_LOCKMODE_RW);
	}

	/* This function also removes the memory from the session's memory list */
	mali_allocation_engine_release_memory(memory_engine, descriptor);
	_mali_osk_free(descriptor);

	/* any L2 maintenance was done during mali_allocation_engine_release_memory */
	/* the session is locked, so the active mmu list should be the same */
	/* zap the TLB and resume operation */
	_MALI_OSK_LIST_FOREACHENTRY(mmu, temp_mmu, &session_data->active_mmus, mali_kernel_memory_mmu, session_link)
	{
		mali_mmu_register_write(mmu, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
		mali_mmu_disable_stall(mmu);

		_mali_osk_lock_signal(mmu->lock, _MALI_OSK_LOCKMODE_RW);
	}

	return _MALI_OSK_ERR_OK;
}
+
+/* Handler for unmapping memory for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+ _mali_osk_lock_t *descriptor_lock;
+ _mali_osk_errcode_t err;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ MALI_DEBUG_ASSERT_POINTER((memory_session*)descriptor->mali_addr_mapping_info);
+
+ descriptor_lock = descriptor->lock; /* should point to the session data lock... */
+
+ err = _MALI_OSK_ERR_BUSY;
+ while (err == _MALI_OSK_ERR_BUSY)
+ {
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_wait( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+
+ err = _mali_ukk_mem_munmap_internal( args );
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_signal( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+
+ if (err == _MALI_OSK_ERR_BUSY)
+ {
+ /*
+ * Reason for this;
+ * We where unable to stall the MMU, probably because we are in page fault handling.
+ * Sleep for a while with the session lock released, then try again.
+ * Abnormal termination of programs with running Mali jobs is a normal reason for this.
+ */
+ _mali_osk_time_ubusydelay(10);
+ }
+ }
+
+ return err;
+}
+
/* Is called when the rendercore wants the mmu to give an interrupt */
static void mali_mmu_probe_irq_trigger(mali_kernel_memory_mmu * mmu)
{
	MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_trigger\n"));
	/* writing INT_RAWSTAT forces both the page-fault and read-bus-error
	 * interrupt bits to assert, so the IRQ probe can verify delivery */
	mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
}
+
+/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
+static _mali_osk_errcode_t mali_mmu_probe_irq_acknowledge(mali_kernel_memory_mmu * mmu)
+{
+ u32 int_stat;
+
+ int_stat = mali_mmu_register_read(mmu, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ }
+ else MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_mmu_register_write(mmu, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ }
+ else MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+
+ if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+ {
+ MALI_SUCCESS;
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
/* Accumulator for dumping MMU state (register writes and page tables).
 * Used in two passes: a sizing pass with buffer == NULL that only grows the
 * *_size counters, and a fill pass that also writes into 'buffer'. */
struct dump_info
{
	u32 buffer_left;          /* bytes of space remaining in 'buffer' */
	u32 register_writes_size; /* accumulated bytes of (address, value) register records */
	u32 page_table_dump_size; /* accumulated bytes of page dumps (page + 4-byte address each) */
	u32 *buffer;              /* output cursor; NULL during the sizing pass */
};
+
+/* Record one register write as an (address, value) pair of u32 words in the dump
+ * buffer (when info->buffer is non-NULL) and/or print it to the kernel log.
+ * Always accounts the two words in register_writes_size when info is given.
+ * Returns _MALI_OSK_ERR_NOMEM if the remaining buffer is too small. */
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char * comment, struct dump_info * info, int dump_to_serial)
+{
+ if (dump_to_serial) MALI_DEBUG_PRINT(1, ("writereg %08X %08X # %s\n", where, what, comment));
+
+ if (NULL != info)
+ {
+ info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+
+ if (NULL != info->buffer)
+ {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32)*2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Dump one MMU page: optionally print it to the kernel log (256 rows of four
+ * u32 words = 4096 bytes), and/or copy it into the dump buffer preceded by its
+ * physical address. Sizes are accounted in info even in the sizing pass. */
+static _mali_osk_errcode_t dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info, int dump_to_serial)
+{
+ if (dump_to_serial)
+ {
+ int i;
+ for (i = 0; i < 256; i++)
+ {
+ /* each row covers 16 bytes (4 words), hence phys_addr + 16*i */
+ MALI_DEBUG_PRINT(1, ("%08X: %08X %08X %08X %08X\n", phys_addr + 16*i, _mali_osk_mem_ioread32(page, (i*4 + 0) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 1) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 2) * sizeof(u32)),
+ _mali_osk_mem_ioread32(page, (i*4 + 3) * sizeof(u32))));
+
+ }
+ }
+
+ if (NULL != info)
+ {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer)
+ {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ /* record layout: physical address word followed by the raw page contents */
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Dump the session's page directory followed by every mapped page table.
+ * The directory has 1024 entries; for each mapped entry the table's physical
+ * address is recovered from the directory entry by masking off the flag bits.
+ * No-op (success) if the session has no page directory mapped. */
+static _mali_osk_errcode_t dump_mmu_page_table(memory_session * session_data, struct dump_info * info)
+{
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ if (NULL != session_data->page_directory_mapped)
+ {
+ int i;
+
+ MALI_CHECK_NO_ERROR(
+ dump_page(session_data->page_directory_mapped, session_data->page_directory, info, 0)
+ );
+
+ for (i = 0; i < 1024; i++)
+ {
+ if (NULL != session_data->page_entries_mapped[i])
+ {
+ MALI_CHECK_NO_ERROR(
+ dump_page(session_data->page_entries_mapped[i], _mali_osk_mem_ioread32(session_data->page_directory_mapped, i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info, 0)
+ );
+ }
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Record the register writes needed to replay the session's MMU setup:
+ * page directory address, then the two writes to offset 0x8. */
+static _mali_osk_errcode_t dump_mmu_registers(memory_session * session_data, struct dump_info * info)
+{
+ MALI_CHECK_NO_ERROR(writereg(0x00000000, session_data->page_directory, "set the page directory address", info, 0));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap???", info, 0));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info, 0));
+ MALI_SUCCESS;
+}
+
+/* User-kernel entry point: compute the buffer size a full MMU dump would need.
+ * Runs the dump routines with info.buffer == NULL so only sizes accumulate. */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data, &info));
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data, &info));
+ args->size = info.register_writes_size + info.page_table_dump_size;
+ MALI_SUCCESS;
+}
+
+/* User-kernel entry point: write the MMU register-replay records followed by
+ * the raw page table pages into the caller-supplied buffer, and report where
+ * each section starts and how large it is. The caller should size the buffer
+ * via _mali_ukk_query_mmu_page_table_dump_size() first. */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ memory_session * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (memory_session *)mali_kernel_session_manager_slot_get(args->ctx, mali_subsystem_memory_id);
+
+ info.buffer_left = args->size;
+ info.buffer = args->buffer;
+
+ /* register section starts at the current cursor */
+ args->register_writes = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data, &info));
+
+ /* page table section starts where the register section ended */
+ args->page_table_dump = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data, &info));
+
+ args->register_writes_size = info.register_writes_size;
+ args->page_table_dump_size = info.page_table_dump_size;
+
+ MALI_SUCCESS;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ *
+ * Always fails: big-block allocation is only meaningful in the non-MMU build.
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/**
+ * Stub function to satisfy UDD interface exclusion requirement.
+ * This is because the Base code compiles in \b both MMU and non-MMU calls,
+ * so both sets must be declared (but the 'unused' set may be stub)
+ *
+ * Always fails: big-block free is only meaningful in the non-MMU build.
+ */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args )
+{
+ MALI_IGNORE( args );
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/* Report total memory usage in bytes across all registered physical memory
+ * allocators (physical_memory_allocators is a file-scope list defined elsewhere
+ * in this translation unit). */
+u32 _mali_ukk_report_memory_usage(void)
+{
+ return mali_allocation_engine_memory_usage(physical_memory_allocators);
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_mem_mmu.h b/drivers/media/video/samsung/mali/common/mali_kernel_mem_mmu.h
new file mode 100644
index 0000000..6a110d0
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_mem_mmu.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_MMU_H__
+#define __MALI_KERNEL_MEM_MMU_H__
+
+#include "mali_kernel_session_manager.h"
+
+/**
+ * Lookup a MMU core by ID.
+ * @param id ID of the MMU to find
+ * @return NULL if no valid MMU with the given ID was found, non-NULL if a core was found.
+ */
+void* mali_memory_core_mmu_lookup(u32 id);
+
+/**
+ * Set the core pointer of MMU to core owner of MMU
+ *
+ * @param core Core holding this MMU
+ * @param mmu_ptr The MMU whose core pointer needs to be set to the core holding the MMU
+ *
+ */
+void mali_memory_core_mmu_owner(void *core, void *mmu_ptr);
+
+/**
+ * Activate a user session with its address space on the given MMU.
+ * If the session can't be activated due to that the MMU is busy and
+ * a callback pointer is given, the callback will be called once the MMU becomes idle.
+ * If the same callback pointer is registered multiple time it will only be called once.
+ * Nested activations are supported.
+ * Each call must be matched by a call to @see mali_memory_core_mmu_release_address_space_reference
+ *
+ * @param mmu The MMU to activate the address space on
+ * @param mali_session_data The user session object which address space to activate
+ * @param callback Pointer to the function to call when the MMU becomes idle
+ * @param callback_arg Argument given to the callback
+ * @return 0 if the address space was activated, -EBUSY if the MMU was busy, -EFAULT in all other cases.
+ */
+int mali_memory_core_mmu_activate_page_table(void* mmu_ptr, struct mali_session_data * mali_session_data, void(*callback)(void*), void * callback_argument);
+
+/**
+ * Release a reference to the current active address space.
+ * Once the last reference is released any callback(s) registered will be called before the function returns
+ *
+ * @note Caution must be shown calling this function with locks held due to that callback can be called
+ * @param mmu The mmu to release a reference to the active address space of
+ */
+void mali_memory_core_mmu_release_address_space_reference(void* mmu);
+
+/**
+ * Soft reset of MMU - needed after power up
+ *
+ * @param mmu_ptr The MMU pointer registered with the relevant core
+ */
+void mali_kernel_mmu_reset(void * mmu_ptr);
+
+void mali_kernel_mmu_force_bus_reset(void * mmu_ptr);
+
+/**
+ * Unregister a previously registered callback.
+ * @param mmu The MMU to unregister the callback on
+ * @param callback The function to unregister
+ */
+void mali_memory_core_mmu_unregister_callback(void* mmu, void(*callback)(void*));
+
+
+
+#endif /* __MALI_KERNEL_MEM_MMU_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.c b/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.c
new file mode 100644
index 0000000..324fcab
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+
+/* Book-keeping for one allocation served from OS memory; stores what is needed
+ * to undo the mapping in os_allocator_release(). */
+typedef struct os_allocation
+{
+ u32 num_pages; /* number of CPU pages actually committed */
+ u32 offset_start; /* offset within the descriptor where this allocation begins */
+ mali_allocation_engine * engine; /* engine used to map; needed for the unmap call */
+ mali_memory_allocation * descriptor; /* owning descriptor; needed for the unmap call */
+} os_allocation;
+
+/* Per-allocator state for the OS-memory backend; all counters are protected
+ * by the mutex below. */
+typedef struct os_allocator
+{
+ _mali_osk_lock_t *mutex;
+
+ /**
+ * Maximum number of pages to allocate from the OS
+ */
+ u32 num_pages_max;
+
+ /**
+ * Number of pages allocated from the OS
+ */
+ u32 num_pages_allocated;
+
+ /** CPU Usage adjustment (add to mali physical address to get cpu physical address) */
+ u32 cpu_usage_adjust;
+} os_allocator;
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void os_allocator_release(void * ctx, void * handle);
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block );
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator);
+
+/* Create an OS-memory physical allocator.
+ * max_allocation (bytes) is rounded up to a whole number of CPU pages and
+ * converted to a page budget. On any allocation/lock failure all partially
+ * created objects are freed and NULL is returned. */
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ os_allocator * info;
+
+ /* round budget up to a page multiple */
+ max_allocation = (max_allocation + _MALI_OSK_CPU_PAGE_SIZE-1) & ~(_MALI_OSK_CPU_PAGE_SIZE-1);
+
+ MALI_DEBUG_PRINT(2, ("Mali OS memory allocator created with max allocation size of 0x%X bytes, cpu_usage_adjust 0x%08X\n", max_allocation, cpu_usage_adjust));
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(os_allocator));
+ if (NULL != info)
+ {
+ info->num_pages_max = max_allocation / _MALI_OSK_CPU_PAGE_SIZE;
+ info->num_pages_allocated = 0;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ /* lock order id 106 within the driver's ordered-lock scheme */
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, 106);
+ if (NULL != info->mutex)
+ {
+ allocator->allocate = os_allocator_allocate;
+ allocator->allocate_page_table_block = os_allocator_allocate_page_table_block;
+ allocator->destroy = os_allocator_destroy;
+ allocator->stat = os_allocator_stat;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+/* Report bytes currently allocated by this allocator.
+ * NOTE(review): pages are counted in CPU pages elsewhere but multiplied by
+ * _MALI_OSK_MALI_PAGE_SIZE here — correct only if the two sizes match; verify. */
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator)
+{
+ os_allocator * info;
+ info = (os_allocator*)allocator->ctx;
+ return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
+}
+
+/* Tear down the allocator: release its mutex, its private state, and the
+ * allocator object itself. Caller must ensure no allocations remain in use. */
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ os_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (os_allocator*)allocator->ctx;
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+/* Commit OS pages to the range [*offset, descriptor->size) one CPU page at a
+ * time, stopping at the allocator's page budget. Returns FINISHED when the
+ * whole remainder was mapped, PARTIAL when some pages were mapped (so the next
+ * allocator in the chain can continue), NONE when nothing was mapped, or
+ * INTERNAL_FAILURE on a fatal mapping error (after unmapping this call's pages).
+ * On success, alloc_info->release is set so the pages can be returned later.
+ * Advances *offset by the number of bytes mapped. */
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ u32 left;
+ os_allocator * info;
+ os_allocation * allocation;
+ int pages_allocated = 0;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size - *offset;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ /** @note this code may not work on Linux, or may require a more complex Linux implementation */
+ allocation = _mali_osk_malloc(sizeof(os_allocation));
+ if (NULL != allocation)
+ {
+ u32 os_mem_max_usage = info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE;
+ allocation->offset_start = *offset;
+ /* provisional page count (remainder rounded up); overwritten below with the
+ * number of pages actually committed */
+ allocation->num_pages = ((left + _MALI_OSK_CPU_PAGE_SIZE - 1) & ~(_MALI_OSK_CPU_PAGE_SIZE - 1)) >> _MALI_OSK_CPU_PAGE_ORDER;
+ MALI_DEBUG_PRINT(6, ("Allocating page array of size %d bytes\n", allocation->num_pages * sizeof(struct page*)));
+
+ /* map one page per iteration until done, over budget, or OS memory pressure */
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max) && _mali_osk_mem_check_allocated(os_mem_max_usage))
+ {
+ err = mali_allocation_engine_map_physical(engine, descriptor, *offset, MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, info->cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ if ( _MALI_OSK_ERR_NOMEM == err)
+ {
+ /* 'Partial' allocation (or, out-of-memory on first page) */
+ break;
+ }
+
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+
+ /* Fatal error, cleanup any previous pages allocated. */
+ if ( pages_allocated > 0 )
+ {
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*pages_allocated, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+ /* (*offset) doesn't need to be restored; it will not be used by the caller on failure */
+ }
+
+ pages_allocated = 0;
+
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ break;
+ }
+
+ /* Loop iteration */
+ if (left < _MALI_OSK_CPU_PAGE_SIZE) left = 0;
+ else left -= _MALI_OSK_CPU_PAGE_SIZE;
+
+ pages_allocated++;
+
+ *offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+
+ if (left) MALI_PRINT(("Out of memory. Mali memory allocated: %d kB Configured maximum OS memory usage: %d kB\n",
+ (info->num_pages_allocated * _MALI_OSK_CPU_PAGE_SIZE)/1024, (info->num_pages_max* _MALI_OSK_CPU_PAGE_SIZE)/1024));
+
+ /* Loop termination; decide on result */
+ if (pages_allocated)
+ {
+ MALI_DEBUG_PRINT(6, ("Allocated %d pages\n", pages_allocated));
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+ _mali_osk_cache_ensure_uncached_range_flushed( (void *)descriptor, allocation->offset_start, pages_allocated *_MALI_OSK_CPU_PAGE_SIZE );
+ allocation->num_pages = pages_allocated;
+ allocation->engine = engine; /* Necessary to make the engine's unmap call */
+ allocation->descriptor = descriptor; /* Necessary to make the engine's unmap call */
+ info->num_pages_allocated += pages_allocated;
+
+ MALI_DEBUG_PRINT(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ alloc_info->ctx = info;
+ alloc_info->handle = allocation;
+ alloc_info->release = os_allocator_release;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Releasing pages array due to no pages allocated\n"));
+ _mali_osk_free( allocation );
+ }
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+/* Release callback installed by os_allocator_allocate(): returns the pages to
+ * the allocator's budget, unmaps them via the engine, and frees the tracker.
+ * Silently aborts (with a debug print) if the mutex cannot be taken. */
+static void os_allocator_release(void * ctx, void * handle)
+{
+ os_allocator * info;
+ os_allocation * allocation;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (os_allocator*)ctx;
+ allocation = (os_allocation*)handle;
+ engine = allocation->engine;
+ descriptor = allocation->descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER( engine );
+ MALI_DEBUG_ASSERT_POINTER( descriptor );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(6, ("Releasing %d os pages\n", allocation->num_pages));
+
+ MALI_DEBUG_ASSERT( allocation->num_pages <= info->num_pages_allocated);
+ info->num_pages_allocated -= allocation->num_pages;
+
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*allocation->num_pages, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free(allocation);
+}
+
+/* Allocate a physically contiguous block of IO memory for Mali page tables.
+ * Tries a 2^allocation_order page block, shrinking the order first to respect
+ * the page budget and then again on each allocation failure, down to a single
+ * page. Fills in the block descriptor (release fn, handle = order, size,
+ * mali-space phys base, CPU mapping) on success. */
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ int allocation_order = 11; /* _MALI_OSK_CPU_PAGE_SIZE << 11 */
+ void *virt = NULL;
+ u32 size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+ os_allocator * info;
+
+ u32 cpu_phys_base;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ info = (os_allocator*)ctx;
+
+ /* Ensure we don't allocate more than we're supposed to from the ctx */
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ /* if the number of pages to be requested lead to exceeding the memory
+ * limit in info->num_pages_max, reduce the size that is to be requested. */
+ while ( (info->num_pages_allocated + (1 << allocation_order) > info->num_pages_max)
+ && _mali_osk_mem_check_allocated(info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE) )
+ {
+ if ( allocation_order > 0 ) {
+ --allocation_order;
+ } else {
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+ }
+
+ /* try to allocate 2^(allocation_order) pages, if that fails, try
+ * allocation_order-1 to allocation_order 0 (inclusive) */
+ while ( allocation_order >= 0 )
+ {
+ size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+ virt = _mali_osk_mem_allocioregion( &cpu_phys_base, size );
+
+ if (NULL != virt) break;
+
+ --allocation_order;
+ }
+
+ if ( NULL == virt )
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate consistent memory. Is CONSISTENT_DMA_SIZE set too low?\n"));
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+
+ MALI_DEBUG_PRINT(5, ("os_allocator_allocate_page_table_block: Allocation of order %i succeeded\n",
+ allocation_order));
+
+ /* we now know the size of the allocation since we know for what
+ * allocation_order the allocation succeeded */
+ size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+
+
+ block->release = os_allocator_page_table_block_release;
+ block->ctx = ctx;
+ block->handle = (void*)allocation_order; /* order smuggled through the handle */
+ block->size = size;
+ block->phys_base = cpu_phys_base - info->cpu_usage_adjust; /* CPU -> mali phys */
+ block->mapping = virt;
+
+ info->num_pages_allocated += (1 << allocation_order);
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+/* Release a page table block allocated above: recover the order from the
+ * handle, return the pages to the budget, and free the IO region (converting
+ * the stored mali physical base back to a CPU physical address first). */
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block )
+{
+ os_allocator * info;
+ u32 allocation_order;
+ u32 pages_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (os_allocator*)page_table_block->ctx;
+
+ MALI_DEBUG_ASSERT_POINTER( info );
+
+ allocation_order = (u32)page_table_block->handle;
+
+ pages_allocated = 1 << allocation_order;
+
+ MALI_DEBUG_ASSERT( pages_allocated * _MALI_OSK_CPU_PAGE_SIZE == page_table_block->size );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_ASSERT( pages_allocated <= info->num_pages_allocated);
+ info->num_pages_allocated -= pages_allocated;
+
+ /* Adjust phys_base from mali physical address to CPU physical address */
+ _mali_osk_mem_freeioregion( page_table_block->phys_base + info->cpu_usage_adjust, page_table_block->size, page_table_block->mapping );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.h b/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.h
new file mode 100644
index 0000000..0946169
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_OS_H__
+#define __MALI_KERNEL_MEM_OS_H__
+
+/**
+ * @brief Creates an object that manages allocating OS memory
+ *
+ * Creates an object that provides an interface to allocate OS memory and
+ * have it mapped into the Mali virtual memory space.
+ *
+ * The object exposes pointers to
+ * - allocate OS memory
+ * - allocate Mali page tables in OS memory
+ * - destroy the object
+ *
+ * Allocations from OS memory are of type mali_physical_memory_allocation
+ * which provides a function to release the allocation.
+ *
+ * @param max_allocation max. number of bytes that can be allocated from OS memory
+ * @param cpu_usage_adjust value to add to mali physical addresses to obtain CPU physical addresses
+ * @param name description of the allocator
+ * @return pointer to mali_physical_memory_allocator object. NULL on failure.
+ **/
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name);
+
+#endif /* __MALI_KERNEL_MEM_OS_H__ */
+
+
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.c b/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.c
new file mode 100644
index 0000000..ff105a4
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+/* An allocation engine is simply a pair of address space managers: one for
+ * the Mali (GPU) virtual address space and one for the process (CPU) side. */
+typedef struct memory_engine
+{
+ mali_kernel_mem_address_manager * mali_address; /* GPU-side address manager */
+ mali_kernel_mem_address_manager * process_address; /* CPU/user-side address manager */
+} memory_engine;
+
+/* Create an allocation engine from the two address managers. In debug builds,
+ * asserts the managers implement the callbacks the engine will invoke (the
+ * Mali manager may omit unmap_physical; the process manager may not).
+ * Returns NULL on allocation failure. */
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
+{
+ memory_engine * engine;
+
+ /* Mali Address Manager need not support unmap_physical */
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);
+
+ /* Process Address Manager must support unmap_physical for OS allocation
+ * error path handling */
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);
+
+
+ engine = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
+ if (NULL == engine) return NULL;
+
+ engine->mali_address = mali_address_manager;
+ engine->process_address = process_address_manager;
+
+ return (mali_allocation_engine)engine;
+}
+
+/* Free the engine object. The address managers themselves are owned by the
+ * caller and are not touched here. */
+void mali_allocation_engine_destroy(mali_allocation_engine engine)
+{
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ _mali_osk_free(engine);
+}
+
+/* Allocate backing memory for a descriptor: reserve Mali (and optionally
+ * process) address space, then walk the chain of physical allocators until the
+ * whole range is committed. FINISHED from an allocator completes the request
+ * (descriptor is then added to tracking_list, if given). PARTIAL chains a new
+ * allocation tracker and falls through to the next allocator; NONE just moves
+ * on. On failure every partial allocation is released, tracker nodes are
+ * freed, and both address space reservations are undone. */
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_allocators);
+ /* ASSERT that the list member has been initialized, even if it won't be
+ * used for tracking. We need it to be initialized to see if we need to
+ * delete it from a list in the release function. */
+ MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );
+
+ if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
+ {
+ _mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ res = engine->process_address->allocate(descriptor);
+ }
+ if ( _MALI_OSK_ERR_OK == res )
+ {
+ /* address space setup OK, commit physical memory to the allocation */
+ mali_physical_memory_allocator * active_allocator = physical_allocators;
+ struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
+ u32 offset = 0;
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ if ( NULL != tracking_list )
+ {
+ /* Insert into the memory session list */
+ /* ASSERT that it is not already part of a list */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
+ _mali_osk_list_add( &descriptor->list, tracking_list );
+ }
+
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* reuse current active_allocation_tracker */
+ MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ if (NULL != active_allocator->next)
+ {
+ /* need a new allocation tracker */
+ active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
+ if (NULL != active_allocation_tracker->next)
+ {
+ active_allocation_tracker = active_allocation_tracker->next;
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ /* reaching this point means no allocator finished the request */
+ MALI_PRINT(("Memory allocate failed, could not allocate size %d kB.\n", descriptor->size/1024));
+
+ /* allocation failure, start cleanup */
+ /* loop over any potential partial allocations */
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ /* handle blank trackers which will show up during failure */
+ if (NULL != active_allocation_tracker->release)
+ {
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ }
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ /* release the address spaces */
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+ }
+ engine->mali_address->release(descriptor);
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+/* Undo mali_allocation_engine_allocate_memory(): unlink the descriptor from
+ * any tracking list, release the Mali address range, run every physical
+ * allocation's release callback, free chained tracker nodes, and finally
+ * release the process address range if the descriptor was mapped to userspace. */
+void mali_allocation_engine_release_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+ mali_physical_memory_allocation * active_allocation_tracker;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* Determine whether we need to remove this from a tracking list */
+ if ( ! _mali_osk_list_empty( &descriptor->list ) )
+ {
+ _mali_osk_list_del( &descriptor->list );
+ /* Clear the list for debug mode, catch use-after-free */
+ MALI_DEBUG_CODE( descriptor->list.next = descriptor->list.prev = NULL; )
+ }
+
+ engine->mali_address->release(descriptor);
+
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ MALI_DEBUG_ASSERT_POINTER(active_allocation_tracker->release);
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+}
+
+
+/* Map 'size' bytes of physical memory at 'offset' into the descriptor's
+ * address spaces: process side first (it may allocate the page itself when
+ * phys is the OS_ALLOCATED magic, returning the real CPU address in-place),
+ * then the Mali side using the mali physical address. cpu_usage_adjust is the
+ * delta between the two physical address views. On Mali-side failure the
+ * process-side mapping made here is rolled back. */
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size)
+{
+ _mali_osk_errcode_t err;
+ memory_engine * engine = (memory_engine*)mem_engine;
+ _mali_osk_mem_mapregion_flags_t unmap_flags = (_mali_osk_mem_mapregion_flags_t)0;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X\n", phys, size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address->map_physical);
+
+ /* Handle process address manager first, because we may need them to
+ * allocate the physical page */
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Handle OS-allocated specially, since an adjustment may be required */
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == phys )
+ {
+ MALI_DEBUG_ASSERT( _MALI_OSK_CPU_PAGE_SIZE == size );
+
+ /* Set flags to use on error path */
+ unmap_flags |= _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR;
+
+ /* map_physical overwrites 'phys' with the CPU address of the page it allocated */
+ err = engine->process_address->map_physical(descriptor, offset, &phys, size);
+ /* Adjust for cpu physical address to mali physical address */
+ phys -= cpu_usage_adjust;
+ }
+ else
+ {
+ u32 cpu_phys;
+ /* Adjust mali physical address to cpu physical address */
+ cpu_phys = phys + cpu_usage_adjust;
+ err = engine->process_address->map_physical(descriptor, offset, &cpu_phys, size);
+ }
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_ERROR( err );
+ }
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X to CPUVA 0x%08X\n", phys, size, offset, (u32)(descriptor->mapping) + offset));
+
+ /* Mali address manager must use the physical address - no point in asking
+ * it to allocate another one for us */
+ MALI_DEBUG_ASSERT( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC != phys );
+
+ err = engine->mali_address->map_physical(descriptor, offset, &phys, size);
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ MALI_DEBUG_PRINT( 2, ("Process address manager succeeded, but Mali Address manager failed for phys=0x%08X size=0x%08X, offset=0x%08X. Will unmap.\n", phys, size, offset));
+ engine->process_address->unmap_physical(descriptor, offset, size, unmap_flags);
+ }
+
+ MALI_ERROR( err );
+ }
+
+ MALI_SUCCESS;
+}
+
+/* Undo a mapping made by mali_allocation_engine_map_physical: unmap from the
+ * process (CPU) side when the allocation was mapped into userspace, then from
+ * the Mali side if the Mali address manager provides an unmap hook. */
+void mali_allocation_engine_unmap_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags )
+{
+	memory_engine * engine = (memory_engine*)mem_engine;
+
+	MALI_DEBUG_ASSERT_POINTER(engine);
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+	MALI_DEBUG_PRINT(7, ("UnMapping length 0x%08X at offset 0x%08X\n", size, offset));
+
+	MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+	MALI_DEBUG_ASSERT_POINTER(engine->process_address);
+
+	if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+	{
+		/* Mandatory for process_address manager to have an unmap function */
+		engine->process_address->unmap_physical( descriptor, offset, size, unmap_flags );
+	}
+
+	/* Optional for mali_address manager to have an unmap function */
+	if ( NULL != engine->mali_address->unmap_physical )
+	{
+		engine->mali_address->unmap_physical( descriptor, offset, size, unmap_flags );
+	}
+}
+
+
+/* Allocate a block of MMU page tables by trying each allocator in the
+ * 'physical_provider' chain in order. Returns success (via MALI_SUCCESS) on
+ * the first allocator that fully satisfies the request; returns
+ * _MALI_OSK_ERR_FAULT when the chain is exhausted or an allocator reports an
+ * internal failure. MALI_MEM_ALLOC_PARTIAL is invalid for page tables and is
+ * treated as a failure. The 'engine' parameter is unused here. */
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider)
+{
+	mali_physical_memory_allocator * active_allocator = physical_provider;
+
+	MALI_DEBUG_ASSERT_POINTER(descriptor);
+	MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+	while ( NULL != active_allocator )
+	{
+		switch (active_allocator->allocate_page_table_block(active_allocator->ctx, descriptor))
+		{
+			case MALI_MEM_ALLOC_FINISHED:
+				MALI_SUCCESS; /* all done */
+			case MALI_MEM_ALLOC_NONE:
+				/* try next */
+				MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate PageTables: No allocation on %s, resorting to %s\n",
+						      ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+						      ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+				active_allocator = active_allocator->next;
+				break;
+			case MALI_MEM_ALLOC_PARTIAL:
+				MALI_DEBUG_PRINT(1, ("Invalid return value from allocate_page_table_block call: MALI_MEM_ALLOC_PARTIAL\n"));
+				/* FALL THROUGH */
+			case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+				MALI_DEBUG_PRINT(1, ("Aborting due to allocation failure\n"));
+				active_allocator = NULL; /* end the while loop */
+				break;
+		}
+	}
+
+	MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+
+/* Log (at debug level 1) the configured physical memory allocators in the
+ * order of preference they will be tried, one line per allocator. */
+void mali_allocation_engine_report_allocators( mali_physical_memory_allocator * physical_provider )
+{
+	mali_physical_memory_allocator * active_allocator = physical_provider;
+	MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+	MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest numbered first) :\n"));
+	while ( NULL != active_allocator )
+	{
+		if ( NULL != active_allocator->name )
+		{
+			MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", active_allocator->alloc_order, active_allocator->name) );
+		}
+		else
+		{
+			MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", active_allocator->alloc_order) );
+		}
+		active_allocator = active_allocator->next;
+	}
+
+}
+
+/* Sum the memory usage reported by every allocator in the chain. Allocators
+ * without a stat callback contribute nothing to the total. */
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator)
+{
+	u32 sum = 0;
+	while(NULL != allocator)
+	{
+		/* Only count allocators that have set up a stat function. */
+		if(allocator->stat)
+			sum += allocator->stat(allocator);
+
+		allocator = allocator->next;
+	}
+
+	return sum;
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.h b/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.h
new file mode 100644
index 0000000..0173c78
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEMORY_ENGINE_H__
+#define __MALI_KERNEL_MEMORY_ENGINE_H__
+
+typedef void * mali_allocation_engine;
+
+typedef enum { MALI_MEM_ALLOC_FINISHED, MALI_MEM_ALLOC_PARTIAL, MALI_MEM_ALLOC_NONE, MALI_MEM_ALLOC_INTERNAL_FAILURE } mali_physical_memory_allocation_result;
+
+typedef struct mali_physical_memory_allocation
+{
+ void (*release)(void * ctx, void * handle); /**< Function to call on to release the physical memory */
+ void * ctx;
+ void * handle;
+ struct mali_physical_memory_allocation * next;
+} mali_physical_memory_allocation;
+
+struct mali_page_table_block;
+
+typedef struct mali_page_table_block
+{
+	void (*release)(struct mali_page_table_block *page_table_block);
+	void * ctx;
+	void * handle;
+	u32 size; /**< In bytes, should be a multiple of MALI_MMU_PAGE_SIZE to avoid internal fragmentation */
+	u32 phys_base; /**< Mali physical address */
+	mali_io_address mapping;
+} mali_page_table_block;
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+typedef enum
+{
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE = 0x1,
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE = 0x2,
+} mali_memory_allocation_flag;
+
+/**
+ * Supplying this 'magic' physical address requests that the OS allocate the
+ * physical address at page commit time, rather than committing a specific page
+ */
+#define MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC ((u32)(-1))
+
+typedef struct mali_memory_allocation
+{
+ /* Information about the allocation */
+ void * mapping; /**< CPU virtual address where the memory is mapped at */
+ u32 mali_address; /**< The Mali seen address of the memory allocation */
+ u32 size; /**< Size of the allocation */
+ u32 permission; /**< Permission settings */
+ mali_memory_allocation_flag flags;
+
+ _mali_osk_lock_t * lock;
+
+ /* Manager specific information pointers */
+ void * mali_addr_mapping_info; /**< Mali address allocation specific info */
+ void * process_addr_mapping_info; /**< Mapping manager specific info */
+
+ mali_physical_memory_allocation physical_allocation;
+
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+} mali_memory_allocation;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+typedef struct mali_physical_memory_allocator
+{
+ mali_physical_memory_allocation_result (*allocate)(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+ mali_physical_memory_allocation_result (*allocate_page_table_block)(void * ctx, mali_page_table_block * block); /* MALI_MEM_ALLOC_PARTIAL not allowed */
+ void (*destroy)(struct mali_physical_memory_allocator * allocator);
+ u32 (*stat)(struct mali_physical_memory_allocator * allocator);
+ void * ctx;
+ const char * name; /**< Descriptive name for use in mali_allocation_engine_report_allocators, or NULL */
+ u32 alloc_order; /**< Order in which the allocations should happen */
+ struct mali_physical_memory_allocator * next;
+} mali_physical_memory_allocator;
+
+typedef struct mali_kernel_mem_address_manager
+{
+ _mali_osk_errcode_t (*allocate)(mali_memory_allocation *); /**< Function to call to reserve an address */
+ void (*release)(mali_memory_allocation *); /**< Function to call to free the address allocated */
+
+ /**
+ * Function called for each physical sub allocation.
+ * Called for each physical block allocated by the physical memory manager.
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] off Offset from the start of range
+ * @param[in,out] phys_addr A pointer to the physical address of the start of the
+ * physical block. When *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC
+ * is used, this requests the function to allocate the physical page
+ * itself, and return it through the pointer provided.
+ * @param[in] size Length in bytes of the physical block
+ * @return _MALI_OSK_ERR_OK on success.
+ * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+ * Specifically, _MALI_OSK_ERR_UNSUPPORTED indicates that the function
+ * does not support allocating physical pages itself.
+ */
+ _mali_osk_errcode_t (*map_physical)(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+
+ /**
+ * Function called to remove a physical sub allocation.
+ * Called on error paths where one of the address managers fails.
+ *
+ * @note this is optional. For address managers where this is not
+ * implemented, the value of this member is NULL. The memory engine
+ * currently does not require the mali address manager to be able to
+ * unmap individual pages, but the process address manager must have this
+ * capability.
+ *
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] off Offset from the start of range
+ * @param[in] size Length in bytes of the physical block
+ * @param[in] flags flags to use on a per-page basis. For OS-allocated
+ * physical pages, this must include _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR.
+ * @return _MALI_OSK_ERR_OK on success.
+ * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+ */
+ void (*unmap_physical)(mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags);
+
+} mali_kernel_mem_address_manager;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager);
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine);
+
+int mali_allocation_engine_allocate_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_provider, _mali_osk_list_t *tracking_list );
+void mali_allocation_engine_release_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+int mali_allocation_engine_map_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size);
+void mali_allocation_engine_unmap_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags);
+
+int mali_allocation_engine_allocate_page_tables(mali_allocation_engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider);
+
+void mali_allocation_engine_report_allocators(mali_physical_memory_allocator * physical_provider);
+
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator);
+
+#endif /* __MALI_KERNEL_MEMORY_ENGINE_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_pp.h b/drivers/media/video/samsung/mali/common/mali_kernel_pp.h
new file mode 100644
index 0000000..8cf7bf7
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_pp.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PP_H__
+#define __MALI_KERNEL_PP_H__
+
+extern struct mali_kernel_subsystem mali_subsystem_mali200;
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t malipp_signal_power_up( u32 core_num, mali_bool queue_only );
+_mali_osk_errcode_t malipp_signal_power_down( u32 core_num, mali_bool immediate_only );
+#endif
+
+#endif /* __MALI_KERNEL_PP_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_profiling.c b/drivers/media/video/samsung/mali/common/mali_kernel_profiling.c
new file mode 100644
index 0000000..ca04b5f
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_profiling.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_timestamp.h"
+#include "mali_kernel_profiling.h"
+#include "mali_linux_trace.h"
+
+typedef struct mali_profiling_entry
+{
+ u64 timestamp;
+ u32 event_id;
+ u32 data[5];
+} mali_profiling_entry;
+
+
+typedef enum mali_profiling_state
+{
+ MALI_PROFILING_STATE_UNINITIALIZED,
+ MALI_PROFILING_STATE_IDLE,
+ MALI_PROFILING_STATE_RUNNING,
+ MALI_PROFILING_STATE_RETURN,
+} mali_profiling_state;
+
+
+static _mali_osk_lock_t *lock = NULL;
+static mali_profiling_state prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+static mali_profiling_entry* profile_entries = NULL;
+static u32 profile_entry_count = 0;
+static _mali_osk_atomic_t profile_insert_index;
+static _mali_osk_atomic_t profile_entries_written;
+static mali_bool mali_profiling_default_enable = MALI_FALSE;
+
+_mali_osk_errcode_t _mali_profiling_init(mali_bool auto_start)
+{
+ profile_entries = NULL;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+
+ lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ if (NULL == lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+
+ if (MALI_TRUE == auto_start)
+ {
+ u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* Use maximum buffer size */
+
+ mali_profiling_default_enable = MALI_TRUE; /* save this so user space can query this on their startup */
+ if (_MALI_OSK_ERR_OK != _mali_profiling_start(&limit))
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Tear down the profiling module: stop accepting events, wait for in-flight
+ * writers to finish, then free the capture buffer and the lock. */
+void _mali_profiling_term(void)
+{
+	prof_state = MALI_PROFILING_STATE_UNINITIALIZED;
+
+	/* wait for all elements to be completely inserted into array */
+	/* NOTE(review): unsynchronized busy-wait with no cpu_relax/barrier; this
+	 * relies on the _mali_osk_atomic_* ops providing ordering - confirm the
+	 * OSK atomic semantics */
+	while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+	{
+		/* do nothing */;
+	}
+
+	if (NULL != profile_entries)
+	{
+		_mali_osk_vfree(profile_entries);
+		profile_entries = NULL;
+	}
+
+	if (NULL != lock)
+	{
+		_mali_osk_lock_term(lock);
+		lock = NULL;
+	}
+}
+
+/* Begin recording profiling events. On entry *limit is the requested capture
+ * buffer size in entries; on exit it holds the value actually used (clamped
+ * to MALI_PROFILING_MAX_BUFFER_ENTRIES). Only valid in the IDLE state. */
+inline _mali_osk_errcode_t _mali_profiling_start(u32 * limit)
+{
+	_mali_osk_errcode_t ret;
+
+	/* Allocate before taking the (spin)lock - presumably _mali_osk_valloc may
+	 * block. NOTE(review): the buffer is sized from the UNCLAMPED *limit, but
+	 * *limit is only clamped to MALI_PROFILING_MAX_BUFFER_ENTRIES below, so an
+	 * oversized request over-allocates - consider clamping first */
+	mali_profiling_entry *new_profile_entries = _mali_osk_valloc(*limit * sizeof(mali_profiling_entry));
+
+	if(NULL == new_profile_entries)
+	{
+		return _MALI_OSK_ERR_NOMEM;
+	}
+
+	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (prof_state != MALI_PROFILING_STATE_IDLE)
+	{
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		_mali_osk_vfree(new_profile_entries);
+		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+	}
+
+	if (*limit > MALI_PROFILING_MAX_BUFFER_ENTRIES)
+	{
+		*limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+	}
+
+	profile_entries = new_profile_entries;
+	profile_entry_count = *limit;
+
+	ret = _mali_timestamp_reset();
+
+	if (ret == _MALI_OSK_ERR_OK)
+	{
+		prof_state = MALI_PROFILING_STATE_RUNNING;
+	}
+	else
+	{
+		/* Timestamp source failed: discard the buffer and stay IDLE */
+		_mali_osk_vfree(profile_entries);
+		profile_entries = NULL;
+	}
+
+	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+	return ret;
+}
+
+inline void _mali_profiling_add_counter(u32 event_id, u32 data0)
+{
+#if MALI_TRACEPOINTS_ENABLED
+ _mali_osk_profiling_add_counter(event_id, data0);
+#endif
+}
+
+/* Record one profiling event. A slot is reserved lock-free with an atomic
+ * increment of profile_insert_index; if profiling is not running or the
+ * buffer is full the reservation is released again and _MALI_OSK_ERR_FAULT is
+ * returned. profile_entries_written is bumped only after the entry is fully
+ * populated, which is what the drain loops in term/stop wait on. */
+inline _mali_osk_errcode_t _mali_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4)
+{
+	u32 cur_index = _mali_osk_atomic_inc_return(&profile_insert_index) - 1;
+
+#if MALI_TRACEPOINTS_ENABLED
+	/* Mirror the event to the tracepoint sink (only event_id and data0) */
+	_mali_osk_profiling_add_event(event_id, data0);
+#endif
+
+	if (prof_state != MALI_PROFILING_STATE_RUNNING || cur_index >= profile_entry_count)
+	{
+		/*
+		 * Not in recording mode, or buffer is full
+		 * Decrement index again, and early out
+		 */
+		_mali_osk_atomic_dec(&profile_insert_index);
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	profile_entries[cur_index].timestamp = _mali_timestamp_get();
+	profile_entries[cur_index].event_id = event_id;
+	profile_entries[cur_index].data[0] = data0;
+	profile_entries[cur_index].data[1] = data1;
+	profile_entries[cur_index].data[2] = data2;
+	profile_entries[cur_index].data[3] = data3;
+	profile_entries[cur_index].data[4] = data4;
+
+	_mali_osk_atomic_inc(&profile_entries_written);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+#if MALI_TRACEPOINTS_ENABLED
+/*
+ * The following code uses a bunch of magic numbers taken from the userspace
+ * side of the DDK; they are re-used here verbatim. They are taken from the
+ * file mali_instrumented_counter_types.h.
+ */
+#define MALI_GLES_COUNTER_OFFSET 1000
+#define MALI_VG_COUNTER_OFFSET 2000
+#define MALI_EGL_COUNTER_OFFSET 3000
+#define MALI_SHARED_COUNTER_OFFSET 4000
+
+/* These offsets are derived from the gator driver; see gator_events_mali.c. */
+#define GATOR_EGL_COUNTER_OFFSET 17
+#define GATOR_GLES_COUNTER_OFFSET 18
+
+_mali_osk_errcode_t _mali_ukk_transfer_sw_counters(_mali_uk_sw_counters_s *args)
+{
+ /* Convert the DDK counter ID to what gator expects */
+ unsigned int gator_counter_value = 0;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (args->id >= MALI_EGL_COUNTER_OFFSET && args->id <= MALI_SHARED_COUNTER_OFFSET)
+ {
+ gator_counter_value = (args->id - MALI_EGL_COUNTER_OFFSET) + GATOR_EGL_COUNTER_OFFSET;
+ }
+ else if (args->id >= MALI_GLES_COUNTER_OFFSET && args->id <= MALI_VG_COUNTER_OFFSET)
+ {
+ gator_counter_value = (args->id - MALI_GLES_COUNTER_OFFSET) + GATOR_GLES_COUNTER_OFFSET;
+ }
+ else
+ {
+ /* Pass it straight through; gator will ignore it anyway. */
+ gator_counter_value = args->id;
+ }
+
+ trace_mali_sw_counter(gator_counter_value, args->value);
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+ return _MALI_OSK_ERR_OK;
+}
+#endif
+
+/* Stop recording: move to the RETURN state so no new events are accepted,
+ * wait for in-flight writers to complete, and report the event count. */
+inline _mali_osk_errcode_t _mali_profiling_stop(u32 * count)
+{
+	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (prof_state != MALI_PROFILING_STATE_RUNNING)
+	{
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+	}
+
+	/* go into return state (user to retrieve events), no more events will be added after this */
+	prof_state = MALI_PROFILING_STATE_RETURN;
+
+	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+	/* wait for all elements to be completely inserted into array */
+	while (_mali_osk_atomic_read(&profile_insert_index) != _mali_osk_atomic_read(&profile_entries_written))
+	{
+		/* do nothing */;
+	}
+
+	*count = _mali_osk_atomic_read(&profile_insert_index);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+inline u32 _mali_profiling_get_count(void)
+{
+ u32 retval = 0;
+
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if (prof_state == MALI_PROFILING_STATE_RETURN)
+ {
+ retval = _mali_osk_atomic_read(&profile_entries_written);
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+
+ return retval;
+}
+
+inline _mali_osk_errcode_t _mali_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ if (index >= _mali_osk_atomic_read(&profile_entries_written))
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ *timestamp = profile_entries[index].timestamp;
+ *event_id = profile_entries[index].event_id;
+ data[0] = profile_entries[index].data[0];
+ data[1] = profile_entries[index].data[1];
+ data[2] = profile_entries[index].data[2];
+ data[3] = profile_entries[index].data[3];
+ data[4] = profile_entries[index].data[4];
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
+inline _mali_osk_errcode_t _mali_profiling_clear(void)
+{
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (prof_state != MALI_PROFILING_STATE_RETURN)
+ {
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_INVALID_ARGS; /* invalid to call this function in this state */
+ }
+
+ prof_state = MALI_PROFILING_STATE_IDLE;
+ profile_entry_count = 0;
+ _mali_osk_atomic_init(&profile_insert_index, 0);
+ _mali_osk_atomic_init(&profile_entries_written, 0);
+ if (NULL != profile_entries)
+ {
+ _mali_osk_vfree(profile_entries);
+ profile_entries = NULL;
+ }
+
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return _MALI_OSK_ERR_OK;
+}
+
+mali_bool _mali_profiling_is_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RUNNING ? MALI_TRUE : MALI_FALSE;
+}
+
+mali_bool _mali_profiling_have_recording(void)
+{
+ return prof_state == MALI_PROFILING_STATE_RETURN ? MALI_TRUE : MALI_FALSE;
+}
+
+void _mali_profiling_set_default_enable_state(mali_bool enable)
+{
+ mali_profiling_default_enable = enable;
+}
+
+mali_bool _mali_profiling_get_default_enable_state(void)
+{
+ return mali_profiling_default_enable;
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
+{
+ return _mali_profiling_start(&args->limit);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
+{
+ /* Always add process and thread identificator in the first two data elements for events from user space */
+ return _mali_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
+{
+ return _mali_profiling_stop(&args->count);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
+{
+ return _mali_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
+{
+ return _mali_profiling_clear();
+}
+
+_mali_osk_errcode_t _mali_ukk_profiling_get_config(_mali_uk_profiling_get_config_s *args)
+{
+ args->enable_events = mali_profiling_default_enable;
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_profiling.h b/drivers/media/video/samsung/mali/common/mali_kernel_profiling.h
new file mode 100644
index 0000000..eb1e6c2
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_profiling.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PROFILING_H__
+#define __MALI_KERNEL_PROFILING_H__
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+
+#include "cinstr/mali_cinstr_profiling_events_m200.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_profiling_term(void);
+
+/** Add a counter event
+ * @param event_id - Magic counter id
+ * @param data0 - Value of counter
+ */
+void _mali_profiling_add_counter(u32 event_id, u32 data0);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_start(u32 * limit);
+
+/**
+ * Add an profiling event
+ *
+ * @param event_id The event identificator.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_stop(u32 * count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_profiling_have_recording(void);
+
+/**
+ * Enable or disable profiling events as default for new sessions (applications)
+ *
+ * @param enable MALI_TRUE if profiling events should be turned on, otherwise MALI_FALSE
+ */
+void _mali_profiling_set_default_enable_state(mali_bool enable);
+
+/**
+ * Get current default enable state for new sessions (applications)
+ *
+ * @return MALI_TRUE if profiling events should be turned on, otherwise MALI_FALSE
+ */
+mali_bool _mali_profiling_get_default_enable_state(void);
+
+#endif /* MALI_TIMELINE_PROFILING_ENABLED */
+
+#endif /* __MALI_KERNEL_PROFILING_H__ */
+
+
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_rendercore.c b/drivers/media/video/samsung/mali/common/mali_kernel_rendercore.c
new file mode 100644
index 0000000..cfc5ec1
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_rendercore.c
@@ -0,0 +1,2031 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_rendercore.h"
+#include "mali_osk_list.h"
+#if MALI_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+#if USING_MMU
+#include "mali_kernel_mem_mmu.h"
+#endif /* USING_MMU */
+#if defined USING_MALI400_L2_CACHE
+#include "mali_kernel_l2_cache.h"
+#endif /* USING_MALI400_L2_CACHE */
+
+#define HANG_CHECK_MSECS_MIN 100
+#define HANG_CHECK_MSECS_MAX 2000 /* 2 secs */
+#define HANG_CHECK_MSECS_DEFAULT 500 /* 500 ms */
+
+#define WATCHDOG_MSECS_MIN (10*HANG_CHECK_MSECS_MIN)
+#define WATCHDOG_MSECS_MAX 3600000 /* 1 hour */
+#define WATCHDOG_MSECS_DEFAULT 4000 /* 4 secs */
+
+/* max value that will be converted from jiffies to micro seconds and written to job->render_time_usecs */
+#define JOB_MAX_JIFFIES 100000
+
+int mali_hang_check_interval = HANG_CHECK_MSECS_DEFAULT;
+int mali_max_job_runtime = WATCHDOG_MSECS_DEFAULT;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+int mali_boot_profiling = 0;
+#endif
+
+#ifdef MALI_REBOOTNOTIFIER
+extern _mali_osk_atomic_t mali_shutdown_state;
+#endif
+
+/* Subsystem entrypoints: */
+static _mali_osk_errcode_t rendercore_subsystem_startup(mali_kernel_subsystem_identifier id);
+static void rendercore_subsystem_terminate(mali_kernel_subsystem_identifier id);
+#if USING_MMU
+static void rendercore_subsystem_broadcast_notification(mali_core_notification_message message, u32 data);
+#endif
+
+
+static void mali_core_subsystem_cleanup_all_renderunits(struct mali_core_subsystem* subsys);
+static void mali_core_subsystem_move_core_set_idle(struct mali_core_renderunit *core);
+
+static mali_core_session * mali_core_subsystem_get_waiting_session(mali_core_subsystem *subsystem);
+static mali_core_job * mali_core_subsystem_release_session_get_job(mali_core_subsystem *subsystem, mali_core_session * session);
+
+static void find_and_abort(mali_core_session* session, u32 abort_id);
+
+static void mali_core_job_start_on_core(mali_core_job *job, mali_core_renderunit *core);
+#if USING_MMU
+static void mali_core_subsystem_callback_schedule_wrapper(void* sub);
+#endif
+static void mali_core_subsystem_schedule(mali_core_subsystem*subsystem);
+static void mali_core_renderunit_detach_job_from_core(mali_core_renderunit* core, mali_subsystem_reschedule_option reschedule, mali_subsystem_job_end_code end_status);
+
+static void mali_core_renderunit_irq_handler_remove(struct mali_core_renderunit *core);
+
+static _mali_osk_errcode_t mali_core_irq_handler_upper_half (void * data);
+static void mali_core_irq_handler_bottom_half ( void *data );
+
+#if USING_MMU
+static void lock_subsystem(struct mali_core_subsystem * subsys);
+static void unlock_subsystem(struct mali_core_subsystem * subsys);
+#endif
+
+
+/**
+ * This will be one of the subsystems in the array of subsystems:
+ * static struct mali_kernel_subsystem * subsystems[];
+ * found in file: mali_kernel_core.c
+ *
+ * This subsystem is necessary for operations common to all rendercore
+ * subsystems. For example, mali_subsystem_mali200 and mali_subsystem_gp2 may
+ * share a mutex when RENDERCORES_USE_GLOBAL_MUTEX is non-zero.
+ */
+struct mali_kernel_subsystem mali_subsystem_rendercore=
+{
+ rendercore_subsystem_startup, /* startup */
+ NULL, /*rendercore_subsystem_terminate,*/ /* shutdown */
+ NULL, /* load_complete */
+ NULL, /* system_info_fill */
+ NULL, /* session_begin */
+ NULL, /* session_end */
+#if USING_MMU
+ rendercore_subsystem_broadcast_notification, /* broadcast_notification */
+#else
+ NULL,
+#endif
+#if MALI_STATE_TRACKING
+ NULL, /* dump_state */
+#endif
+} ;
+
+static _mali_osk_lock_t *rendercores_global_mutex = NULL;
+static u32 rendercores_global_mutex_is_held = 0;
+static u32 rendercores_global_mutex_owner = 0;
+
+/** The 'dummy' rendercore subsystem to allow global subsystem mutex to be
+ * locked for all subsystems that extend the ''rendercore'' */
+static mali_core_subsystem rendercore_dummy_subsystem = {0,};
+
+/*
+ * Rendercore Subsystem functions.
+ *
+ * These are exposed by mali_subsystem_rendercore
+ */
+
+/**
+ * @brief Initialize the Rendercore subsystem.
+ *
+ * This must be called before any other subsystem that extends the
+ * ''rendercore'' may be initialized. For example, this must be called before
+ * the following functions:
+ * - mali200_subsystem_startup(), from mali_subsystem_mali200
+ * - maligp_subsystem_startup(), from mali_subsystem_gp2
+ *
+ * @note This function is separate from mali_core_subsystem_init(). They
+ * are related, in that mali_core_subsystem_init() may use the structures
+ * initialized by rendercore_subsystem_startup()
+ */
+static _mali_osk_errcode_t rendercore_subsystem_startup(mali_kernel_subsystem_identifier id)
+{
+ rendercores_global_mutex_is_held = 0;
+ rendercores_global_mutex = _mali_osk_lock_init(
+ (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED),
+ 0, 129);
+
+ if (NULL == rendercores_global_mutex)
+ {
+ MALI_PRINT_ERROR(("Failed: _mali_osk_lock_init\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ rendercore_dummy_subsystem.name = "Rendercore Global Subsystem"; /* On the constant pool, do not free */
+ rendercore_dummy_subsystem.magic_nr = SUBSYSTEM_MAGIC_NR; /* To please the Subsystem Mutex code */
+
+#if MALI_GPU_UTILIZATION
+ if (mali_utilization_init() != _MALI_OSK_ERR_OK)
+ {
+ _mali_osk_lock_term(rendercores_global_mutex);
+ rendercores_global_mutex = NULL;
+ MALI_PRINT_ERROR(("Failed: mali_utilization_init\n")) ;
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ if (_mali_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE) != _MALI_OSK_ERR_OK)
+ {
+		/* No biggie if we weren't able to initialize the profiling */
+ MALI_PRINT_ERROR(("Rendercore: Failed to initialize profiling, feature will be unavailable\n")) ;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(2, ("Rendercore: subsystem global mutex initialized\n")) ;
+ MALI_SUCCESS;
+}
+
+/**
+ * @brief Terminate the Rendercore subsystem.
+ *
+ * This must only be called \b after any other subsystem that extends the
+ * ''rendercore'' has been terminated. For example, this must be called \b after
+ * the following functions:
+ * - mali200_subsystem_terminate(), from mali_subsystem_mali200
+ * - maligp_subsystem_terminate(), from mali_subsystem_gp2
+ *
+ * @note This function is separate from mali_core_subsystem_cleanup(), though,
+ * the subsystems that extend ''rendercore'' must still call
+ * mali_core_subsystem_cleanup() when they terminate.
+ */
+static void rendercore_subsystem_terminate(mali_kernel_subsystem_identifier id)
+{
+ /* Catch double-terminate */
+ MALI_DEBUG_ASSERT_POINTER( rendercores_global_mutex );
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_term();
+#endif
+
+#if MALI_GPU_UTILIZATION
+ mali_utilization_term();
+#endif
+
+ rendercore_dummy_subsystem.name = NULL; /* The original string was on the constant pool, do not free */
+ rendercore_dummy_subsystem.magic_nr = 0;
+
+ /* ASSERT that no-one's holding this */
+ MALI_DEBUG_PRINT_ASSERT( 0 == rendercores_global_mutex_is_held,
+ ("Rendercores' Global Mutex was held at termination time. Have the subsystems that extend ''rendercore'' been terminated?\n") );
+
+ _mali_osk_lock_term( rendercores_global_mutex );
+ rendercores_global_mutex = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Rendercore: subsystem global mutex terminated\n")) ;
+}
+
+
+#if USING_MMU
+/**
+ * @brief Handle certain Rendercore subsystem broadcast notifications
+ *
+ * When RENDERCORES_USE_GLOBAL_MUTEX is non-zero, this handles the following messages:
+ * - MMU_KILL_STEP0_LOCK_SUBSYSTEM
+ * - MMU_KILL_STEP4_UNLOCK_SUBSYSTEM
+ *
+ * The purpose is to manage the Rendercore Global Mutex, which cannot be
+ * managed by any system that extends the ''rendercore''.
+ *
+ * All other messages must be handled by mali_core_subsystem_broadcast_notification()
+ *
+ *
+ * When RENDERCORES_USE_GLOBAL_MUTEX is 0, this function does nothing.
+ * Instead, the subsystem that extends the ''rendercore'' \b must handle its
+ * own mutexes - refer to mali_core_subsystem_broadcast_notification().
+ *
+ * Used currently only for signalling when MMU has a pagefault
+ */
+static void rendercore_subsystem_broadcast_notification(mali_core_notification_message message, u32 data)
+{
+ switch(message)
+ {
+ case MMU_KILL_STEP0_LOCK_SUBSYSTEM:
+ lock_subsystem( &rendercore_dummy_subsystem );
+ break;
+ case MMU_KILL_STEP4_UNLOCK_SUBSYSTEM:
+ unlock_subsystem( &rendercore_dummy_subsystem );
+ break;
+
+ case MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES:
+ /** FALLTHROUGH */
+ case MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS:
+ /** FALLTHROUGH */
+ case MMU_KILL_STEP3_CONTINUE_JOB_HANDLING:
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Illegal message: 0x%x, data: 0x%x\n", (u32)message, data));
+ break;
+ }
+
+}
+#endif
+
+/*
+ * Functions inherited by the subsystems that extend the ''rendercore''.
+ */
+
+void mali_core_renderunit_timeout_function_hang_detection(void *arg)
+{
+ mali_bool action = MALI_FALSE;
+ mali_core_renderunit * core;
+
+ core = (mali_core_renderunit *) arg;
+ if( !core ) return;
+
+ /* if NOT idle OR NOT powered off OR has TIMED_OUT */
+ if ( !((CORE_WATCHDOG_TIMEOUT == core->state ) || (CORE_IDLE== core->state) || (CORE_OFF == core->state)) )
+ {
+ core->state = CORE_HANG_CHECK_TIMEOUT;
+ action = MALI_TRUE;
+ }
+
+ if(action) _mali_osk_irq_schedulework(core->irq);
+}
+
+
+void mali_core_renderunit_timeout_function(void *arg)
+{
+ mali_core_renderunit * core;
+ mali_bool is_watchdog;
+
+ core = (mali_core_renderunit *)arg;
+ if( !core ) return;
+
+ is_watchdog = MALI_TRUE;
+ if (mali_benchmark)
+ {
+ /* poll based core */
+ mali_core_job *job;
+ job = core->current_job;
+ if ( (NULL != job) &&
+ (0 != _mali_osk_time_after(job->watchdog_jiffies,_mali_osk_time_tickcount()))
+ )
+ {
+ core->state = CORE_POLL;
+ is_watchdog = MALI_FALSE;
+ }
+ }
+
+ if (is_watchdog)
+ {
+ MALI_DEBUG_PRINT(3, ("SW-Watchdog timeout: Core:%s\n", core->description));
+ core->state = CORE_WATCHDOG_TIMEOUT;
+ }
+
+ _mali_osk_irq_schedulework(core->irq);
+}
+
+/* Used by external renderunit_create<> function */
+_mali_osk_errcode_t mali_core_renderunit_init(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Core: renderunit_init: Core:%s\n", core->description));
+
+ _MALI_OSK_INIT_LIST_HEAD(&core->list) ;
+ core->timer = _mali_osk_timer_init();
+ if (NULL == core->timer)
+ {
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot init timer\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ _mali_osk_timer_setcallback(core->timer, mali_core_renderunit_timeout_function, (void *)core);
+
+ core->timer_hang_detection = _mali_osk_timer_init();
+ if (NULL == core->timer_hang_detection)
+ {
+ _mali_osk_timer_term(core->timer);
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot init hang detection timer\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ _mali_osk_timer_setcallback(core->timer_hang_detection, mali_core_renderunit_timeout_function_hang_detection, (void *)core);
+
+#if USING_MALI_PMM
+ /* Init no pending power downs */
+ core->pend_power_down = MALI_FALSE;
+
+ /* Register the core with the PMM - which powers it up */
+ if (_MALI_OSK_ERR_OK != malipmm_core_register( core->pmm_id ))
+ {
+ _mali_osk_timer_term(core->timer);
+ _mali_osk_timer_term(core->timer_hang_detection);
+ MALI_PRINT_ERROR(("Core: renderunit_init: Core:%s -- cannot register with PMM\n", core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif /* USING_MALI_PMM */
+
+ core->error_recovery = MALI_FALSE;
+ core->in_detach_function = MALI_FALSE;
+ core->state = CORE_IDLE;
+ core->current_job = NULL;
+ core->magic_nr = CORE_MAGIC_NR;
+#if USING_MMU
+ core->mmu = NULL;
+#endif /* USING_MMU */
+
+ MALI_SUCCESS;
+}
+
+void mali_core_renderunit_term(mali_core_renderunit * core)
+{
+ MALI_DEBUG_PRINT(5, ("Core: renderunit_term: Core:%s\n", core->description));
+
+ if (NULL != core->timer)
+ {
+ _mali_osk_timer_term(core->timer);
+ core->timer = NULL;
+ }
+ if (NULL != core->timer_hang_detection)
+ {
+ _mali_osk_timer_term(core->timer_hang_detection);
+ core->timer_hang_detection = NULL;
+ }
+
+#if USING_MALI_PMM
+ /* Unregister the core with the PMM */
+ malipmm_core_unregister( core->pmm_id );
+#endif
+}
+
+/* Used by external renderunit_create<> function */
+_mali_osk_errcode_t mali_core_renderunit_map_registers(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_map_registers: Core:%s\n", core->description)) ;
+ if( (0 == core->registers_base_addr) ||
+ (0 == core->size) ||
+ (NULL == core->description)
+ )
+ {
+ MALI_PRINT_ERROR(("Missing fields in the core structure %u %u 0x%x;\n", core->registers_base_addr, core->size, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(core->registers_base_addr, core->size, core->description))
+ {
+ MALI_PRINT_ERROR(("Could not request register region (0x%08X - 0x%08X) to core: %s\n",
+ core->registers_base_addr, core->registers_base_addr + core->size - 1, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Success: request_mem_region: (0x%08X - 0x%08X) Core:%s\n",
+ core->registers_base_addr, core->registers_base_addr + core->size - 1, core->description));
+ }
+
+ core->registers_mapped = _mali_osk_mem_mapioregion( core->registers_base_addr, core->size, core->description );
+
+ if ( 0 == core->registers_mapped )
+ {
+ MALI_PRINT_ERROR(("Could not ioremap registers for %s .\n", core->description));
+ _mali_osk_mem_unreqregion(core->registers_base_addr, core->size);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Success: ioremap_nocache: Internal ptr: (0x%08X - 0x%08X) Core:%s\n",
+ (u32) core->registers_mapped,
+ ((u32)core->registers_mapped)+ core->size - 1,
+ core->description));
+ }
+
+ MALI_DEBUG_PRINT(4, ("Success: Mapping registers to core: %s\n",core->description));
+
+ MALI_SUCCESS;
+}
+
+/* Used by external renderunit_create<> function + other places */
+void mali_core_renderunit_unmap_registers(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_unmap_registers: Core:%s\n", core->description));
+ if (0 == core->registers_mapped)
+ {
+ MALI_PRINT_ERROR(("Trying to unmap register-mapping with NULL from core: %s\n", core->description));
+ return;
+ }
+ _mali_osk_mem_unmapioregion(core->registers_base_addr, core->size, core->registers_mapped);
+ core->registers_mapped = 0;
+ _mali_osk_mem_unreqregion(core->registers_base_addr, core->size);
+}
+
+static void mali_core_renderunit_irq_handler_remove(mali_core_renderunit *core)
+{
+ MALI_DEBUG_PRINT(3, ("Core: renderunit_irq_handler_remove: Core:%s\n", core->description));
+ _mali_osk_irq_term(core->irq);
+}
+
+mali_core_renderunit * mali_core_renderunit_get_mali_core_nr(mali_core_subsystem *subsys, u32 mali_core_nr)
+{
+ mali_core_renderunit * core;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ if (subsys->number_of_cores <= mali_core_nr)
+ {
+ MALI_PRINT_ERROR(("Trying to get illegal mali_core_nr: 0x%x for %s", mali_core_nr, subsys->name));
+ return NULL;
+ }
+ core = (subsys->mali_core_array)[mali_core_nr];
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_get_mali_core_nr: Core:%s\n", core->description));
+ MALI_CHECK_CORE(core);
+ return core;
+}
+
+/* Is used by external function:
+ subsystem_startup<> */
+_mali_osk_errcode_t mali_core_subsystem_init(mali_core_subsystem* new_subsys)
+{
+ int i;
+
+ /* These function pointers must have been set on before calling this function */
+ if (
+ ( NULL == new_subsys->name ) ||
+ ( NULL == new_subsys->start_job ) ||
+ ( NULL == new_subsys->irq_handler_upper_half ) ||
+ ( NULL == new_subsys->irq_handler_bottom_half ) ||
+ ( NULL == new_subsys->get_new_job_from_user ) ||
+ ( NULL == new_subsys->return_job_to_user )
+ )
+ {
+ MALI_PRINT_ERROR(("Missing functions in subsystem."));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ MALI_DEBUG_PRINT(2, ("Core: subsystem_init: %s\n", new_subsys->name)) ;
+
+ /* Catch use-before-initialize/use-after-terminate */
+ MALI_DEBUG_ASSERT_POINTER( rendercores_global_mutex );
+
+ new_subsys->magic_nr = SUBSYSTEM_MAGIC_NR;
+
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->renderunit_idle_head); /* Idle cores of this type */
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->renderunit_off_head); /* Powered off cores of this type */
+
+	/* Linked list for each priority of sessions with a job ready for scheduling */
+ for(i=0; i<PRIORITY_LEVELS; ++i)
+ {
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->awaiting_sessions_head[i]);
+ }
+
+ /* Linked list of all sessions connected to this coretype */
+ _MALI_OSK_INIT_LIST_HEAD(&new_subsys->all_sessions_head);
+
+ MALI_SUCCESS;
+}
+
+#if USING_MMU
+void mali_core_subsystem_attach_mmu(mali_core_subsystem* subsys)
+{
+ u32 i;
+ mali_core_renderunit * core;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ if ( NULL==core ) break;
+ core->mmu = mali_memory_core_mmu_lookup(core->mmu_id);
+ mali_memory_core_mmu_owner(core,core->mmu);
+ MALI_DEBUG_PRINT(2, ("Attach mmu: 0x%x to core: %s in subsystem: %s\n", core->mmu, core->description, subsys->name));
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+}
+#endif
+
+/* This will register an IRQ handler, and add the core to the list of available cores for this subsystem. */
+_mali_osk_errcode_t mali_core_subsystem_register_renderunit(mali_core_subsystem* subsys, mali_core_renderunit * core)
+{
+ mali_core_renderunit ** mali_core_array;
+ u32 previous_nr;
+ u32 previous_size;
+ u32 new_nr;
+ u32 new_size;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+
+ /* If any of these are 0 there is an error */
+ if(0 == core->subsystem ||
+ 0 == core->registers_base_addr ||
+ 0 == core->size ||
+ 0 == core->description)
+ {
+ MALI_PRINT_ERROR(("Missing fields in the core structure 0x%x 0x%x 0x%x;\n",
+ core->registers_base_addr, core->size, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Core: subsystem_register_renderunit: %s\n", core->description));
+
+ MALI_CHECK_NON_NULL(
+ core->irq = _mali_osk_irq_init(
+ core->irq_nr,
+ mali_core_irq_handler_upper_half,
+ mali_core_irq_handler_bottom_half,
+ (_mali_osk_irq_trigger_t)subsys->probe_core_irq_trigger,
+ (_mali_osk_irq_ack_t)subsys->probe_core_irq_acknowledge,
+ core,
+ "mali_core_irq_handlers"
+ ),
+ _MALI_OSK_ERR_FAULT
+ );
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ /* Update which core number this is */
+ core->core_number = subsys->number_of_cores;
+
+ /* Update the array of cores in the subsystem. */
+ previous_nr = subsys->number_of_cores;
+ previous_size = sizeof(mali_core_renderunit*)*previous_nr;
+ new_nr = previous_nr + 1;
+ new_size = sizeof(mali_core_renderunit*)*new_nr;
+
+ if (0 != previous_nr)
+ {
+ if (NULL == subsys->mali_core_array)
+ {
+ MALI_PRINT_ERROR(("Internal error"));
+ goto exit_function;
+ }
+
+ mali_core_array = (mali_core_renderunit **) _mali_osk_malloc( new_size );
+ if (NULL == mali_core_array )
+ {
+ MALI_PRINT_ERROR(("Out of mem"));
+ err = _MALI_OSK_ERR_NOMEM;
+ goto exit_function;
+ }
+ _mali_osk_memcpy(mali_core_array, subsys->mali_core_array, previous_size);
+ _mali_osk_free( subsys->mali_core_array);
+ MALI_DEBUG_PRINT(5, ("Success: adding a new core to subsystem array %s\n", core->description) ) ;
+ }
+ else
+ {
+ mali_core_array = (mali_core_renderunit **) _mali_osk_malloc( new_size );
+ if (NULL == mali_core_array )
+ {
+ MALI_PRINT_ERROR(("Out of mem"));
+ err = _MALI_OSK_ERR_NOMEM;
+ goto exit_function;
+ }
+ MALI_DEBUG_PRINT(6, ("Success: adding first core to subsystem array %s\n", core->description) ) ;
+ }
+ subsys->mali_core_array = mali_core_array;
+ mali_core_array[previous_nr] = core;
+
+ /* Add the core to the list of available cores on the system */
+ _mali_osk_list_add(&(core->list), &(subsys->renderunit_idle_head));
+
+ /* Update total number of cores */
+ subsys->number_of_cores = new_nr;
+ MALI_DEBUG_PRINT(6, ("Success: mali_core_subsystem_register_renderunit %s\n", core->description));
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_SUCCESS;
+
+exit_function:
+ mali_core_renderunit_irq_handler_remove(core);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_ERROR(err);
+}
+
+
+/**
+ * Called by the core when a system info update is needed
+ * We fill in info about all the core types available
+ * @param subsys Pointer to the core's @a mali_core_subsystem data structure
+ * @param info Pointer to system info struct to update
+ * @return _MALI_OSK_ERR_OK on success, or another _mali_osk_errcode_t error code on failure
+ */
+_mali_osk_errcode_t mali_core_subsystem_system_info_fill(mali_core_subsystem* subsys, _mali_system_info* info)
+{
+ u32 i;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK; /* OK if no cores to update info for */
+ mali_core_renderunit * core;
+ _mali_core_info **core_info_nextp;
+ _mali_core_info * cinfo;
+
+ MALI_DEBUG_PRINT(4, ("mali_core_subsystem_system_info_fill: %s\n", subsys->name) ) ;
+
+ /* check input */
+ MALI_CHECK_NON_NULL(info, _MALI_OSK_ERR_INVALID_ARGS);
+
+ core_info_nextp = &(info->core_info);
+ cinfo = info->core_info;
+
+ while(NULL!=cinfo)
+ {
+ core_info_nextp = &(cinfo->next);
+ cinfo = cinfo->next;
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ if ( NULL==core )
+ {
+ err = _MALI_OSK_ERR_FAULT;
+ goto early_exit;
+ }
+ cinfo = (_mali_core_info *)_mali_osk_calloc(1, sizeof(_mali_core_info));
+ if ( NULL==cinfo )
+ {
+ err = _MALI_OSK_ERR_NOMEM;
+ goto early_exit;
+ }
+ cinfo->version = core->core_version;
+ cinfo->type =subsys->core_type;
+ cinfo->reg_address = core->registers_base_addr;
+ cinfo->core_nr = i;
+ cinfo->next = NULL;
+ /* Writing this address to the previous' *(&next) ptr */
+ *core_info_nextp = cinfo;
+ /* Setting the next_ptr to point to &this->next_ptr */
+ core_info_nextp = &(cinfo->next);
+ }
+early_exit:
+ if ( _MALI_OSK_ERR_OK != err) MALI_PRINT_ERROR(("Error: In mali_core_subsystem_system_info_fill %d\n", err));
+ MALI_DEBUG_CODE(
+ cinfo = info->core_info;
+
+ MALI_DEBUG_PRINT(3, ("Current list of cores\n"));
+ while( NULL != cinfo )
+ {
+ MALI_DEBUG_PRINT(3, ("Type: 0x%x\n", cinfo->type));
+ MALI_DEBUG_PRINT(3, ("Version: 0x%x\n", cinfo->version));
+ MALI_DEBUG_PRINT(3, ("Reg_addr: 0x%x\n", cinfo->reg_address));
+ MALI_DEBUG_PRINT(3, ("Core_nr: 0x%x\n", cinfo->core_nr));
+ MALI_DEBUG_PRINT(3, ("Flags: 0x%x\n", cinfo->flags));
+ MALI_DEBUG_PRINT(3, ("*****\n"));
+ cinfo = cinfo->next;
+ }
+ );
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_ERROR(err);
+}
+
+
+/* Is used by external function:
+ subsystem_terminate<> */
+void mali_core_subsystem_cleanup(mali_core_subsystem* subsys)
+{
+ u32 i;
+ mali_core_renderunit * core;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ MALI_DEBUG_PRINT(2, ("Core: subsystem_cleanup: %s\n", subsys->name )) ;
+
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+#if USING_MMU
+ if (NULL != core->mmu)
+ {
+ /* the MMU is attached in the load_complete callback, which will never be called if the module fails to load, handle that case */
+ mali_memory_core_mmu_unregister_callback(core->mmu, mali_core_subsystem_callback_schedule_wrapper);
+ }
+#endif
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ mali_core_renderunit_irq_handler_remove(core);
+
+ /* When a process terminates, all cores running jobs from that process is reset and put to idle.
+ That means that when the module is unloading (this code) we are guaranteed that all cores are idle.
+ However: if something (we can't think of) is really wrong, a core may give an interrupt during this
+ unloading, and we may now in the code have a bottom-half-processing pending from the interrupts
+ we deregistered above. To be sure that the bottom halves do not access the structures after they
+ are deallocated we flush the bottom-halves processing here, before the deallocation. */
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+#if USING_MALI_PMM
+ /* Only reset when we are using PMM and the core is not off */
+#if MALI_PMM_NO_PMU
+ /* We need to reset when there is no PMU - but this will
+ * cause the register read/write functions to report an
+ * error (hence the if to check for CORE_OFF below) we
+ * change state to allow the reset to happen.
+ */
+ core->state = CORE_IDLE;
+#endif
+ if( core->state != CORE_OFF )
+ {
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_DISABLE );
+ }
+#else
+ /* Always reset the core */
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_DISABLE );
+#endif
+
+ mali_core_renderunit_unmap_registers(core);
+
+ _mali_osk_list_delinit(&core->list);
+
+ mali_core_renderunit_term(core);
+
+ subsys->renderunit_delete(core);
+ }
+
+ mali_core_subsystem_cleanup_all_renderunits(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT(6, ("SUCCESS: mali_core_subsystem_cleanup: %s\n", subsys->name )) ;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_number_of_cores_get(mali_core_session * session, u32 *number_of_cores)
+{
+ mali_core_subsystem * subsystem;
+
+ subsystem = session->subsystem;
+ if ( NULL != number_of_cores )
+ {
+ *number_of_cores = subsystem->number_of_cores;
+
+ MALI_DEBUG_PRINT(4, ("Core: ioctl_number_of_cores_get: %s: %u\n", subsystem->name, *number_of_cores) ) ;
+ }
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_start_job(mali_core_session * session, void *job_data)
+{
+ mali_core_subsystem * subsystem;
+ _mali_osk_errcode_t err;
+
+ /* need the subsystem to run callback function */
+ subsystem = session->subsystem;
+ MALI_CHECK_NON_NULL(subsystem, _MALI_OSK_ERR_FAULT);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ err = subsystem->get_new_job_from_user(session, job_data);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_ERROR(err);
+}
+
+
+/* We return the version number to the first core in this subsystem */
+_mali_osk_errcode_t mali_core_subsystem_ioctl_core_version_get(mali_core_session * session, _mali_core_version *version)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit * core0;
+ u32 nr_return;
+
+ subsystem = session->subsystem;
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+
+ core0 = mali_core_renderunit_get_mali_core_nr(subsystem, 0);
+
+ if( NULL == core0 )
+ {
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ nr_return = core0->core_version;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_DEBUG_PRINT(4, ("Core: ioctl_core_version_get: %s: %u\n", subsystem->name, nr_return )) ;
+
+ *version = nr_return;
+
+ MALI_SUCCESS;
+}
+
+void mali_core_subsystem_ioctl_abort_job(mali_core_session * session, u32 id)
+{
+ find_and_abort(session, id);
+}
+
+static mali_bool job_should_be_aborted(mali_core_job *job, u32 abort_id)
+{
+ if ( job->abort_id == abort_id ) return MALI_TRUE;
+ else return MALI_FALSE;
+}
+
+static void find_and_abort(mali_core_session* session, u32 abort_id)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit *core;
+ mali_core_renderunit *tmp;
+ mali_core_job *job;
+
+ subsystem = session->subsystem;
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ job = mali_job_queue_abort_job(session, abort_id);
+ if (NULL != job)
+ {
+ MALI_DEBUG_PRINT(3, ("Core: Aborting %s job, with id nr: %u, from the waiting_to_run slot.\n", subsystem->name, abort_id ));
+ if (mali_job_queue_empty(session))
+ {
+ _mali_osk_list_delinit(&(session->awaiting_sessions_list));
+ }
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ subsystem->return_job_to_user(job , JOB_STATUS_END_ABORT);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY( core, tmp, &session->renderunits_working_head, mali_core_renderunit, list )
+ {
+ job = core->current_job;
+ if ( (job!=NULL) && (job_should_be_aborted (job, abort_id) ) )
+ {
+ MALI_DEBUG_PRINT(3, ("Core: Aborting %s job, with id nr: %u, which is currently running on mali.\n", subsystem->name, abort_id ));
+ if ( core->state==CORE_IDLE )
+ {
+ MALI_PRINT_ERROR(("Aborting core with running job which is idle. Must be something very wrong."));
+ goto end_bug;
+ }
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_ABORT);
+ }
+ }
+end_bug:
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_suspend_response(mali_core_session * session, void *argument)
+{
+ mali_core_subsystem * subsystem;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+
+ /* need the subsystem to run callback function */
+ subsystem = session->subsystem;
+ MALI_CHECK_NON_NULL(subsystem, _MALI_OSK_ERR_FAULT);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ if ( NULL != subsystem->suspend_response)
+ {
+ MALI_DEBUG_PRINT(4, ("MALI_IOC_CORE_CMD_SUSPEND_RESPONSE start\n"));
+ err = subsystem->suspend_response(session, argument);
+ MALI_DEBUG_PRINT(4, ("MALI_IOC_CORE_CMD_SUSPEND_RESPONSE end\n"));
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ return err;
+}
+
+
+/* Is used by internal function:
+ mali_core_subsystem_cleanup<>s */
+/* All cores should be removed before calling this function
+Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_cleanup_all_renderunits(mali_core_subsystem* subsys)
+{
+ int i;
+ _mali_osk_free(subsys->mali_core_array);
+ subsys->number_of_cores = 0;
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_cleanup_all_renderunits: %s\n", subsys->name) ) ;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+
+ if ( ! _mali_osk_list_empty(&(subsys->renderunit_idle_head)))
+ {
+ MALI_PRINT_ERROR(("List renderunit_list_idle should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->renderunit_idle_head)) ;
+ }
+
+ if ( ! _mali_osk_list_empty(&(subsys->renderunit_off_head)))
+ {
+ MALI_PRINT_ERROR(("List renderunit_list_off should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->renderunit_off_head)) ;
+ }
+
+ for(i=0; i<PRIORITY_LEVELS; ++i)
+ {
+ if ( ! _mali_osk_list_empty(&(subsys->awaiting_sessions_head[i])))
+ {
+ MALI_PRINT_ERROR(("List awaiting_sessions_linkedlist should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->awaiting_sessions_head[i])) ;
+ subsys->awaiting_sessions_sum_all_priorities = 0;
+ }
+ }
+
+ if ( ! _mali_osk_list_empty(&(subsys->all_sessions_head)))
+ {
+ MALI_PRINT_ERROR(("List all_sessions_linkedlist should be empty."));
+ _MALI_OSK_INIT_LIST_HEAD(&(subsys->all_sessions_head)) ;
+ }
+}
+
+/* Is used by internal functions:
+ mali_core_irq_handler_bottom_half<>;
+ mali_core_subsystem_schedule<>; */
+/* Will release the core.*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_core_set_idle(mali_core_renderunit *core)
+{
+ mali_core_subsystem *subsystem;
+#if USING_MALI_PMM
+ mali_core_status oldstatus;
+#endif
+ subsystem = core->subsystem;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ /* Cancel both the job watchdog timer and the hang-detection timer,
+  * since the core no longer has a running job. */
+ _mali_osk_timer_del(core->timer);
+ _mali_osk_timer_del(core->timer_hang_detection);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_core_set_idle: %s\n", core->description) ) ;
+
+ core->current_job = NULL ;
+
+#if USING_MALI_PMM
+
+ oldstatus = core->state;
+
+ /* Move to the idle list only when no power-down is pending; a pending
+  * power-down sends the core to the off list further below instead. */
+ if ( !core->pend_power_down )
+ {
+ core->state = CORE_IDLE ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_idle_head );
+ }
+
+ if( CORE_OFF != oldstatus )
+ {
+ /* Message that this core is now idle or in fact off */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_FINISHED,
+ 0 };
+ event.data = core->pmm_id;
+ _mali_ukk_pmm_event_message( &event );
+#if USING_MMU
+ /* Only free the reference when entering idle state from
+ * anything other than power off
+ */
+ mali_memory_core_mmu_release_address_space_reference(core->mmu);
+#endif /* USING_MMU */
+ }
+
+ if( core->pend_power_down )
+ {
+ core->state = CORE_OFF ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_off_head );
+
+ /* Done the move from the active queues, so the pending power down can be done */
+ core->pend_power_down = MALI_FALSE;
+ malipmm_core_power_down_okay( core->pmm_id );
+ }
+
+#else /* !USING_MALI_PMM */
+
+ /* Without PMM there is no power management: the core always goes
+  * straight back to the idle list. */
+ core->state = CORE_IDLE ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_idle_head );
+
+#if USING_MMU
+ mali_memory_core_mmu_release_address_space_reference(core->mmu);
+#endif
+
+#endif /* USING_MALI_PMM */
+}
+
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_set_working(mali_core_renderunit *core, mali_core_job *job)
+{
+ mali_core_subsystem *subsystem;
+ mali_core_session *session;
+ u64 time_now;
+
+ session = job->session;
+ subsystem = core->subsystem;
+
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_JOB(job);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_set_working: %s\n", core->description) ) ;
+
+ /* Stamp the job's start time (nanoseconds); used later for render-time
+  * accounting and, if enabled, GPU utilization tracking. */
+ time_now = _mali_osk_time_get_ns();
+ job->start_time = time_now;
+#if MALI_GPU_UTILIZATION
+ mali_utilization_core_start(time_now);
+#endif
+
+ /* Attach the job and move the core onto the session's working list. */
+ core->current_job = job ;
+ core->state = CORE_WORKING ;
+ _mali_osk_list_move( &core->list, &session->renderunits_working_head );
+
+}
+
+#if USING_MALI_PMM
+
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_move_core_set_off(mali_core_renderunit *core)
+{
+ mali_core_subsystem *subsystem;
+ subsystem = core->subsystem;
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ /* Cores must be idle before powering off */
+ MALI_DEBUG_ASSERT(core->state == CORE_IDLE);
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_move_core_set_off: %s\n", core->description) ) ;
+
+ /* Detach any job pointer and park the core on the off list. */
+ core->current_job = NULL ;
+ core->state = CORE_OFF ;
+ _mali_osk_list_move( &core->list, &subsystem->renderunit_off_head );
+}
+
+#endif /* USING_MALI_PMM */
+
+/* Is used by internal function:
+ mali_core_subsystem_schedule<>; */
+/* Returns the session holding the highest-priority waiting job for the subsystem. NULL if none. */
+/* Must hold subsystem_mutex before entering this function */
+static mali_core_session * mali_core_subsystem_get_waiting_session(mali_core_subsystem *subsystem)
+{
+ int i;
+
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ /* Fast path: the running total tells us when every priority list is empty. */
+ if ( 0 == subsystem->awaiting_sessions_sum_all_priorities )
+ {
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_get_waiting_job: No awaiting session found\n"));
+ return NULL;
+ }
+
+ /* Scan priority lists from highest (index 0) to lowest and return the
+  * first session found. */
+ for( i=0; i<PRIORITY_LEVELS ; ++i)
+ {
+ if (!_mali_osk_list_empty(&subsystem->awaiting_sessions_head[i]))
+ {
+ return _MALI_OSK_LIST_ENTRY(subsystem->awaiting_sessions_head[i].next, mali_core_session, awaiting_sessions_list);
+ }
+ }
+
+ return NULL;
+}
+
+/* Dequeues the session's next job and updates the subsystem's waiting-job
+   accounting and the session's position on the awaiting-sessions lists.
+   Must hold subsystem_mutex before entering this function. */
+static mali_core_job * mali_core_subsystem_release_session_get_job(mali_core_subsystem *subsystem, mali_core_session * session)
+{
+ mali_core_job *job;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ job = mali_job_queue_get_job(session);
+ subsystem->awaiting_sessions_sum_all_priorities--;
+
+ if(mali_job_queue_empty(session))
+ {
+ /* This is the last job, so remove it from the list */
+ _mali_osk_list_delinit(&session->awaiting_sessions_list);
+ }
+ else
+ {
+ if (0 == (job->flags & MALI_UK_START_JOB_FLAG_MORE_JOBS_FOLLOW))
+ {
+ /* There are more jobs, but the follow flag is not set, so let other sessions run their jobs first:
+  * move this session to the tail of the list for its next job's priority. */
+ _mali_osk_list_del(&(session->awaiting_sessions_list));
+ _mali_osk_list_addtail(&(session->awaiting_sessions_list), &(subsystem->awaiting_sessions_head[
+ session->queue[session->queue_head]->priority]));
+ }
+ /* else; keep on list, follow flag is set and there are more jobs in queue for this session */
+ }
+
+ MALI_CHECK_JOB(job);
+ return job;
+}
+
+/* Is used by internal functions:
+ mali_core_subsystem_schedule<> */
+/* This will start the job on the core. It will also release the core if it did not start.*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_job_start_on_core(mali_core_job *job, mali_core_renderunit *core)
+{
+ mali_core_session *session;
+ mali_core_subsystem *subsystem;
+ _mali_osk_errcode_t err;
+ session = job->session;
+ subsystem = core->subsystem;
+
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_JOB(job);
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_CHECK_SESSION(session);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(4, ("Core: job_start_on_core: job=0x%x, session=0x%x, core=%s\n", job, session, core->description));
+
+ MALI_DEBUG_ASSERT(NULL == core->current_job) ;
+ MALI_DEBUG_ASSERT(CORE_IDLE == core->state );
+
+ /* Take the core off the idle list and attach the job before touching
+  * caches or hardware; failure paths below undo this via set_idle. */
+ mali_core_subsystem_move_set_working(core, job);
+
+#if defined USING_MALI400_L2_CACHE
+ if (0 == (job->flags & MALI_UK_START_JOB_FLAG_NO_FLUSH))
+ {
+ /* Invalidate the L2 cache */
+ if (_MALI_OSK_ERR_OK != mali_kernel_l2_cache_invalidate_all() )
+ {
+ MALI_DEBUG_PRINT(4, ("Core: Clear of L2 failed, return job. System may not be usable for some reason.\n"));
+ mali_core_subsystem_move_core_set_idle(core);
+ subsystem->return_job_to_user(job,JOB_STATUS_END_SYSTEM_UNUSABLE );
+ return;
+ }
+ }
+#endif
+
+ /* Tries to start job on the core. Returns an error code if the job could not be started */
+ err = subsystem->start_job(job, core);
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ /* This will happen only if there is something in the job object
+ which makes it impossible to start, e.g. it requires illegal memory.*/
+ MALI_DEBUG_PRINT(4, ("Core: start_job failed, return job and putting core back into idle list\n"));
+ mali_core_subsystem_move_core_set_idle(core);
+ subsystem->return_job_to_user(job,JOB_STATUS_END_ILLEGAL_JOB );
+ }
+ else
+ {
+ /* Arm the software watchdog; +1 rounds the ms-to-ticks conversion up. */
+ u32 delay = _mali_osk_time_mstoticks(job->watchdog_msecs)+1;
+ job->watchdog_jiffies = _mali_osk_time_tickcount() + delay;
+ if (mali_benchmark)
+ {
+ /* In benchmark mode poll the core on every tick instead. */
+ _mali_osk_timer_add(core->timer, 1);
+ }
+ else
+ {
+ _mali_osk_timer_add(core->timer, delay);
+ }
+ }
+}
+
+#if USING_MMU
+/* MMU callback adapter: casts the opaque pointer back to the subsystem and
+   re-runs scheduling (used when a deferred page-table activation completes). */
+static void mali_core_subsystem_callback_schedule_wrapper(void* sub)
+{
+ mali_core_subsystem * subsystem;
+ subsystem = (mali_core_subsystem *)sub;
+ MALI_DEBUG_PRINT(3, ("MMU: Is schedulling subsystem: %s\n", subsystem->name));
+ mali_core_subsystem_schedule(subsystem);
+}
+#endif
+
+/* Is used by internal function:
+ mali_core_irq_handler_bottom_half
+ mali_core_session_add_job
+*/
+/* Must hold subsystem_mutex before entering this function */
+static void mali_core_subsystem_schedule(mali_core_subsystem * subsystem)
+{
+ mali_core_renderunit *core, *tmp;
+ mali_core_session *session;
+ mali_core_job *job;
+#ifdef MALI_REBOOTNOTIFIER
+ /* Refuse to schedule new work once shutdown has started. */
+ if (_mali_osk_atomic_read(&mali_shutdown_state) > 0) {
+ MALI_DEBUG_PRINT(3, ("Core: mali already under shutdown process!!")) ;
+ return;
+ }
+#endif
+
+ MALI_DEBUG_PRINT(5, ("Core: subsystem_schedule: %s\n", subsystem->name )) ;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ /* First check that there are sessions with jobs waiting to run */
+ if ( 0 == subsystem->awaiting_sessions_sum_all_priorities)
+ {
+ MALI_DEBUG_PRINT(6, ("Core: No jobs available for %s\n", subsystem->name) ) ;
+ return;
+ }
+
+ /* Returns the session with the highest priority job for the subsystem. NULL if none*/
+ session = mali_core_subsystem_get_waiting_session(subsystem);
+
+ if (NULL == session)
+ {
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: No runnable job found\n"));
+ return;
+ }
+
+ /* Try each idle core in turn; the first core whose MMU page table can be
+  * activated (when USING_MMU) gets the job and we return immediately. */
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp, &subsystem->renderunit_idle_head, mali_core_renderunit, list)
+ {
+#if USING_MMU
+ int err = mali_memory_core_mmu_activate_page_table(core->mmu, session->mmu_session, mali_core_subsystem_callback_schedule_wrapper, subsystem);
+ if (0 == err)
+ {
+ /* core points to a core where the MMU page table activation succeeded */
+#endif
+ /* This will remove the job from queue system */
+ job = mali_core_subsystem_release_session_get_job(subsystem, session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Got a job 0x%x\n", job));
+
+#if USING_MALI_PMM
+ {
+ /* Message that there is a job scheduled to run
+ * NOTE: mali_core_job_start_on_core() can fail to start
+ * the job for several reasons, but it will move the core
+ * back to idle which will create the FINISHED message
+ * so we can still say that the job is SCHEDULED
+ */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_SCHEDULED,
+ 0 };
+ event.data = core->pmm_id;
+ _mali_ukk_pmm_event_message( &event );
+ }
+#endif
+ /* This will {remove core from freelist AND start the job on the core}*/
+ mali_core_job_start_on_core(job, core);
+
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Job started, done\n"));
+ return;
+#if USING_MMU
+ }
+#endif
+ }
+ /* Reached when no idle core could take the job (idle list empty, or every
+  * MMU activation was deferred); the PMM is then told work is queued. */
+ MALI_DEBUG_PRINT(6, ("Core: Schedule: Could not activate MMU. Scheduelling postponed to MMU, checking next.\n"));
+
+#if USING_MALI_PMM
+ {
+ /* Message that there are jobs to run */
+ _mali_uk_pmm_message_s event = {
+ NULL,
+ MALI_PMM_EVENT_JOB_QUEUED,
+ 0 };
+ if( subsystem->core_type == _MALI_GP2 || subsystem->core_type == _MALI_400_GP )
+ {
+ event.data = MALI_PMM_CORE_GP;
+ }
+ else
+ {
+ /* Check the PP is supported by the PMM */
+ MALI_DEBUG_ASSERT( subsystem->core_type == _MALI_200 || subsystem->core_type == _MALI_400_PP );
+ /* We state that all PP cores are scheduled to inform the PMM
+ * that it may need to power something up!
+ */
+ event.data = MALI_PMM_CORE_PP_ALL;
+ }
+ _mali_ukk_pmm_event_message( &event );
+ }
+#endif /* USING_MALI_PMM */
+
+}
+
+/* Is used by external function:
+ session_begin<> */
+void mali_core_session_begin(mali_core_session * session)
+{
+ mali_core_subsystem * subsystem;
+ int i;
+
+ subsystem = session->subsystem;
+ if ( NULL == subsystem )
+ {
+ MALI_PRINT_ERROR(("Missing data in struct\n"));
+ return;
+ }
+ MALI_DEBUG_PRINT(2, ("Core: session_begin: for %s\n", session->subsystem->name )) ;
+
+ session->magic_nr = SESSION_MAGIC_NR;
+
+ _MALI_OSK_INIT_LIST_HEAD(&session->renderunits_working_head);
+
+ /* Empty the fixed-size job queue and reset its ring indices. */
+ for (i = 0; i < MALI_JOB_QUEUE_SIZE; i++)
+ {
+ session->queue[i] = NULL;
+ }
+ session->queue_head = 0;
+ session->queue_tail = 0;
+ _MALI_OSK_INIT_LIST_HEAD(&session->awaiting_sessions_list);
+ _MALI_OSK_INIT_LIST_HEAD(&session->all_sessions_list);
+
+ /* The all_sessions list belongs to the subsystem, so take its mutex
+  * before linking the new session in. */
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsystem);
+ _mali_osk_list_add(&session->all_sessions_list, &session->subsystem->all_sessions_head);
+
+#if MALI_STATE_TRACKING
+ /* NOTE(review): only jobs_received/jobs_returned are initialised here,
+  * while the state dump also reads jobs_started/jobs_ended — presumably
+  * those are initialised elsewhere; verify against the subsystem code. */
+ _mali_osk_atomic_init(&session->jobs_received, 0);
+ _mali_osk_atomic_init(&session->jobs_returned, 0);
+ session->pid = _mali_osk_get_pid();
+#endif
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_begin: for %s DONE\n", session->subsystem->name) ) ;
+}
+
+#if USING_MMU
+/* Thin dispatch helper: forwards to the subsystem-specific bus-stop handler. */
+static void mali_core_renderunit_stop_bus(mali_core_renderunit* core)
+{
+ core->subsystem->stop_bus(core);
+}
+#endif
+
+void mali_core_session_close(mali_core_session * session)
+{
+ mali_core_subsystem * subsystem;
+ mali_core_renderunit *core;
+
+ subsystem = session->subsystem;
+ MALI_DEBUG_ASSERT_POINTER(subsystem);
+
+ MALI_DEBUG_PRINT(2, ("Core: session_close: for %s\n", session->subsystem->name) ) ;
+
+ /* We must grab subsystem mutex since the list this session belongs to
+ is owned by the subsystem */
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ /* Remove this session from the global sessionlist */
+ _mali_osk_list_delinit(&session->all_sessions_list);
+
+ _mali_osk_list_delinit(&(session->awaiting_sessions_list));
+
+ /* Return the potential waiting jobs to user, keeping the subsystem's
+  * waiting-job counter in sync as each one is drained. */
+ while ( !mali_job_queue_empty(session) )
+ {
+ /* Queue not empty */
+ mali_core_job *job = mali_job_queue_get_job(session);
+ subsystem->return_job_to_user( job, JOB_STATUS_END_SHUTDOWN );
+ subsystem->awaiting_sessions_sum_all_priorities--;
+ }
+
+ /* Kill active cores working for this session - freeing their jobs
+ Since the handling of one core also could stop jobs from another core, there is a while loop */
+ while ( ! _mali_osk_list_empty(&session->renderunits_working_head) )
+ {
+ core = _MALI_OSK_LIST_ENTRY(session->renderunits_working_head.next, mali_core_renderunit, list);
+ MALI_DEBUG_PRINT(3, ("Core: session_close: Core was working: %s\n", core->description )) ;
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_SHUTDOWN );
+ }
+ _MALI_OSK_INIT_LIST_HEAD(&session->renderunits_working_head); /* Not necessary - we will _mali_osk_free session*/
+
+ MALI_DEBUG_PRINT(5, ("Core: session_close: for %s FINISHED\n", session->subsystem->name )) ;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+}
+
+/* Must hold subsystem_mutex before entering this function */
+_mali_osk_errcode_t mali_core_session_add_job(mali_core_session * session, mali_core_job *job, mali_core_job **job_return)
+{
+ mali_core_subsystem * subsystem;
+
+ job->magic_nr = JOB_MAGIC_NR;
+ MALI_CHECK_SESSION(session);
+
+ subsystem = session->subsystem;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsystem);
+
+ MALI_DEBUG_PRINT(5, ("Core: session_add_job: for %s\n", subsystem->name )) ;
+
+ /* Setting the default value; No job to return */
+ MALI_DEBUG_ASSERT_POINTER(job_return);
+ *job_return = NULL;
+
+ if (mali_job_queue_empty(session))
+ {
+ /* Add session to the wait list only if it didn't already have a job waiting. */
+ _mali_osk_list_addtail( &(session->awaiting_sessions_list), &(subsystem->awaiting_sessions_head[job->priority]));
+ }
+
+
+ if (_MALI_OSK_ERR_OK != mali_job_queue_add_job(session, job))
+ {
+ /* Queue full: undo the wait-list insertion done above (the session is
+  * only on the list when its queue was empty before this call). */
+ if (mali_job_queue_empty(session))
+ {
+ _mali_osk_list_delinit(&(session->awaiting_sessions_list));
+ }
+ MALI_DEBUG_PRINT(4, ("Core: session_add_job: %s queue is full\n", subsystem->name));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Continue to add the new job as the next job from this session */
+ MALI_DEBUG_PRINT(6, ("Core: session_add_job job=0x%x\n", job));
+
+ subsystem->awaiting_sessions_sum_all_priorities++;
+
+ /* Kick the scheduler so the job can start immediately if a core is idle. */
+ mali_core_subsystem_schedule(subsystem);
+
+ MALI_DEBUG_PRINT(6, ("Core: session_add_job: for %s FINISHED\n", session->subsystem->name )) ;
+
+ MALI_SUCCESS;
+}
+
+/* Records the job's elapsed render time in microseconds, derived from the
+   start timestamp taken in mali_core_subsystem_move_set_working(). */
+static void mali_core_job_set_run_time(mali_core_job * job, u64 end_time)
+{
+ u32 time_used_nano_seconds;
+
+ time_used_nano_seconds = end_time - job->start_time;
+ job->render_time_usecs = time_used_nano_seconds / 1000;
+}
+
+/* Takes the current job off the core (recording its run time), resets the
+   core, moves it back to idle, optionally reschedules the subsystem, and
+   returns the job to user space with the given end status.
+   Guards against recursion: reset_core() may call back into this function. */
+static void mali_core_renderunit_detach_job_from_core(mali_core_renderunit* core, mali_subsystem_reschedule_option reschedule, mali_subsystem_job_end_code end_status)
+{
+ mali_core_job * job;
+ mali_core_subsystem * subsystem;
+ mali_bool already_in_detach_function;
+ u64 time_now;
+
+ MALI_DEBUG_ASSERT(CORE_IDLE != core->state);
+ time_now = _mali_osk_time_get_ns();
+ job = core->current_job;
+ subsystem = core->subsystem;
+
+ /* The reset_core() called some lines below might call this detach
+ * function again. To protect the core object from being modified by
+ * recursive calls, in_detach_function tracks whether this is a recursive call
+ */
+ already_in_detach_function = core->in_detach_function;
+
+
+ if ( MALI_FALSE == already_in_detach_function )
+ {
+ core->in_detach_function = MALI_TRUE;
+ if ( NULL != job )
+ {
+ mali_core_job_set_run_time(job, time_now);
+ core->current_job = NULL;
+ }
+ }
+
+ /* A segfault requires a hard reset; anything else gets a soft reset
+  * that leaves the core runnable. */
+ if (JOB_STATUS_END_SEG_FAULT == end_status)
+ {
+ subsystem->reset_core( core, MALI_CORE_RESET_STYLE_HARD );
+ }
+ else
+ {
+ subsystem->reset_core( core, MALI_CORE_RESET_STYLE_RUNABLE );
+ }
+
+ if ( MALI_FALSE == already_in_detach_function )
+ {
+ if ( CORE_IDLE != core->state )
+ {
+ #if MALI_GPU_UTILIZATION
+ mali_utilization_core_end(time_now);
+ #endif
+ mali_core_subsystem_move_core_set_idle(core);
+ }
+
+ core->in_detach_function = MALI_FALSE;
+
+ if ( SUBSYSTEM_RESCHEDULE == reschedule )
+ {
+ mali_core_subsystem_schedule(subsystem);
+ }
+ if ( NULL != job )
+ {
+ core->subsystem->return_job_to_user(job, end_status);
+ }
+ }
+}
+
+#if USING_MMU
+/* This function intentionally does not release the semaphore. You must run
+ stop_bus_for_all_cores(), reset_all_cores_on_mmu() and continue_job_handling()
+ after calling this function, and then call unlock_subsystem() to release the
+ semaphore. */
+
+/* Grabs the subsystem mutex for the MMU kill sequence; intentionally NOT
+   released here — see the comment above and unlock_subsystem(). */
+static void lock_subsystem(struct mali_core_subsystem * subsys)
+{
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex.
+
+ This function only stops cores behind the given MMU, unless "mmu" is NULL, in
+ which case all cores are stopped.
+*/
+static void stop_bus_for_all_cores_on_mmu(struct mali_core_subsystem * subsys, void* mmu)
+{
+ u32 i;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_DEBUG_PRINT(2,("Handling: bus stop %s\n", subsys->name ));
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+ /* We stop only cores behind the given MMU, unless MMU is NULL */
+ if ( (NULL!=mmu) && (core->mmu != mmu) ) continue;
+
+ if ( CORE_IDLE != core->state )
+ {
+ MALI_DEBUG_PRINT(4, ("Stopping bus on core %s\n", core->description));
+ mali_core_renderunit_stop_bus(core);
+ /* Flag the core so later steps know it is in error recovery. */
+ core->error_recovery = MALI_TRUE;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4,("Core: not active %s\n", core->description ));
+ }
+ }
+ /* Mutex is still being held, to prevent things to happen while we do cleanup */
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex.
+
+ This function only resets cores behind the given MMU, unless "mmu" is NULL, in
+ which case all cores are reset.
+*/
+static void reset_all_cores_on_mmu(struct mali_core_subsystem * subsys, void* mmu)
+{
+ u32 i;
+
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_DEBUG_PRINT(3, ("Handling: reset cores from mmu: 0x%x on %s\n", mmu, subsys->name ));
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+
+ /* We reset only cores behind the given MMU, unless MMU is NULL */
+ if ( (NULL!=mmu) && (core->mmu != mmu) ) continue;
+
+ if ( CORE_IDLE != core->state )
+ {
+ MALI_DEBUG_PRINT(4, ("Abort and reset core: %s\n", core->description ));
+ /* SEG_FAULT status forces a hard reset of the core; SUBSYSTEM_WAIT
+  * defers rescheduling to the later continue_job_handling() step. */
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_WAIT, JOB_STATUS_END_SEG_FAULT);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("Core: not active %s\n", core->description ));
+ }
+ }
+ MALI_DEBUG_PRINT(4, ("Handling: done %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* You must run lock_subsystem() before entering this function, to ensure that
+ the subsystem mutex is held.
+ Later, unlock_subsystem() can be called to release the mutex. */
+static void continue_job_handling(struct mali_core_subsystem * subsys)
+{
+ u32 i, j;
+
+ MALI_DEBUG_PRINT(3, ("Handling: Continue: %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+
+
+ /* Clear the error-recovery flag that stop_bus_for_all_cores_on_mmu() set. */
+ for(i=0 ; i < subsys->number_of_cores ; ++i)
+ {
+ mali_core_renderunit * core;
+ core = mali_core_renderunit_get_mali_core_nr(subsys,i);
+ core->error_recovery = MALI_FALSE;
+ }
+
+ i = subsys->number_of_cores;
+ j = subsys->awaiting_sessions_sum_all_priorities;
+
+ /* Schedule MIN(nr_waiting_jobs , number of cores) times */
+ while( i-- && j--)
+ {
+ mali_core_subsystem_schedule(subsys);
+ }
+ MALI_DEBUG_PRINT(4, ("Handling: done %s\n", subsys->name ));
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+}
+
+/* Releases the subsystem mutex taken by lock_subsystem(); final step of the
+   MMU kill sequence. */
+static void unlock_subsystem(struct mali_core_subsystem * subsys)
+{
+ MALI_ASSERT_MUTEX_IS_GRABBED(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+}
+
+/* Dispatches one step of the MMU page-fault kill sequence to this subsystem.
+   "data" carries the faulting MMU pointer (NULL means all MMUs). The LOCK and
+   UNLOCK steps are no-ops here — locking is handled by the caller via
+   lock_subsystem()/unlock_subsystem(). */
+void mali_core_subsystem_broadcast_notification(struct mali_core_subsystem * subsys, mali_core_notification_message message, u32 data)
+{
+ void * mmu;
+ mmu = (void*) data;
+
+ switch(message)
+ {
+ case MMU_KILL_STEP0_LOCK_SUBSYSTEM:
+ break;
+ case MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES:
+ stop_bus_for_all_cores_on_mmu(subsys, mmu);
+ break;
+ case MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS:
+ reset_all_cores_on_mmu(subsys, mmu );
+ break;
+ case MMU_KILL_STEP3_CONTINUE_JOB_HANDLING:
+ continue_job_handling(subsys);
+ break;
+ case MMU_KILL_STEP4_UNLOCK_SUBSYSTEM:
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Illegal message: 0x%x, data: 0x%x\n", (u32)message, data));
+ break;
+ }
+}
+#endif /* USING_MMU */
+
+/* Sets the job's software watchdog timeout, clamped to
+   [WATCHDOG_MSECS_MIN, WATCHDOG_MSECS_MAX]; 0 selects the driver default. */
+void job_watchdog_set(mali_core_job * job, u32 watchdog_msecs)
+{
+ if (watchdog_msecs == 0) job->watchdog_msecs = mali_max_job_runtime; /* use the default */
+ else if (watchdog_msecs > WATCHDOG_MSECS_MAX) job->watchdog_msecs = WATCHDOG_MSECS_MAX; /* no larger than max */
+ else if (watchdog_msecs < WATCHDOG_MSECS_MIN) job->watchdog_msecs = WATCHDOG_MSECS_MIN; /* not below min */
+ else job->watchdog_msecs = watchdog_msecs;
+}
+
+/* Returns the hang-check interval, clamping the user-settable module
+   parameter back into [HANG_CHECK_MSECS_MIN, HANG_CHECK_MSECS_MAX] first. */
+u32 mali_core_hang_check_timeout_get(void)
+{
+ /* check the value. The user might have set the value outside the allowed range */
+ if (mali_hang_check_interval > HANG_CHECK_MSECS_MAX) mali_hang_check_interval = HANG_CHECK_MSECS_MAX; /* cap to max */
+ else if (mali_hang_check_interval < HANG_CHECK_MSECS_MIN) mali_hang_check_interval = HANG_CHECK_MSECS_MIN; /* cap to min */
+
+ /* return the active value */
+ return mali_hang_check_interval;
+}
+
+/* Top-half interrupt handler: asks the subsystem whether this core raised the
+   IRQ and, if so, defers the real work to the bottom half. Returns an error
+   when the IRQ was not ours (so a shared-IRQ framework can try other handlers). */
+static _mali_osk_errcode_t mali_core_irq_handler_upper_half (void * data)
+{
+ mali_core_renderunit *core;
+ u32 has_pending_irq;
+
+ core = (mali_core_renderunit * )data;
+
+ /* A powered-off core cannot have raised this interrupt; claim success
+  * without scheduling any work. */
+ if(core && (CORE_OFF == core->state))
+ {
+ MALI_SUCCESS;
+ }
+
+ if ( (NULL == core) ||
+ (NULL == core->subsystem) ||
+ (NULL == core->subsystem->irq_handler_upper_half) )
+ {
+ MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+ }
+ MALI_CHECK_CORE(core);
+ MALI_CHECK_SUBSYSTEM(core->subsystem);
+
+ has_pending_irq = core->subsystem->irq_handler_upper_half(core);
+
+ if ( has_pending_irq )
+ {
+ /* Hand the heavy lifting to the bottom half work item. */
+ _mali_osk_irq_schedulework( core->irq ) ;
+ MALI_SUCCESS;
+ }
+
+ if (mali_benchmark) MALI_SUCCESS;
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+/* Bottom-half interrupt handler: runs in process context with the subsystem
+   mutex held, asks the subsystem for the job status, and either detaches the
+   finished job or handles watchdog/poll states for a still-running job. */
+static void mali_core_irq_handler_bottom_half ( void *data )
+{
+ mali_core_renderunit *core;
+ mali_core_subsystem* subsystem;
+
+ mali_subsystem_job_end_code job_status;
+
+ core = (mali_core_renderunit * )data;
+
+ MALI_CHECK_CORE(core);
+ subsystem = core->subsystem;
+ MALI_CHECK_SUBSYSTEM(subsystem);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+ /* The job may already have been detached (or the core powered off)
+  * before this work item ran; nothing to do in that case. */
+ if ( CORE_IDLE == core->state || CORE_OFF == core->state ) goto end_function;
+
+ MALI_DEBUG_PRINT(5, ("IRQ: handling irq from core %s\n", core->description )) ;
+
+ _mali_osk_cache_flushall();
+
+ /* This function must also update the job status flag */
+ job_status = subsystem->irq_handler_bottom_half( core );
+
+ /* Retval is nonzero if the job is finished. */
+ if ( JOB_STATUS_CONTINUE_RUN != job_status )
+ {
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, job_status);
+ }
+ else
+ {
+ switch ( core->state )
+ {
+ case CORE_WATCHDOG_TIMEOUT:
+ MALI_DEBUG_PRINT(2, ("Watchdog SW Timeout of job from core: %s\n", core->description ));
+ mali_core_renderunit_detach_job_from_core(core, SUBSYSTEM_RESCHEDULE, JOB_STATUS_END_TIMEOUT_SW );
+ break;
+
+ case CORE_POLL:
+ MALI_DEBUG_PRINT(5, ("Poll core: %s\n", core->description )) ;
+ /* Benchmark-style polling: re-arm the timer for the next tick. */
+ core->state = CORE_WORKING;
+ _mali_osk_timer_add( core->timer, 1);
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(4, ("IRQ: The job on the core continue to run: %s\n", core->description )) ;
+ break;
+ }
+ }
+end_function:
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsystem);
+}
+
+/* Flushes all CPU caches and issues a memory barrier so that CPU-written
+   buffers are visible to the GPU (and vice versa). */
+void subsystem_flush_mapped_mem_cache(void)
+{
+ _mali_osk_cache_flushall();
+ _mali_osk_mem_barrier();
+}
+
+#if USING_MALI_PMM
+
+/* PMM request to power a core down. Succeeds only when the core is idle;
+   a busy core returns _MALI_OSK_ERR_BUSY and (unless immediate_only) is
+   flagged for a deferred power-down once its job completes. */
+_mali_osk_errcode_t mali_core_subsystem_signal_power_down(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool immediate_only)
+{
+ mali_core_renderunit * core = NULL;
+
+ MALI_CHECK_SUBSYSTEM(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ /* It is possible that this signal funciton can be called during a driver exit,
+ * and so the requested core may now be destroyed. (This is due to us not having
+ * the subsys lock before signalling power down).
+ * mali_core_renderunit_get_mali_core_nr() will report a Mali ERR because
+ * the core number is out of range (which is a valid error in other cases).
+ * So instead we check here (now that we have the subsys lock) and let the
+ * caller cope with the core get failure and check that the core has
+ * been unregistered in the PMM as part of its destruction.
+ */
+ if ( subsys->number_of_cores > mali_core_nr )
+ {
+ core = mali_core_renderunit_get_mali_core_nr(subsys, mali_core_nr);
+ }
+
+ if ( NULL == core )
+ {
+ /* Couldn't find the core */
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 1, ("Core: Failed to find core to power down\n") );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ else if ( core->state != CORE_IDLE )
+ {
+ /* When powering down we either set a pending power down flag here so we
+ * can power down cleanly after the job completes or we don't set the
+ * flag if we have been asked to only do a power down right now
+ * In either case, return that the core is busy
+ */
+ if ( !immediate_only ) core->pend_power_down = MALI_TRUE;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 5, ("Core: No idle core to power down\n") );
+ MALI_ERROR(_MALI_OSK_ERR_BUSY);
+ }
+
+ /* Shouldn't have a pending power down flag set */
+ MALI_DEBUG_ASSERT( !core->pend_power_down );
+
+ /* Move core to off queue */
+ mali_core_subsystem_move_core_set_off(core);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ MALI_SUCCESS;
+}
+
+/* PMM request to power a core back up: moves it from the off list to the idle
+   list, optionally resets the MMU and core (unless queue_only), and kicks the
+   scheduler. A core that is not CORE_OFF returns _MALI_OSK_ERR_BUSY and has
+   any pending power-down cancelled. */
+_mali_osk_errcode_t mali_core_subsystem_signal_power_up(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool queue_only)
+{
+ mali_core_renderunit * core;
+
+ MALI_CHECK_SUBSYSTEM(subsys);
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys);
+
+ core = mali_core_renderunit_get_mali_core_nr(subsys, mali_core_nr);
+
+ if( core == NULL )
+ {
+ /* Couldn't find the core */
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 1, ("Core: Failed to find core to power up\n") );
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+ else if( core->state != CORE_OFF )
+ {
+ /* This will usually happen because we are trying to cancel a pending power down */
+ core->pend_power_down = MALI_FALSE;
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+ MALI_DEBUG_PRINT( 1, ("Core: No powered off core to power up (cancelled power down?)\n") );
+ MALI_ERROR(_MALI_OSK_ERR_BUSY);
+ }
+
+ /* Shouldn't have a pending power down set */
+ MALI_DEBUG_ASSERT( !core->pend_power_down );
+
+ /* Move core to idle queue */
+ mali_core_subsystem_move_core_set_idle(core);
+
+ if( !queue_only )
+ {
+ /* Reset MMU & core - core must be idle to allow this */
+#if USING_MMU
+ if ( NULL!=core->mmu )
+ {
+#if defined(USING_MALI200)
+ /* NOTE(review): PP0's MMU is skipped on Mali-200 here — presumably
+  * it is reset elsewhere; confirm against the MMU driver. */
+ if (core->pmm_id != MALI_PMM_CORE_PP0)
+ {
+#endif
+ mali_kernel_mmu_reset(core->mmu);
+#if defined(USING_MALI200)
+ }
+#endif
+
+ }
+#endif /* USING_MMU */
+ subsys->reset_core( core, MALI_CORE_RESET_STYLE_RUNABLE );
+ }
+
+ /* Need to schedule work to start on this core */
+ mali_core_subsystem_schedule(subsys);
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys);
+
+ MALI_SUCCESS;
+}
+
+#endif /* USING_MALI_PMM */
+
+#if MALI_STATE_TRACKING
+/* Writes a human-readable snapshot of the subsystem (cores, their states and
+   current jobs, idle/off lists, sessions, and waiting queues) into buf.
+   Returns the number of bytes written; at most "size" bytes are produced. */
+u32 mali_core_renderunit_dump_state(mali_core_subsystem* subsystem, char *buf, u32 size)
+{
+ u32 i, len = 0;
+ mali_core_renderunit *core;
+ mali_core_renderunit *tmp_core;
+
+ mali_core_session* session;
+ mali_core_session* tmp_session;
+
+ if (0 >= size)
+ {
+ return 0;
+ }
+
+ /* Hold the subsystem mutex for the whole dump so the lists stay stable. */
+ MALI_CORE_SUBSYSTEM_MUTEX_GRAB( subsystem );
+
+ len += _mali_osk_snprintf(buf + len, size - len, "Subsystem:\n");
+ len += _mali_osk_snprintf(buf + len, size - len, "  Name: %s\n", subsystem->name);
+
+ for (i = 0; i < subsystem->number_of_cores; i++)
+ {
+ len += _mali_osk_snprintf(buf + len, size - len, "  Core: #%u\n",
+ subsystem->mali_core_array[i]->core_number);
+ len += _mali_osk_snprintf(buf + len, size - len, "    Description: %s\n",
+ subsystem->mali_core_array[i]->description);
+ switch(subsystem->mali_core_array[i]->state)
+ {
+ case CORE_IDLE:
+ len += _mali_osk_snprintf(buf + len, size - len, "    State: CORE_IDLE\n");
+ break;
+ case CORE_WORKING:
+ len += _mali_osk_snprintf(buf + len, size - len, "    State: CORE_WORKING\n");
+ break;
+ case CORE_WATCHDOG_TIMEOUT:
+ len += _mali_osk_snprintf(buf + len, size - len, "    State: CORE_WATCHDOG_TIMEOUT\n");
+ break;
+ case CORE_POLL:
+ len += _mali_osk_snprintf(buf + len, size - len, "    State: CORE_POLL\n");
+ break;
+ case CORE_HANG_CHECK_TIMEOUT:
+ len += _mali_osk_snprintf(buf + len, size - len, "    State: CORE_HANG_CHECK_TIMEOUT\n");
+ break;
+ case CORE_OFF:
+ len += _mali_osk_snprintf(buf + len, size - len, "    State: CORE_OFF\n");
+ break;
+ default:
+ len += _mali_osk_snprintf(buf + len, size - len, "    State: Unknown (0x%X)\n",
+ subsystem->mali_core_array[i]->state);
+ break;
+ }
+ len += _mali_osk_snprintf(buf + len, size - len, "    Current job: 0x%X\n",
+ (u32)(subsystem->mali_core_array[i]->current_job));
+ if (subsystem->mali_core_array[i]->current_job)
+ {
+ u64 time_used_nano_seconds;
+ u32 time_used_micro_seconds;
+ u64 time_now = _mali_osk_time_get_ns();
+
+ time_used_nano_seconds = time_now - subsystem->mali_core_array[i]->current_job->start_time;
+ time_used_micro_seconds = ((u32)(time_used_nano_seconds)) / 1000;
+
+ len += _mali_osk_snprintf(buf + len, size - len, "    Current job session: 0x%X\n",
+ subsystem->mali_core_array[i]->current_job->session);
+ len += _mali_osk_snprintf(buf + len, size - len, "    Current job number: %d\n",
+ subsystem->mali_core_array[i]->current_job->job_nr);
+ len += _mali_osk_snprintf(buf + len, size - len, "    Current job render_time micro seconds: %d\n",
+ time_used_micro_seconds );
+ len += _mali_osk_snprintf(buf + len, size - len, "    Current job start time micro seconds: %d\n",
+ (u32) (subsystem->mali_core_array[i]->current_job->start_time >>10) );
+ }
+ len += _mali_osk_snprintf(buf + len, size - len, "    Core version: 0x%X\n",
+ subsystem->mali_core_array[i]->core_version);
+#if USING_MALI_PMM
+ len += _mali_osk_snprintf(buf + len, size - len, "    PMM id: 0x%X\n",
+ subsystem->mali_core_array[i]->pmm_id);
+ len += _mali_osk_snprintf(buf + len, size - len, "    Power down requested: %s\n",
+ subsystem->mali_core_array[i]->pend_power_down ? "TRUE" : "FALSE");
+#endif
+ }
+
+ len += _mali_osk_snprintf(buf + len, size - len, "  Cores on idle list:\n");
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp_core, &subsystem->renderunit_idle_head, mali_core_renderunit, list)
+ {
+ len += _mali_osk_snprintf(buf + len, size - len, "    Core #%u\n", core->core_number);
+ }
+
+ len += _mali_osk_snprintf(buf + len, size - len, "  Cores on off list:\n");
+ _MALI_OSK_LIST_FOREACHENTRY(core, tmp_core, &subsystem->renderunit_off_head, mali_core_renderunit, list)
+ {
+ len += _mali_osk_snprintf(buf + len, size - len, "    Core #%u\n", core->core_number);
+ }
+
+ len += _mali_osk_snprintf(buf + len, size - len, "  Connected sessions:\n");
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp_session, &subsystem->all_sessions_head, mali_core_session, all_sessions_list)
+ {
+ len += _mali_osk_snprintf(buf + len, size - len,
+ "    Session 0x%X:\n", (u32)session);
+ len += _mali_osk_snprintf(buf + len, size - len,
+ "      Queue depth: %u\n", mali_job_queue_size(session));
+ len += _mali_osk_snprintf(buf + len, size - len,
+ "      First waiting job: 0x%p\n", session->queue[session->queue_head]);
+ len += _mali_osk_snprintf(buf + len, size - len, "      Notification queue: %s\n",
+ _mali_osk_notification_queue_is_empty(session->notification_queue) ? "EMPTY" : "NON-EMPTY");
+ len += _mali_osk_snprintf(buf + len, size - len,
+ "      Jobs received:%4d\n", _mali_osk_atomic_read(&session->jobs_received));
+ len += _mali_osk_snprintf(buf + len, size - len,
+ "      Jobs started :%4d\n", _mali_osk_atomic_read(&session->jobs_started));
+ len += _mali_osk_snprintf(buf + len, size - len,
+ "      Jobs ended   :%4d\n", _mali_osk_atomic_read(&session->jobs_ended));
+ len += _mali_osk_snprintf(buf + len, size - len,
+ "      Jobs returned:%4d\n", _mali_osk_atomic_read(&session->jobs_returned));
+ len += _mali_osk_snprintf(buf + len, size - len, "      PID: %d\n", session->pid);
+ }
+
+ len += _mali_osk_snprintf(buf + len, size - len, "  Waiting sessions sum all priorities: %u\n",
+ subsystem->awaiting_sessions_sum_all_priorities);
+ for (i = 0; i < PRIORITY_LEVELS; i++)
+ {
+ len += _mali_osk_snprintf(buf + len, size - len, "    Waiting sessions with priority %u:\n", i);
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp_session, &subsystem->awaiting_sessions_head[i],
+ mali_core_session, awaiting_sessions_list)
+ {
+ len += _mali_osk_snprintf(buf + len, size - len, "      Session 0x%X:\n", (u32)session);
+ len += _mali_osk_snprintf(buf + len, size - len, "        Waiting job: 0x%X\n",
+ (u32)session->queue[session->queue_head]);
+ len += _mali_osk_snprintf(buf + len, size - len, "        Notification queue: %s\n",
+ _mali_osk_notification_queue_is_empty(session->notification_queue) ? "EMPTY" : "NON-EMPTY");
+ }
+ }
+
+ MALI_CORE_SUBSYSTEM_MUTEX_RELEASE( subsystem );
+ return len;
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_rendercore.h b/drivers/media/video/samsung/mali/common/mali_kernel_rendercore.h
new file mode 100644
index 0000000..5fbe686
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_rendercore.h
@@ -0,0 +1,565 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_RENDERCORE_H__
+#define __MALI_RENDERCORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#define PRIORITY_LEVELS 3
+#define PRIORITY_MAX 0
+#define PRIORITY_MIN (PRIORITY_MAX+PRIORITY_LEVELS-1)
+
+/* This file contains what we need in kernel for all core types. */
+
+typedef enum
+{
+ CORE_IDLE, /**< Core is ready for a new job */
+ CORE_WORKING, /**< Core is working on a job */
+ CORE_WATCHDOG_TIMEOUT, /**< Core is working but it has timed out */
+ CORE_POLL, /**< Poll timer triggered, pending handling */
+ CORE_HANG_CHECK_TIMEOUT,/**< Timeout for hang detection */
+ CORE_OFF /**< Core is powered off */
+} mali_core_status;
+
+typedef enum
+{
+ SUBSYSTEM_RESCHEDULE,
+ SUBSYSTEM_WAIT
+} mali_subsystem_reschedule_option;
+
+typedef enum
+{
+ MALI_CORE_RESET_STYLE_RUNABLE,
+ MALI_CORE_RESET_STYLE_DISABLE,
+ MALI_CORE_RESET_STYLE_HARD
+} mali_core_reset_style;
+
+typedef enum
+{
+ JOB_STATUS_CONTINUE_RUN = 0x01,
+ JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ JOB_STATUS_END_OOM = 1<<(16+1),
+ JOB_STATUS_END_ABORT = 1<<(16+2),
+ JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ JOB_STATUS_END_HANG = 1<<(16+4),
+ JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} mali_subsystem_job_end_code;
+
+
+struct mali_core_job;
+struct mali_core_subsystem;
+struct mali_core_renderunit;
+struct mali_core_session;
+
+/* We have one of these subsystems for each core type */
+typedef struct mali_core_subsystem
+{
+ struct mali_core_renderunit ** mali_core_array; /* An array of all cores of this type */
+ u32 number_of_cores; /* Number of cores in this list */
+
+ _mali_core_type core_type;
+
+ u32 magic_nr;
+
+ _mali_osk_list_t renderunit_idle_head; /* Idle cores of this type */
+ _mali_osk_list_t renderunit_off_head; /* Powered off cores of this type */
+
+	/* Linked list for each priority of sessions with a job ready for scheduling */
+ _mali_osk_list_t awaiting_sessions_head[PRIORITY_LEVELS];
+ u32 awaiting_sessions_sum_all_priorities;
+
+ /* Linked list of all sessions connected to this coretype */
+ _mali_osk_list_t all_sessions_head;
+
+	/* Notification queue for this coretype */
+ struct _mali_osk_notification_queue_t * notification_queue;
+
+ const char * name;
+ mali_kernel_subsystem_identifier id;
+
+ /**** Functions registered for this core type. Set during mali_core_init ******/
+ /* Start this job on this core. Return MALI_TRUE if the job was started. */
+ _mali_osk_errcode_t (*start_job)(struct mali_core_job * job, struct mali_core_renderunit * core);
+
+ /* Check if given core has an interrupt pending. Return MALI_TRUE and set mask to 0 if pending */
+ u32 (*irq_handler_upper_half)(struct mali_core_renderunit * core);
+
+ /* This function should check if the interrupt indicates that job was finished.
+	   If so it should update the job-struct, reset the core registers, and return MALI_TRUE.
+ If the job is still working after this function it should return MALI_FALSE.
+ The function must also enable the bits in the interrupt mask for the core.
+ Called by the bottom half interrupt function. */
+ int (*irq_handler_bottom_half)(struct mali_core_renderunit* core);
+
+ /* This function is called from the ioctl function and should return a mali_core_job pointer
+ to a created mali_core_job object with the data given from userspace */
+ _mali_osk_errcode_t (*get_new_job_from_user)(struct mali_core_session * session, void * argument);
+
+ _mali_osk_errcode_t (*suspend_response)(struct mali_core_session * session, void * argument);
+
+ /* This function is called from the ioctl function and should write the necessary data
+ to userspace telling which job was finished and the status and debuginfo for this job.
+ The function must also free and cleanup the input job object. */
+ void (*return_job_to_user)(struct mali_core_job * job, mali_subsystem_job_end_code end_status);
+
+ /* Is called when a subsystem shuts down. This function needs to
+ release internal pointers in the core struct, and free the
+ core struct before returning.
+ It is not allowed to write to any registers, since this
+ unmapping is already done. */
+ void (*renderunit_delete)(struct mali_core_renderunit * core);
+
+ /* Is called when we want to abort a job that is running on the core.
+ This is done if program exits while core is running */
+ void (*reset_core)(struct mali_core_renderunit * core, mali_core_reset_style style);
+
+ /* Is called when the rendercore wants the core to give an interrupt */
+ void (*probe_core_irq_trigger)(struct mali_core_renderunit* core);
+
+ /* Is called when the irq probe wants the core to acknowledge an interrupt from the hw */
+ _mali_osk_errcode_t (*probe_core_irq_acknowledge)(struct mali_core_renderunit* core);
+
+ /* Called when the rendercore want to issue a bus stop request to a core */
+ void (*stop_bus)(struct mali_core_renderunit* core);
+} mali_core_subsystem;
+
+
+/* Per core data. This must be embedded into each core type internal core info. */
+typedef struct mali_core_renderunit
+{
+ struct mali_core_subsystem * subsystem; /* The core belongs to this subsystem */
+ _mali_osk_list_t list; /* Is always in subsystem->idle_list OR session->renderunits_working */
+ mali_core_status state;
+ mali_bool error_recovery; /* Indicates if the core is waiting for external help to recover (typically the MMU) */
+ mali_bool in_detach_function;
+ struct mali_core_job * current_job; /* Current job being processed on this core ||NULL */
+ u32 magic_nr;
+ _mali_osk_timer_t * timer;
+ _mali_osk_timer_t * timer_hang_detection;
+
+ mali_io_address registers_mapped; /* IO-mapped pointer to registers */
+	u32 registers_base_addr; /* Base address of the registers */
+ u32 size; /* The size of registers_mapped */
+ const char * description; /* Description of this core. */
+ u32 irq_nr; /* The IRQ nr for this core */
+ u32 core_version;
+#if USING_MMU
+ u32 mmu_id;
+ void * mmu; /* The MMU this rendercore is behind.*/
+#endif
+#if USING_MALI_PMM
+ mali_pmm_core_id pmm_id; /* The PMM core id */
+ mali_bool pend_power_down; /* Power down is requested */
+#endif
+
+ u32 core_number; /* 0 for first detected core of this type, 1 for second and so on */
+
+ _mali_osk_irq_t *irq;
+} mali_core_renderunit;
+
+
+#define MALI_JOB_QUEUE_SIZE 8
+/* Per open FILE data. */
+/* You must hold subsystem->mutex before any transactions to this datatype. */
+typedef struct mali_core_session
+{
+ struct mali_core_subsystem * subsystem; /* The session belongs to this subsystem */
+ _mali_osk_list_t renderunits_working_head; /* List of renderunits working for this session */
+ struct mali_core_job *queue[MALI_JOB_QUEUE_SIZE]; /* The next job from this session to run */
+ int queue_head;
+ int queue_tail;
+ int queue_size;
+
+ _mali_osk_list_t awaiting_sessions_list; /* Linked list of sessions with jobs, for each priority */
+ _mali_osk_list_t all_sessions_list; /* Linked list of all sessions on the system. */
+
+ _mali_osk_notification_queue_t * notification_queue; /* Messages back to Base in userspace*/
+#if USING_MMU
+ struct mali_session_data * mmu_session; /* The session associated with the MMU page tables for this core */
+#endif
+ u32 magic_nr;
+#if MALI_STATE_TRACKING
+ _mali_osk_atomic_t jobs_received;
+ _mali_osk_atomic_t jobs_started;
+ _mali_osk_atomic_t jobs_ended;
+ _mali_osk_atomic_t jobs_returned;
+ u32 pid;
+#endif
+} mali_core_session;
+
+/* This must be embedded into a specific mali_core_job struct */
+/* use this macro to get the specific mali_core_job: container_of(ptr, type, member)*/
+typedef struct mali_core_job
+{
+ _mali_osk_list_t list; /* Linked list of jobs. Used by struct mali_core_session */
+ struct mali_core_session *session;
+ u32 magic_nr;
+ u32 priority;
+ u32 watchdog_msecs;
+ u32 render_time_usecs ;
+ u64 start_time;
+ unsigned long watchdog_jiffies;
+ u32 abort_id;
+ u32 job_nr;
+ _mali_uk_start_job_flags flags;
+} mali_core_job;
+
+MALI_STATIC_INLINE mali_bool mali_job_queue_empty(mali_core_session *session)
+{
+ if (0 == session->queue_size)
+ {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_job_queue_full(mali_core_session *session)
+{
+ if (MALI_JOB_QUEUE_SIZE == session->queue_size)
+ {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_job_queue_add_job(mali_core_session *session, struct mali_core_job *job)
+{
+ if (mali_job_queue_full(session))
+ {
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ session->queue[session->queue_tail] = job;
+ session->queue_tail = (session->queue_tail + 1) % MALI_JOB_QUEUE_SIZE;
+ session->queue_size++;
+
+ MALI_SUCCESS;
+}
+
+MALI_STATIC_INLINE struct mali_core_job *mali_job_queue_get_job(mali_core_session *session)
+{
+ struct mali_core_job *job;
+ MALI_DEBUG_ASSERT(!mali_job_queue_empty(session));
+
+ job = session->queue[session->queue_head];
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session->queue[session->queue_head] = NULL;
+ session->queue_head = (session->queue_head + 1) % MALI_JOB_QUEUE_SIZE;
+ session->queue_size--;
+
+ return job;
+}
+
+MALI_STATIC_INLINE u32 mali_job_queue_size(mali_core_session *session)
+{
+ return (u32)(session->queue_size);
+}
+
+MALI_STATIC_INLINE struct mali_core_job *mali_job_queue_abort_job(mali_core_session *session, u32 abort_id)
+{
+ int i;
+ int n;
+ struct mali_core_job *job = NULL;
+
+ for (i = session->queue_head, n = session->queue_size; n > 0; n--, i = (i+1)%MALI_JOB_QUEUE_SIZE)
+ {
+ if (session->queue[i]->abort_id == abort_id)
+ {
+ /* Remove job from queue */
+ job = session->queue[i];
+ session->queue[i] = NULL;
+
+ session->queue_size -= 1;
+ n--;
+ break;
+ }
+ }
+ if (NULL == job)
+ {
+ return NULL;
+ }
+
+ /* Rearrange queue */
+ while (n > 0)
+ {
+ int next = (i + 1) % MALI_JOB_QUEUE_SIZE;
+ session->queue[i] = session->queue[next];
+ i = next;
+ n--;
+ }
+ session->queue_tail = i;
+
+ return job;
+}
+
+
+/*
+ * The rendercore subsystem is included in the subsystems[] array.
+ */
+extern struct mali_kernel_subsystem mali_subsystem_rendercore;
+
+void subsystem_flush_mapped_mem_cache(void);
+
+
+#define SUBSYSTEM_MAGIC_NR 0xdeadbeef
+#define CORE_MAGIC_NR 0xcafebabe
+#define SESSION_MAGIC_NR 0xbabe1234
+#define JOB_MAGIC_NR 0x0123abcd
+
+
+#define MALI_CHECK_SUBSYSTEM(subsystem)\
+ do { \
+ if ( SUBSYSTEM_MAGIC_NR != subsystem->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+ } while (0)
+
+#define MALI_CHECK_CORE(CORE)\
+ do { \
+ if ( CORE_MAGIC_NR != CORE->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+#define MALI_CHECK_SESSION(SESSION)\
+ do { \
+ if ( SESSION_MAGIC_NR != SESSION->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+#define MALI_CHECK_JOB(JOB)\
+ do { \
+ if ( JOB_MAGIC_NR != JOB->magic_nr) MALI_PRINT_ERROR(("Wrong magic number"));\
+} while (0)
+
+
+/* Check if job_a has higher priority than job_b */
+MALI_STATIC_INLINE int job_has_higher_priority(mali_core_job * job_a, mali_core_job * job_b)
+{
+ /* The lowest number has the highest priority */
+ return (int) (job_a->priority < job_b->priority);
+}
+
+MALI_STATIC_INLINE void job_priority_set(mali_core_job * job, u32 priority)
+{
+ if (priority > PRIORITY_MIN) job->priority = PRIORITY_MIN;
+ else job->priority = priority;
+}
+
+void job_watchdog_set(mali_core_job * job, u32 watchdog_msecs);
+
+/* For use by const default register settings (e.g. set these after reset) */
+typedef struct register_address_and_value
+{
+ u32 address;
+ u32 value;
+} register_address_and_value ;
+
+
+/* For use by dynamic default register settings (e.g. set these after reset) */
+typedef struct register_address_and_value_list
+{
+ _mali_osk_list_t list;
+ register_address_and_value item;
+} register_address_and_value_list ;
+
+/* Used if the user wants to set a contiguous block of registers */
+typedef struct register_array_user
+{
+ u32 entries_in_array;
+ u32 start_address;
+ void __user * reg_array;
+}register_array_user;
+
+
+#define MALI_CORE_SUBSYSTEM_MUTEX_GRAB(subsys) \
+ do { \
+ MALI_DEBUG_PRINT(5, ("MUTEX: GRAB %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ _mali_osk_lock_wait( rendercores_global_mutex, _MALI_OSK_LOCKMODE_RW); \
+ MALI_DEBUG_PRINT(5, ("MUTEX: GRABBED %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ rendercores_global_mutex_is_held = 1; \
+ rendercores_global_mutex_owner = _mali_osk_get_tid(); \
+ } while (0) ;
+
+#define MALI_CORE_SUBSYSTEM_MUTEX_RELEASE(subsys) \
+ do { \
+ MALI_DEBUG_PRINT(5, ("MUTEX: RELEASE %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ rendercores_global_mutex_is_held = 0; \
+ rendercores_global_mutex_owner = 0; \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ _mali_osk_lock_signal( rendercores_global_mutex, _MALI_OSK_LOCKMODE_RW); \
+ MALI_DEBUG_PRINT(5, ("MUTEX: RELEASED %s() %d on %s\n",__FUNCTION__, __LINE__, subsys->name)); \
+ if ( SUBSYSTEM_MAGIC_NR != subsys->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ } while (0) ;
+
+
+#define MALI_ASSERT_MUTEX_IS_GRABBED(input_pointer)\
+ do { \
+ if ( 0 == rendercores_global_mutex_is_held ) MALI_PRINT_ERROR(("ASSERT MUTEX SHOULD BE GRABBED"));\
+ if ( SUBSYSTEM_MAGIC_NR != input_pointer->magic_nr ) MALI_PRINT_ERROR(("Wrong magic number"));\
+ if ( rendercores_global_mutex_owner != _mali_osk_get_tid() ) MALI_PRINT_ERROR(("Owner mismatch"));\
+ } while (0)
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_core_renderunit_register_rw_check(mali_core_renderunit *core,
+ u32 relative_address)
+{
+#if USING_MALI_PMM
+ if( core->state == CORE_OFF )
+ {
+ MALI_PRINT_ERROR(("Core is OFF during access: Core: %s Addr: 0x%04X\n",
+ core->description,relative_address));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+#endif
+
+ MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+
+ if (mali_benchmark) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ MALI_DEBUG_CODE(if (relative_address >= core->size)
+ {
+ MALI_PRINT_ERROR(("Trying to access illegal register: 0x%04x in core: %s",
+ relative_address, core->description));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ })
+
+ MALI_SUCCESS;
+}
+
+
+MALI_STATIC_INLINE u32 mali_core_renderunit_register_read(struct mali_core_renderunit *core, u32 relative_address)
+{
+ u32 read_val;
+
+ if(_MALI_OSK_ERR_FAULT == mali_core_renderunit_register_rw_check(core, relative_address))
+ return 0xDEADBEEF;
+
+ read_val = _mali_osk_mem_ioread32(core->registers_mapped, relative_address);
+
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_read: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, read_val));
+
+ return read_val;
+}
+
+MALI_STATIC_INLINE void mali_core_renderunit_register_read_array(struct mali_core_renderunit *core,
+ u32 relative_address,
+ u32 * result_array,
+ u32 nr_of_regs)
+{
+ /* NOTE Do not use burst reads against the registers */
+ u32 i;
+
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_read_array: Core:%s Addr:0x%04X Nr_regs: %u\n",
+ core->description,relative_address, nr_of_regs));
+
+ for(i=0; i<nr_of_regs; ++i)
+ {
+ result_array[i] = mali_core_renderunit_register_read(core, relative_address + i*4);
+ }
+}
+
+/*
+ * Write to a core register, and bypass implied memory barriers.
+ *
+ * On some systems, _mali_osk_mem_iowrite32() implies a memory barrier. This
+ * can be a performance problem when doing many writes in sequence.
+ *
+ * When using this function, ensure proper barriers are put in place. Most
+ * likely a _mali_osk_mem_barrier() is needed after all related writes are
+ * completed.
+ *
+ */
+MALI_STATIC_INLINE void mali_core_renderunit_register_write_relaxed(mali_core_renderunit *core,
+ u32 relative_address,
+ u32 new_val)
+{
+ if(_MALI_OSK_ERR_FAULT == mali_core_renderunit_register_rw_check(core, relative_address))
+ return;
+
+ MALI_DEBUG_PRINT(6, ("mali_core_renderunit_register_write_relaxed: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, new_val));
+
+ _mali_osk_mem_iowrite32_relaxed(core->registers_mapped, relative_address, new_val);
+}
+
+MALI_STATIC_INLINE void mali_core_renderunit_register_write(struct mali_core_renderunit *core,
+ u32 relative_address,
+ u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("mali_core_renderunit_register_write: Core:%s Addr:0x%04X Val:0x%08x\n",
+ core->description,relative_address, new_val));
+
+ if(_MALI_OSK_ERR_FAULT == mali_core_renderunit_register_rw_check(core, relative_address))
+ return;
+
+ _mali_osk_mem_iowrite32(core->registers_mapped, relative_address, new_val);
+}
+
+MALI_STATIC_INLINE void mali_core_renderunit_register_write_array(struct mali_core_renderunit *core,
+ u32 relative_address,
+ u32 * write_array,
+ u32 nr_of_regs)
+{
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("Core: renderunit_register_write_array: Core:%s Addr:0x%04X Nr_regs: %u\n",
+ core->description,relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for( i = 0; i< nr_of_regs; i++)
+ {
+ mali_core_renderunit_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+ }
+}
+
+_mali_osk_errcode_t mali_core_renderunit_init(struct mali_core_renderunit * core);
+void mali_core_renderunit_term(struct mali_core_renderunit * core);
+int mali_core_renderunit_map_registers(struct mali_core_renderunit *core);
+void mali_core_renderunit_unmap_registers(struct mali_core_renderunit *core);
+int mali_core_renderunit_irq_handler_add(struct mali_core_renderunit *core);
+mali_core_renderunit * mali_core_renderunit_get_mali_core_nr(mali_core_subsystem *subsys, u32 mali_core_nr);
+
+int mali_core_subsystem_init(struct mali_core_subsystem * new_subsys);
+#if USING_MMU
+void mali_core_subsystem_attach_mmu(mali_core_subsystem* subsys);
+#endif
+int mali_core_subsystem_register_renderunit(struct mali_core_subsystem * subsys, struct mali_core_renderunit * core);
+int mali_core_subsystem_system_info_fill(mali_core_subsystem* subsys, _mali_system_info* info);
+void mali_core_subsystem_cleanup(struct mali_core_subsystem * subsys);
+#if USING_MMU
+void mali_core_subsystem_broadcast_notification(struct mali_core_subsystem * subsys, mali_core_notification_message message, u32 data);
+#endif
+void mali_core_session_begin(mali_core_session *session);
+void mali_core_session_close(mali_core_session * session);
+int mali_core_session_add_job(mali_core_session * session, mali_core_job *job, mali_core_job **job_return);
+u32 mali_core_hang_check_timeout_get(void);
+
+_mali_osk_errcode_t mali_core_subsystem_ioctl_start_job(mali_core_session * session, void *job_data);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_number_of_cores_get(mali_core_session * session, u32 *number_of_cores);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_core_version_get(mali_core_session * session, _mali_core_version *version);
+_mali_osk_errcode_t mali_core_subsystem_ioctl_suspend_response(mali_core_session * session, void* argument);
+void mali_core_subsystem_ioctl_abort_job(mali_core_session * session, u32 id);
+
+#if USING_MALI_PMM
+_mali_osk_errcode_t mali_core_subsystem_signal_power_down(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool immediate_only);
+_mali_osk_errcode_t mali_core_subsystem_signal_power_up(mali_core_subsystem *subsys, u32 mali_core_nr, mali_bool queue_only);
+#endif
+
+#if MALI_STATE_TRACKING
+u32 mali_core_renderunit_dump_state(mali_core_subsystem* subsystem, char *buf, u32 size);
+#endif
+
+#endif /* __MALI_RENDERCORE_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_session_manager.h b/drivers/media/video/samsung/mali/common/mali_kernel_session_manager.h
new file mode 100644
index 0000000..8cc41d7
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_session_manager.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SESSION_MANAGER_H__
+#define __MALI_KERNEL_SESSION_MANAGER_H__
+
+/* Incomplete struct to pass around pointers to it */
+struct mali_session_data;
+
+void * mali_kernel_session_manager_slot_get(struct mali_session_data * session, int id);
+
+#endif /* __MALI_KERNEL_SESSION_MANAGER_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_subsystem.h b/drivers/media/video/samsung/mali/common/mali_kernel_subsystem.h
new file mode 100644
index 0000000..8f05216
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_subsystem.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_subsystem.h
+ */
+
+#ifndef __MALI_KERNEL_SUBSYSTEM_H__
+#define __MALI_KERNEL_SUBSYSTEM_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+
+/* typedefs of the datatypes used in the hook functions */
+typedef void * mali_kernel_subsystem_session_slot;
+typedef int mali_kernel_subsystem_identifier;
+typedef _mali_osk_errcode_t (*mali_kernel_resource_registrator)(_mali_osk_resource_t *);
+
+/**
+ * Broadcast notification messages
+ */
+typedef enum mali_core_notification_message
+{
+ MMU_KILL_STEP0_LOCK_SUBSYSTEM, /**< Request to lock subsystem */
+ MMU_KILL_STEP1_STOP_BUS_FOR_ALL_CORES, /**< Request to stop all buses */
+ MMU_KILL_STEP2_RESET_ALL_CORES_AND_ABORT_THEIR_JOBS, /**< Request kill all jobs, and not start more jobs */
+ MMU_KILL_STEP3_CONTINUE_JOB_HANDLING, /**< Request to continue with new jobs on all cores */
+ MMU_KILL_STEP4_UNLOCK_SUBSYSTEM /**< Request to unlock subsystem */
+} mali_core_notification_message;
+
+/**
+ * A function pointer can be NULL if the subsystem isn't interested in the event.
+ */
+typedef struct mali_kernel_subsystem
+{
+ /* subsystem control */
+ _mali_osk_errcode_t (*startup)(mali_kernel_subsystem_identifier id); /**< Called during module load or system startup*/
+ void (*shutdown)(mali_kernel_subsystem_identifier id); /**< Called during module unload or system shutdown */
+
+ /**
+ * Called during module load or system startup.
+	 * Called when all subsystems have reported startup OK and all resources were successfully initialized
+ */
+ _mali_osk_errcode_t (*load_complete)(mali_kernel_subsystem_identifier id);
+
+ /* per subsystem handlers */
+ _mali_osk_errcode_t (*system_info_fill)(_mali_system_info* info); /**< Fill info into info struct. MUST allocate memory with kmalloc, since it's kfree'd */
+
+ /* per session handlers */
+ /**
+ * Informs about a new session.
+ * slot can be used to track per-session per-subsystem data.
+ * queue can be used to send events to user space.
+ * _mali_osk_errcode_t error return value.
+ */
+ _mali_osk_errcode_t (*session_begin)(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot, _mali_osk_notification_queue_t * queue);
+ /**
+ * Informs that a session is ending
+ * slot was the same as given during session_begin
+ */
+ void (*session_end)(struct mali_session_data * mali_session_data, mali_kernel_subsystem_session_slot * slot);
+
+ /* Used by subsystems to send messages to each other. This is the receiving end */
+ void (*broadcast_notification)(mali_core_notification_message message, u32 data);
+
+#if MALI_STATE_TRACKING
+ /** Dump the current state of the subsystem */
+ u32 (*dump_state)(char *buf, u32 size);
+#endif
+} mali_kernel_subsystem;
+
+/* functions used by the subsystems to interact with the core */
+/**
+ * Register a resource handler
+ * @param type The resource type to register a handler for
+ * @param handler Pointer to the function handling this resource
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t _mali_kernel_core_register_resource_handler(_mali_osk_resource_type_t type, mali_kernel_resource_registrator handler);
+
+/* function used to interact with other subsystems */
+/**
+ * Broadcast a message
+ * Sends a message to all subsystems which have registered a broadcast notification handler
+ * @param message The message to send
+ * @param data Message specific extra data
+ */
+void _mali_kernel_core_broadcast_subsystem_message(mali_core_notification_message message, u32 data);
+
+#if MALI_STATE_TRACKING
+/**
+ * Tell all subsystems to dump their current state
+ */
+u32 _mali_kernel_core_dump_state(char *buf, u32 size);
+#endif
+
+
+#endif /* __MALI_KERNEL_SUBSYSTEM_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_utilization.c b/drivers/media/video/samsung/mali/common/mali_kernel_utilization.c
new file mode 100644
index 0000000..b43b872
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_utilization.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+/* Define how often to calculate and report GPU utilization, in milliseconds */
+#define MALI_GPU_UTILIZATION_TIMEOUT 1000
+
+static _mali_osk_lock_t *time_data_lock;
+
+static _mali_osk_atomic_t num_running_cores;
+
+static u64 period_start_time = 0;
+static u64 work_start_time = 0;
+static u64 accumulated_work_time = 0;
+
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+
+static void calculate_gpu_utilization(void *arg)
+{
+ u64 time_now;
+ u64 time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized;
+ u32 period_normalized;
+ u32 utilization;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (accumulated_work_time == 0 && work_start_time == 0) {
+ /* Don't reschedule timer, this will be started if new work arrives */
+ timer_running = MALI_FALSE;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* No work done for this period, report zero usage */
+ mali_gpu_utilization_handler(0);
+
+ return;
+ }
+
+ time_now = _mali_osk_time_get_ns();
+ time_period = time_now - period_start_time;
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time != 0) {
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = time_now;
+ }
+
+ /*
+ * We have two 64-bit values, a dividend and a divisor.
+ * To avoid dependencies to a 64-bit divider, we shift down the two values
+ * equally first.
+ * We shift the dividend up and possibly the divisor down, making the result X in 256.
+ */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized = (u32)(accumulated_work_time >> shift_val);
+ period_normalized = (u32)(time_period >> shift_val);
+
+ /*
+ * Now, we should report the usage in parts of 256
+ * this means we must shift up the dividend or down the divisor by 8
+ * (we could do a combination, but we just use one for simplicity,
+ * but the end result should be good enough anyway)
+ */
+ if (period_normalized > 0x00FFFFFF) {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ } else {
+ /*
+		 * The divisor is so small that we can shift up the dividend, without losing any data.
+ * (dividend is always smaller than the divisor)
+ */
+ work_normalized <<= 8;
+ }
+
+ utilization = work_normalized / period_normalized;
+
+ accumulated_work_time = 0;
+ period_start_time = time_now; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+
+ mali_gpu_utilization_handler(utilization);
+}
+
+
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+ time_data_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ|_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0 );
+ if (NULL == time_data_lock)
+ return _MALI_OSK_ERR_FAULT;
+
+
+ _mali_osk_atomic_init(&num_running_cores, 0);
+
+ utilization_timer = _mali_osk_timer_init();
+ if (NULL == utilization_timer) {
+ _mali_osk_lock_term(time_data_lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_suspend(void)
+{
+ if (NULL != utilization_timer) {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ }
+}
+
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_timer) {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(utilization_timer);
+ utilization_timer = NULL;
+ }
+
+ _mali_osk_atomic_term(&num_running_cores);
+
+ _mali_osk_lock_term(time_data_lock);
+}
+
+
+
+void mali_utilization_core_start(u64 time_now)
+{
+ if (_mali_osk_atomic_inc_return(&num_running_cores) == 1) {
+ /*
+ * We went from zero cores working, to one core working,
+ * we now consider the entire GPU for being busy
+ */
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (time_now < period_start_time)
+ {
+ /*
+ * This might happen if the calculate_gpu_utilization() was able
+ * to run between the sampling of time_now and us grabbing the lock above
+ */
+ time_now = period_start_time;
+ }
+
+ work_start_time = time_now;
+
+ if (timer_running != MALI_TRUE) {
+ timer_running = MALI_TRUE;
+ period_start_time = work_start_time; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_del(utilization_timer);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+ } else {
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ }
+}
+
+
+
+void mali_utilization_core_end(u64 time_now)
+{
+ if (_mali_osk_atomic_dec_return(&num_running_cores) == 0) {
+ /*
+		 * No more cores are working, so accumulate the time we were busy.
+ */
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (time_now < work_start_time)
+ {
+ /*
+ * This might happen if the calculate_gpu_utilization() was able
+ * to run between the sampling of time_now and us grabbing the lock above
+ */
+ time_now = work_start_time;
+ }
+
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = 0;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_utilization.h b/drivers/media/video/samsung/mali/common/mali_kernel_utilization.h
new file mode 100644
index 0000000..c779978
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_utilization.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include "mali_osk.h"
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Should be called when a core is about to start executing a job
+ */
+void mali_utilization_core_start(u64 time_now);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+/**
+ * Should be called when a core has completed executing a job
+ */
+void mali_utilization_core_end(u64 time_now);
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_vsync.c b/drivers/media/video/samsung/mali/common/mali_kernel_vsync.c
new file mode 100644
index 0000000..dc39e01
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_vsync.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+/*#include "mali_timestamp.h"*/
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+/* u64 ts = _mali_timestamp_get();
+ */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ if ( event==_MALI_UK_VSYNC_EVENT_BEGIN_WAIT)
+ {
+ _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ 0, 0, 0, 0, 0);
+ }
+
+ if ( event==_MALI_UK_VSYNC_EVENT_END_WAIT)
+ {
+
+ _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ 0, 0, 0, 0, 0);
+ }
+#endif
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/media/video/samsung/mali/common/mali_osk.h b/drivers/media/video/samsung/mali/common/mali_osk.h
new file mode 100644
index 0000000..72d851d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_osk.h
@@ -0,0 +1,1716 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+ typedef unsigned char u8;
+ typedef signed char s8;
+ typedef unsigned short u16;
+ typedef signed short s16;
+ typedef unsigned int u32;
+ typedef signed int s32;
+ typedef unsigned long long u64;
+ #define BITS_PER_LONG (sizeof(long)*8)
+#else
+ /* Ensure Linux types u32, etc. are defined */
+ #include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+ typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+ #define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+ #define MALI_FALSE ((mali_bool)0)
+#endif
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error code. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum
+{
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)( void * arg );
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_irq_schedulework(). Refer to \ref _mali_osk_irq_schedulework() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)( void * arg );
+
+/** @brief IRQ 'bottom-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the deferred handling
+ * of a resource's IRQ. Usually, this work cannot be carried out in IRQ context
+ * by the IRQ upper-half handler.
+ *
+ * The IRQ bottom-half handler maps on to the concept of an IST that may
+ * execute some time after the actual IRQ has fired.
+ *
+ * All OSK-registered IRQ bottom-half handlers will be serialized, across all
+ * CPU-cores in the system.
+ *
+ * Refer to \ref _mali_osk_irq_schedulework() for more information on the
+ * IRQ work-queue, and the calling of the IRQ bottom-half handler.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_irq_bhandler_t)( void * arg );
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Even though the structure has space for a u32, the counters will only
+ * represent signed 24-bit integers.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct
+{
+ union
+ {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * Flags are supplied at the point where the Lock is initialized. Each flag can
+ * be combined with others using bitwise OR, '|'.
+ *
+ * The flags must be sufficiently rich to cope with all our OSs. This means
+ * that on some OSs, certain flags can be completely ignored. We define a
+ * number of terms that are significant across all OSs:
+ *
+ * - Sleeping/non-sleeping mutexes. Sleeping mutexes can block on waiting, and so
+ * schedule out the current thread. This is significant on OSs where there are
+ * situations in which the current thread must not be put to sleep. On OSs
+ * without this restriction, sleeping and non-sleeping mutexes can be treated
+ * as the same (if that is required).
+ * - Interruptable/non-interruptable mutexes. For sleeping mutexes, it may be
+ * possible for the sleep to be interrupted for a reason other than the thread
+ * being able to obtain the lock. OSs behaving in this way may provide a
+ * mechanism to control whether sleeping mutexes can be interrupted. On OSs
+ * that do not support the concept of interruption, \b or they do not support
+ * control of mutex interruption, then interruptable mutexes may be treated
+ * as non-interruptable.
+ *
+ * Some constraints apply to the lock type flags:
+ *
+ * - Spinlocks are by nature, non-interruptable. Hence, they must always be
+ * combined with the NONINTERRUPTABLE flag, because it is meaningless to ask
+ * for a spinlock that is interruptable (and this highlights its
+ * non-interruptable-ness). For example, on certain OSs they should be used when
+ * you must not sleep.
+ * - Reader/writer is an optimization hint, and any type of lock can be
+ * reader/writer. Since this is an optimization hint, the implementation need
+ * not respect this for any/all types of lock. For example, on certain OSs,
+ * there's no interruptable reader/writer mutex. If such a thing were requested
+ * on that OS, the fact that interruptable was requested takes priority over the
+ * reader/writer-ness, because reader/writer-ness is not necessary for correct
+ * operation.
+ * - Any lock can use the order parameter.
+ * - A onelock is an optimization hint specific to certain OSs. It can be
+ * specified when it is known that only one lock will be held by the thread,
+ * and so can provide faster mutual exclusion. This can be safely ignored if
+ * such optimization is not required/present.
+ *
+ * The absence of any flags (the value 0) results in a sleeping-mutex, which is interruptable.
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKFLAG_SPINLOCK = 0x1, /**< Specifically, don't sleep on those architectures that require it */
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE = 0x2, /**< The mutex cannot be interrupted, e.g. delivery of signals on those architectures where this is required */
+ _MALI_OSK_LOCKFLAG_READERWRITER = 0x4, /**< Optimise for readers/writers */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x8, /**< Use the order parameter; otherwise use automatic ordering */
+ _MALI_OSK_LOCKFLAG_ONELOCK = 0x10, /**< Each thread can only hold one lock at a time */
+ _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ = 0x20, /**< IRQ version of spinlock */
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks specified
+ * as _MALI_OSK_LOCKFLAG_READERWRITER. In this case, the RO mode can be used
+ * to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * _MALI_OSK_LOCKFLAG_READERWRITER is not supplied.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private type for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_t_struct _mali_osk_lock_t;
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address * mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum
+{
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct
+{
+ u32 magic_code;
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void * result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be achieved in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_irq_schedulework() to make use of the IRQ bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void * arg );
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ *_mali_osk_list_t member
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s
+{
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * When ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+/** @} */ /* end group _mali_osk_list */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief The known resource types
+ *
+ * @note \b IMPORTANT: these must remain fixed, and only be extended. This is
+ * because not all systems use a header file for reading in their resources.
+ * The resources may instead come from a data file where these resources are
+ * 'hard-coded' in, because there's no easy way of transferring the enum values
+ * into such data files. E.g. the C-Pre-processor does \em not process enums.
+ */
+typedef enum _mali_osk_resource_type
+{
+ RESOURCE_TYPE_FIRST =0, /**< Duplicate resource marker for the first resource*/
+ MEMORY =0, /**< Physically contiguous memory block, not managed by the OS */
+ OS_MEMORY =1, /**< Memory managed by and shared with the OS */
+ MALI200 =3, /**< Mali200 Programmable Fragment Shader */
+ MALIGP2 =4, /**< MaliGP2 Programmable Vertex Shader */
+ MMU =5, /**< Mali MMU (Memory Management Unit) */
+ FPGA_FRAMEWORK =6, /**< Mali registers specific to FPGA implementations */
+ MALI400L2 =7, /**< Mali400 L2 Cache */
+ MALI300L2 =7, /**< Mali300 L2 Cache */
+ MALI400GP =8, /**< Mali400 Programmable Vertex Shader Core */
+ MALI300GP =8, /**< Mali300 Programmable Vertex Shader Core */
+ MALI400PP =9, /**< Mali400 Programmable Fragment Shader Core */
+ MALI300PP =9, /**< Mali300 Programmable Fragment Shader Core */
+ MEM_VALIDATION =10, /**< External Memory Validator */
+ PMU =11, /**< Power Management Unit */
+ RESOURCE_TYPE_COUNT /**< The total number of known resources */
+} _mali_osk_resource_type_t;
+
+/** @brief resource description struct
+ *
+ * _mali_osk_resources_init() will enumerate objects of this type. Not all
+ * members have a valid meaning across all types.
+ *
+ * The mmu_id is used to group resources to a certain MMU, since there may be
+ * more than one MMU in the system, and each resource may be using a different
+ * MMU:
+ * - For MMU resources, the setting of mmu_id is a uniquely identifying number.
+ * - For Other resources, the setting of mmu_id determines which MMU the
+ * resource uses.
+ */
+typedef struct _mali_osk_resource
+{
+ _mali_osk_resource_type_t type; /**< type of the resource */
+ const char * description; /**< short description of the resource */
+ u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ s32 cpu_usage_adjust; /**< Offset added to the base address of the resource to arrive at the CPU physical address of the resource (if different from the Mali physical address) */
+ u32 size; /**< Size in bytes of the resource - either the size of its register range, or the size of the memory block. */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+ u32 flags; /**< Resources-specific flags. */
+ u32 mmu_id; /**< Identifier for Mali MMU resources. */
+ u32 alloc_order; /**< Order in which MEMORY/OS_MEMORY resources are used */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+#include "mali_kernel_memory_engine.h" /* include for mali_memory_allocation and mali_physical_memory_allocation type */
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Fake IRQ number for testing purposes
+ */
+#define _MALI_OSK_IRQ_NUMBER_FAKE ((u32)0xFFFFFFF1)
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief PMM Virtual IRQ number
+ */
+#define _MALI_OSK_IRQ_NUMBER_PMM ((u32)0xFFFFFFF2)
+
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * The _mali_osk_irq_t returned must be written into the resource-specific data
+ * pointed to by data. This is so that the upper and lower handlers can call
+ * _mali_osk_irq_schedulework().
+ *
+ * @note The caller must ensure that the resource does not generate an
+ * interrupt after _mali_osk_irq_init() finishes, and before the
+ * _mali_osk_irq_t is written into the resource-specific data. Otherwise,
+ * the upper-half handler will fail to call _mali_osk_irq_schedulework().
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and trigger_func and ack_func must be
+ * non-NULL.
+ * @param uhandler The upper-half handler, corresponding to a ISR handler for
+ * the resource
+ * @param bhandler The lower-half handler, corresponding to an IST handler for
+ * the resource
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param data resource-specific data, which will be passed to uhandler,
+ * bhandler and (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description );
+
+/** @brief Cause a queued, deferred call of the IRQ bottom-half.
+ *
+ * _mali_osk_irq_schedulework provides a mechanism for enqueuing deferred calls
+ * to the IRQ bottom-half handler. The queue is known as the IRQ work-queue.
+ * After calling _mali_osk_irq_schedulework(), the IRQ bottom-half handler will
+ * be scheduled to run at some point in the future.
+ *
+ * This is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_irq_schedulework() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_irq_schedulework() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the IRQ bottom-half handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_irq_schedulework()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The IRQ bottom-half callback (of type _mali_osk_irq_bhandler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_irq_schedulework() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_irq_schedulework() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * IRQs will be lost.
+ *
+ * The queue that _mali_osk_irq_schedulework implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_irq_schedulework
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered IRQs.
+ *
+ * The consequence of the queue being a 1-reader type is that calling
+ * _mali_osk_irq_schedulework() on different _mali_osk_irq_t objects causes
+ * their IRQ bottom-halves to be serialized, across all CPU-cores in the
+ * system.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ bottom-half must begin processing.
+ */
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq );
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for the
+ * IRQ work-queue to finish the work that is currently in the queue. That is,
+ * for every deferred call currently in the IRQ work-queue, it waits for each
+ * of those to be processed by their respective IRQ bottom-half handler.
+ *
+ * This function is used to ensure that the bottom-half handler of the supplied
+ * IRQ object will not be running at the completion of this function call.
+ * However, the caller must ensure that no other sources could call the
+ * _mali_osk_irq_schedulework() on the same IRQ object. For example, the
+ * relevant timers must be stopped.
+ *
+ * @note While this function is being called, other OSK-registered IRQs in the
+ * system may enqueue work for their respective bottom-half handlers. This
+ * function will not wait for those entries in the work-queue to be flushed.
+ *
+ * Since this blocks on the completion of work in the IRQ work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any OSK-registered IRQ bottom-half handler. To do so may cause a deadlock.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq );
+
+/** @brief flushing workqueue.
+ *
+ * This will flush the workqueue.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ work-queue is to be flushed.
+ */
+void _mali_osk_flush_workqueue( _mali_osk_irq_t *irq );
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after increment */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+
+/** @brief Initialize an atomic counter
+ *
+ * The counters have storage for signed 24-bit integers. Initializing to signed
+ * values requiring more than 24-bits storage will fail.
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+
+/** @brief Read a value from an atomic counter
+ *
+ * Although the value returned is a u32, only numbers with signed 24-bit
+ * precision (sign extended to u32) are returned.
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to a OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to a OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free( void *ptr );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but do support bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree( void *ptr );
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset( void *s, u32 c, u32 n );
+/** @} */ /* end group _mali_osk_memory */
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OS bring up an interactive out of memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * not trigger the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+
+/** @addtogroup _mali_osk_lock
+ * @{ */
+
+/** @brief Initialize a Mutual Exclusion Lock
+ *
+ * Locks are created in the signalled (unlocked) state.
+ *
+ * initial must be zero, since there is currently no means of expressing
+ * whether a reader/writer lock should be initially locked as a reader or
+ * writer. This would require some encoding to be used.
+ *
+ * 'Automatic' ordering means that locks must be obtained in the order that
+ * they were created. For all locks that can be held at the same time, they must
+ * either all provide the order parameter, or they all must use 'automatic'
+ * ordering - because there is no way of mixing 'automatic' and 'manual'
+ * ordering.
+ *
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined, @see _mali_osk_lock_flags_t.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. That is, locks obtained by the
+ * same thread must have been created with an increasing order parameter, for
+ * deadlock prevention. Setting to zero causes 'automatic' ordering to be used.
+ * @return On success, a pointer to a _mali_osk_lock_t object. NULL on failure.
+ */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Wait for a lock to be signalled (obtained)
+
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * In the case of Reader/Writer locks, multiple readers can obtain a lock in
+ * the absence of writers, which is a performance optimization (providing that
+ * the readers never write to the protected resource).
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * For locks marked as _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, it is a
+ * programming error for the function to exit without obtaining the lock. This
+ * means that the error code must only be checked for interruptible locks.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Unless the lock
+ * was created with _MALI_OSK_LOCKFLAG_READERWRITER, this must be
+ * _MALI_OSK_LOCKMODE_RW.
+ * @return On success, _MALI_OSK_ERR_OK. For interruptible locks, a suitable
+ * _mali_osk_errcode_t will be returned on failure, and the lock will not be
+ * obtained. In this case, the error code must be propagated up to the U/K
+ * interface.
+ */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode);
+
+
+/** @brief Signal (release) a lock
+ *
+ * Locks may only be signalled by the thread that originally waited upon the
+ * lock.
+ *
+ * @note In the OSU, a flag exists to allow any thread to signal a
+ * lock. Such functionality is not present in the OSK.
+ *
+ * @param lock the lock to signal (release).
+ * @param mode the mode in which the lock should be obtained. This must match
+ * the mode in which the lock was waited upon.
+ */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode );
+
+/** @brief Terminate a lock
+ *
+ * This terminates a lock and frees all associated resources.
+ *
+ * It is a programming error to terminate the lock when it is held (unsignalled)
+ * by a thread.
+ *
+ * @param lock the lock to terminate.
+ */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock );
+/** @} */ /* end group _mali_osk_lock */
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier( void );
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier( void );
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address mapping, u32 offset, u32 val );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall( void );
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OS do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications the
+ * different subsystems wants to send to user space has to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after running of jobs on the hardware or
+ * when changes to the system is detected that needs to be relayed to user
+ * space.
+ *
+ * After an event has occurred, user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t,
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue which can be used to queue messages for user
+ * delivery and get queued messages from
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * they know the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+
+#if MALI_STATE_TRACKING
+/** @brief Check whether a notification queue is empty
+ *
+ * Check if a notification queue is empty.
+ *
+ * @param queue The queue to check.
+ * @return MALI_TRUE if queue is empty, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue );
+#endif
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which are 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+
+/** @brief Modify a timer
+ *
+ * Set the absolute time at which a timer will expire, and start it if it is
+ * stopped. If \a expiry_tick is in the past (determined by
+ * _mali_osk_time_after() ), the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at absolute time \a expiry_tick, at which point, the
+ * callback function will be invoked with the callback-specific data, as set
+ * by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param expiry_tick the \em absolute time in ticks at which this timer should
+ * trigger.
+ *
+ */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * may not obtain any mutexes that the caller holds. Otherwise, a deadlock will
+ * occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the IRQ work-queue by the timer (with
+ * \ref _mali_osk_irq_schedulework()) may still run. The timer callback and IRQ
+ * bottom-half handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim );
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time use the OS's representation of time, which are
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time, and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be loss-less,
+ * and is \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int _mali_osk_time_after( u32 ticka, u32 tickb );
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32 _mali_osk_time_mstoticks( u32 ms );
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms( u32 ticks );
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32 _mali_osk_time_tickcount( void );
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. At worst, the delay will be \b at least \a usecs
+ * microseconds, and so may be (significantly) more.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, this is definitely implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay( u32 usecs );
+
+/** @brief Return time in nano seconds, since any given reference.
+ *
+ * @return Time in nano seconds
+ */
+u64 _mali_osk_time_get_ns( void );
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit words to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz( u32 val );
+/** @} */ /* end group _mali_osk_math */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg( const char *fmt, ... );
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... );
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the caller-process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0);
+void _mali_osk_profiling_add_counter(u32 event_id, u32 data0);
+int _mali_osk_counter_event(u32 counter, u32 event);
+extern u32 counter_table[];
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "mali_osk_specific.h" /* include any per-os specifics */
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+ #error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+ #error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_osk_bitops.h b/drivers/media/video/samsung/mali/common/mali_osk_bitops.h
new file mode 100644
index 0000000..f262f7d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_osk_bitops.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Clear bit 'bit' (0..31) in the single 32-bit word at 'addr'.
+ * Non-atomic: callers needing atomicity must provide their own locking. */
+MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+{
+	MALI_DEBUG_ASSERT( bit < 32 );
+	MALI_DEBUG_ASSERT( NULL != addr );
+
+	(*addr) &= ~(1 << bit);
+}
+
+/* Set bit 'bit' (0..31) in the single 32-bit word at 'addr'.
+ * Non-atomic: callers needing atomicity must provide their own locking. */
+MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+{
+	MALI_DEBUG_ASSERT( bit < 32 );
+	MALI_DEBUG_ASSERT( NULL != addr );
+
+	(*addr) |= (1 << bit);
+}
+
+/* Test bit 'bit' (0..31) in 'value'. Returns the isolated bit, i.e. a
+ * non-zero value (not necessarily 1) when set, and zero when clear. */
+MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+{
+	MALI_DEBUG_ASSERT( bit < 32 );
+	return value & (1 << bit);
+}
+
+/* Find the first (least-significant) zero bit in 'value' using branch-free
+ * bit-twiddling: invert, isolate the lowest set bit of the inversion with
+ * x & -x, then count leading zeros of the isolated bit.
+ * Returns the bit position 0..31, or a negative value when 'value' is all 1s
+ * (assumes _mali_osk_clz(0) returns a value > 31 in that case -- the caller
+ * in _mali_osk_find_first_zero_bit tests 'result >= 0'; confirm per-OS). */
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+{
+	u32 inverted;
+	u32 negated;
+	u32 isolated;
+	u32 leading_zeros;
+
+	/* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+	inverted = ~value; /* zzz...z1000...0 */
+	/* Using count_trailing_zeros on inverted value -
+	 * See ARM System Developers Guide for details of count_trailing_zeros */
+
+	/* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+	negated = (u32)-inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+	/* negated = xxx...x1000...0 */
+
+	isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+	/* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+	 * Note that the output is zero if value was all 1s */
+
+	leading_zeros = _mali_osk_clz( isolated );
+
+	/* Position of the single set bit in 'isolated' == index of the zero bit. */
+	return 31 - leading_zeros;
+}
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+/* Word index = nr / 32, bit index = nr % 32; then clear non-atomically. */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+{
+	addr += nr >> 5; /* find the correct word */
+	nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+	_mali_internal_clear_bit( nr, addr );
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+/* Word index = nr / 32, bit index = nr % 32; then set non-atomically. */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+{
+	addr += nr >> 5; /* find the correct word */
+	nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+	_mali_internal_set_bit( nr, addr );
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+/* Word index = nr / 32, bit index = nr % 32; returns the isolated bit
+ * (non-zero if set, zero if clear), per _mali_internal_test_bit. */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+{
+	addr += nr >> 5; /* find the correct word */
+	nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+	return _mali_internal_test_bit( nr, *addr );
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+{
+	u32 total;
+
+	/* Scan one 32-bit word per iteration; 'total' tracks the bit offset
+	 * of the word currently under test. */
+	for ( total = 0; total < maxbit; total += 32, ++addr )
+	{
+		int result;
+		result = _mali_internal_find_first_zero_bit( *addr );
+
+		/* non-negative signifies the bit was found */
+		if ( result >= 0 )
+		{
+			total += (u32)result;
+			break;
+		}
+	}
+
+	/* Now check if we reached maxbit or above */
+	/* (also clamps a hit in the final word that lies at/beyond maxbit) */
+	if ( total >= maxbit )
+	{
+		total = maxbit;
+	}
+
+	return total; /* either the found bit nr, or maxbit if not found */
+}
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_osk_list.h b/drivers/media/video/samsung/mali/common/mali_osk_list.h
new file mode 100644
index 0000000..3a562bb
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_osk_list.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Link 'new_entry' between the two (assumed adjacent) nodes 'prev' and
+ * 'next'. Internal helper for the public add/addtail/move operations. */
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+	next->prev = new_entry;
+	new_entry->next = next;
+	new_entry->prev = prev;
+	prev->next = new_entry;
+}
+
+/* Unlink whatever lies between 'prev' and 'next' by joining them directly.
+ * The removed entry's own pointers are left dangling. */
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+	next->prev = prev;
+	prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+{
+	/* A node that points to itself marks an empty list. */
+	list->next = list;
+	list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+	/* Insert directly after 'list': new_entry becomes the first element. */
+	__mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted to the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+	/* Insert directly before 'list': new_entry becomes the last element. */
+	__mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+{
+	/* The entry still points at its old neighbours afterwards;
+	 * re-initialize it (see _mali_osk_list_delinit) before reuse. */
+	__mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+{
+	/* Remove from the list, then make the entry a valid empty list again. */
+	__mali_osk_list_del(list->prev, list->next);
+	_mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE int _mali_osk_list_empty( _mali_osk_list_t *list )
+{
+	/* An empty list's head is its own successor. */
+	return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+{
+	/* Unlink from the current list, then insert as the first element of
+	 * 'list'. Unlike _mali_osk_list_add, this is safe across lists. */
+	__mali_osk_list_del(move_entry->prev, move_entry->next);
+	_mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Join two lists
+ *
+ * The list element must be initialized.
+ *
+ * Allows you to join a list into another list at a specific location
+ *
+ * @param list the new list to add
+ * @param at the location in a list to add the new list into
+ */
+MALI_STATIC_INLINE void _mali_osk_list_splice( _mali_osk_list_t *list, _mali_osk_list_t *at )
+{
+	if (!_mali_osk_list_empty(list))
+	{
+		/* insert all items from 'list' after 'at' */
+		_mali_osk_list_t *first = list->next;
+		_mali_osk_list_t *last = list->prev;
+		_mali_osk_list_t *split = at->next;
+
+		first->prev = at;
+		at->next = first;
+
+		last->next = split;
+		split->prev = last;
+	}
+	/* NOTE(review): the head 'list' is NOT re-initialised here and still
+	 * points at the moved elements; call _mali_osk_list_init on it before
+	 * any further use. */
+}
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_osk_mali.h b/drivers/media/video/samsung/mali/common/mali_osk_mali.h
new file mode 100644
index 0000000..0b1d13a
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_osk_mali.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Initialize the OSK layer
+ *
+ * This function is used to setup any initialization of OSK functionality, if
+ * required.
+ *
+ * This must be the first function called from the common code, specifically,
+ * from the common code entry-point, mali_kernel_constructor.
+ *
+ * The OS-integration into the OS's kernel must handle calling of
+ * mali_kernel_constructor when the device driver is loaded.
+ *
+ * @return _MALI_OSK_ERR_OK on success, or a suitable _mali_osk_errcode_t on
+ * failure.
+ */
+_mali_osk_errcode_t _mali_osk_init( void );
+
+/** @brief Terminate the OSK layer
+ *
+ * This function is used to terminate any resources initialized by
+ * _mali_osk_init.
+ *
+ * This must be the last function called from the common code, specifically,
+ * from the common code closedown function, mali_kernel_destructor, and the
+ * error path in mali_kernel_constructor.
+ *
+ * The OS-integration into the OS's kernel must handle calling of
+ * mali_kernel_destructor when the device driver is terminated.
+ */
+void _mali_osk_term( void );
+
+/** @brief Read the Mali Resource configuration
+ *
+ * Populates a _mali_arch_resource_t array from configuration settings, which
+ * are stored in an OS-specific way.
+ *
+ * For example, these may be compiled in to a static structure, or read from
+ * the filesystem at startup.
+ *
+ * On failure, do not call _mali_osk_resources_term.
+ *
+ * @param arch_config a pointer to the store the pointer to the resources
+ * @param num_resources the number of resources read
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_NOMEM on allocation
+ * error. For other failures, a suitable _mali_osk_errcode_t is returned.
+ */
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources );
+
+/** @brief Free resources allocated by _mali_osk_resources_init.
+ *
+ * Frees the _mali_arch_resource_t array allocated by _mali_osk_resources_init
+ *
+ * @param arch_config a pointer to the stored the pointer to the resources
+ * @param num_resources the number of resources in the array
+ */
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources);
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Initialize a user-space accessible memory range
+ *
+ * This initializes a virtual address range such that it is reserved for the
+ * current process, but does not map any physical pages into this range.
+ *
+ * This function may initialize or adjust any members of the
+ * mali_memory_allocation \a descriptor supplied, before the physical pages are
+ * mapped in with _mali_osk_mem_mapregion_map().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The \a descriptor's process_addr_mapping_info member can be modified to
+ * allocate OS-specific information. Note that on input, this will be a
+ * ukk_private word from the U/K interface, as inserted by _mali_ukk_mem_mmap().
+ * This is used to pass information from the U/K interface to the OSK interface,
+ * if necessary. The precise usage of the process_addr_mapping_info member
+ * depends on the U/K implementation of _mali_ukk_mem_mmap().
+ *
+ * Therefore, the U/K implementation of _mali_ukk_mem_mmap() and the OSK
+ * implementation of _mali_osk_mem_mapregion_init() must agree on the meaning and
+ * usage of the ukk_private word and process_addr_mapping_info member.
+ *
+ * Refer to \ref u_k_api for more information on the U/K interface.
+ *
+ * On successful return, \a descriptor's mapping member will be correct for
+ * use with _mali_osk_mem_mapregion_term() and _mali_osk_mem_mapregion_map().
+ *
+ * @param descriptor the mali_memory_allocation to initialize.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor );
+
+/** @brief Terminate a user-space accessible memory range
+ *
+ * This terminates a virtual address range reserved in the current user process,
+ * where none, some or all of the virtual address ranges have mappings to
+ * physical pages.
+ *
+ * It will unmap any physical pages that had been mapped into a reserved
+ * virtual address range for the current process, and then releases the virtual
+ * address range. Any extra book-keeping information or resources allocated
+ * during _mali_osk_mem_mapregion_init() will also be released.
+ *
+ * The \a descriptor itself is not freed - this must be handled by the caller of
+ * _mali_osk_mem_mapregion_term().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param descriptor the mali_memory_allocation to terminate.
+ */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor );
+
+/** @brief Map physical pages into a user process's virtual address range
+ *
+ * This is used to map a number of physically contiguous pages into a
+ * user-process's virtual address range, which was previously reserved by a
+ * call to _mali_osk_mem_mapregion_init().
+ *
+ * This need not provide a mapping for the entire virtual address range
+ * reserved for \a descriptor - it may be used to map single pages per call.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The function may supply \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC.
+ * In this case, \a size must be set to \ref _MALI_OSK_CPU_PAGE_SIZE, and the function
+ * will allocate the physical page itself. The physical address of the
+ * allocated page will be returned through \a phys_addr.
+ *
+ * It is an error to set \a size != \ref _MALI_OSK_CPU_PAGE_SIZE while
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ * since it is not always possible for OSs to support such a setting through this
+ * interface.
+ *
+ * @note \b IMPORTANT: This code must validate the input parameters. If the
+ * range defined by \a offset and \a size is outside the range allocated in
+ * \a descriptor, then this function \b MUST not attempt any mapping, and must
+ * instead return a suitable \ref _mali_osk_errcode_t \b failure code.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor, and not the \a phys_addr parameter.
+ * It must be a multiple of \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in,out] phys_addr a pointer to the physical base address to begin the
+ * mapping from. If \a size == \ref _MALI_OSK_CPU_PAGE_SIZE and
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, then this
+ * function will allocate the physical page itself, and return the
+ * physical address of the page through \a phys_addr, which will be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE. Otherwise, \a *phys_addr must be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE, and is unmodified after the call.
+ * \a phys_addr is unaffected by the \a offset parameter.
+ *
+ * @param[in] size the number of bytes to map in. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise a _mali_osk_errcode_t value
+ * on failure
+ *
+ * @note could expand to use _mali_osk_mem_mapregion_flags_t instead of
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, but note that we must
+ * also modify the mali process address manager in the mmu/memory engine code.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size );
+
+
+/** @brief Unmap physical pages from a user process's virtual address range
+ *
+ * This is used to unmap a number of physically contiguous pages from a
+ * user-process's virtual address range, which were previously mapped by a
+ * call to _mali_osk_mem_mapregion_map(). If the range specified was allocated
+ * from OS memory, then that memory will be returned to the OS. Whilst pages
+ * will be mapped out, the Virtual address range remains reserved, and at the
+ * same base address.
+ *
+ * When this function is used to unmap pages from OS memory
+ * (_mali_osk_mem_mapregion_map() was called with *phys_addr ==
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC), then the \a flags must
+ * include \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR. This is because
+ * it is not always easy for an OS implementation to discover whether the
+ * memory was OS allocated or not (and so, how it should release the memory).
+ *
+ * For this reason, only a range of pages of the same allocation type (all OS
+ * allocated, or none OS allocated) may be unmapped in one call. Multiple
+ * calls must be made if allocations of these different types exist across the
+ * entire region described by the \a descriptor.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor. \a offset must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] size the number of bytes to unmap. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] flags specifies how the memory should be unmapped. For a range
+ * of pages that were originally OS allocated, this must have
+ * \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR set.
+ */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags );
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_uk_types.h b/drivers/media/video/samsung/mali/common/mali_uk_types.h
new file mode 100644
index 0000000..e114fa8
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_uk_types.h
@@ -0,0 +1,1176 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __MALI_UK_TYPES_H__
+#define __MALI_UK_TYPES_H__
+
+/*
+ * NOTE: Because this file can be included from user-side and kernel-side,
+ * it is up to the includee to ensure certain typedefs (e.g. u32) are already
+ * defined when #including this.
+ */
+#include "regs/mali_200_regs.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_uk_core U/K Core
+ * @{ */
+
+/** Definition of subsystem numbers, to assist in creating a unique identifier
+ * for each U/K call.
+ *
+ * @see _mali_uk_functions */
+typedef enum
+{
+ _MALI_UK_CORE_SUBSYSTEM, /**< Core Group of U/K calls */
+ _MALI_UK_MEMORY_SUBSYSTEM, /**< Memory Group of U/K calls */
+ _MALI_UK_PP_SUBSYSTEM, /**< Fragment Processor Group of U/K calls */
+ _MALI_UK_GP_SUBSYSTEM, /**< Vertex Processor Group of U/K calls */
+ _MALI_UK_PROFILING_SUBSYSTEM, /**< Profiling Group of U/K calls */
+ _MALI_UK_PMM_SUBSYSTEM, /**< Power Management Module Group of U/K calls */
+ _MALI_UK_VSYNC_SUBSYSTEM, /**< VSYNC Group of U/K calls */
+} _mali_uk_subsystem_t;
+
+/** Within a function group each function has its unique sequence number
+ * to assist in creating a unique identifier for each U/K call.
+ *
+ * An ordered pair of numbers selected from
+ * ( \ref _mali_uk_subsystem_t,\ref _mali_uk_functions) will uniquely identify the
+ * U/K call across all groups of functions, and all functions. */
+typedef enum
+{
+ /** Core functions */
+
+ _MALI_UK_OPEN = 0, /**< _mali_ukk_open() */
+ _MALI_UK_CLOSE, /**< _mali_ukk_close() */
+ _MALI_UK_GET_SYSTEM_INFO_SIZE, /**< _mali_ukk_get_system_info_size() */
+ _MALI_UK_GET_SYSTEM_INFO, /**< _mali_ukk_get_system_info() */
+ _MALI_UK_WAIT_FOR_NOTIFICATION, /**< _mali_ukk_wait_for_notification() */
+ _MALI_UK_GET_API_VERSION, /**< _mali_ukk_get_api_version() */
+ _MALI_UK_POST_NOTIFICATION, /**< _mali_ukk_post_notification() */
+
+ /** Memory functions */
+
+ _MALI_UK_INIT_MEM = 0, /**< _mali_ukk_init_mem() */
+ _MALI_UK_TERM_MEM, /**< _mali_ukk_term_mem() */
+ _MALI_UK_GET_BIG_BLOCK, /**< _mali_ukk_get_big_block() */
+ _MALI_UK_FREE_BIG_BLOCK, /**< _mali_ukk_free_big_block() */
+ _MALI_UK_MAP_MEM, /**< _mali_ukk_mem_mmap() */
+ _MALI_UK_UNMAP_MEM, /**< _mali_ukk_mem_munmap() */
+ _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, /**< _mali_ukk_mem_get_mmu_page_table_dump_size() */
+ _MALI_UK_DUMP_MMU_PAGE_TABLE, /**< _mali_ukk_mem_dump_mmu_page_table() */
+ _MALI_UK_ATTACH_UMP_MEM, /**< _mali_ukk_attach_ump_mem() */
+ _MALI_UK_RELEASE_UMP_MEM, /**< _mali_ukk_release_ump_mem() */
+ _MALI_UK_MAP_EXT_MEM, /**< _mali_uku_map_external_mem() */
+ _MALI_UK_UNMAP_EXT_MEM, /**< _mali_uku_unmap_external_mem() */
+ _MALI_UK_VA_TO_MALI_PA, /**< _mali_uku_va_to_mali_pa() */
+
+ /** Common functions for each core */
+
+ _MALI_UK_START_JOB = 0, /**< Start a Fragment/Vertex Processor Job on a core */
+ _MALI_UK_ABORT_JOB, /**< Abort a job */
+ _MALI_UK_GET_NUMBER_OF_CORES, /**< Get the number of Fragment/Vertex Processor cores */
+ _MALI_UK_GET_CORE_VERSION, /**< Get the Fragment/Vertex Processor version compatible with all cores */
+
+ /** Fragment Processor Functions */
+
+ _MALI_UK_PP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_pp_start_job() */
+ _MALI_UK_PP_ABORT_JOB = _MALI_UK_ABORT_JOB, /**< _mali_ukk_pp_abort_job() */
+ _MALI_UK_GET_PP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_pp_number_of_cores() */
+ _MALI_UK_GET_PP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_pp_core_version() */
+
+ /** Vertex Processor Functions */
+
+ _MALI_UK_GP_START_JOB = _MALI_UK_START_JOB, /**< _mali_ukk_gp_start_job() */
+ _MALI_UK_GP_ABORT_JOB = _MALI_UK_ABORT_JOB, /**< _mali_ukk_gp_abort_job() */
+ _MALI_UK_GET_GP_NUMBER_OF_CORES = _MALI_UK_GET_NUMBER_OF_CORES, /**< _mali_ukk_get_gp_number_of_cores() */
+ _MALI_UK_GET_GP_CORE_VERSION = _MALI_UK_GET_CORE_VERSION, /**< _mali_ukk_get_gp_core_version() */
+ _MALI_UK_GP_SUSPEND_RESPONSE, /**< _mali_ukk_gp_suspend_response() */
+
+ /** Profiling functions */
+
+ _MALI_UK_PROFILING_START = 0, /**< __mali_uku_profiling_start() */
+ _MALI_UK_PROFILING_ADD_EVENT, /**< __mali_uku_profiling_add_event() */
+ _MALI_UK_PROFILING_STOP, /**< __mali_uku_profiling_stop() */
+ _MALI_UK_PROFILING_GET_EVENT, /**< __mali_uku_profiling_get_event() */
+ _MALI_UK_PROFILING_CLEAR, /**< __mali_uku_profiling_clear() */
+ _MALI_UK_PROFILING_GET_CONFIG, /**< __mali_uku_profiling_get_config() */
+ _MALI_UK_TRANSFER_SW_COUNTERS,
+
+#if USING_MALI_PMM
+ /** Power Management Module Functions */
+ _MALI_UK_PMM_EVENT_MESSAGE = 0, /**< Raise an event message */
+#endif
+
+ /** VSYNC reporting functions */
+ _MALI_UK_VSYNC_EVENT_REPORT = 0, /**< _mali_ukk_vsync_event_report() */
+
+} _mali_uk_functions;
+
+/** @brief Get the size necessary for system info
+ *
+ * @see _mali_ukk_get_system_info_size()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of buffer necessary to hold system information data, in bytes */
+} _mali_uk_get_system_info_size_s;
+
+
+/** @defgroup _mali_uk_getsysteminfo U/K Get System Info
+ * @{ */
+
+/**
+ * Type definition for the core version number.
+ * Used when returning the version number read from a core
+ *
+ * Its format is that of the 32-bit Version register for a particular core.
+ * Refer to the "Mali200 and MaliGP2 3D Graphics Processor Technical Reference
+ * Manual", ARM DDI 0415C, for more information.
+ */
+typedef u32 _mali_core_version;
+
+/**
+ * Enum values for the different modes the driver can be put in.
+ * Normal is the default mode. The driver then uses a job queue and takes job objects from the clients.
+ * Job completion is reported using the _mali_ukk_wait_for_notification call.
+ * The driver blocks this io command until a job has completed or failed or a timeout occurs.
+ *
+ * The 'raw' mode is reserved for future expansion.
+ */
+typedef enum _mali_driver_mode
+{
+ _MALI_DRIVER_MODE_RAW = 1, /**< Reserved for future expansion */
+ _MALI_DRIVER_MODE_NORMAL = 2 /**< Normal mode of operation */
+} _mali_driver_mode;
+
+/** @brief List of possible cores
+ *
+ * add new entries to the end of this enum */
+typedef enum _mali_core_type
+{
+ _MALI_GP2 = 2, /**< MaliGP2 Programmable Vertex Processor */
+ _MALI_200 = 5, /**< Mali200 Programmable Fragment Processor */
+ _MALI_400_GP = 6, /**< Mali400 Programmable Vertex Processor */
+ _MALI_400_PP = 7, /**< Mali400 Programmable Fragment Processor */
+ /* insert new core here, do NOT alter the existing values */
+} _mali_core_type;
+
+/** @brief Information about each Mali Core
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Both Fragment Processor (PP) and Vertex Processor (GP) cores are represented
+ * by this struct.
+ *
+ * The type is reported by the type field, _mali_core_info::_mali_core_type.
+ *
+ * Each core is given a unique Sequence number identifying it, the core_nr
+ * member.
+ *
+ * Flags are taken directly from the resource's flags, and are currently unused.
+ *
+ * Multiple mali_core_info structs are linked in a single linked list using the next field
+ */
+typedef struct _mali_core_info
+{
+ _mali_core_type type; /**< Type of core */
+ _mali_core_version version; /**< Core Version, as reported by the Core's Version Register */
+ u32 reg_address; /**< Address of Registers */
+ u32 core_nr; /**< Sequence number */
+ u32 flags; /**< Flags. Currently Unused. */
+ struct _mali_core_info * next; /**< Next core in Linked List */
+} _mali_core_info;
+
+/** @brief Capabilities of Memory Banks
+ *
+ * These may be used to restrict memory banks for certain uses. They may be
+ * used when access is not possible (e.g. Bus does not support access to it)
+ * or when access is possible but not desired (e.g. Access is slow).
+ *
+ * In the case of 'possible but not desired', there is no way of specifying
+ * the flags as an optimization hint, so that the memory could be used as a
+ * last resort.
+ *
+ * @see _mali_mem_info
+ */
+typedef enum _mali_bus_usage
+{
+
+ _MALI_PP_READABLE = (1<<0), /**< Readable by the Fragment Processor */
+ _MALI_PP_WRITEABLE = (1<<1), /**< Writeable by the Fragment Processor */
+ _MALI_GP_READABLE = (1<<2), /**< Readable by the Vertex Processor */
+ _MALI_GP_WRITEABLE = (1<<3), /**< Writeable by the Vertex Processor */
+ _MALI_CPU_READABLE = (1<<4), /**< Readable by the CPU */
+ _MALI_CPU_WRITEABLE = (1<<5), /**< Writeable by the CPU */
+ _MALI_MMU_READABLE = _MALI_PP_READABLE | _MALI_GP_READABLE, /**< Readable by the MMU (including all cores behind it) */
+ _MALI_MMU_WRITEABLE = _MALI_PP_WRITEABLE | _MALI_GP_WRITEABLE, /**< Writeable by the MMU (including all cores behind it) */
+} _mali_bus_usage;
+
+/** @brief Information about the Mali Memory system
+ *
+ * Information is stored in a linked list, which is stored entirely in the
+ * buffer pointed to by the system_info member of the
+ * _mali_uk_get_system_info_s arguments provided to _mali_ukk_get_system_info()
+ *
+ * Each element of the linked list describes a single Mali Memory bank.
+ * Each allocation can only come from one bank, and will not cross multiple
+ * banks.
+ *
+ * Each bank is uniquely identified by its identifier member. On Mali-nonMMU
+ * systems, to allocate from this bank, the value of identifier must be passed
+ * as the type_id member of the _mali_uk_get_big_block_s arguments to
+ * _mali_ukk_get_big_block.
+ *
+ * On Mali-MMU systems, there is only one bank, which describes the maximum
+ * possible address range that could be allocated (which may be much less than
+ * the available physical memory)
+ *
+ * The flags member describes the capabilities of the memory. It is an error
+ * to attempt to build a job for a particular core (PP or GP) when the memory
+ * regions used do not have the capabilities for supporting that core. This
+ * would result in a job abort from the Device Driver.
+ *
+ * For example, it is correct to build a PP job where read-only data structures
+ * are taken from a memory with _MALI_PP_READABLE set and
+ * _MALI_PP_WRITEABLE clear, and a framebuffer with _MALI_PP_WRITEABLE set and
+ * _MALI_PP_READABLE clear. However, it would be incorrect to use a framebuffer
+ * where _MALI_PP_WRITEABLE is clear.
+ */
+typedef struct _mali_mem_info
+{
+ u32 size; /**< Size of the memory bank in bytes */
+ _mali_bus_usage flags; /**< Capability flags of the memory */
+ u32 maximum_order_supported; /**< log2 supported size */
+ u32 identifier; /**< Unique identifier, to be used in allocate calls */
+ struct _mali_mem_info * next; /**< Next List Link */
+} _mali_mem_info;
+
+/** @brief Info about the whole Mali system.
+ *
+ * This Contains a linked list of the cores and memory banks available. Each
+ * list pointer will remain inside the system_info buffer supplied in the
+ * _mali_uk_get_system_info_s arguments to a _mali_ukk_get_system_info call.
+ *
+ * The has_mmu member must be inspected to ensure the correct group of
+ * Memory function calls is obtained - that is, those for either Mali-MMU
+ * or Mali-nonMMU. @see _mali_uk_memory
+ */
+typedef struct _mali_system_info
+{
+ _mali_core_info * core_info; /**< List of _mali_core_info structures */
+ _mali_mem_info * mem_info; /**< List of _mali_mem_info structures */
+ u32 has_mmu; /**< Non-zero if Mali-MMU present. Zero otherwise. */
+ _mali_driver_mode drivermode; /**< Reserved. Must always be _MALI_DRIVER_MODE_NORMAL */
+} _mali_system_info;
+
+/** @brief Arguments to _mali_ukk_get_system_info()
+ *
+ * A buffer of the size returned by _mali_ukk_get_system_info_size() must be
+ * allocated, and the pointer to this buffer must be written into the
+ * system_info member. The buffer must be suitably aligned for storage of
+ * the _mali_system_info structure - for example, one returned by
+ * _mali_osk_malloc(), which will be suitably aligned for any structure.
+ *
+ * The ukk_private member must be set to zero by the user-side. Under an OS
+ * implementation, the U/K interface must write in the user-side base address
+ * into the ukk_private member, so that the common code in
+ * _mali_ukk_get_system_info() can determine how to adjust the pointers such
+ * that they are sensible from user space. Leaving ukk_private as NULL implies
+ * that no pointer adjustment is necessary - which will be the case on a
+ * bare-metal/RTOS system.
+ *
+ * @see _mali_system_info
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer provided to store system information data */
+ _mali_system_info * system_info; /**< [in,out] pointer to buffer to store system information data. No initialisation of buffer required on input. */
+ u32 ukk_private; /**< [in] Kernel-side private word inserted by certain U/K interface implementations. Caller must set to Zero. */
+} _mali_uk_get_system_info_s;
+/** @} */ /* end group _mali_uk_getsysteminfo */
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @defgroup _mali_uk_gp_suspend_response_s Vertex Processor Suspend Response
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_gp_suspend_response()
+ *
+ * When _mali_wait_for_notification() receives notification that a
+ * Vertex Processor job was suspended, you need to send a response to indicate
+ * what needs to happen with this job. You can either abort or resume the job.
+ *
+ * - set @c code to indicate response code. This is either @c _MALIGP_JOB_ABORT or
+ * @c _MALIGP_JOB_RESUME_WITH_NEW_HEAP to indicate you will provide a new heap
+ * for the job that will resolve the out of memory condition for the job.
+ * - copy the @c cookie value from the @c _mali_uk_gp_job_suspended_s notification;
+ * this is an identifier for the suspended job
+ * - set @c arguments[0] and @c arguments[1] to zero if you abort the job. If
+ * you resume it, @c argument[0] should specify the Mali start address for the new
+ * heap and @c argument[1] the Mali end address of the heap.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ */
+typedef enum _maligp_job_suspended_response_code
+{
+ _MALIGP_JOB_ABORT, /**< Abort the Vertex Processor job */
+ _MALIGP_JOB_RESUME_WITH_NEW_HEAP /**< Resume the Vertex Processor job with a new heap */
+} _maligp_job_suspended_response_code;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] cookie from the _mali_uk_gp_job_suspended_s notification */
+ _maligp_job_suspended_response_code code; /**< [in] abort or resume response code, see \ref _maligp_job_suspended_response_code */
+ u32 arguments[2]; /**< [in] 0 when aborting a job. When resuming a job, the Mali start and end address for a new heap to resume the job with */
+} _mali_uk_gp_suspend_response_s;
+
+/** @} */ /* end group _mali_uk_gp_suspend_response_s */
+
+/** @defgroup _mali_uk_gpstartjob_s Vertex Processor Start Job
+ * @{ */
+
+/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
+typedef enum
+{
+ _MALI_UK_START_JOB_STARTED, /**< Job started */
+ _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED, /**< Job started and bumped a lower priority job that was pending execution */
+ _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE /**< Job could not be started at this time. Try starting the job again */
+} _mali_uk_start_job_status;
+
+/** @brief Status indicating the result of starting a Vertex or Fragment processor job */
+typedef enum
+{
+ MALI_UK_START_JOB_FLAG_DEFAULT = 0, /**< Default behaviour; Flush L2 caches before start, no following jobs */
+ MALI_UK_START_JOB_FLAG_NO_FLUSH = 1, /**< No need to flush L2 caches before start */
+ MALI_UK_START_JOB_FLAG_MORE_JOBS_FOLLOW = 2, /**< More related jobs follows, try to schedule them as soon as possible after this job */
+} _mali_uk_start_job_flags;
+
+/** @brief Status indicating the result of the execution of a Vertex or Fragment processor job */
+
+typedef enum
+{
+ _MALI_UK_JOB_STATUS_END_SUCCESS = 1<<(16+0),
+ _MALI_UK_JOB_STATUS_END_OOM = 1<<(16+1),
+ _MALI_UK_JOB_STATUS_END_ABORT = 1<<(16+2),
+ _MALI_UK_JOB_STATUS_END_TIMEOUT_SW = 1<<(16+3),
+ _MALI_UK_JOB_STATUS_END_HANG = 1<<(16+4),
+ _MALI_UK_JOB_STATUS_END_SEG_FAULT = 1<<(16+5),
+ _MALI_UK_JOB_STATUS_END_ILLEGAL_JOB = 1<<(16+6),
+ _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR = 1<<(16+7),
+ _MALI_UK_JOB_STATUS_END_SHUTDOWN = 1<<(16+8),
+ _MALI_UK_JOB_STATUS_END_SYSTEM_UNUSABLE = 1<<(16+9)
+} _mali_uk_job_status;
+
+#define MALIGP2_NUM_REGS_FRAME (6)
+
+/** @brief Arguments for _mali_ukk_gp_start_job()
+ *
+ * To start a Vertex Processor job
+ * - associate the request with a reference to a @c mali_gp_job_info by setting
+ * user_job_ptr to the address of the @c mali_gp_job_info of the job.
+ * - set @c priority to the priority of the @c mali_gp_job_info
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_gp_job_info into @c frame_registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ *
+ * When @c _mali_ukk_gp_start_job() returns @c _MALI_OSK_ERR_OK, status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again. If the job had a higher priority than the one currently pending
+ * execution (@c _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED), it will bump
+ * the lower priority job and returns the address of the @c mali_gp_job_info
+ * for that job in @c returned_user_job_ptr. That job should get requeued.
+ *
+ * After the job has started, @c _mali_wait_for_notification() will be notified
+ * that the job finished or got suspended. It may get suspended due to
+ * resource shortage. If it finished (see _mali_ukk_wait_for_notification())
+ * the notification will contain a @c _mali_uk_gp_job_finished_s result. If
+ * it got suspended the notification will contain a @c _mali_uk_gp_job_suspended_s
+ * result.
+ *
+ * The @c _mali_uk_gp_job_finished_s contains the job status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ * In case the job got suspended, @c _mali_uk_gp_job_suspended_s contains
+ * the @c user_job_ptr identifier used to start the job with, the @c reason
+ * why the job stalled (see \ref _maligp_job_suspended_reason) and a @c cookie
+ * to identify the core on which the job stalled. This @c cookie will be needed
+ * when responding to this notification by means of _mali_ukk_gp_suspend_response().
+ * (see _mali_ukk_gp_suspend_response()). The response is either to abort or
+ * resume the job. If the job got suspended due to an out of memory condition
+ * you may be able to resolve this by providing more memory and resuming the job.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 watchdog_msecs; /**< [in] maximum allowed runtime in milliseconds. The job gets killed if it runs longer than this. A value of 0 selects the default used by the device driver. */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 returned_user_job_ptr; /**< [out] identifier for the returned job in user space, a @c mali_gp_job_info* */
+ _mali_uk_start_job_status status; /**< [out] indicates job start status (success, previous job returned, requeue) */
+ u32 abort_id; /**< [in] abort id of this job, used to identify this job for later abort requests */
+ u32 perf_counter_l2_src0; /**< [in] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [in] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+} _mali_uk_gp_start_job_s;
+
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE (1<<0) /**< Enable performance counter SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE (1<<1) /**< Enable performance counter SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC0_ENABLE (1<<2) /**< Enable performance counter L2_SRC0 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_SRC1_ENABLE (1<<3) /**< Enable performance counter L2_SRC1 for a job */
+#define _MALI_PERFORMANCE_COUNTER_FLAG_L2_RESET (1<<4) /**< Enable performance counter L2_RESET for a job */
+
+/** @} */ /* end group _mali_uk_gpstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 irq_status; /**< [out] value of the GP interrupt rawstat register (see ARM DDI0415A) */
+ u32 status_reg_on_stop; /**< [out] value of the GP control register */
+ u32 vscl_stop_addr; /**< [out] value of the GP VSCL start register */
+ u32 plbcl_stop_addr; /**< [out] value of the GP PLBCL start register */
+ u32 heap_current_addr; /**< [out] value of the GP PLB PL heap start address register */
+ u32 perf_counter_src0; /**< [out] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [out] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 render_time; /**< [out] number of microseconds it took for the job to render */
+ u32 perf_counter_l2_src0; /**< [out] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [out] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0; /**< [out] Value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1; /**< [out] Value of the Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_gp_job_finished_s;
+
+typedef enum _maligp_job_suspended_reason
+{
+ _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY /**< Polygon list builder unit (PLBU) has run out of memory */
+} _maligp_job_suspended_reason;
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _maligp_job_suspended_reason reason; /**< [out] reason why the job stalled */
+ u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
+} _mali_uk_gp_job_suspended_s;
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @defgroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @defgroup _mali_uk_ppstartjob_s Fragment Processor Start Job
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_pp_start_job()
+ *
+ * To start a Fragment Processor job
+ * - associate the request with a reference to a mali_pp_job by setting
+ * @c user_job_ptr to the address of the @c mali_pp_job of the job.
+ * - set @c priority to the priority of the mali_pp_job
+ * - specify a timeout for the job by setting @c watchdog_msecs to the number of
+ * milliseconds the job is allowed to run. Specifying a value of 0 selects the
+ * default timeout in use by the device driver.
+ * - copy the frame registers from the @c mali_pp_job into @c frame_registers.
+ * For MALI200 you also need to copy the write back 0,1 and 2 registers.
+ * - set the @c perf_counter_flag, @c perf_counter_src0 and @c perf_counter_src1 to zero
+ * for a non-instrumented build. For an instrumented build you can use up
+ * to two performance counters. Set the corresponding bit in @c perf_counter_flag
+ * to enable them. @c perf_counter_src0 and @c perf_counter_src1 specify
+ * the source of what needs to get counted (e.g. number of vertex loader
+ * cache hits). For source id values, see ARM DDI0415A, Table 3-60.
+ * - pass in the user-kernel context in @c ctx that was returned from _mali_ukk_open()
+ *
+ * When _mali_ukk_pp_start_job() returns @c _MALI_OSK_ERR_OK, @c status contains the
+ * result of the request (see \ref _mali_uk_start_job_status). If the job could
+ * not get started (@c _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE) it should be
+ * tried again. If the job had a higher priority than the one currently pending
+ * execution (@c _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED), it will bump
+ * the lower priority job and returns the address of the @c mali_pp_job
+ * for that job in @c returned_user_job_ptr. That job should get requeued.
+ *
+ * After the job has started, _mali_wait_for_notification() will be notified
+ * when the job finished. The notification will contain a
+ * @c _mali_uk_pp_job_finished_s result. It contains the @c user_job_ptr
+ * identifier used to start the job with, the job @c status (see \ref _mali_uk_job_status),
+ * the number of milliseconds the job took to render, and values of core registers
+ * when the job finished (irq status, performance counters, renderer list
+ * address). A job has finished successfully when its status is
+ * @c _MALI_UK_JOB_STATUS_FINISHED. If the hardware detected a timeout while rendering
+ * the job, or software detected the job is taking more than @c watchdog_msecs to
+ * complete, the status will indicate @c _MALI_UK_JOB_STATUS_HANG.
+ * If the hardware detected a bus error while accessing memory associated with the
+ * job, status will indicate @c _MALI_UK_JOB_STATUS_SEG_FAULT.
+ * status will indicate @c _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to
+ * stop the job but the job didn't start on the hardware yet, e.g. when the
+ * driver shutdown.
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 user_job_ptr; /**< [in] identifier for the job in user space */
+ u32 priority; /**< [in] job priority. A lower number means higher priority */
+ u32 watchdog_msecs; /**< [in] maximum allowed runtime in milliseconds. The job gets killed if it runs longer than this. A value of 0 selects the default used by the device driver. */
+ u32 frame_registers[MALI200_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job, see ARM DDI0415A */
+ u32 wb0_registers[MALI200_NUM_REGS_WBx];
+ u32 wb1_registers[MALI200_NUM_REGS_WBx];
+ u32 wb2_registers[MALI200_NUM_REGS_WBx];
+ u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 returned_user_job_ptr; /**< [out] identifier for the returned job in user space */
+ _mali_uk_start_job_status status; /**< [out] indicates job start status (success, previous job returned, requeue) */
+ u32 abort_id; /**< [in] abort id of this job, used to identify this job for later abort requests */
+ u32 perf_counter_l2_src0; /**< [in] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [in] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 frame_builder_id; /**< [in] id of the originating frame builder */
+ u32 flush_id; /**< [in] flush id within the originating frame builder */
+ _mali_uk_start_job_flags flags; /**< [in] Flags for job, see _mali_uk_start_job_flags for more information */
+} _mali_uk_pp_start_job_s;
+/** @} */ /* end group _mali_uk_ppstartjob_s */
+
+typedef struct
+{
+ u32 user_job_ptr; /**< [out] identifier for the job in user space */
+ _mali_uk_job_status status; /**< [out] status of finished job */
+ u32 irq_status; /**< [out] value of interrupt rawstat register (see ARM DDI0415A) */
+ u32 last_tile_list_addr; /**< [out] value of renderer list register (see ARM DDI0415A); necessary to restart a stopped job */
+ u32 perf_counter_src0; /**< [out] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< [out] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter0; /**< [out] value of performance counter 0 (see ARM DDI0415A) */
+ u32 perf_counter1; /**< [out] value of performance counter 1 (see ARM DDI0415A) */
+ u32 render_time; /**< [out] number of microseconds it took for the job to render */
+ u32 perf_counter_l2_src0; /**< [out] source id for Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_src1; /**< [out] source id for Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0; /**< [out] Value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1; /**< [out] Value of the Mali-400 MP L2 cache performance counter 1 */
+ u32 perf_counter_l2_val0_raw; /**< [out] Raw value of the Mali-400 MP L2 cache performance counter 0 */
+ u32 perf_counter_l2_val1_raw; /**< [out] Raw value of the Mali-400 MP L2 cache performance counter 1 */
+} _mali_uk_pp_job_finished_s;
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ * @{ */
+
+/** @defgroup _mali_uk_waitfornotification_s Wait For Notification
+ * @{ */
+
+/** @brief Notification type encodings
+ *
+ * Each Notification type is an ordered pair of (subsystem,id), and is unique.
+ *
+ * The encoding of subsystem,id into a 32-bit word is:
+ * encoding = (( subsystem << _MALI_NOTIFICATION_SUBSYSTEM_SHIFT ) & _MALI_NOTIFICATION_SUBSYSTEM_MASK)
+ * | (( id << _MALI_NOTIFICATION_ID_SHIFT ) & _MALI_NOTIFICATION_ID_MASK)
+ *
+ * @see _mali_uk_wait_for_notification_s
+ */
+typedef enum
+{
+ /** core notifications */
+
+ _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x20,
+ _MALI_NOTIFICATION_APPLICATION_QUIT = (_MALI_UK_CORE_SUBSYSTEM << 16) | 0x40,
+
+ /** Fragment Processor notifications */
+
+ _MALI_NOTIFICATION_PP_FINISHED = (_MALI_UK_PP_SUBSYSTEM << 16) | 0x10,
+
+ /** Vertex Processor notifications */
+
+ _MALI_NOTIFICATION_GP_FINISHED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x10,
+ _MALI_NOTIFICATION_GP_STALLED = (_MALI_UK_GP_SUBSYSTEM << 16) | 0x20,
+} _mali_uk_notification_type;
+
+/** to assist in splitting up 32-bit notification value in subsystem and id value */
+#define _MALI_NOTIFICATION_SUBSYSTEM_MASK 0xFFFF0000
+#define _MALI_NOTIFICATION_SUBSYSTEM_SHIFT 16
+#define _MALI_NOTIFICATION_ID_MASK 0x0000FFFF
+#define _MALI_NOTIFICATION_ID_SHIFT 0
+
+
+/** @brief Arguments for _mali_ukk_wait_for_notification()
+ *
+ * On successful return from _mali_ukk_wait_for_notification(), the members of
+ * this structure will indicate the reason for notification.
+ *
+ * Specifically, the source of the notification can be identified by the
+ * subsystem and id fields of the _mali_uk_notification_type in the code.type
+ * member. The type member is encoded in a way to divide up the types into a
+ * subsystem field, and a per-subsystem ID field. See
+ * _mali_uk_notification_type for more information.
+ *
+ * Interpreting the data union member depends on the notification type:
+ *
+ * - type == _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS
+ * - The kernel side is shutting down. No further
+ * _mali_uk_wait_for_notification() calls should be made.
+ * - In this case, the value of the data union member is undefined.
+ * - This is used to indicate to the user space client that it should close
+ * the connection to the Mali Device Driver.
+ * - type == _MALI_NOTIFICATION_PP_FINISHED
+ * - The notification data is of type _mali_uk_pp_job_finished_s. It contains the user_job_ptr
+ * identifier used to start the job with, the job status, the number of milliseconds the job took to render,
+ * and values of core registers when the job finished (irq status, performance counters, renderer list
+ * address).
+ *   - A job has finished successfully when its status member is _MALI_UK_JOB_STATUS_FINISHED.
+ * - If the hardware detected a timeout while rendering the job, or software detected the job is
+ * taking more than watchdog_msecs (see _mali_ukk_pp_start_job()) to complete, the status member will
+ * indicate _MALI_UK_JOB_STATUS_HANG.
+ * - If the hardware detected a bus error while accessing memory associated with the job, status will
+ * indicate _MALI_UK_JOB_STATUS_SEG_FAULT.
+ *   - Status will indicate _MALI_UK_JOB_STATUS_NOT_STARTED if the driver had to stop the job but the job
+ * didn't start the hardware yet, e.g. when the driver closes.
+ * - type == _MALI_NOTIFICATION_GP_FINISHED
+ * - The notification data is of type _mali_uk_gp_job_finished_s. The notification is similar to that of
+ * type == _MALI_NOTIFICATION_PP_FINISHED, except that several other GP core register values are returned.
+ * The status values have the same meaning for type == _MALI_NOTIFICATION_PP_FINISHED.
+ * - type == _MALI_NOTIFICATION_GP_STALLED
+ *     - The notification data is of type _mali_uk_gp_job_suspended_s. It contains the user_job_ptr
+ * identifier used to start the job with, the reason why the job stalled and a cookie to identify the core on
+ * which the job stalled.
+ * - The reason member of gp_job_suspended is set to _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY
+ * when the polygon list builder unit has run out of memory.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [out] Type of notification available */
+ union
+ {
+ _mali_uk_gp_job_suspended_s gp_job_suspended;/**< [out] Notification data for _MALI_NOTIFICATION_GP_STALLED notification type */
+ _mali_uk_gp_job_finished_s gp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_GP_FINISHED notification type */
+ _mali_uk_pp_job_finished_s pp_job_finished; /**< [out] Notification data for _MALI_NOTIFICATION_PP_FINISHED notification type */
+ } data;
+} _mali_uk_wait_for_notification_s;
+
+/** @brief Arguments for _mali_ukk_post_notification()
+ *
+ * Posts the specified notification to the notification queue for this application.
+ * This is used to send a quit message to the callback thread.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_notification_type type; /**< [in] Type of notification to post */
+} _mali_uk_post_notification_s;
+/** @} */ /* end group _mali_uk_waitfornotification_s */
+
+/** @defgroup _mali_uk_getapiversion_s Get API Version
+ * @{ */
+
+/** helpers for Device Driver API version handling */
+
+/** @brief Encode a version ID from a 16-bit input
+ *
+ * @note the input is assumed to be 16 bits. It must not exceed 16 bits. */
+#define _MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+
+/** @brief Check whether a 32-bit value is likely to be Device Driver API
+ * version ID. */
+#define _IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+
+/** @brief Decode a 16-bit version number from a 32-bit Device Driver API version
+ * ID */
+#define _GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+
+/** @brief Determine whether two 32-bit encoded version IDs match */
+#define _IS_API_MATCH(x, y) (_IS_VERSION_ID((x)) && _IS_VERSION_ID((y)) && (_GET_VERSION((x)) == _GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * For example, for version 1 the value would be 0x00010001
+ */
+#define _MALI_API_VERSION 10
+#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
+
+/**
+ * The API version is a 16-bit integer stored in both the lower and upper 16-bits
+ * of a 32-bit value. The 16-bit API version value is incremented on each API
+ * change. Version 1 would be 0x00010001. Used in _mali_uk_get_api_version_s.
+ */
+typedef u32 _mali_uk_api_version;
+
+/** @brief Arguments for _mali_uk_get_api_version()
+ *
+ * The user-side interface version must be written into the version member,
+ * encoded using _MAKE_VERSION_ID(). It will be compared to the API version of
+ * the kernel-side interface.
+ *
+ * On successful return, the version member will be the API version of the
+ * kernel-side interface. _MALI_UK_API_VERSION macro defines the current version
+ * of the API.
+ *
+ * The compatible member must be checked to see if the version of the user-side
+ * interface is compatible with the kernel-side interface, since future versions
+ * of the interface may be backwards compatible.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+	int compatible;                  /**< [out] @c 1 when @c version is compatible, @c 0 otherwise */
+} _mali_uk_get_api_version_s;
+/** @} */ /* end group _mali_uk_getapiversion_s */
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @defgroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_init_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 mali_address_base; /**< [out] start of MALI address space */
+ u32 memory_size; /**< [out] total MALI address space available */
+} _mali_uk_init_mem_s;
+
+/** @brief Arguments for _mali_ukk_term_mem(). */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_term_mem_s;
+
+/** @brief Arguments for _mali_ukk_get_big_block()
+ *
+ * - type_id should be set to the value of the identifier member of one of the
+ * _mali_mem_info structures returned through _mali_ukk_get_system_info()
+ * - ukk_private must be zero when calling from user-side. On Kernel-side, the
+ * OS implementation of the U/K interface can use it to communicate data to the
+ * OS implementation of the OSK layer. Specifically, ukk_private will be placed
+ * into the ukk_private member of the _mali_uk_mem_mmap_s structure. See
+ * _mali_ukk_mem_mmap() for more details.
+ * - minimum_size_requested will be updated if it is too small
+ * - block_size will always be >= minimum_size_requested, because the underlying
+ * allocation mechanism may only be able to divide up memory regions in certain
+ * ways. To avoid wasting memory, block_size should always be taken into account
+ * rather than assuming minimum_size_requested was really allocated.
+ * - to free the memory, the returned cookie member must be stored, and used to
+ * refer to it.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 type_id; /**< [in] the type id of the memory bank to allocate memory from */
+ u32 minimum_size_requested; /**< [in,out] minimum size of the allocation */
+ u32 ukk_private; /**< [in] Kernel-side private word inserted by certain U/K interface implementations. Caller must set to Zero. */
+ u32 mali_address; /**< [out] address of the allocation in mali address space */
+ void *cpuptr; /**< [out] address of the allocation in the current process address space */
+ u32 block_size; /**< [out] size of the block that got allocated */
+ u32 flags; /**< [out] flags associated with the allocated block, of type _mali_bus_usage */
+ u32 cookie; /**< [out] identifier for the allocated block in kernel space */
+} _mali_uk_get_big_block_s;
+
+/** @brief Arguments for _mali_ukk_free_big_block()
+ *
+ * All that is required is that the cookie member must be set to the value of
+ * the cookie member returned through _mali_ukk_get_big_block()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_free_big_block_s;
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 phys_addr; /**< [in] physical address */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_map_external_mem_s;
+
+/** Flag for _mali_uk_map_external_mem_s and _mali_uk_attach_ump_mem_s */
+#define _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE (1<<0)
+
+/** @note Mali-MMU only */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_unmap_external_mem_s;
+
+/** @note This is identical to _mali_uk_map_external_mem_s above, however phys_addr is replaced by secure_id */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure id */
+ u32 size; /**< [in] size */
+ u32 mali_address; /**< [in] mali address to map the physical memory to */
+ u32 rights; /**< [in] rights necessary for accessing memory */
+ u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
+ u32 cookie; /**< [out] identifier for mapped memory object in kernel space */
+} _mali_uk_attach_ump_mem_s;
+
+/** @note Mali-MMU only; will be supported in future version */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 cookie; /**< [in] identifier for mapped memory object in kernel space */
+} _mali_uk_release_ump_mem_s;
+
+/** @brief Arguments for _mali_ukk_va_to_mali_pa()
+ *
+ * if size is zero or not a multiple of the system's page size, it will be
+ * rounded up to the next multiple of the page size. This will occur before
+ * any other use of the size parameter.
+ *
+ * if va is not PAGE_SIZE aligned, it will be rounded down to the next page
+ * boundary.
+ *
+ * The range (va) to ((u32)va)+(size-1) inclusive will be checked for physical
+ * contiguity.
+ *
+ * The implementor will check that the entire physical range is allowed to be mapped
+ * into user-space.
+ *
+ * Failure will occur if either of the above are not satisfied.
+ *
+ * Otherwise, the physical base address of the range is returned through pa,
+ * va is updated to be page aligned, and size is updated to be a non-zero
+ * multiple of the system's pagesize.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *va; /**< [in,out] Virtual address of the start of the range */
+ u32 pa; /**< [out] Physical base address of the range */
+ u32 size; /**< [in,out] Size of the range, in bytes. */
+} _mali_uk_va_to_mali_pa_s;
+
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [out] size of MMU page table information (registers + page tables) */
+} _mali_uk_query_mmu_page_table_dump_size_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 size; /**< [in] size of buffer to receive mmu page table information */
+ void *buffer; /**< [in,out] buffer to receive mmu page table information */
+ u32 register_writes_size; /**< [out] size of MMU register dump */
+ u32 *register_writes; /**< [out] pointer within buffer where MMU register dump is stored */
+ u32 page_table_dump_size; /**< [out] size of MMU page table dump */
+ u32 *page_table_dump; /**< [out] pointer within buffer where MMU page table dump is stored */
+} _mali_uk_dump_mmu_page_table_s;
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_pp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_number_of_cores(), @c number_of_cores
+ * will contain the number of Fragment Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Fragment Processor cores in the system */
+} _mali_uk_get_pp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_pp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_pp_core_version(), @c version contains
+ * the version that all Fragment Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_pp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 abort_id; /**< [in] ID of job(s) to abort */
+} _mali_uk_pp_abort_job_s;
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ * @{ */
+
+/** @brief Arguments for _mali_ukk_get_gp_number_of_cores()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_number_of_cores(), @c number_of_cores
+ * will contain the number of Vertex Processor cores in the system.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 number_of_cores; /**< [out] number of Vertex Processor cores in the system */
+} _mali_uk_get_gp_number_of_cores_s;
+
+/** @brief Arguments for _mali_ukk_get_gp_core_version()
+ *
+ * - pass in the user-kernel context @c ctx that was returned from _mali_ukk_open()
+ * - Upon successful return from _mali_ukk_get_gp_core_version(), @c version contains
+ * the version that all Vertex Processor cores are compatible with.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_core_version version; /**< [out] version returned from core, see \ref _mali_core_version */
+} _mali_uk_get_gp_core_version_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 abort_id; /**< [in] ID of job(s) to abort */
+} _mali_uk_gp_abort_job_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 limit; /**< [in,out] The desired limit for number of events to record on input, actual limit on output */
+} _mali_uk_profiling_start_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 event_id; /**< [in] event id to register (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [in] event specific data */
+} _mali_uk_profiling_add_event_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 count; /**< [out] The number of events sampled */
+} _mali_uk_profiling_stop_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 index; /**< [in] which index to get (starting at zero) */
+ u64 timestamp; /**< [out] timestamp of event */
+ u32 event_id; /**< [out] event id of event (see enum mali_profiling_events for values) */
+ u32 data[5]; /**< [out] event specific data */
+} _mali_uk_profiling_get_event_s;
+
+typedef struct
+{
+ void *ctx;
+
+ u32 id;
+ s64 value;
+} _mali_uk_sw_counters_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+} _mali_uk_profiling_clear_s;
+
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 enable_events; /**< [out] 1 if user space process should generate events, 0 if not */
+} _mali_uk_profiling_get_config_s;
+
+
+/** @} */ /* end group _mali_uk_gp */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ * @{ */
+
+/** @brief Arguments to _mali_ukk_mem_mmap()
+ *
+ * Use of the phys_addr member depends on whether the driver is compiled for
+ * Mali-MMU or nonMMU:
+ * - in the nonMMU case, this is the physical address of the memory as seen by
+ * the CPU (which may be a constant offset from that used by Mali)
+ * - in the MMU case, this is the Mali Virtual base address of the memory to
+ * allocate, and the particular physical pages used to back the memory are
+ * entirely determined by _mali_ukk_mem_mmap(). The details of the physical pages
+ * are not reported to user-space for security reasons.
+ *
+ * The cookie member must be stored for use later when freeing the memory by
+ * calling _mali_ukk_mem_munmap(). In the Mali-MMU case, the cookie is secure.
+ *
+ * The ukk_private word must be set to zero when calling from user-space. On
+ * Kernel-side, the OS implementation of the U/K interface can use it to
+ * communicate data to the OS implementation of the OSK layer. In particular,
+ * _mali_ukk_get_big_block() directly calls _mali_ukk_mem_mmap directly, and
+ * will communicate its own ukk_private word through the ukk_private member
+ * here. The common code itself will not inspect or modify the ukk_private
+ * word, and so it may be safely used for whatever purposes necessary to
+ * integrate Mali Memory handling into the OS.
+ *
+ * The uku_private member is currently reserved for use by the user-side
+ * implementation of the U/K interface. Its value must be zero.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ u32 size; /**< [in] Size of the requested mapping */
+ u32 phys_addr; /**< [in] Physical address - could be offset, depending on caller+callee convention */
+ u32 cookie; /**< [out] Returns a cookie for use in munmap calls */
+ void *uku_private; /**< [in] User-side Private word used by U/K interface */
+ void *ukk_private; /**< [in] Kernel-side Private word used by U/K interface */
+} _mali_uk_mem_mmap_s;
+
+/** @brief Arguments to _mali_ukk_mem_munmap()
+ *
+ * The cookie and mapping members must be that returned from the same previous
+ * call to _mali_ukk_mem_mmap(). The size member must correspond to cookie
+ * and mapping - that is, it must be the value originally supplied to a call to
+ * _mali_ukk_mem_mmap that returned the values of mapping and cookie.
+ *
+ * An error will be returned if an attempt is made to unmap only part of the
+ * originally obtained range, or to unmap more than was originally obtained.
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] The mapping returned from mmap call */
+ u32 size; /**< [in] The size passed to mmap call */
+ u32 cookie; /**< [in] Cookie from mmap call */
+} _mali_uk_mem_munmap_s;
+/** @} */ /* end group _mali_uk_memory */
+
+#if USING_MALI_PMM
+
+/** @defgroup _mali_uk_pmm U/K Power Management Module
+ * @{ */
+
+/** @brief Power management event message identifiers.
+ *
+ * U/K events start after id 200, and can range up to 999
+ * Adding new events will require updates to the PMM mali_pmm_event_id type
+ */
+#define _MALI_PMM_EVENT_UK_EXAMPLE 201
+
+/** @brief Generic PMM message data type, that will be dependent on the event msg
+ */
+typedef u32 mali_pmm_message_data;
+
+
+/** @brief Arguments to _mali_ukk_pmm_event_message()
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 id; /**< [in] event id */
+ mali_pmm_message_data data; /**< [in] specific data associated with the event */
+} _mali_uk_pmm_message_s;
+
+/** @} */ /* end group _mali_uk_pmm */
+#endif /* USING_MALI_PMM */
+
+/** @defgroup _mali_uk_vsync U/K VSYNC Wait Reporting Module
+ * @{ */
+
+/** @brief VSYNC events
+ *
+ * These events are reported when DDK starts to wait for vsync and when the
+ * vsync has occurred and the DDK can continue on the next frame.
+ */
+typedef enum _mali_uk_vsync_event
+{
+ _MALI_UK_VSYNC_EVENT_BEGIN_WAIT = 0,
+ _MALI_UK_VSYNC_EVENT_END_WAIT
+} _mali_uk_vsync_event;
+
+/** @brief Arguments to _mali_ukk_vsync_event()
+ *
+ */
+typedef struct
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_vsync_event event; /**< [in] VSYNCH event type */
+} _mali_uk_vsync_event_report_s;
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UK_TYPES_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_ukk.h b/drivers/media/video/samsung/mali/common/mali_ukk.h
new file mode 100644
index 0000000..94efdf5
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_ukk.h
@@ -0,0 +1,723 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function call)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * This first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ *   - For example, a U/K interface implementation that takes NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ *   interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - On OS systems (not including direct function call U/K interface
+ * implementations), _mali_ukk_get_big_block() may succeed, but the subsequent
+ * copying to user space may fail.
+ * - A problem scenario exists: some memory has been reserved by
+ * _mali_ukk_get_big_block(), but the user-mode will be unaware of it (it will
+ * never receive any information about this memory). In this case, the U/K
+ * implementation must do everything necessary to 'rollback' the \em atomic
+ * _mali_ukk_get_big_block() transaction.
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open( void **context );
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close( void **context );
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Returns the size of the buffer needed for a _mali_ukk_get_system_info call
+ *
+ * This function must be called before a call is made to
+ * _mali_ukk_get_system_info, so that memory of the correct size can be
+ * allocated, and a pointer to this memory written into the system_info member
+ * of _mali_uk_get_system_info_s.
+ *
+ * @param args see _mali_uk_get_system_info_size_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info_size( _mali_uk_get_system_info_size_s *args );
+
+/** @brief Returns information about the system (cores and memory banks)
+ *
+ * A buffer for this needs to be allocated by the caller. The size of the buffer required is returned by
+ * _mali_ukk_get_system_info_size(). The user is responsible for freeing the buffer.
+ *
+ * The _mali_system_info structure will be written to the start of this buffer,
+ * and the core_info and mem_info lists will be written to locations inside
+ * the buffer, and will be suitably aligned.
+ *
+ * Under OS implementations of the U/K interface we need to pack/unpack
+ * pointers across the user/kernel boundary. This has required that we malloc()
+ * an intermediate buffer inside the kernel-side U/K interface, and free it
+ * before returning to user-side. To avoid modifying common code, we do the
+ * following pseudo-code, which we shall call 'pointer switching':
+ *
+ * @code
+ * {
+ * Copy_From_User(kargs, args, ... );
+ * void __user * local_ptr = kargs->system_info;
+ * kargs->system_info = _mali_osk_malloc( ... );
+ * _mali_ukk_get_system_info( kargs );
+ * Copy_To_User( local_ptr, kargs->system_info, ... );
+ * _mali_osk_free( kargs->system_info );
+ * }
+ * @endcode
+ * @note The user-side's args->system_info member was unmodified here.
+ *
+ * However, the current implementation requires an extra ukk_private word so that the common code can work out
+ * how to patch pointers to user-mode for an OS's U/K implementation, this should be set to the user-space
+ * destination address for pointer-patching to occur. When NULL, it is unused, and no pointer-patching occurs in the
+ * common code.
+ *
+ * @param args see _mali_uk_get_system_info_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_system_info( _mali_uk_get_system_info_s *args );
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/**
+ * @brief Initialize the Mali-MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called before any
+ * other functions in the \ref _mali_uk_memory group are called.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_init_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args );
+
+/**
+ * @brief Terminate the MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called when
+ * functions in the \ref _mali_uk_memory group will no longer be called. This
+ * function must be called before the application terminates.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_term_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args );
+
+/** @brief Map a block of memory into the current user process
+ *
+ * Allocates a minimum of minimum_size_requested bytes of MALI memory and maps it into the current
+ * process space. The number of bytes allocated is returned in args->block_size.
+ *
+ * This is only used for Mali-nonMMU mode.
+ *
+ * @param args see _mali_uk_get_big_block_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_big_block( _mali_uk_get_big_block_s *args );
+
+/** @brief Unmap a block of memory from the current user process
+ *
+ * Frees allocated MALI memory and unmaps it from the current process space. The previously allocated memory
+ * is indicated by the cookie as returned by _mali_ukk_get_big_block().
+ *
+ * This is only used for Mali-nonMMU mode.
+ *
+ * @param args see _mali_uk_free_big_block_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_free_big_block( _mali_uk_free_big_block_s *args );
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode. In Mali-nonMMU mode, the function is callable
+ * from the kernel side, and is used to implement _mali_ukk_get_big_block() in this case.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, _mali_ukk_mem_mmap() the _mali_uk_mem_mmap_s::phys_addr
+ * member is used for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * This means that in the first (nonMMU) case, the caller must manage the physical address allocations. The caller
+ * in this case is _mali_ukk_get_big_block(), which does indeed manage the Mali physical address ranges.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the next page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note if implemented, this function is entirely platform-dependent, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * If an existing lower priority job is returned, args->returned_user_job_ptr contains a
+ * pointer to the returned job and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_pp_start_job_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args );
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+
+/** @brief Abort any PP jobs with the given ID.
+ *
+ * Jobs internally queued or currently running on the hardware are to be stopped/aborted.
+ * Jobs aborted are reported via the normal job completion system.
+ * Any jobs, running or internally queued, should be aborted immediately.
+ * Normal notification procedures report on the status of these jobs.
+ *
+ *
+ * @param args see _mali_uk_pp_abort_job_s in "mali_uk_types.h"
+ */
+void _mali_ukk_pp_abort_job( _mali_uk_pp_abort_job_s *args );
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * If an existing lower priority job is returned, args->returned_user_job_ptr contains a pointer to
+ * the returned job and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED_LOW_PRI_JOB_RETURNED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_gp_start_job_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args );
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
+
+/** @brief Abort any GP jobs with the given ID.
+ *
+ * Jobs internally queued or currently running on the hardware are to be stopped/aborted.
+ * Jobs aborted are reported via the normal job completion system.
+ *
+ * Any jobs, running or internally queued, should be aborted immediately.
+ * Normal notification procedures report on the status of these jobs.
+ *
+ * @param args see _mali_uk_gp_abort_job_s in "mali_uk_types.h"
+ */
+void _mali_ukk_gp_abort_job( _mali_uk_gp_abort_job_s *args );
+/** @} */ /* end group _mali_uk_gp */
+
+#if USING_MALI_PMM
+/** @addtogroup _mali_uk_pmm U/K Power Management Module
+ * @{ */
+
+/** @brief Power Management Module event message
+ *
+ * @note The event message can fail to be sent due to OOM but this is
+ * stored in the PMM state machine to be handled later
+ *
+ * @param args see _mali_uk_pmm_message_s in "mali_uk_types.h"
+ */
+void _mali_ukk_pmm_event_message( _mali_uk_pmm_message_s *args );
+/** @} */ /* end group _mali_uk_pmm */
+#endif /* USING_MALI_PMM */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
+
+/** @brief Get the profiling config applicable for calling process.
+ *
+ * @param args see _mali_uk_profiling_get_config_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_config(_mali_uk_profiling_get_config_s *args);
+
+/** @brief Transfer software counters from user to kernel space
+ *
+ * @param args see _mali_uk_transfer_sw_counters_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_transfer_sw_counters(_mali_uk_sw_counters_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm.c b/drivers/media/video/samsung/mali/common/pmm/mali_pmm.c
new file mode 100644
index 0000000..7041391
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm.c
@@ -0,0 +1,1024 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm.c
+ * Implementation of the power management module for the kernel device driver
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_pmm_pmu.h"
+#include "mali_platform.h"
+#include "mali_kernel_pm.h"
+
+/* Internal PMM subsystem state */
+static _mali_pmm_internal_state_t *pmm_state = NULL;
+/* Mali kernel subsystem id */
+static mali_kernel_subsystem_identifier mali_subsystem_pmm_id = -1;
+
+#define GET_PMM_STATE_PTR (pmm_state)
+
+/* Internal functions */
+static _mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource);
+static void pmm_event_process( void );
+_mali_osk_errcode_t malipmm_irq_uhandler(void *data);
+void malipmm_irq_bhandler(void *data);
+
+/** @brief Start the PMM subsystem
+ *
+ * @param id Subsystem id to uniquely identify this subsystem
+ * @return _MALI_OSK_ERR_OK if the system started successfully, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t malipmm_kernel_subsystem_start( mali_kernel_subsystem_identifier id );
+
+/** @brief Perform post start up of the PMM subsystem
+ *
+ * Post start up includes initializing the current policy, now that the system is
+ * completely started - to stop policies turning off hardware during the start up
+ *
+ * @param id the unique subsystem id
+ * @return _MALI_OSK_ERR_OK if the post startup was successful, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t malipmm_kernel_load_complete( mali_kernel_subsystem_identifier id );
+
+/** @brief Terminate the PMM subsystem
+ *
+ * @param id the unique subsystem id
+ */
+void malipmm_kernel_subsystem_terminate( mali_kernel_subsystem_identifier id );
+
+#if MALI_STATE_TRACKING
+u32 malipmm_subsystem_dump_state( char *buf, u32 size );
+#endif
+
+
+/* This will be one of the subsystems in the array of subsystems:
+ static struct mali_kernel_subsystem * subsystems[];
+ found in file: mali_kernel_core.c
+*/
+/* PMM subsystem descriptor handed to the Mali kernel core.
+ * Slot order must follow struct mali_kernel_subsystem; unused hooks are NULL.
+ * NOTE(review): the shutdown slot is deliberately NULL - termination is
+ * invoked explicitly via malipmm_kernel_subsystem_terminate() instead.
+ */
+struct mali_kernel_subsystem mali_subsystem_pmm=
+{
+	malipmm_kernel_subsystem_start,                /* startup */
+	NULL,                                          /*malipmm_kernel_subsystem_terminate,*/ /* shutdown */
+	malipmm_kernel_load_complete,                  /* loaded all subsystems */
+	NULL,
+	NULL,
+	NULL,
+	NULL,
+#if MALI_STATE_TRACKING
+	malipmm_subsystem_dump_state,                  /* dump_state */
+#endif
+};
+
+#if PMM_OS_TEST
+
+u32 power_test_event = 0;
+mali_bool power_test_flag = MALI_FALSE;
+_mali_osk_timer_t *power_test_timer = NULL;
+
+/* PMM_OS_TEST stub: logs completion of the simulated OS power-up. */
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data)
+{
+	MALI_PRINT(("POWER TEST OS UP DONE\n"));
+}
+
+/* PMM_OS_TEST stub: logs completion of the simulated OS power-down. */
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data)
+{
+	MALI_PRINT(("POWER TEST OS DOWN DONE\n"));
+}
+
+/**
+ * Timer callback for the OS power-event self test (PMM_OS_TEST).
+ * Sets the test flag and schedules the PMM bottom-half worker so the next
+ * simulated OS power event is emitted from worker context.
+ * (Original comment described this as a "Symbian OS Power Up call".)
+ */
+void power_test_callback( void *arg )
+{
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	power_test_flag = MALI_TRUE;
+	_mali_osk_irq_schedulework( pmm->irq );
+}
+
+/* Arm the self-test timer; the first simulated event is an OS power down
+ * fired after 10000 ticks (units per _mali_osk_timer_add - TODO confirm).
+ */
+void power_test_start()
+{
+	power_test_timer = _mali_osk_timer_init();
+	_mali_osk_timer_setcallback( power_test_timer, power_test_callback, NULL );
+
+	/* First event is power down */
+	power_test_event = MALI_PMM_EVENT_OS_POWER_DOWN;
+	_mali_osk_timer_add( power_test_timer, 10000 );
+}
+
+/* If the test timer fired, inject the pending simulated OS power event into
+ * the PMM, alternate between power-down and power-up for the next round, and
+ * re-arm the timer.
+ * @return MALI_TRUE if a test event was injected (caller should skip normal
+ *         event processing), MALI_FALSE otherwise.
+ */
+mali_bool power_test_check()
+{
+	if( power_test_flag )
+	{
+		/* Fields: {pointer, id, data} - id is patched below */
+		_mali_uk_pmm_message_s event = {
+			NULL,
+			0,
+			1 };
+		event.id = power_test_event;
+
+		power_test_flag = MALI_FALSE;
+
+		/* Send event */
+		_mali_ukk_pmm_event_message( &event );
+
+		/* Switch to next event to test */
+		if( power_test_event == MALI_PMM_EVENT_OS_POWER_DOWN )
+		{
+			power_test_event = MALI_PMM_EVENT_OS_POWER_UP;
+		}
+		else
+		{
+			power_test_event = MALI_PMM_EVENT_OS_POWER_DOWN;
+		}
+		_mali_osk_timer_add( power_test_timer, 5000 );
+
+		return MALI_TRUE;
+	}
+
+	return MALI_FALSE;
+}
+
+/* Stop and release the self-test timer. */
+void power_test_end()
+{
+	_mali_osk_timer_del( power_test_timer );
+	_mali_osk_timer_term( power_test_timer );
+	power_test_timer = NULL;
+}
+
+#endif
+
+/** @brief Queue a power-management event for the PMM worker.
+ *
+ * Copies the caller's event into a freshly allocated notification, routes it
+ * to the internal queue (iqueue) or the external queue depending on the event
+ * id, and schedules the PMM bottom-half to process it. On allocation failure
+ * the event is dropped and pmm->missed is bumped so the worker can detect the
+ * loss and perform a fatal reset.
+ *
+ * @param args event to send; args->id and args->data are copied
+ */
+void _mali_ukk_pmm_event_message( _mali_uk_pmm_message_s *args )
+{
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	_mali_osk_notification_t *msg;
+	mali_pmm_message_t *event;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_DEBUG_ASSERT_POINTER(args);
+
+	MALIPMM_DEBUG_PRINT( ("PMM: sending message\n") );
+
+#if MALI_PMM_TRACE && MALI_PMM_TRACE_SENT_EVENTS
+	_mali_pmm_trace_event_message( args, MALI_FALSE );
+#endif
+
+	msg = _mali_osk_notification_create( MALI_PMM_NOTIFICATION_TYPE, sizeof( mali_pmm_message_t ) );
+
+	if( msg )
+	{
+		event = (mali_pmm_message_t *)msg->result_buffer;
+		event->id = args->id;
+		event->ts = _mali_osk_time_tickcount();
+		event->data = args->data;
+
+		_mali_osk_atomic_inc( &(pmm->messages_queued) );
+
+		/* NOTE(review): '>' here vs '>=' in _mali_pmm_trace_event_message for
+		 * the MALI_PMM_EVENT_INTERNALS boundary - confirm which is intended. */
+		if( args->id > MALI_PMM_EVENT_INTERNALS )
+		{
+			/* Internal PMM message */
+			_mali_osk_notification_queue_send( pmm->iqueue, msg );
+			#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+				pmm->imessages_sent++;
+			#endif
+		}
+		else
+		{
+			/* Real event */
+			_mali_osk_notification_queue_send( pmm->queue, msg );
+			#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+				pmm->messages_sent++;
+			#endif
+		}
+	}
+	else
+	{
+		MALI_PRINT_ERROR( ("PMM: Could not send message %d", args->id) );
+		/* Make note of this OOM - which has caused a missed event */
+		pmm->missed++;
+	}
+
+	/* Schedule time to look at the event or the fact we couldn't create an event */
+	_mali_osk_irq_schedulework( pmm->irq );
+}
+
+/** @brief Current PMM power state, or MALI_PMM_STATE_UNAVAILABLE if the
+ * subsystem has not started yet (no state struct or no subsystem id).
+ * Read without the PMM lock - value may be stale by the time it is used.
+ */
+mali_pmm_state _mali_pmm_state( void )
+{
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	if( pmm && (mali_subsystem_pmm_id != -1) )
+	{
+		return pmm->state;
+	}
+
+	/* No working subsystem yet */
+	return MALI_PMM_STATE_UNAVAILABLE;
+}
+
+
+/** @brief Mask of all cores registered with the PMM (unlocked read). */
+mali_pmm_core_mask _mali_pmm_cores_list( void )
+{
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	return pmm->cores_registered;
+}
+
+/** @brief Mask of cores currently powered (unlocked read). */
+mali_pmm_core_mask _mali_pmm_cores_powered( void )
+{
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	return pmm->cores_powered;
+}
+
+
+/** @brief Enumerate available PMM policies.
+ * Stub: always returns _MALI_OSK_ERR_UNSUPPORTED; output params untouched.
+ */
+_mali_osk_errcode_t _mali_pmm_list_policies(
+		u32 policy_list_size,
+		mali_pmm_policy *policy_list,
+		u32 *policies_available )
+{
+	/* TBD - This is currently a stub function for basic power management */
+
+	MALI_ERROR( _MALI_OSK_ERR_UNSUPPORTED );
+}
+
+/** @brief Switch the active PMM policy at runtime.
+ * Stub: always returns _MALI_OSK_ERR_UNSUPPORTED; the policy is fixed at
+ * compile time in malipmm_create().
+ */
+_mali_osk_errcode_t _mali_pmm_set_policy( mali_pmm_policy policy )
+{
+	/* TBD - This is currently a stub function for basic power management */
+
+/* TBD - When this is not a stub... include tracing...
+#if MALI_PMM_TRACE
+	_mali_pmm_trace_policy_change( old, newpolicy );
+#endif
+*/
+	MALI_ERROR( _MALI_OSK_ERR_UNSUPPORTED );
+}
+
+/** @brief Report the active PMM policy.
+ *
+ * @param policy out-param; set to the current policy, or MALI_PMM_POLICY_NONE
+ *        if the PMM state does not exist
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if PMM is not
+ *         initialized, _MALI_OSK_ERR_INVALID_ARGS if policy is NULL
+ */
+_mali_osk_errcode_t _mali_pmm_get_policy( mali_pmm_policy *policy )
+{
+	if( policy )
+	{
+		_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+		MALI_DEBUG_ASSERT_POINTER(pmm);
+
+		if( pmm )
+		{
+			*policy = pmm->policy;
+			MALI_SUCCESS;
+		}
+		else
+		{
+			*policy = MALI_PMM_POLICY_NONE;
+			MALI_ERROR( _MALI_OSK_ERR_FAULT );
+		}
+	}
+
+	/* No return argument */
+	MALI_ERROR( _MALI_OSK_ERR_INVALID_ARGS );
+}
+
+#if ( MALI_PMM_TRACE || MALI_STATE_TRACKING )
+
+/* Event names - order must match mali_pmm_event_id enum */
+static char *pmm_trace_events[] = {
+	"OS_POWER_UP",
+	"OS_POWER_DOWN",
+	"JOB_SCHEDULED",
+	"JOB_QUEUED",
+	"JOB_FINISHED",
+	"TIMEOUT",
+};
+
+/* State names - order must match mali_pmm_state enum */
+static char *pmm_trace_state[] = {
+	"UNAVAILABLE",
+	"SYSTEM ON",
+	"SYSTEM OFF",
+	"SYSTEM TRANSITION",
+};
+
+/* Policy names - order must match mali_pmm_policy enum */
+static char *pmm_trace_policy[] = {
+	"NONE",
+	"ALWAYS ON",
+	"JOB CONTROL",
+};
+
+/* Status names - order must match mali_pmm_status enum */
+static char *pmm_trace_status[] = {
+	"MALI_PMM_STATUS_IDLE",                        /**< PMM is waiting next event */
+	"MALI_PMM_STATUS_POLICY_POWER_DOWN",           /**< Policy initiated power down */
+	"MALI_PMM_STATUS_POLICY_POWER_UP",             /**< Policy initiated power up */
+	"MALI_PMM_STATUS_OS_WAITING",                  /**< PMM is waiting for OS power up */
+	"MALI_PMM_STATUS_OS_POWER_DOWN",               /**< OS initiated power down */
+	"MALI_PMM_STATUS_RUNTIME_IDLE_IN_PROGRESS",
+	"MALI_PMM_STATUS_DVFS_PAUSE",                  /**< PMM DVFS Status Pause */
+	"MALI_PMM_STATUS_OS_POWER_UP",                 /**< OS initiated power up */
+	"MALI_PMM_STATUS_OFF",                         /**< PMM is not active */
+};
+
+#endif /* MALI_PMM_TRACE || MALI_STATE_TRACKING */
+#if MALI_PMM_TRACE
+
+/* UK event names - order must match mali_pmm_event_id enum
+ * (indexed relative to MALI_PMM_EVENT_UKS) */
+static char *pmm_trace_events_uk[] = {
+	"UKS",
+	"UK_EXAMPLE",
+};
+
+/* Internal event names - order must match mali_pmm_event_id enum
+ * (indexed relative to MALI_PMM_EVENT_INTERNALS) */
+static char *pmm_trace_events_internal[] = {
+	"INTERNALS",
+	"INTERNAL_POWER_UP_ACK",
+	"INTERNAL_POWER_DOWN_ACK",
+};
+
+/** @brief Trace a change in the powered-core mask.
+ * Prints which cores remain on and which cores just toggled. No-op when the
+ * mask is unchanged.
+ */
+void _mali_pmm_trace_hardware_change( mali_pmm_core_mask old, mali_pmm_core_mask newstate )
+{
+	const char *dname;
+	const char *cname;
+	const char *ename;
+
+	if( old != newstate )
+	{
+		if( newstate == 0 )
+		{
+			dname = "NO cores";
+		}
+		else
+		{
+			dname = pmm_trace_get_core_name( newstate );
+		}
+
+		/* These state checks only work if the assumption that only cores can be
+		 * turned on or turned off in separate actions is true. If core power states can
+		 * be toggled (some on, some off) at the same time, this check does not work
+		 */
+		if( old > newstate )
+		{
+			/* Cores have turned off */
+			cname = pmm_trace_get_core_name( old - newstate );
+			ename = "OFF";
+		}
+		else
+		{
+			/* Cores have turned on */
+			cname = pmm_trace_get_core_name( newstate - old );
+			ename = "ON";
+		}
+		MALI_PRINT( ("PMM Trace: Hardware %s ON, %s just turned %s. { 0x%08x -> 0x%08x }", dname, cname, ename, old, newstate) );
+	}
+}
+
+/** @brief Trace a PMM state transition (no-op when unchanged). */
+void _mali_pmm_trace_state_change( mali_pmm_state old, mali_pmm_state newstate )
+{
+	if( old != newstate )
+	{
+		MALI_PRINT( ("PMM Trace: State changed from %s to %s", pmm_trace_state[old], pmm_trace_state[newstate]) );
+	}
+}
+
+/** @brief Trace a PMM policy change (no-op when unchanged). */
+void _mali_pmm_trace_policy_change( mali_pmm_policy old, mali_pmm_policy newpolicy )
+{
+	if( old != newpolicy )
+	{
+		MALI_PRINT( ("PMM Trace: Policy changed from %s to %s", pmm_trace_policy[old], pmm_trace_policy[newpolicy]) );
+	}
+}
+
+/** @brief Trace one PMM event message.
+ *
+ * Resolves the event name from the internal/UK/standard name tables based on
+ * the id range, and describes the payload per event type.
+ *
+ * @param event    event to trace (must be non-NULL)
+ * @param received MALI_TRUE if the event was dequeued, MALI_FALSE if sent
+ */
+void _mali_pmm_trace_event_message( mali_pmm_message_t *event, mali_bool received )
+{
+	const char *ename;
+	const char *dname;
+	const char *tname;
+	const char *format = "PMM Trace: Event %s { (%d) %s, %d ticks, (0x%x) %s }";
+
+	MALI_DEBUG_ASSERT_POINTER(event);
+
+	tname = (received) ? "received" : "sent";
+
+	/* Pick the name table matching the id range */
+	if( event->id >= MALI_PMM_EVENT_INTERNALS )
+	{
+		ename = pmm_trace_events_internal[((int)event->id) - MALI_PMM_EVENT_INTERNALS];
+	}
+	else if( event->id >= MALI_PMM_EVENT_UKS )
+	{
+		ename = pmm_trace_events_uk[((int)event->id) - MALI_PMM_EVENT_UKS];
+	}
+	else
+	{
+		ename = pmm_trace_events[event->id];
+	}
+
+	switch( event->id )
+	{
+	case MALI_PMM_EVENT_OS_POWER_UP:
+	case MALI_PMM_EVENT_OS_POWER_DOWN:
+		dname = "os event";
+		break;
+
+	case MALI_PMM_EVENT_JOB_SCHEDULED:
+	case MALI_PMM_EVENT_JOB_QUEUED:
+	case MALI_PMM_EVENT_JOB_FINISHED:
+	case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+	case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+		dname = pmm_trace_get_core_name( (mali_pmm_core_mask)event->data );
+		break;
+
+	case MALI_PMM_EVENT_TIMEOUT:
+		dname = "timeout start";
+		/* Print data with a different format */
+		format = "PMM Trace: Event %s { (%d) %s, %d ticks, %d ticks %s }";
+		break;
+	default:
+		dname = "unknown data";
+	}
+
+	MALI_PRINT( (format, tname, (u32)event->id, ename, event->ts, (u32)event->data, dname) );
+}
+
+#endif /* MALI_PMM_TRACE */
+
+
+/****************** Mali Kernel API *****************/
+
+/* Record our subsystem id and register malipmm_create() as the handler for
+ * the PMU resource; actual state allocation happens when that resource is
+ * parsed from the configuration.
+ */
+_mali_osk_errcode_t malipmm_kernel_subsystem_start( mali_kernel_subsystem_identifier id )
+{
+	mali_subsystem_pmm_id = id;
+	MALI_CHECK_NO_ERROR(_mali_kernel_core_register_resource_handler(PMU, malipmm_create));
+	MALI_SUCCESS;
+}
+
+/** @brief PMU resource handler: allocate and initialize the global PMM state.
+ *
+ * Creates the state struct, selects the compile-time policy, then sets up the
+ * PMU hardware, both notification queues, the worker IRQ, the lock and the
+ * queued-message counter. On any failure, everything set up so far is torn
+ * down via the goto-cleanup path (relies on the struct being zeroed so that
+ * unset members are NULL/MALI_FALSE).
+ *
+ * @param resource the PMU resource description from the configuration
+ * @return _MALI_OSK_ERR_OK, _MALI_OSK_ERR_NOMEM, or _MALI_OSK_ERR_FAULT
+ */
+_mali_osk_errcode_t malipmm_create(_mali_osk_resource_t *resource)
+{
+	/* Create PMM state memory */
+	MALI_DEBUG_ASSERT( pmm_state == NULL );
+	pmm_state = (_mali_pmm_internal_state_t *) _mali_osk_malloc(sizeof(*pmm_state));
+	MALI_CHECK_NON_NULL( pmm_state, _MALI_OSK_ERR_NOMEM );
+
+	/* All values get 0 as default */
+	_mali_osk_memset(pmm_state, 0, sizeof(*pmm_state));
+
+	/* Set up the initial PMM state */
+	pmm_state->waiting = 0;
+	pmm_state->status = MALI_PMM_STATUS_IDLE;
+	pmm_state->state = MALI_PMM_STATE_UNAVAILABLE; /* Until a core registers */
+
+	/* Set up policy via compile time option for the moment */
+#if MALI_PMM_ALWAYS_ON
+	pmm_state->policy = MALI_PMM_POLICY_ALWAYS_ON;
+#else
+	pmm_state->policy = MALI_PMM_POLICY_JOB_CONTROL;
+#endif
+
+#if MALI_PMM_TRACE
+	_mali_pmm_trace_policy_change( MALI_PMM_POLICY_NONE, pmm_state->policy );
+#endif
+
+	/* Set up assumes all values are initialized to NULL or MALI_FALSE, so
+	 * we can exit halfway through set up and perform clean up
+	 */
+
+#if USING_MALI_PMU
+	if( mali_pmm_pmu_init(resource) != _MALI_OSK_ERR_OK ) goto pmm_fail_cleanup;
+	pmm_state->pmu_initialized = MALI_TRUE;
+#endif
+	pmm_state->queue = _mali_osk_notification_queue_init();
+	if( !pmm_state->queue ) goto pmm_fail_cleanup;
+
+	pmm_state->iqueue = _mali_osk_notification_queue_init();
+	if( !pmm_state->iqueue ) goto pmm_fail_cleanup;
+
+	/* We are creating an IRQ handler just for the worker thread it gives us */
+	pmm_state->irq = _mali_osk_irq_init( _MALI_OSK_IRQ_NUMBER_PMM,
+		malipmm_irq_uhandler,
+		malipmm_irq_bhandler,
+		NULL,
+		NULL,
+		(void *)pmm_state,            /* PMM state is passed to IRQ */
+		"PMM handler" );
+
+	if( !pmm_state->irq ) goto pmm_fail_cleanup;
+
+	/* Lock order number 75 - TODO confirm against the global lock ordering */
+	pmm_state->lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 75);
+	if( !pmm_state->lock ) goto pmm_fail_cleanup;
+
+	if( _mali_osk_atomic_init( &(pmm_state->messages_queued), 0 ) != _MALI_OSK_ERR_OK )
+	{
+		goto pmm_fail_cleanup;
+	}
+
+	MALIPMM_DEBUG_PRINT( ("PMM: subsystem created, policy=%d\n", pmm_state->policy) );
+
+	MALI_SUCCESS;
+
+pmm_fail_cleanup:
+	MALI_PRINT_ERROR( ("PMM: subsystem failed to be created\n") );
+	if( pmm_state )
+	{
+		if( pmm_state->lock ) _mali_osk_lock_term( pmm_state->lock );
+		if( pmm_state->irq ) _mali_osk_irq_term( pmm_state->irq );
+		if( pmm_state->queue ) _mali_osk_notification_queue_term( pmm_state->queue );
+		if( pmm_state->iqueue ) _mali_osk_notification_queue_term( pmm_state->iqueue );
+#if USING_MALI_PMU
+		if( pmm_state->pmu_initialized )
+		{
+			_mali_osk_resource_type_t t = PMU;
+			mali_pmm_pmu_deinit(&t);
+		}
+#endif /* USING_MALI_PMU */
+
+		_mali_osk_free(pmm_state);
+		pmm_state = NULL;
+	}
+	MALI_ERROR( _MALI_OSK_ERR_FAULT );
+}
+
+/* All subsystems are loaded: initialize the policy now so it cannot power
+ * cores off while the rest of the driver is still starting up. Also kicks the
+ * optional OS power self test.
+ */
+_mali_osk_errcode_t malipmm_kernel_load_complete( mali_kernel_subsystem_identifier id )
+{
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	MALIPMM_DEBUG_PRINT( ("PMM: subsystem loaded, policy initializing\n") );
+
+#if PMM_OS_TEST
+	power_test_start();
+#endif
+
+	/* Initialize the profile now the system has loaded - so that cores are
+	 * not turned off during start up
+	 */
+	return pmm_policy_init( pmm );
+}
+
+/* Force all registered cores on: mark the PMM inactive so the bottom-half
+ * bails out early, drain any in-flight work, then power up directly if
+ * everything is currently off.
+ * NOTE(review): cores_powered is read after releasing the lock - presumably
+ * safe because status==OFF stops concurrent changes; confirm.
+ */
+void malipmm_force_powerup( void )
+{
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_PMM_LOCK(pmm);
+	pmm->status = MALI_PMM_STATUS_OFF;
+	MALI_PMM_UNLOCK(pmm);
+
+	/* flush PMM workqueue */
+	_mali_osk_flush_workqueue( pmm->irq );
+
+	if (pmm->cores_powered == 0)
+	{
+		malipmm_powerup(pmm->cores_registered);
+	}
+}
+
+/** @brief Tear down the PMM subsystem.
+ *
+ * Marks the PMM off under the lock (so the worker stops processing), then
+ * releases policy, IRQ/worker, queues, any still-registered cores, the PMU,
+ * the atomic counter, the lock, and finally the state memory itself.
+ *
+ * @param id must be the id recorded at subsystem start
+ */
+void malipmm_kernel_subsystem_terminate( mali_kernel_subsystem_identifier id )
+{
+	/* Check this is the right system */
+	MALI_DEBUG_ASSERT( id == mali_subsystem_pmm_id );
+	MALI_DEBUG_ASSERT_POINTER(pmm_state);
+
+	if( pmm_state )
+	{
+#if PMM_OS_TEST
+		power_test_end();
+#endif
+		/* Get the lock so we can shutdown */
+		MALI_PMM_LOCK(pmm_state);
+#if MALI_STATE_TRACKING
+		pmm_state->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+		pmm_state->status = MALI_PMM_STATUS_OFF;
+#if MALI_STATE_TRACKING
+		pmm_state->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+		MALI_PMM_UNLOCK(pmm_state);
+		_mali_osk_pmm_ospmm_cleanup();
+		pmm_policy_term(pmm_state);
+		_mali_osk_irq_term( pmm_state->irq );
+		_mali_osk_notification_queue_term( pmm_state->queue );
+		_mali_osk_notification_queue_term( pmm_state->iqueue );
+		/* Power down anything still registered before the PMU goes away */
+		if (pmm_state->cores_registered) malipmm_powerdown(pmm_state->cores_registered,MALI_POWER_MODE_LIGHT_SLEEP);
+#if USING_MALI_PMU
+		if( pmm_state->pmu_initialized )
+		{
+			_mali_osk_resource_type_t t = PMU;
+			mali_pmm_pmu_deinit(&t);
+		}
+#endif /* USING_MALI_PMU */
+
+		_mali_osk_atomic_term( &(pmm_state->messages_queued) );
+		MALI_PMM_LOCK_TERM(pmm_state);
+		_mali_osk_free(pmm_state);
+		pmm_state = NULL;
+	}
+
+	MALIPMM_DEBUG_PRINT( ("PMM: subsystem terminated\n") );
+}
+
+/** @brief Power up the given core mask.
+ *
+ * If nothing is currently powered, first brings the whole Mali block on via
+ * the platform layer (and, with runtime job control, activates the runtime-PM
+ * device, retrying the G3D power domain once on failure), then powers the
+ * requested cores through the PMU and the platform.
+ *
+ * @param cores mask of cores to power up
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_FAULT if device
+ *         activation fails
+ */
+_mali_osk_errcode_t malipmm_powerup( u32 cores )
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+	/* If all the cores are powered down, power up the MALI */
+	if (pmm->cores_powered == 0) {
+		mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+		/* Initiate the power up */
+		if (_mali_osk_pmm_dev_activate() < 0) {
+			MALI_PRINT(("PMM: Try again PD_G3D enable\n"));
+			if (mali_pd_enable() < 0) {
+				MALI_PRINT(("PMM: Mali PMM device activate failed\n"));
+				err = _MALI_OSK_ERR_FAULT;
+				return err;
+			}
+		}
+#endif
+	}
+
+#if USING_MALI_PMU
+	err = mali_pmm_pmu_powerup( cores );
+#endif
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+	mali_platform_powerup(cores);
+#endif
+
+	return err;
+}
+
+/** @brief Power down the given core mask.
+ *
+ * Powers the cores down via the platform layer and PMU; if that leaves no
+ * cores powered, idles the runtime-PM device and drops the whole Mali block
+ * into the requested low-power mode.
+ *
+ * @param cores      mask of cores to power down
+ * @param power_mode target mode when all cores end up off (e.g. light sleep)
+ * @return result of the PMU power-down, or _MALI_OSK_ERR_OK without a PMU
+ */
+_mali_osk_errcode_t malipmm_powerdown( u32 cores, mali_power_mode power_mode )
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+	mali_platform_powerdown(cores);
+#endif
+
+#if USING_MALI_PMU
+	err = mali_pmm_pmu_powerdown( cores );
+#endif
+
+	/* If all cores are powered down, power off the MALI */
+	if (pmm->cores_powered == 0)
+	{
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+		/* Initiate the power down */
+		_mali_osk_pmm_dev_idle();
+#endif
+		mali_platform_power_mode_change(power_mode);
+	}
+	return err;
+}
+
+/** @brief Register a core with the PMM and power it up.
+ *
+ * Fails if the PMM state does not exist yet (PMU resource must come first in
+ * the configuration). On success the core is marked registered, idle and
+ * powered, and the overall PMM state is recomputed.
+ *
+ * @param core id of the core to register (must not already be registered)
+ * @return _MALI_OSK_ERR_OK, _MALI_OSK_ERR_FAULT, or the power-up error
+ */
+_mali_osk_errcode_t malipmm_core_register( mali_pmm_core_id core )
+{
+	_mali_osk_errcode_t err;
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+	if( pmm == NULL )
+	{
+		/* PMM state has not been created, this is because the PMU resource has not been
+		 * created yet.
+		 * This probably means that the PMU resource has not been specfied as the first
+		 * resource in the config file
+		 */
+		MALI_PRINT_ERROR( ("PMM: Cannot register core %s because the PMU resource has not been\n initialized. Please make sure the PMU resource is the first resource in the\n resource configuration.\n",
+							pmm_trace_get_core_name(core)) );
+		MALI_ERROR(_MALI_OSK_ERR_FAULT);
+	}
+
+	MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+	pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+	/* Check if the core is registered more than once in PMM */
+	MALI_DEBUG_ASSERT( (pmm->cores_registered & core) == 0 );
+
+	MALIPMM_DEBUG_PRINT( ("PMM: core registered: (0x%x) %s\n", core, pmm_trace_get_core_name(core)) );
+
+#if !MALI_PMM_NO_PMU
+	/* Make sure the core is powered up */
+	err = malipmm_powerup( core );
+#else
+	err = _MALI_OSK_ERR_OK;
+#endif
+	if( _MALI_OSK_ERR_OK == err )
+	{
+#if MALI_PMM_TRACE
+		mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+		/* Assume a registered core is now powered up and idle */
+		pmm->cores_registered |= core;
+		pmm->cores_idle |= core;
+		pmm->cores_powered |= core;
+		pmm_update_system_state( pmm );
+
+#if MALI_PMM_TRACE
+		_mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+	}
+	else
+	{
+		MALI_PRINT_ERROR( ("PMM: Error(%d) powering up registered core: (0x%x) %s\n",
+				err, core, pmm_trace_get_core_name(core)) );
+	}
+
+#if MALI_STATE_TRACKING
+	pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+	MALI_PMM_UNLOCK(pmm);
+
+	return err;
+}
+
+/** @brief Remove a core from all PMM bookkeeping masks.
+ *
+ * Clears the core from the idle/powered/pending/ack masks and recomputes the
+ * PMM state. NOTE(review): cores_registered itself is asserted on but not
+ * cleared here - presumably cleared elsewhere; confirm.
+ *
+ * @param core id of a core previously registered with the PMM
+ */
+void malipmm_core_unregister( mali_pmm_core_id core )
+{
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+	pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+	/* Check if the core is registered in PMM */
+	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, core );
+
+	MALIPMM_DEBUG_PRINT( ("PMM: core unregistered: (0x%x) %s\n", core, pmm_trace_get_core_name(core)) );
+
+	{
+#if MALI_PMM_TRACE
+		mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+
+		/* Remove the core from the system */
+		pmm->cores_idle &= (~core);
+		pmm->cores_powered &= (~core);
+		pmm->cores_pend_down &= (~core);
+		pmm->cores_pend_up &= (~core);
+		pmm->cores_ack_down &= (~core);
+		pmm->cores_ack_up &= (~core);
+
+		pmm_update_system_state( pmm );
+
+#if MALI_PMM_TRACE
+		_mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+	}
+
+#if MALI_STATE_TRACKING
+	pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+	MALI_PMM_UNLOCK(pmm);
+}
+/* A core acknowledges it may be powered down: queue an internal
+ * POWER_DOWN_ACK event carrying the core id.
+ */
+void malipmm_core_power_down_okay( mali_pmm_core_id core )
+{
+	/* Fields: {pointer, id, data} */
+	_mali_uk_pmm_message_s event = {
+		NULL,
+		MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK,
+		0 };
+
+	event.data = core;
+
+	_mali_ukk_pmm_event_message( &event );
+}
+
+/* Flag that the policy needs re-evaluation and schedule the PMM worker to
+ * perform it (see the check_policy branch in malipmm_irq_bhandler).
+ */
+void malipmm_set_policy_check()
+{
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	pmm->check_policy = MALI_TRUE;
+
+	/* To check the policy we need to schedule some work */
+	_mali_osk_irq_schedulework( pmm->irq );
+}
+
+/* Upper-half IRQ handler. The IRQ object exists only for its worker thread,
+ * so this should never fire; it just reports success.
+ */
+_mali_osk_errcode_t malipmm_irq_uhandler(void *data)
+{
+	MALIPMM_DEBUG_PRINT( ("PMM: uhandler - not expected to be used\n") );
+
+	MALI_SUCCESS;
+}
+
+/** @brief PMM bottom-half worker.
+ *
+ * Runs with the PMM lock held. Bails out immediately when the PMM is off
+ * (shutdown/forced power-up). Otherwise: a non-zero missed counter triggers a
+ * fatal reset, a pending policy check takes priority over event processing,
+ * and a fatal power error after processing also triggers a reset.
+ *
+ * @param data the _mali_pmm_internal_state_t passed at IRQ init
+ */
+void malipmm_irq_bhandler(void *data)
+{
+	_mali_pmm_internal_state_t *pmm;
+	pmm = (_mali_pmm_internal_state_t *)data;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+#if PMM_OS_TEST
+	if( power_test_check() ) return;
+#endif
+
+	MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+	pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+	/* Quick out when we are shutting down */
+	if( pmm->status == MALI_PMM_STATUS_OFF )
+	{
+
+	#if MALI_STATE_TRACKING
+		pmm->mali_pmm_lock_acquired = 0;
+	#endif /* MALI_STATE_TRACKING */
+
+		MALI_PMM_UNLOCK(pmm);
+		return;
+	}
+
+	MALIPMM_DEBUG_PRINT( ("PMM: bhandler - Processing event\n") );
+
+	if( pmm->missed > 0 )
+	{
+		MALI_PRINT_ERROR( ("PMM: Failed to send %d events", pmm->missed) );
+		pmm_fatal_reset( pmm );
+	}
+
+	if( pmm->check_policy )
+	{
+		pmm->check_policy = MALI_FALSE;
+		pmm_policy_check_policy(pmm);
+	}
+	else
+	{
+		/* Perform event processing */
+		pmm_event_process();
+		if( pmm->fatal_power_err )
+		{
+			/* Try a reset */
+			pmm_fatal_reset( pmm );
+		}
+	}
+
+#if MALI_STATE_TRACKING
+	pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+	MALI_PMM_UNLOCK(pmm);
+}
+
+/** @brief Drain and dispatch queued PMM events (called with the PMM lock held).
+ *
+ * Processes at most the number of messages queued at entry, so a flood of new
+ * events cannot keep the worker spinning. Internal-queue messages always take
+ * priority; external-queue messages are only consumed while the PMM is in a
+ * state that can accept them (IDLE, OS_WAITING, DVFS_PAUSE) - otherwise the
+ * event is left queued, pmm->waiting is bumped and the function returns. Each
+ * dispatched event goes through pmm_policy_process(); processing stops early
+ * on a fatal power error. If events were deferred while busy, another pass of
+ * the worker is scheduled at the end.
+ */
+static void pmm_event_process( void )
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+	_mali_osk_notification_t *msg = NULL;
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+	mali_pmm_message_t *event;
+	u32 process_messages;
+
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+
+	/* Max number of messages to process before exiting - as we shouldn't stay
+	 * processing the messages for a long time
+	 */
+	process_messages = _mali_osk_atomic_read( &(pmm->messages_queued) );
+
+	while( process_messages > 0 )
+	{
+		/* Check internal message queue first */
+		err = _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg );
+
+		if( err != _MALI_OSK_ERR_OK )
+		{
+			if( pmm->status == MALI_PMM_STATUS_IDLE || pmm->status == MALI_PMM_STATUS_OS_WAITING || pmm->status == MALI_PMM_STATUS_DVFS_PAUSE)
+			{
+				if( pmm->waiting > 0 ) pmm->waiting--;
+
+				/* We aren't busy changing state, so look at real events */
+				err = _mali_osk_notification_queue_dequeue( pmm->queue, &msg );
+
+				if( err != _MALI_OSK_ERR_OK )
+				{
+					pmm->no_events++;
+					MALIPMM_DEBUG_PRINT( ("PMM: event_process - No message to process\n") );
+					/* Nothing to do - so return */
+					return;
+				}
+				else
+				{
+					#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+						pmm->messages_received++;
+					#endif
+				}
+			}
+			else
+			{
+				/* Waiting for an internal message */
+				pmm->waiting++;
+				MALIPMM_DEBUG_PRINT( ("PMM: event_process - Waiting for internal message, messages queued=%d\n", pmm->waiting) );
+				return;
+			}
+		}
+		else
+		{
+			#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+				pmm->imessages_received++;
+			#endif
+		}
+
+		MALI_DEBUG_ASSERT_POINTER( msg );
+		/* Check the message type matches */
+		MALI_DEBUG_ASSERT( msg->notification_type == MALI_PMM_NOTIFICATION_TYPE );
+
+		event = msg->result_buffer;
+
+		_mali_osk_atomic_dec( &(pmm->messages_queued) );
+		process_messages--;
+
+		#if MALI_PMM_TRACE
+		/* Trace before we process the event in case we have an error */
+		_mali_pmm_trace_event_message( event, MALI_TRUE );
+		#endif
+		err = pmm_policy_process( pmm, event );
+
+
+		if( err != _MALI_OSK_ERR_OK )
+		{
+			MALI_PRINT_ERROR( ("PMM: Error(%d) in policy %d when processing event message with id: %d",
+					err, pmm->policy, event->id) );
+		}
+
+		/* Delete notification */
+		_mali_osk_notification_delete ( msg );
+
+		if( pmm->fatal_power_err )
+		{
+			/* Nothing good has happened - exit */
+			return;
+		}
+
+
+		#if MALI_PMM_TRACE
+		MALI_PRINT( ("PMM Trace: Event processed, msgs (sent/read) = %d/%d, int msgs (sent/read) = %d/%d, no events = %d, waiting = %d\n",
+				pmm->messages_sent, pmm->messages_received, pmm->imessages_sent, pmm->imessages_received, pmm->no_events, pmm->waiting) );
+		#endif
+	}
+
+	if( pmm->status == MALI_PMM_STATUS_IDLE && pmm->waiting > 0 )
+	{
+		/* For events we ignored whilst we were busy, add a new
+		 * scheduled time to look at them */
+		_mali_osk_irq_schedulework( pmm->irq );
+	}
+}
+
+#if MALI_STATE_TRACKING
+/** @brief Dump a human-readable summary of the PMM state into buf.
+ *
+ * @param buf  output buffer
+ * @param size total size of buf in bytes
+ * @return number of characters appended
+ *
+ * Fix: each _mali_osk_snprintf was passed "size + len" as the space
+ * available, i.e. a GROWING bound instead of the REMAINING space, which
+ * permits writes past the end of buf once len > 0. The correct remaining
+ * space for an append at buf + len is "size - len".
+ */
+u32 malipmm_subsystem_dump_state(char *buf, u32 size)
+{
+	int len = 0;
+	_mali_pmm_internal_state_t *pmm = GET_PMM_STATE_PTR;
+
+	if( !pmm )
+	{
+		len += _mali_osk_snprintf(buf + len, size - len, "PMM: Null state\n");
+	}
+	else
+	{
+		len += _mali_osk_snprintf(buf+len, size - len, "Locks:\n  PMM lock acquired: %s\n",
+				pmm->mali_pmm_lock_acquired ? "true" : "false");
+		len += _mali_osk_snprintf(buf+len, size - len,
+				"PMM state:\n  Previous status: %s\n  Status: %s\n  Current event: %s\n  Policy: %s\n  Check policy: %s\n  State: %s\n",
+				pmm_trace_status[pmm->mali_last_pmm_status], pmm_trace_status[pmm->status],
+				pmm_trace_events[pmm->mali_new_event_status], pmm_trace_policy[pmm->policy],
+				pmm->check_policy ? "true" : "false", pmm_trace_state[pmm->state]);
+		len += _mali_osk_snprintf(buf+len, size - len,
+				"PMM cores:\n  Cores registered: %d\n  Cores powered: %d\n  Cores idle: %d\n"
+				"  Cores pending down: %d\n  Cores pending up: %d\n  Cores ack down: %d\n  Cores ack up: %d\n",
+				pmm->cores_registered, pmm->cores_powered, pmm->cores_idle, pmm->cores_pend_down,
+				pmm->cores_pend_up, pmm->cores_ack_down, pmm->cores_ack_up);
+		len += _mali_osk_snprintf(buf+len, size - len, "PMM misc:\n  PMU init: %s\n  Messages queued: %d\n"
+				"  Waiting: %d\n  No events: %d\n  Missed events: %d\n  Fatal power error: %s\n",
+				pmm->pmu_initialized ? "true" : "false", _mali_osk_atomic_read(&(pmm->messages_queued)),
+				pmm->waiting, pmm->no_events, pmm->missed, pmm->fatal_power_err ? "true" : "false");
+	}
+	return len;
+}
+#endif /* MALI_STATE_TRACKING */
+
+#endif /* USING_MALI_PMM */
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm.h b/drivers/media/video/samsung/mali/common/pmm/mali_pmm.h
new file mode 100644
index 0000000..5170650
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm.h
+ * Defines the power management module for the kernel device driver
+ */
+
+#ifndef __MALI_PMM_H__
+#define __MALI_PMM_H__
+
+/* For mali_pmm_message_data and MALI_PMM_EVENT_UK_* defines */
+#include "mali_uk_types.h"
+#include "mali_platform.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @defgroup pmmapi Power Management Module APIs
+ *
+ * @{
+ */
+
+/** OS event tester */
+#define PMM_OS_TEST 0
+
+/** @brief Compile option to turn on/off tracing */
+#define MALI_PMM_TRACE 0
+#define MALI_PMM_TRACE_SENT_EVENTS 0
+
+/** @brief Compile option to switch between always on or job control PMM policy */
+#define MALI_PMM_ALWAYS_ON 0
+
+/** @brief Overrides hardware PMU and uses software simulation instead
+ * @note This even stops intialization of PMU and cores being powered on at start up
+ */
+#define MALI_PMM_NO_PMU 0
+
+/** @brief PMM debug print to control debug message level */
+#define MALIPMM_DEBUG_PRINT(args) \
+ MALI_DEBUG_PRINT(3, args)
+
+
+/** @brief power management event message identifiers.
+ */
+/* These must match up with the pmm_trace_events & pmm_trace_events_internal
+ * arrays
+ */
+typedef enum mali_pmm_event_id
+{
+ MALI_PMM_EVENT_OS_POWER_UP = 0, /**< OS power up event */
+ MALI_PMM_EVENT_OS_POWER_DOWN = 1, /**< OS power down event */
+ MALI_PMM_EVENT_JOB_SCHEDULED = 2, /**< Job scheduled to run event */
+ MALI_PMM_EVENT_JOB_QUEUED = 3, /**< Job queued (but not run) event */
+ MALI_PMM_EVENT_JOB_FINISHED = 4, /**< Job finished event */
+ MALI_PMM_EVENT_TIMEOUT = 5, /**< Time out timer has expired */
+ MALI_PMM_EVENT_DVFS_PAUSE = 6, /**< Mali device pause event */
+ MALI_PMM_EVENT_DVFS_RESUME = 7, /**< Mali device resume event */
+
+ MALI_PMM_EVENT_UKS = 200, /**< Events from the user-side start here */
+ MALI_PMM_EVENT_UK_EXAMPLE = _MALI_PMM_EVENT_UK_EXAMPLE,
+
+ MALI_PMM_EVENT_INTERNALS = 1000,
+ MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK = 1001, /**< Internal power up acknowledgement */
+ MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK = 1002, /**< Internal power down acknowledgment */
+} mali_pmm_event_id;
+
+
+/** @brief Use this when the power up/down callbacks do not need any OS data. */
+#define MALI_PMM_NO_OS_DATA 1
+
+
+/** @brief Geometry and pixel processor identifiers for the PMM
+ *
+ * @note these match the ARM Mali 400 PMU hardware definitions, apart from the "SYSTEM"
+ */
+typedef enum mali_pmm_core_id_tag
+{
+	MALI_PMM_CORE_SYSTEM = 0x00000000, /**< All of the Mali hardware */
+	MALI_PMM_CORE_GP = 0x00000001, /**< Mali GP2 */
+	MALI_PMM_CORE_L2 = 0x00000002, /**< Level 2 cache */
+	MALI_PMM_CORE_PP0 = 0x00000004, /**< Mali 200 pixel processor 0 */
+	MALI_PMM_CORE_PP1 = 0x00000008, /**< Mali 200 pixel processor 1 */
+	MALI_PMM_CORE_PP2 = 0x00000010, /**< Mali 200 pixel processor 2 */
+	MALI_PMM_CORE_PP3 = 0x00000020, /**< Mali 200 pixel processor 3 */
+	MALI_PMM_CORE_PP_ALL = 0x0000003C /**< Mali 200 pixel processors 0-3 */
+} mali_pmm_core_id;
+
+
+/** @brief PMM bitmask of mali_pmm_core_ids
+ */
+typedef u32 mali_pmm_core_mask;
+
+/** @brief PMM event timestamp type (event-queue insertion time, in ticks)
+ */
+typedef u32 mali_pmm_timestamp;
+
+/** @brief power management event message struct
+ */
+typedef struct _mali_pmm_message
+{
+ mali_pmm_event_id id; /**< event id */
+ mali_pmm_message_data data; /**< specific data associated with the event */
+ mali_pmm_timestamp ts; /**< timestamp the event was placed in the event queue */
+} mali_pmm_message_t;
+
+
+
+/** @brief the state of the power management module.
+ */
+/* These must match up with the pmm_trace_state array */
+typedef enum mali_pmm_state_tag
+{
+ MALI_PMM_STATE_UNAVAILABLE = 0, /**< PMM is not available */
+ MALI_PMM_STATE_SYSTEM_ON = 1, /**< All of the Mali hardware is on */
+ MALI_PMM_STATE_SYSTEM_OFF = 2, /**< All of the Mali hardware is off */
+ MALI_PMM_STATE_SYSTEM_TRANSITION = 3 /**< System is changing state */
+} mali_pmm_state;
+
+
+/** @brief a power management policy.
+ */
+/* These must match up with the pmm_trace_policy array */
+typedef enum mali_pmm_policy_tag
+{
+ MALI_PMM_POLICY_NONE = 0, /**< No policy */
+ MALI_PMM_POLICY_ALWAYS_ON = 1, /**< Always on policy */
+ MALI_PMM_POLICY_JOB_CONTROL = 2, /**< Job control policy */
+ MALI_PMM_POLICY_RUNTIME_JOB_CONTROL = 3 /**< Run time power management control policy */
+} mali_pmm_policy;
+
+/** @brief Function to power up MALI
+ *
+ * @param cores core mask to power up the cores
+ *
+ * @return error code if MALI fails to power up
+ */
+_mali_osk_errcode_t malipmm_powerup( u32 cores );
+
+/** @brief Function to power down MALI
+ *
+ * @param cores core mask to power down the cores
+ * @param power_mode the power mode to which MALI transitions
+ *
+ * @return error code if MALI fails to power down
+ */
+_mali_osk_errcode_t malipmm_powerdown( u32 cores, mali_power_mode power_mode );
+
+/** @brief Function to report to the OS when the power down has finished
+ *
+ * @param data The event message data that initiated the power down
+ */
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data);
+
+/** @brief Function to report to the OS when the power up has finished
+ *
+ * @param data The event message data that initiated the power up
+ */
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data);
+
+/** @brief Function to report that DVFS operation done
+ *
+ * @param data The event message data
+ */
+void _mali_osk_pmm_dvfs_operation_done(mali_pmm_message_data data);
+
+#if MALI_POWER_MGMT_TEST_SUITE
+/** @brief Function to notify power management events
+ *
+ * @param data The event message data
+ */
+void _mali_osk_pmm_policy_events_notifications(mali_pmm_event_id event_id);
+
+#endif
+
+/** @brief Function to power up MALI
+ *
+ * @note powers up the MALI during MALI device driver is unloaded
+ */
+void malipmm_force_powerup( void );
+
+/** @brief Function to report the OS that device is idle
+ *
+ * @note inform the OS that device is idle
+ */
+_mali_osk_errcode_t _mali_osk_pmm_dev_idle( void );
+
+/** @brief Function to report the OS to activate device
+ *
+ * @note inform the os that device needs to be activated
+ */
+int _mali_osk_pmm_dev_activate( void );
+
+/** @brief Function to report OS PMM for cleanup
+ *
+ * @note Function to report OS PMM for cleanup
+ */
+void _mali_osk_pmm_ospmm_cleanup( void );
+
+/** @brief Queries the current state of the PMM software
+ *
+ * @note the state of the PMM can change after this call has returned
+ *
+ * @return the current PMM state value
+ */
+mali_pmm_state _mali_pmm_state( void );
+
+/** @brief List of cores that are registered with the PMM
+ *
+ * This will return the cores that have been currently registered with the PMM,
+ * which is a bitwise OR of the mali_pmm_core_id_tags. A value of 0x0 means that
+ * there are no cores registered.
+ *
+ * @note the list of cores can change after this call has returned
+ *
+ * @return a bit mask representing all the cores that have been registered with the PMM
+ */
+mali_pmm_core_mask _mali_pmm_cores_list( void );
+
+/** @brief List of cores that are powered up in the PMM
+ *
+ * This will return the subset of the cores that can be listed using mali_pmm_cores_
+ * list, that have power. It is a bitwise OR of the mali_pmm_core_id_tags. A value of
+ * 0x0 means that none of the cores registered are powered.
+ *
+ * @note the list of cores can change after this call has returned
+ *
+ * @return a bit mask representing all the cores that are powered up
+ */
+mali_pmm_core_mask _mali_pmm_cores_powered( void );
+
+
+/** @brief List of power management policies that are supported by the PMM
+ *
+ * Given an empty array of policies - policy_list - which contains the number
+ * of entries as specified by - policy_list_size, this function will populate
+ * the list with the available policies. If the policy_list is too small for
+ * all the policies then only policy_list_size entries will be returned. If the
+ * policy_list is bigger than the number of available policies then, the extra
+ * entries will be set to MALI_PMM_POLICY_NONE.
+ * The function will also update available_policies with the number of policies
+ * that are available, even if it exceeds the policy_list_size.
+ * The function will succeed if all policies could be returned, else it will
+ * fail if none or only a subset of policies could be returned.
+ * The function will also fail if no policy_list is supplied, though
+ * available_policies is optional.
+ *
+ * @note this is a STUB function and is not yet implemented
+ *
+ * @param policy_list_size is the number of policies that can be returned in
+ * the policy_list argument
+ * @param policy_list is an array of policies that should be populated with
+ * the list of policies that are supported by the PMM
+ * @param policies_available optional argument, if non-NULL will be set to the
+ * number of policies available
+ * @return _MALI_OSK_ERR_OK if the policies could be listed, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_list_policies(
+ u32 policy_list_size,
+ mali_pmm_policy *policy_list,
+ u32 *policies_available );
+
+/** @brief Set the power management policy in the PMM
+ *
+ * Given a valid supported policy, this function will change the PMM to use
+ * this new policy
+ * The function will fail if the policy given is invalid or unsupported.
+ *
+ * @note this is a STUB function and is not yet implemented
+ *
+ * @param policy the new policy to be set
+ * @return _MALI_OSK_ERR_OK if the policy could be set, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_set_policy( mali_pmm_policy policy );
+
+/** @brief Get the current power management policy in the PMM
+ *
+ * Given a pointer to a policy data type, this function will return the current
+ * policy that is in effect for the PMM. This maybe out of date if there is a
+ * pending set policy call that has not been serviced.
+ * The function will fail if the policy given is NULL.
+ *
+ * @note the policy of the PMM can change after this call has returned
+ *
+ * @param policy a pointer to a policy that can be updated to the current
+ * policy
+ * @return _MALI_OSK_ERR_OK if the policy could be returned, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t _mali_pmm_get_policy( mali_pmm_policy *policy );
+
+#if MALI_PMM_TRACE
+
+/** @brief Indicates when a hardware state change occurs in the PMM
+ *
+ * @param old a mask of the cores indicating the previous state of the cores
+ * @param newstate a mask of the cores indicating the new current state of the cores
+ */
+void _mali_pmm_trace_hardware_change( mali_pmm_core_mask old, mali_pmm_core_mask newstate );
+
+/** @brief Indicates when a state change occurs in the PMM
+ *
+ * @param old the previous state for the PMM
+ * @param newstate the new current state of the PMM
+ */
+void _mali_pmm_trace_state_change( mali_pmm_state old, mali_pmm_state newstate );
+
+/** @brief Indicates when a policy change occurs in the PMM
+ *
+ * @param old the previous policy for the PMM
+ * @param newpolicy the new current policy of the PMM
+ */
+void _mali_pmm_trace_policy_change( mali_pmm_policy old, mali_pmm_policy newpolicy );
+
+/** @brief Records when an event message is read by the event system
+ *
+ * @param event the message details
+ * @param received MALI_TRUE when the message is received by the PMM, else it is being sent
+ */
+void _mali_pmm_trace_event_message( mali_pmm_message_t *event, mali_bool received );
+
+#endif /* MALI_PMM_TRACE */
+
+/** @brief Dumps the current state of OS PMM thread
+ */
+#if MALI_STATE_TRACKING
+u32 mali_pmm_dump_os_thread_state( char *buf, u32 size );
+#endif /* MALI_STATE_TRACKING */
+
+/** @} */ /* end group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_H__ */
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_pmu.c b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_pmu.c
new file mode 100644
index 0000000..a8160ac
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_pmu.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+#if USING_MALI_PMU
+#if USING_MALI_PMM
+
+#include "mali_pmm.h"
+
+/* Internal test on/off */
+#define PMU_TEST 0
+
+#if MALI_POWER_MGMT_TEST_SUITE
+#include "mali_platform_pmu_internal_testing.h"
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+/** @brief PMU hardware info
+ */
+typedef struct platform_pmu
+{
+ u32 reg_base_addr; /**< PMU registers base address */
+ u32 reg_size; /**< PMU registers size */
+ const char *name; /**< PMU name */
+ u32 irq_num; /**< PMU irq number */
+
+ mali_io_address reg_mapped; /**< IO-mapped pointer to registers */
+} platform_pmu_t;
+
+static platform_pmu_t *pmu_info = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
+ PMU_REG_ADDR_MGMT_INT_STAT = 0x14, /*< Interrupt status register */
+ PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
+ PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Software delay register */
+ PMU_REG_ADDR_MGMT_MASTER_PWR_UP = 0x24, /*< Master power up register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+/* Internal functions */
+static u32 pmu_reg_read(platform_pmu_t *pmu, u32 relative_address);
+static void pmu_reg_write(platform_pmu_t *pmu, u32 relative_address, u32 new_val);
+static mali_pmm_core_mask pmu_translate_cores_to_pmu(mali_pmm_core_mask cores);
+#if PMU_TEST
+/* BUGFIX: these declarations previously named pmm_pmu_dump_regs/pmm_pmu_test
+ * (the latter with no return type, i.e. implicit int), but the definitions
+ * below and the call sites in mali_pmm_pmu_init() use pmu_dump_regs/pmu_test,
+ * so a PMU_TEST build could not compile.  Declare the names actually defined. */
+static void pmu_dump_regs( platform_pmu_t *pmu );
+static void pmu_test( platform_pmu_t *pmu, u32 cores );
+#endif
+
+/* Discover and map the hardware PMU described by a PMU-type OSK resource.
+ * Allocates the module-level pmu_info, requests and io-maps its register
+ * window, and (when PMU_TEST is enabled) runs the internal self test.
+ * Returns _MALI_OSK_ERR_INVALID_ARGS for a non-PMU or incomplete resource.
+ * NOTE(review): both failure paths after allocation jump to "cleanup" and
+ * report _MALI_OSK_ERR_NOMEM, even when the actual failure was the region
+ * request/ioremap rather than allocation - confirm callers only care that
+ * init failed, not why. */
+_mali_osk_errcode_t mali_pmm_pmu_init(_mali_osk_resource_t *resource)
+{
+
+	if( resource->type == PMU )
+	{
+		if( (resource->base == 0) ||
+			(resource->description == NULL) )
+		{
+			/* NOTE: We currently don't care about any other resource settings */
+			MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Missing PMU set up information\n"));
+			MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+		}
+		pmu_info = (platform_pmu_t *)_mali_osk_malloc(sizeof(*pmu_info));
+		MALI_CHECK_NON_NULL( pmu_info, _MALI_OSK_ERR_NOMEM );
+
+		/* All values get 0 as default */
+		_mali_osk_memset(pmu_info, 0, sizeof(*pmu_info));
+
+		/* Describe the register window from the resource before mapping it */
+		pmu_info->reg_base_addr = resource->base;
+		pmu_info->reg_size = (u32)PMU_REGISTER_ADDRESS_SPACE_SIZE;
+		pmu_info->name = resource->description;
+		pmu_info->irq_num = resource->irq;
+
+		if( _MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->name) )
+		{
+			MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Could not request register region (0x%08X - 0x%08X) for %s\n",
+					pmu_info->reg_base_addr, pmu_info->reg_base_addr + pmu_info->reg_size - 1, pmu_info->name));
+			goto cleanup;
+		}
+		else
+		{
+			MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: request_mem_region: (0x%08X - 0x%08X) for %s\n",
+					pmu_info->reg_base_addr, pmu_info->reg_base_addr + pmu_info->reg_size - 1, pmu_info->name));
+		}
+
+		pmu_info->reg_mapped = _mali_osk_mem_mapioregion( pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->name );
+
+		if( 0 == pmu_info->reg_mapped )
+		{
+			MALI_PRINT_ERROR(("PLATFORM mali400-pmu: Could not ioremap registers for %s .\n", pmu_info->name));
+			/* Undo the region request before falling into cleanup */
+			_mali_osk_mem_unreqregion( pmu_info->reg_base_addr, pmu_info->reg_size );
+			goto cleanup;
+		}
+		else
+		{
+			MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: ioremap_nocache: Internal ptr: (0x%08X - 0x%08X) for %s\n",
+					(u32) pmu_info->reg_mapped,
+					((u32)pmu_info->reg_mapped)+ pmu_info->reg_size - 1,
+					pmu_info->name));
+		}
+
+		MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Success: Mapping registers to %s\n", pmu_info->name));
+
+#if PMU_TEST
+		/* NOTE(review): these calls rely on the pmu_test declaration under
+		 * the PMU_TEST guard near the top of this file matching the
+		 * definition below - verify a PMU_TEST build compiles. */
+		pmu_test(pmu_info, (MALI_PMM_CORE_GP));
+		pmu_test(pmu_info, (MALI_PMM_CORE_GP|MALI_PMM_CORE_L2|MALI_PMM_CORE_PP0));
+#endif
+
+		MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Initialized - %s\n", pmu_info->name) );
+	}
+	else
+	{
+		/* Didn't expect a different resource */
+		MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+	}
+
+	MALI_SUCCESS;
+
+cleanup:
+	/* pmu_info is non-NULL here; release it and leave the module unconfigured */
+	_mali_osk_free(pmu_info);
+	pmu_info = NULL;
+	MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+}
+
+/* Release everything mali_pmm_pmu_init() set up: unmap and release the
+ * register window and free pmu_info.  Safe to call when init never ran
+ * (pmu_info NULL).  Rejects any resource type other than PMU. */
+_mali_osk_errcode_t mali_pmm_pmu_deinit(_mali_osk_resource_type_t *type)
+{
+	if (*type != PMU)
+	{
+		/* Didn't expect a different resource */
+		MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+	}
+
+	if (pmu_info != NULL)
+	{
+		_mali_osk_mem_unmapioregion(pmu_info->reg_base_addr, pmu_info->reg_size, pmu_info->reg_mapped);
+		_mali_osk_mem_unreqregion(pmu_info->reg_base_addr, pmu_info->reg_size);
+		_mali_osk_free(pmu_info);
+		pmu_info = NULL;
+		MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: Terminated PMU\n") );
+	}
+
+	MALI_SUCCESS;
+
+}
+
+/* Ask the hardware PMU to power down the given cores, then poll the
+ * sleep-status register (up to 10 x 1ms) until every requested core
+ * reports asleep.  Returns _MALI_OSK_ERR_TIMEOUT if they never do. */
+_mali_osk_errcode_t mali_pmm_pmu_powerdown(u32 cores)
+{
+	u32 pmu_cores;
+	u32 attempts;
+
+	MALI_DEBUG_ASSERT_POINTER(pmu_info);
+	MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
+	MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power down (0x%x)\n", cores) );
+
+	pmu_cores = pmu_translate_cores_to_pmu(cores);
+	pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_POWER_DOWN, pmu_cores );
+
+	/* Wait for cores to be powered down - poll at most 10ms */
+	for( attempts = 10; attempts > 0; attempts-- )
+	{
+		u32 asleep = pmu_reg_read( pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS ) & pmu_cores;
+		if( asleep == pmu_cores )
+		{
+			/* All cores we wanted are now asleep */
+			MALI_SUCCESS;
+		}
+		_mali_osk_time_ubusydelay(1000); /* 1ms */
+	}
+
+	MALI_ERROR(_MALI_OSK_ERR_TIMEOUT);
+}
+
+/* Ask the hardware PMU to power up the given cores (interrupts masked,
+ * status polled), waiting up to 10 x 1ms for all of them to wake.
+ * Returns _MALI_OSK_ERR_TIMEOUT if any stays asleep. */
+_mali_osk_errcode_t mali_pmm_pmu_powerup(u32 cores)
+{
+	u32 pmu_cores;
+	u32 attempts;
+
+	MALI_DEBUG_ASSERT_POINTER(pmu_info);
+	MALI_DEBUG_ASSERT( cores != 0 ); /* Shouldn't receive zero from PMM */
+	MALI_DEBUG_PRINT( 4, ("PLATFORM mali400-pmu: power up (0x%x)\n", cores) );
+
+	/* Don't use interrupts - just poll status */
+	pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_INT_MASK, 0 );
+	pmu_cores = pmu_translate_cores_to_pmu(cores);
+	pmu_reg_write( pmu_info, (u32)PMU_REG_ADDR_MGMT_POWER_UP, pmu_cores );
+
+	for( attempts = 10; attempts > 0; attempts-- )
+	{
+		/* A set status bit means the core is still asleep */
+		u32 still_asleep = pmu_reg_read( pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS ) & pmu_cores;
+		if( 0 == still_asleep )
+		{
+			/* All cores we wanted are now awake */
+			MALI_SUCCESS;
+		}
+		_mali_osk_time_ubusydelay(1000); /* 1ms */
+	}
+
+	MALI_ERROR(_MALI_OSK_ERR_TIMEOUT);
+}
+
+
+/***** INTERNAL *****/
+
+/** @brief Internal PMU function to translate the cores bit mask
+ * into something the hardware PMU understands
+ *
+ * CONSISTENCY FIX: the forward declaration at the top of this file returns
+ * mali_pmm_core_mask, but this definition returned u32.  The types are the
+ * same (mali_pmm_core_mask is a typedef of u32), so this is purely making
+ * the definition match its declaration.
+ *
+ * @param cores PMM cores bitmask
+ * @return PMU hardware cores bitmask
+ */
+static mali_pmm_core_mask pmu_translate_cores_to_pmu(mali_pmm_core_mask cores)
+{
+	/* For Mali 400 PMU the cores mask is already the same as what
+	 * the hardware PMU expects.
+	 * For other hardware, some translation can be done here, by
+	 * translating the MALI_PMM_CORE_* bits into specific hardware
+	 * bits
+	 */
+	return cores;
+}
+
+/** @brief Internal PMU function to read a PMU register
+ *
+ * @param pmu handle that identifies the PMU hardware
+ * @param relative_address relative PMU hardware address to read from
+ * @return 32-bit value that was read from the address
+ */
+static u32 pmu_reg_read(platform_pmu_t *pmu, u32 relative_address)
+{
+	u32 val;
+
+	MALI_DEBUG_ASSERT_POINTER(pmu);
+	/* Accesses must be word aligned and inside the mapped register window */
+	MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+	MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);
+
+	val = _mali_osk_mem_ioread32(pmu->reg_mapped, relative_address);
+
+	MALI_DEBUG_PRINT( 5, ("PMU: reg_read: %s Addr:0x%04X Val:0x%08x\n",
+			pmu->name, relative_address, val));
+
+	return val;
+}
+
+/** @brief Internal PMU function to write to a PMU register
+ *
+ * @param pmu handle that identifies the PMU hardware
+ * @param relative_address relative PMU hardware address to write to
+ * @param new_val new 32-bit value to write into the address
+ */
+static void pmu_reg_write(platform_pmu_t *pmu, u32 relative_address, u32 new_val)
+{
+	MALI_DEBUG_ASSERT_POINTER(pmu);
+	/* Accesses must be word aligned and inside the mapped register window */
+	MALI_DEBUG_ASSERT((relative_address & 0x03) == 0);
+	MALI_DEBUG_ASSERT(relative_address < pmu->reg_size);
+
+	/* Trace the write before it is issued to the hardware */
+	MALI_DEBUG_PRINT( 5, ("PMU: reg_write: %s Addr:0x%04X Val:0x%08x\n",
+			pmu->name, relative_address, new_val));
+
+	_mali_osk_mem_iowrite32(pmu->reg_mapped, relative_address, new_val);
+}
+
+#if PMU_TEST
+
+/***** TEST *****/
+
+static void pmu_dump_regs( platform_pmu_t *pmu )
+{
+	/* Walk the whole PMU register window, one 32-bit word at a time */
+	u32 offset;
+	for( offset = 0x0; offset < PMU_REGISTER_ADDRESS_SPACE_SIZE; offset += 0x4 )
+	{
+		MALI_PRINT( ("PMU_REG: 0x%08x: 0x%04x\n", (offset + pmu->reg_base_addr), pmu_reg_read( pmu, offset ) ) );
+	}
+}
+
+/* This function is an internal test for the PMU without any Mali h/w interaction */
+/* NOTE(review): this PMU_TEST-only code calls _mali_pmm_pmu_power_down,
+ * _mali_pmm_pmu_power_up, _mali_pmm_pmu_irq_power_up and
+ * _mali_pmm_pmu_irq_power_up_clear, none of which are declared or defined in
+ * this file (the public entry points here are mali_pmm_pmu_powerdown/up).
+ * It appears stale - confirm it still builds before enabling PMU_TEST. */
+static void pmu_test( platform_pmu_t *pmu, u32 cores )
+{
+	u32 stat;
+	u32 timeout;
+
+	MALI_PRINT( ("PMU_TEST: Start\n") );
+
+	pmu_dump_regs( pmu );
+
+	MALI_PRINT( ("PMU_TEST: Power down cores: 0x%x\n", cores) );
+	_mali_pmm_pmu_power_down( pmu, cores, MALI_TRUE );
+
+	/* After power down, the status bits for the cores should be set */
+	stat = pmu_reg_read( pmu, (u32)PMU_REG_ADDR_MGMT_STATUS );
+	MALI_PRINT( ("PMU_TEST: %s\n", (stat & cores) == cores ? "SUCCESS" : "FAIL" ) );
+
+	pmu_dump_regs( pmu );
+
+	MALI_PRINT( ("PMU_TEST: Power up cores: 0x%x\n", cores) );
+	_mali_pmm_pmu_power_up( pmu, cores, MALI_FALSE );
+
+	MALI_PRINT( ("PMU_TEST: Waiting for power up...\n") );
+	timeout = 1000; /* 1 sec */
+	while( !_mali_pmm_pmu_irq_power_up(pmu) && timeout > 0 )
+	{
+		_mali_osk_time_ubusydelay(1000); /* 1ms */
+		timeout--;
+	}
+
+	MALI_PRINT( ("PMU_TEST: Waited %dms for interrupt\n", (1000-timeout)) );
+	/* After power up, the status bits for the cores should be clear */
+	stat = pmu_reg_read( pmu, (u32)PMU_REG_ADDR_MGMT_STATUS );
+	MALI_PRINT( ("PMU_TEST: %s\n", (stat & cores) == 0 ? "SUCCESS" : "FAIL" ) );
+
+	_mali_pmm_pmu_irq_power_up_clear(pmu);
+
+	pmu_dump_regs( pmu );
+
+	MALI_PRINT( ("PMU_TEST: Finish\n") );
+}
+#endif /* PMU_TEST */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+u32 pmu_get_power_up_down_info(void)
+{
+	/* Snapshot of the PMU sleep-status register: a set bit means the
+	 * corresponding core is currently powered down. */
+	u32 status = pmu_reg_read(pmu_info, (u32)PMU_REG_ADDR_MGMT_STATUS);
+	return status;
+}
+
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+#endif /* USING_MALI_PMM */
+#endif /* USING_MALI_PMU */
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_pmu.h b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_pmu.h
new file mode 100644
index 0000000..7525cac
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_pmu.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+/**
+ * @file mali_platform.h
+ * Platform specific Mali driver functions
+ */
+
+#include "mali_osk.h"
+
+#if !USING_MALI_PMM
+/* @brief System power up/down cores that can be passed into mali_platform_powerdown/up() */
+#define MALI_PLATFORM_SYSTEM 0
+#endif
+
+#if USING_MALI_PMM
+#if USING_MALI_PMU
+#include "mali_pmm.h"
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ * When using PMM, it is also called from the PMM start up to initialise the
+ * system PMU
+ *
+ * @param resource This is NULL when called on first driver start up, else it will
+ * be a pointer to a PMU resource
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmm_pmu_init(_mali_osk_resource_t *resource);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ * When using PMM, it is also called from the PMM termination code to clean up the
+ * system PMU
+ *
+ * @param type This is NULL when called on driver exit, else it will
+ * be a pointer to a PMU resource type (not the full resource)
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmm_pmu_deinit(_mali_osk_resource_type_t *type);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Called as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ *
+ * @param cores This is MALI_PLATFORM_SYSTEM when called without PMM, else it will
+ * be a mask of cores to power down based on the mali_pmm_core_id enum
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmm_pmu_powerdown(u32 cores);
+
+/** @brief Platform specific powerup sequence of MALI
+ *
+ * Called as part of platform deinit if there is no PMM support, else the
+ * PMM will call it.
+ *
+ * @param cores This is MALI_PLATFORM_SYSTEM when called without PMM, else it will
+ * be a mask of cores to power down based on the mali_pmm_core_id enum
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmm_pmu_powerup(u32 cores);
+
+#if MALI_POWER_MGMT_TEST_SUITE
+#if USING_MALI_PMM
+#if USING_MALI_PMU
+/** @brief function to get status of individual cores
+ *
+ * This function is used by power management test suite to get the status of powered up/down the number
+ * of cores
+ * @return the raw PMU sleep-status register value (a set bit means the
+ * corresponding core is powered down)
+ */
+u32 pmu_get_power_up_down_info(void);
+#endif
+#endif
+#endif
+#endif
+#endif
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy.c b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy.c
new file mode 100644
index 0000000..87b6ec2
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy.c
+ * Implementation of the common routines for power management module
+ * policies
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+
+#include "mali_pmm_policy_alwayson.h"
+#include "mali_pmm_policy_jobcontrol.h"
+
+/* Call back function for timer expiration */
+static void pmm_policy_timer_callback( void *arg );
+
+_mali_osk_errcode_t pmm_policy_timer_init( _pmm_policy_timer_t *pptimer, u32 timeout, mali_pmm_event_id id )
+{
+	MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+	/* Start from an all-zero timer description */
+	_mali_osk_memset(pptimer, 0, sizeof(*pptimer));
+
+	pptimer->timer = _mali_osk_timer_init();
+	if( NULL == pptimer->timer )
+	{
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Timer created: hook up the expiry callback and remember the settings */
+	_mali_osk_timer_setcallback( pptimer->timer, pmm_policy_timer_callback, (void *)pptimer );
+	pptimer->timeout = timeout;
+	pptimer->event_id = id;
+	MALI_SUCCESS;
+}
+
+/* Timer expiry callback.  Marks the policy timer as expired and requests a
+ * policy check; pmm_policy_timer_raise_event() later converts the expiry
+ * into a queued PMM event.
+ * NOTE(review): presumably runs in OSK timer context - confirm against the
+ * _mali_osk_timer implementation before touching non-atomic state here. */
+static void pmm_policy_timer_callback( void *arg )
+{
+	_pmm_policy_timer_t *pptimer = (_pmm_policy_timer_t *)arg;
+
+	MALI_DEBUG_ASSERT_POINTER(pptimer);
+	/* The callback should only ever fire while the timer is armed */
+	MALI_DEBUG_ASSERT( pptimer->set );
+
+	/* Set timer expired and flag there is a policy to check */
+	pptimer->expired = MALI_TRUE;
+	malipmm_set_policy_check();
+}
+
+
+/* Tear down a policy timer created by pmm_policy_timer_init(): cancel any
+ * pending expiry, release the OSK timer object, and clear the handle so a
+ * stale pointer cannot be reused. */
+void pmm_policy_timer_term( _pmm_policy_timer_t *pptimer )
+{
+	MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+	/* Cancel first so the callback cannot fire while we terminate */
+	_mali_osk_timer_del( pptimer->timer );
+	_mali_osk_timer_term( pptimer->timer );
+	pptimer->timer = NULL;
+}
+
+/* Arm the policy timer if it is not already running.
+ * Returns MALI_TRUE when the timer was started, MALI_FALSE when it was
+ * already armed (the running timer is left untouched). */
+mali_bool pmm_policy_timer_start( _pmm_policy_timer_t *pptimer )
+{
+	MALI_DEBUG_ASSERT_POINTER(pptimer);
+	MALI_DEBUG_ASSERT_POINTER(pptimer->timer);
+
+	if( pptimer->set )
+	{
+		return MALI_FALSE;
+	}
+
+	pptimer->set = MALI_TRUE;
+	pptimer->expired = MALI_FALSE;
+	pptimer->start = _mali_osk_time_tickcount();
+	_mali_osk_timer_add( pptimer->timer, pptimer->timeout );
+	return MALI_TRUE;
+}
+
+/* Disarm the policy timer if it is running, discarding any pending expiry.
+ * Returns MALI_TRUE when a running timer was stopped, MALI_FALSE when the
+ * timer was not armed. */
+mali_bool pmm_policy_timer_stop( _pmm_policy_timer_t *pptimer )
+{
+	MALI_DEBUG_ASSERT_POINTER(pptimer);
+	MALI_DEBUG_ASSERT_POINTER(pptimer->timer);
+
+	if( !pptimer->set )
+	{
+		return MALI_FALSE;
+	}
+
+	_mali_osk_timer_del( pptimer->timer );
+	pptimer->set = MALI_FALSE;
+	pptimer->expired = MALI_FALSE;
+	return MALI_TRUE;
+}
+
+/* If the timer has expired, turn the expiry into a PMM event message and
+ * free the timer for re-arming.  Returns MALI_TRUE when an event was raised,
+ * MALI_FALSE when the timer had not expired. */
+mali_bool pmm_policy_timer_raise_event( _pmm_policy_timer_t *pptimer )
+{
+	MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+	if( !pptimer->expired )
+	{
+		return MALI_FALSE;
+	}
+
+	{
+		_mali_uk_pmm_message_s event = {
+			NULL,
+			MALI_PMM_EVENT_TIMEOUT, /* Assume timeout id, but set it below */
+			0 };
+
+		event.id = pptimer->event_id;
+		event.data = (mali_pmm_message_data)pptimer->start;
+
+		/* Don't need to do any other notification with this timer */
+		pptimer->expired = MALI_FALSE;
+		/* Unset timer so it is free to be set again */
+		pptimer->set = MALI_FALSE;
+
+		_mali_ukk_pmm_event_message( &event );
+	}
+
+	return MALI_TRUE;
+}
+
+/* A timer-stamped event is valid when other_start is not after timer_start,
+ * i.e. the timer was not restarted after the event was generated.
+ * NOTE(review): relies on _mali_osk_time_after(a,b) being non-zero when a is
+ * after b - confirm against the OSK time implementation. */
+mali_bool pmm_policy_timer_valid( u32 timer_start, u32 other_start )
+{
+	return (_mali_osk_time_after( other_start, timer_start ) == 0);
+}
+
+
+/* Initialize the policy currently selected in pmm->policy.
+ * Returns _MALI_OSK_ERR_FAULT for MALI_PMM_POLICY_NONE or an unknown value. */
+_mali_osk_errcode_t pmm_policy_init(_mali_pmm_internal_state_t *pmm)
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	switch( pmm->policy )
+	{
+	case MALI_PMM_POLICY_ALWAYS_ON:
+		return pmm_policy_init_always_on();
+
+	case MALI_PMM_POLICY_JOB_CONTROL:
+		return pmm_policy_init_job_control(pmm);
+
+	case MALI_PMM_POLICY_NONE:
+	default:
+		return _MALI_OSK_ERR_FAULT;
+	}
+}
+
+/* Terminate the policy currently selected in pmm->policy.  An unknown or
+ * NONE policy is reported as an error (nothing to terminate). */
+void pmm_policy_term(_mali_pmm_internal_state_t *pmm)
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	if( MALI_PMM_POLICY_ALWAYS_ON == pmm->policy )
+	{
+		pmm_policy_term_always_on();
+	}
+	else if( MALI_PMM_POLICY_JOB_CONTROL == pmm->policy )
+	{
+		pmm_policy_term_job_control();
+	}
+	else
+	{
+		/* MALI_PMM_POLICY_NONE or an unrecognized policy value */
+		MALI_PRINT_ERROR( ("PMM: Invalid policy terminated %d\n", pmm->policy) );
+	}
+}
+
+
+/* Hand an event to the handler of the active policy.
+ * Returns _MALI_OSK_ERR_FAULT for MALI_PMM_POLICY_NONE or an unknown value. */
+_mali_osk_errcode_t pmm_policy_process(_mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event)
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_DEBUG_ASSERT_POINTER(event);
+
+	switch( pmm->policy )
+	{
+	case MALI_PMM_POLICY_ALWAYS_ON:
+		return pmm_policy_process_always_on( pmm, event );
+
+	case MALI_PMM_POLICY_JOB_CONTROL:
+		return pmm_policy_process_job_control( pmm, event );
+
+	case MALI_PMM_POLICY_NONE:
+	default:
+		return _MALI_OSK_ERR_FAULT;
+	}
+}
+
+
+/* Run any deferred policy work.  Only the job control policy currently has
+ * anything to check; all other policies are a no-op. */
+void pmm_policy_check_policy( _mali_pmm_internal_state_t *pmm )
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	if( MALI_PMM_POLICY_JOB_CONTROL == pmm->policy )
+	{
+		pmm_policy_check_job_control();
+	}
+}
+
+
+#endif /* USING_MALI_PMM */
+
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy.h b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy.h
new file mode 100644
index 0000000..75ac8c8
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy.h
+ * Defines the power management module policies
+ */
+
+#ifndef __MALI_PMM_POLICY_H__
+#define __MALI_PMM_POLICY_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief Generic timer for use with policies
+ *
+ * Wraps an OS timer together with the bookkeeping needed to raise a PMM
+ * event on expiry. Managed through the pmm_policy_timer_* functions
+ * declared below.
+ */
+typedef struct _pmm_policy_timer
+{
+ u32 timeout; /**< Timeout for this timer in ticks */
+ mali_pmm_event_id event_id; /**< Event id that will be raised when timer expires */
+ _mali_osk_timer_t *timer; /**< Timer */
+ mali_bool set; /**< Timer set */
+ mali_bool expired; /**< Timer expired - event needs to be raised */
+ u32 start; /**< Timer start ticks */
+} _pmm_policy_timer_t;
+
+/** @brief Policy timer initialization
+ *
+ * This will create a timer for use in policies, but won't start it
+ *
+ * @param pptimer An empty timer structure to be initialized
+ * @param timeout Timeout in ticks for the timer
+ * @param id Event id that will be raised on timeout
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_timer_init( _pmm_policy_timer_t *pptimer, u32 timeout, mali_pmm_event_id id );
+
+/** @brief Policy timer termination
+ *
+ * This will clean up a timer that was previously used in policies, it
+ * will also stop it if started
+ *
+ * @param pptimer An initialized timer structure to be terminated
+ */
+void pmm_policy_timer_term( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer start
+ *
+ * This will start a previously created timer for use in policies
+ * When the timer expires after the initialized timeout it will raise
+ * a PMM event of the event id given on initialization
+ * As data for the event it will pass the start time of the timer
+ *
+ * @param pptimer A previously initialized policy timer
+ * @return MALI_TRUE if the timer was started, MALI_FALSE if it is already started
+ */
+mali_bool pmm_policy_timer_start( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer stop
+ *
+ * This will stop a previously created timer for use in policies
+ *
+ * @param pptimer A previously started policy timer
+ * @return MALI_TRUE if the timer was stopped, MALI_FALSE if it is already stopped
+ */
+mali_bool pmm_policy_timer_stop( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer event raiser
+ *
+ * This raises an event for an expired timer
+ *
+ * @param pptimer An expired policy timer
+ * @return MALI_TRUE if an event was raised, else MALI_FALSE
+ */
+mali_bool pmm_policy_timer_raise_event( _pmm_policy_timer_t *pptimer );
+
+/** @brief Policy timer valid checker
+ *
+ * This will check that a timer was started after a given time
+ *
+ * @param timer_start Time the timer was started
+ * @param other_start Time when another event or action occurred
+ * @return MALI_TRUE if the timer was started after the other time, else MALI_FALSE
+ */
+mali_bool pmm_policy_timer_valid( u32 timer_start, u32 other_start );
+
+
+/** @brief Common policy initialization
+ *
+ * This will initialize the current policy
+ *
+ * @note Any previously initialized policy should be terminated first
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Common policy termination
+ *
+ * This will terminate the current policy.
+ * @note This can be called when a policy has not been initialized
+ */
+void pmm_policy_term( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Common policy state changer
+ *
+ * Given the next available event message, this routine passes it to
+ * the current policy for processing
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+
+/** @brief Common policy checker
+ *
+ * If a policy timer fires then this function will be called to
+ * allow the policy to take the correct action
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_policy_check_policy( _mali_pmm_internal_state_t *pmm );
+
+/** @} */ /* End group pmmapi_policy */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_H__ */
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_alwayson.c b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_alwayson.c
new file mode 100644
index 0000000..0a6b471
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_alwayson.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_alwayson.c
+ * Implementation of the power management module policy - always on
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy_alwayson.h"
+
+/* @brief Always-on policy initialization - the policy is stateless,
+ * so this always succeeds.
+ */
+_mali_osk_errcode_t pmm_policy_init_always_on(void)
+{
+ /* Nothing to set up */
+ MALI_SUCCESS;
+}
+
+/* @brief Always-on policy termination - no state to release */
+void pmm_policy_term_always_on(void)
+{
+ /* Nothing to tear down */
+}
+
+/* @brief Always-on policy event handler
+ *
+ * Keeps the Mali cores powered at all times: OS power up/down requests
+ * are acknowledged immediately without changing core power, job events
+ * are ignored, and internal ACK/timeout events are treated as errors
+ * since this policy never requests power transitions.
+ */
+_mali_osk_errcode_t pmm_policy_process_always_on( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* We aren't going to do anything, but signal so we don't block the OS
+ * NOTE: This may adversely affect any jobs Mali is currently running
+ */
+ _mali_osk_pmm_power_down_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+ /* Not expected in this policy */
+ MALI_DEBUG_ASSERT( MALI_FALSE );
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ /* Nothing to do */
+ _mali_osk_pmm_power_up_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ /* Nothing to do - we are always on */
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ /* Not expected in this policy */
+ MALI_DEBUG_ASSERT( MALI_FALSE );
+ break;
+
+ default:
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+
+ MALI_SUCCESS;
+}
+
+#endif
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_alwayson.h b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_alwayson.h
new file mode 100644
index 0000000..da13224
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_alwayson.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_alwayson.h
+ * Defines the power management module policy for always on
+ */
+
+#ifndef __MALI_PMM_POLICY_ALWAYSON_H__
+#define __MALI_PMM_POLICY_ALWAYSON_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief Always on policy initialization
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init_always_on(void);
+
+/** @brief Always on policy termination
+ */
+void pmm_policy_term_always_on(void);
+
+/** @brief Always on policy state changer
+ *
+ * Given the next available event message, this routine processes it
+ * for the policy and changes state as needed.
+ *
+ * Always on policy will ignore all events and keep the Mali cores on
+ * all the time
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process_always_on( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @} */ /* End group pmmapi_policy */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_ALWAYSON_H__ */
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_jobcontrol.c b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_jobcontrol.c
new file mode 100644
index 0000000..237d702
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_jobcontrol.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_jobcontrol.c
+ * Implementation of the power management module policy - job control
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_system.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_policy.h"
+#include "mali_pmm_policy_jobcontrol.h"
+
+/** @brief Private state for the job control policy */
+typedef struct _pmm_policy_data_job_control
+{
+ _pmm_policy_timer_t latency; /**< Latency timeout timer for all cores */
+ u32 core_active_start; /**< Last time a core was set to active */
+ u32 timeout; /**< Timeout in ticks for latency timer */
+} _pmm_policy_data_job_control_t;
+
+
+/* @brief Local data for this policy
+ *
+ * Allocated in pmm_policy_init_job_control() and released in
+ * pmm_policy_term_job_control(); NULL while the policy is inactive.
+ */
+static _pmm_policy_data_job_control_t *data_job_control = NULL;
+
+/* @brief Set up the timeout if it hasn't already been set and if there are active cores
+ *
+ * No-op when the policy has no inactivity timeout (timeout == 0) or no
+ * cores are powered. Starts the latency timer only when the whole system
+ * is idle; otherwise stops any pending timer.
+ */
+static void job_control_timeout_setup( _mali_pmm_internal_state_t *pmm, _pmm_policy_timer_t *pptimer )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* Do we have an inactivity time out and some powered cores? */
+ if( pptimer->timeout > 0 && pmm->cores_powered != 0 )
+ {
+ /* Is the system idle and all the powered cores are idle? */
+ if( pmm->status == MALI_PMM_STATUS_IDLE && pmm->cores_idle == pmm->cores_powered )
+ {
+ if( pmm_policy_timer_start(pptimer) )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM policy - Job control: Setting in-activity latency timer\n") );
+ }
+ }
+ else
+ {
+ /* We are not idle so there is no need for an inactivity timer
+ */
+ if( pmm_policy_timer_stop(pptimer) )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM policy - Job control: Removing in-activity latency timer\n") );
+ }
+ }
+ }
+}
+
+/* @brief Check the validity of the timeout - and if there is one set
+ *
+ * A timeout is only acted on when the timer is in use (timeout != 0),
+ * some cores are powered, all powered cores are idle, and the timer was
+ * started after the last core became active (i.e. the timeout is not
+ * stale). Returns MALI_TRUE when the cores should be powered down.
+ */
+static mali_bool job_control_timeout_valid( _mali_pmm_internal_state_t *pmm, _pmm_policy_timer_t *pptimer, u32 timer_start )
+{
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(pptimer);
+
+ /* Not a valid timer! */
+ if( pptimer->timeout == 0 ) return MALI_FALSE;
+
+ /* Are some cores powered and are they all idle? */
+ if( (pmm->cores_powered != 0) && (pmm->cores_idle == pmm->cores_powered) )
+ {
+ /* Has latency timeout started after the last core was active? */
+ if( pmm_policy_timer_valid( timer_start, data_job_control->core_active_start ) )
+ {
+ return MALI_TRUE;
+ }
+ else
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: In-activity latency time out ignored - out of date\n") );
+ }
+ }
+ else
+ {
+ if( pmm->cores_powered == 0 )
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: In-activity latency time out ignored - cores already off\n") );
+ }
+ else
+ {
+ MALIPMM_DEBUG_PRINT( ("PMM: In-activity latency time out ignored - cores active\n") );
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+/* @brief Job control policy initialization
+ *
+ * Allocates the policy-private data, creates the latency timer and, if
+ * the system is already idle, starts the inactivity timeout. On timer
+ * creation failure the allocation is rolled back and the error returned.
+ */
+_mali_osk_errcode_t pmm_policy_init_job_control( _mali_pmm_internal_state_t *pmm )
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_ASSERT_POINTER( pmm );
+ MALI_DEBUG_ASSERT( data_job_control == NULL );
+
+ /* Allocate and populate the policy-private data */
+ data_job_control = (_pmm_policy_data_job_control_t *) _mali_osk_malloc(sizeof(*data_job_control));
+ MALI_CHECK_NON_NULL( data_job_control, _MALI_OSK_ERR_NOMEM );
+
+ data_job_control->timeout = MALI_PMM_POLICY_JOBCONTROL_INACTIVITY_TIMEOUT;
+ data_job_control->core_active_start = _mali_osk_time_tickcount();
+
+ /* Create (but do not start) the latency timer */
+ err = pmm_policy_timer_init( &data_job_control->latency, data_job_control->timeout, MALI_PMM_EVENT_TIMEOUT );
+ if( err != _MALI_OSK_ERR_OK )
+ {
+ /* Roll back the allocation so the policy stays uninitialized */
+ _mali_osk_free( data_job_control );
+ data_job_control = NULL;
+ return err;
+ }
+
+ /* Start the latency timeout */
+ job_control_timeout_setup( pmm, &data_job_control->latency );
+
+ MALI_SUCCESS;
+}
+
+/* @brief Job control policy termination
+ *
+ * Safe to call even if the policy was never initialized.
+ */
+void pmm_policy_term_job_control(void)
+{
+ if( data_job_control == NULL ) return;
+
+ pmm_policy_timer_term( &data_job_control->latency );
+ _mali_osk_free( data_job_control );
+ data_job_control = NULL;
+}
+
+/* @brief Power up all registered cores in response to queued work
+ *
+ * The job control policy keeps things simple: whenever work arrives,
+ * every registered core is brought up. If the power-up cannot complete
+ * immediately the PMM waits in the policy power-up state.
+ */
+static void pmm_policy_job_control_job_queued( _mali_pmm_internal_state_t *pmm )
+{
+ mali_pmm_core_mask pending;
+
+ pending = pmm_cores_to_power_up( pmm, pmm->cores_registered );
+ if( pending != 0 )
+ {
+ /* There are some cores that need powering up */
+ if( !pmm_invoke_power_up( pmm ) )
+ {
+ /* Need to wait until finished */
+ pmm->status = MALI_PMM_STATUS_POLICY_POWER_UP;
+ }
+ }
+}
+
+/* @brief Job control policy state machine
+ *
+ * Processes one PMM event against the current PMM status. Cores are
+ * powered up when jobs are queued/scheduled and powered down after an
+ * in-activity latency timeout, an OS power-down request, or a DVFS
+ * pause. After handling the event the latency timer is (re)armed as
+ * needed and the aggregate system state is updated.
+ *
+ * NOTE: the case fall-throughs below (SCHEDULED -> QUEUED and
+ * JOB_FINISHED -> TIMEOUT) are intentional.
+ */
+_mali_osk_errcode_t pmm_policy_process_job_control( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+ mali_pmm_core_mask cores;
+ mali_pmm_core_mask cores_subset;
+ MALI_DEBUG_ASSERT_POINTER(pmm);
+ MALI_DEBUG_ASSERT_POINTER(event);
+ MALI_DEBUG_ASSERT_POINTER(data_job_control);
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Job control policy process start - status=%d\n", pmm->status) );
+
+ /* Mainly the data is the cores */
+ cores = pmm_cores_from_event_data( pmm, event );
+
+#if MALI_STATE_TRACKING
+ pmm->mali_last_pmm_status = pmm->status;
+#endif /* MALI_STATE_TRACKING */
+
+ switch( pmm->status )
+ {
+ /**************** IDLE ****************/
+ case MALI_PMM_STATUS_IDLE:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ /* Not expected in this state */
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+
+ /* Update idle cores to indicate active - remove these! */
+ pmm_cores_set_active( pmm, cores );
+ /* Remember when this happened */
+ data_job_control->core_active_start = event->ts;
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_SCHEDULED);
+#endif
+
+ /*** FALL THROUGH to QUEUED to check POWER UP ***/
+
+ case MALI_PMM_EVENT_JOB_QUEUED:
+
+ pmm_policy_job_control_job_queued( pmm );
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_QUEUED);
+#endif
+ break;
+
+ case MALI_PMM_EVENT_DVFS_PAUSE:
+
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_FALSE );
+ if ( cores_subset != 0 )
+ {
+ if ( !pmm_power_down_okay( pmm ) )
+ {
+ pmm->is_dvfs_active = 1;
+ pmm->status = MALI_PMM_STATUS_OS_POWER_DOWN;
+ pmm_save_os_event_data( pmm, event->data );
+ break;
+ }
+ }
+ pmm->status = MALI_PMM_STATUS_DVFS_PAUSE;
+ _mali_osk_pmm_dvfs_operation_done(0);
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+
+ /* Need to power down all cores even if we need to wait for them */
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_FALSE );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering down */
+ if( !pmm_invoke_power_down( pmm, MALI_POWER_MODE_DEEP_SLEEP ) )
+ {
+ /* We need to wait until they are idle */
+
+ pmm->status = MALI_PMM_STATUS_OS_POWER_DOWN;
+ /* Save the OS data to respond later */
+ pmm_save_os_event_data( pmm, event->data );
+ /* Exit this case - as we have to wait */
+ break;
+ }
+ }
+ else
+ {
+ mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
+
+ }
+ /* Set waiting status */
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ /* All cores now down - respond to OS power event */
+ _mali_osk_pmm_power_down_done( event->data );
+ break;
+
+ case MALI_PMM_EVENT_JOB_FINISHED:
+
+ /* Update idle cores - add these! */
+ pmm_cores_set_idle( pmm, cores );
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_JOB_FINISHED);
+#endif
+ if( data_job_control->timeout > 0 )
+ {
+ /* Wait for time out to fire */
+ break;
+ }
+ /* For job control policy - turn off all cores */
+ cores = pmm->cores_powered;
+
+ /*** FALL THROUGH to TIMEOUT TEST as NO TIMEOUT ***/
+
+ case MALI_PMM_EVENT_TIMEOUT:
+
+ /* Main job control policy - turn off cores after inactivity */
+ if( job_control_timeout_valid( pmm, &data_job_control->latency, (u32)event->data ) )
+ {
+ /* Valid timeout of inactivity - so find out if we can power down
+ * immedately - if we can't then this means the cores are still in fact
+ * active
+ */
+ cores_subset = pmm_cores_to_power_down( pmm, cores, MALI_TRUE );
+ if( cores_subset != 0 )
+ {
+ /* Check if we can really power down, if not then we are not
+ * really in-active
+ */
+ if( !pmm_invoke_power_down( pmm, MALI_POWER_MODE_LIGHT_SLEEP ) )
+ {
+ pmm_power_down_cancel( pmm );
+ }
+ }
+ /* else there are no cores powered up! */
+ }
+#if MALI_POWER_MGMT_TEST_SUITE
+ _mali_osk_pmm_policy_events_notifications(MALI_PMM_EVENT_TIMEOUT);
+#endif
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ /******************DVFS PAUSE**************/
+ case MALI_PMM_STATUS_DVFS_PAUSE:
+ switch ( event->id )
+ {
+ case MALI_PMM_EVENT_DVFS_RESUME:
+
+ if ( pmm->cores_powered != 0 )
+ {
+ pmm->cores_ack_down =0;
+ pmm_power_down_cancel( pmm );
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ }
+ else
+ {
+ pmm_policy_job_control_job_queued( pmm );
+ }
+ _mali_osk_pmm_dvfs_operation_done( 0 );
+ break;
+
+ case MALI_PMM_EVENT_OS_POWER_DOWN:
+ /* Set waiting status */
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ if ( pmm->cores_powered != 0 )
+ {
+ if ( pmm_invoke_power_down( pmm, MALI_POWER_MODE_DEEP_SLEEP ) )
+ {
+ _mali_osk_pmm_power_down_done( 0 );
+ break;
+ }
+ }
+ else
+ {
+ mali_platform_power_mode_change(MALI_POWER_MODE_DEEP_SLEEP);
+ }
+ _mali_osk_pmm_power_down_done( 0 );
+ break;
+ default:
+ break;
+ }
+ break;
+
+ /**************** POWER UP ****************/
+ case MALI_PMM_STATUS_OS_POWER_UP:
+ case MALI_PMM_STATUS_POLICY_POWER_UP:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+ /* Make sure cores powered off equal what we expect */
+ MALI_DEBUG_ASSERT( cores == pmm->cores_pend_up );
+ pmm_cores_set_up_ack( pmm, cores );
+
+ if( pmm_invoke_power_up( pmm ) )
+ {
+ if( pmm->status == MALI_PMM_STATUS_OS_POWER_UP )
+ {
+ /* Get the OS data and respond to the power up */
+ _mali_osk_pmm_power_up_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ }
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ /**************** POWER DOWN ****************/
+ case MALI_PMM_STATUS_OS_POWER_DOWN:
+ case MALI_PMM_STATUS_POLICY_POWER_DOWN:
+ switch( event->id )
+ {
+
+ case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+
+ pmm_cores_set_down_ack( pmm, cores );
+
+ if ( pmm->is_dvfs_active == 1 )
+ {
+ if( pmm_power_down_okay( pmm ) )
+ {
+ pmm->is_dvfs_active = 0;
+ pmm->status = MALI_PMM_STATUS_DVFS_PAUSE;
+ _mali_osk_pmm_dvfs_operation_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ break;
+ }
+
+ /* Now check if we can power down */
+ if( pmm_invoke_power_down( pmm, MALI_POWER_MODE_DEEP_SLEEP ) )
+ {
+ if( pmm->status == MALI_PMM_STATUS_OS_POWER_DOWN )
+ {
+ /* Get the OS data and respond to the power down */
+ _mali_osk_pmm_power_down_done( pmm_retrieve_os_event_data( pmm ) );
+ }
+ pmm->status = MALI_PMM_STATUS_OS_WAITING;
+ }
+ break;
+
+ default:
+ /* Unexpected event */
+ MALI_ERROR(_MALI_OSK_ERR_ITEM_NOT_FOUND);
+ }
+ break;
+
+ case MALI_PMM_STATUS_OS_WAITING:
+ switch( event->id )
+ {
+ case MALI_PMM_EVENT_OS_POWER_UP:
+ cores_subset = pmm_cores_to_power_up( pmm, cores );
+ if( cores_subset != 0 )
+ {
+ /* There are some cores that need powering up */
+ if( !pmm_invoke_power_up( pmm ) )
+ {
+ /* Need to wait until power up complete */
+ pmm->status = MALI_PMM_STATUS_OS_POWER_UP;
+ /* Save the OS data to respond later */
+ pmm_save_os_event_data( pmm, event->data );
+ /* Exit this case - as we have to wait */
+ break;
+ }
+ }
+ pmm->status = MALI_PMM_STATUS_IDLE;
+ /* All cores now up - respond to OS power up event */
+ _mali_osk_pmm_power_up_done( event->data );
+ break;
+
+ default:
+ /* All other messages are ignored in this state */
+ break;
+ }
+ break;
+
+ default:
+ /* Unexpected state */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Set in-activity latency timer - if required */
+ job_control_timeout_setup( pmm, &data_job_control->latency );
+
+ /* Update the PMM state */
+ pmm_update_system_state( pmm );
+#if MALI_STATE_TRACKING
+ pmm->mali_new_event_status = event->id;
+#endif /* MALI_STATE_TRACKING */
+
+ MALIPMM_DEBUG_PRINT( ("PMM: Job control policy process end - status=%d and event=%d\n", pmm->status,event->id) );
+
+ MALI_SUCCESS;
+}
+
+/* @brief Job control policy timer check
+ *
+ * Called when the latency timer has fired; raises the timeout event so
+ * the state machine can power the cores down.
+ *
+ * Fix: declare the parameter list as (void) - the empty () form is an
+ * obsolescent unprototyped declarator and does not match the (void)
+ * prototype in mali_pmm_policy_jobcontrol.h.
+ */
+void pmm_policy_check_job_control(void)
+{
+ MALI_DEBUG_ASSERT_POINTER(data_job_control);
+
+ /* Latency timer must have expired raise the event */
+ pmm_policy_timer_raise_event(&data_job_control->latency);
+}
+
+
+#endif /* USING_MALI_PMM */
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_jobcontrol.h b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_jobcontrol.h
new file mode 100644
index 0000000..dcfa438
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_policy_jobcontrol.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmm_policy_jobcontrol.h
+ * Defines the job control power management module policy
+ */
+
+#ifndef __MALI_PMM_POLICY_JOBCONTROL_H__
+#define __MALI_PMM_POLICY_JOBCONTROL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi_policy Power Management Module Policies
+ *
+ * @{
+ */
+
+/** @brief The jobcontrol policy inactivity latency timeout (in ticks)
+ * before the hardware is switched off
+ *
+ * @note Setting this low whilst tracing or producing debug output can
+ * cause a lot of timeouts to fire which can affect the PMM behaviour
+ */
+#define MALI_PMM_POLICY_JOBCONTROL_INACTIVITY_TIMEOUT 50
+
+/** @brief Job control policy initialization
+ *
+ * @return _MALI_OSK_ERR_OK if the policy could be initialized, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_init_job_control(_mali_pmm_internal_state_t *pmm);
+
+/** @brief Job control policy termination
+ */
+void pmm_policy_term_job_control(void);
+
+/** @brief Job control policy state changer
+ *
+ * Given the next available event message, this routine processes it
+ * for the policy and changes state as needed.
+ *
+ * Job control policy depends on events from the Mali cores, and will
+ * power down all cores after an inactivity latency timeout. It will
+ * power the cores back on again when a job is scheduled to run.
+ *
+ * @param pmm internal PMM state
+ * @param event PMM event to process
+ * @return _MALI_OSK_ERR_OK if the policy state completed okay, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+_mali_osk_errcode_t pmm_policy_process_job_control( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @brief Job control policy checker
+ *
+ * The latency timer has fired and we need to raise the correct event to
+ * handle it
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_policy_check_job_control(void);
+
+/** @} */ /* End group pmmapi_policy */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_POLICY_JOBCONTROL_H__ */
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_state.c b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_state.c
new file mode 100644
index 0000000..d529b9a
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_state.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#if USING_MALI_PMM
+
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_subsystem.h"
+
+#include "mali_pmm.h"
+#include "mali_pmm_state.h"
+#include "mali_pmm_system.h"
+
+#include "mali_kernel_core.h"
+#include "mali_platform.h"
+
+/* NOTE: must be kept in sync with the number of entries in cores_list[] below */
+#define SIZEOF_CORES_LIST 6
+
+/* Ordered list of all cores the PMM can manage.
+ * NOTE: L2 *MUST* be first on the list so that it
+ * is correctly powered on first and powered off last
+ * (power-down paths walk this list backwards).
+ */
+static mali_pmm_core_id cores_list[] = { MALI_PMM_CORE_L2,
+										MALI_PMM_CORE_GP,
+										MALI_PMM_CORE_PP0,
+										MALI_PMM_CORE_PP1,
+										MALI_PMM_CORE_PP2,
+										MALI_PMM_CORE_PP3 };
+
+
+
+/* Recompute the externally visible PMM state purely from the
+ * cores_registered / cores_powered masks, tracing the transition
+ * when MALI_PMM_TRACE is enabled.
+ */
+void pmm_update_system_state( _mali_pmm_internal_state_t *pmm )
+{
+	mali_pmm_state state;
+
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	if( pmm->cores_registered == 0 )
+	{
+		/* Nothing registered yet (or everything unregistered) */
+		state = MALI_PMM_STATE_UNAVAILABLE;
+	}
+	else if( pmm->cores_powered == 0 )
+	{
+		state = MALI_PMM_STATE_SYSTEM_OFF;
+	}
+	else if( pmm->cores_powered == pmm->cores_registered )
+	{
+		state = MALI_PMM_STATE_SYSTEM_ON;
+	}
+	else
+	{
+		/* Some other state where not everything is on or off */
+		state = MALI_PMM_STATE_SYSTEM_TRANSITION;
+	}
+
+#if MALI_PMM_TRACE
+	_mali_pmm_trace_state_change( pmm->state, state );
+#endif
+	pmm->state = state;
+}
+
+/* Translate an event message into the mask of cores it refers to.
+ * OS power events always mean "all registered cores"; job and internal
+ * ACK events carry the mask in event->data (with the SYSTEM / PP_ALL
+ * pseudo-masks expanded); anything else is assumed to be a timeout and
+ * reports the cores that are currently powered.
+ */
+mali_pmm_core_mask pmm_cores_from_event_data( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event )
+{
+	mali_pmm_core_mask cores;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_DEBUG_ASSERT_POINTER(event);
+
+	switch( event->id )
+	{
+	case MALI_PMM_EVENT_OS_POWER_UP:
+	case MALI_PMM_EVENT_OS_POWER_DOWN:
+		/* All cores - the system */
+		cores = pmm->cores_registered;
+		break;
+
+	case MALI_PMM_EVENT_JOB_SCHEDULED:
+	case MALI_PMM_EVENT_JOB_QUEUED:
+	case MALI_PMM_EVENT_JOB_FINISHED:
+	case MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK:
+	case MALI_PMM_EVENT_INTERNAL_POWER_DOWN_ACK:
+		/* Currently the main event data is only the cores
+		 * for these messages
+		 */
+		cores = (mali_pmm_core_mask)event->data;
+		if( cores == MALI_PMM_CORE_SYSTEM )
+		{
+			cores = pmm->cores_registered;
+		}
+		else if( cores == MALI_PMM_CORE_PP_ALL )
+		{
+			/* Get the subset of registered PP cores */
+			cores = (pmm->cores_registered & MALI_PMM_CORE_PP_ALL);
+		}
+		MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+		break;
+
+	default:
+		/* Assume timeout messages - report cores still powered */
+		cores = pmm->cores_powered;
+		break;
+	}
+
+	return cores;
+}
+
+/* Work out which of the requested cores are currently off and mark
+ * them as pending power up. Returns the pending subset (0 means all
+ * requested cores are already powered).
+ */
+mali_pmm_core_mask pmm_cores_to_power_up( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+	mali_pmm_core_mask cores_subset;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+	/* Check that cores aren't pending power down when asked for power up */
+	MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );
+
+	/* Only cores that are currently off need powering up */
+	cores_subset = (~(pmm->cores_powered) & cores);
+	if( cores_subset != 0 )
+	{
+		/* There are some cores that need powering up */
+		pmm->cores_pend_up = cores_subset;
+	}
+
+	return cores_subset;
+}
+
+/* Work out which of the requested cores are currently powered and mark
+ * them as pending power down, signalling each core so an idle core is
+ * acknowledged immediately. Returns the subset that still needs to be
+ * powered down (re-read under the lock, so it reflects unregistrations
+ * that happened while the lock was dropped).
+ */
+mali_pmm_core_mask pmm_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores, mali_bool immediate_only )
+{
+	mali_pmm_core_mask cores_subset;
+	_mali_osk_errcode_t err;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+	/* Check that cores aren't pending power up when asked for power down */
+	MALI_DEBUG_ASSERT( pmm->cores_pend_up == 0 );
+
+	cores_subset = (pmm->cores_powered & cores);
+	if( cores_subset != 0 )
+	{
+		int n;
+		volatile mali_pmm_core_mask *ppowered = &(pmm->cores_powered);
+
+		/* There are some cores that need powering down, but we may
+		 * need to wait until they are idle
+		 */
+		/* Walk the list backwards so L2 (first entry) goes down last */
+		for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
+		{
+			if( (cores_list[n] & cores_subset) != 0 )
+			{
+				/* Core is to be powered down */
+				pmm->cores_pend_down |= cores_list[n];
+
+				/* Can't hold the power lock, when accessing subsystem mutex via
+				 * the core power call.
+				 * Due to termination of driver requiring a subsystem mutex
+				 * and then power lock held to unregister a core.
+				 * This does mean that the following function could fail
+				 * as the core is unregistered before we tell it to power
+				 * down, but it does not matter as we are terminating
+				 */
+#if MALI_STATE_TRACKING
+				pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+				MALI_PMM_UNLOCK(pmm);
+				/* Signal the core to power down
+				 * If it is busy (not idle) it will set a pending power down flag
+				 * (as long as we don't want to only immediately power down).
+				 * If it isn't busy it will move out of the idle queue right
+				 * away
+				 */
+				err = mali_core_signal_power_down( cores_list[n], immediate_only );
+				MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+				pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+				/* Re-read cores_subset in case it has changed */
+				cores_subset = (*ppowered & cores);
+
+				if( err == _MALI_OSK_ERR_OK )
+				{
+					/* We moved an idle core to the power down queue
+					 * which means it is now acknowledged (if it is still
+					 * registered)
+					 */
+					pmm->cores_ack_down |= (cores_list[n] & cores_subset);
+				}
+				else
+				{
+					MALI_DEBUG_PRINT(1,("PMM: In pmm_cores_to_power_down, the error and cores powered are..%x....%x",err,*ppowered));
+					MALI_DEBUG_ASSERT( err == _MALI_OSK_ERR_BUSY ||
+										(err == _MALI_OSK_ERR_FAULT &&
+										(*ppowered & cores_list[n]) == 0) );
+					/* If we didn't move a core - it must be active, so
+					 * leave it pending, so we get an acknowledgement (when
+					 * not in immediate only mode)
+					 * Alternatively we are shutting down and the core has
+					 * been unregistered
+					 */
+				}
+			}
+		}
+	}
+
+	return cores_subset;
+}
+
+/* Abort an in-flight power down: clear the pending/acknowledged masks
+ * and signal every formerly-pending core to power back up, so the
+ * system returns to a known state even if a core had already sent an
+ * (unread) acknowledge message.
+ */
+void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm )
+{
+	int n;
+	mali_pmm_core_mask pd, ad;
+	_mali_osk_errcode_t err;
+	volatile mali_pmm_core_mask *pregistered;
+
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	MALIPMM_DEBUG_PRINT( ("PMM: Cancelling power down\n") );
+
+	pd = pmm->cores_pend_down;
+	ad = pmm->cores_ack_down;
+	/* Clear the pending cores so that they don't move to the off
+	 * queue if they haven't already
+	 */
+	pmm->cores_pend_down = 0;
+	pmm->cores_ack_down = 0;
+	pregistered = &(pmm->cores_registered);
+
+	/* Power up all the pending power down cores - just so
+	 * we make sure the system is in a known state, as a
+	 * pending core might have sent an acknowledged message
+	 * which hasn't been read yet.
+	 */
+	for( n = 0; n < SIZEOF_CORES_LIST; n++ )
+	{
+		if( (cores_list[n] & pd) != 0 )
+		{
+			/* Can't hold the power lock, when accessing subsystem mutex via
+			 * the core power call.
+			 * Due to termination of driver requiring a subsystem mutex
+			 * and then power lock held to unregister a core.
+			 * This does mean that the following power up function could fail
+			 * as the core is unregistered before we tell it to power
+			 * up, but it does not matter as we are terminating
+			 */
+#if MALI_STATE_TRACKING
+			pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+			MALI_PMM_UNLOCK(pmm);
+			/* As we are cancelling - only move the cores back to the queue -
+			 * no reset needed
+			 */
+			err = mali_core_signal_power_up( cores_list[n], MALI_TRUE );
+			MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+			pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+			/* Update pending list with the current registered cores */
+			pd &= (*pregistered);
+
+			if( err != _MALI_OSK_ERR_OK )
+			{
+				MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_BUSY &&
+									((cores_list[n] & ad) == 0)) ||
+									(err == _MALI_OSK_ERR_FAULT &&
+									(*pregistered & cores_list[n]) == 0) );
+				/* If we didn't power up a core - it must be active and
+				 * hasn't actually tried to power down - this is expected
+				 * for cores that haven't acknowledged
+				 * Alternatively we are shutting down and the core has
+				 * been unregistered
+				 */
+			}
+		}
+	}
+	/* Only used in debug builds */
+	MALI_IGNORE(ad);
+}
+
+
+/* Power down may proceed only once every pending core has acknowledged */
+mali_bool pmm_power_down_okay( _mali_pmm_internal_state_t *pmm )
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	return ( pmm->cores_pend_down == pmm->cores_ack_down ? MALI_TRUE : MALI_FALSE );
+}
+
+/* Power off all pending cores via the PMU once every one of them has
+ * acknowledged the power down. Returns MALI_FALSE while still waiting
+ * for acknowledgements, MALI_TRUE once the power down was attempted
+ * (on PMU failure the masks are restored and fatal_power_err is set).
+ */
+mali_bool pmm_invoke_power_down( _mali_pmm_internal_state_t *pmm, mali_power_mode power_mode )
+{
+	_mali_osk_errcode_t err;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	/* Check that cores are pending power down during power down invoke */
+	MALI_DEBUG_ASSERT( pmm->cores_pend_down != 0 );
+	/* Check that cores are not pending power up during power down invoke */
+	MALI_DEBUG_ASSERT( pmm->cores_pend_up == 0 );
+
+	if( !pmm_power_down_okay( pmm ) )
+	{
+		MALIPMM_DEBUG_PRINT( ("PMM: Waiting for cores to go idle for power off - 0x%08x / 0x%08x\n",
+					pmm->cores_pend_down, pmm->cores_ack_down) );
+		return MALI_FALSE;
+	}
+	else
+	{
+#if MALI_PMM_TRACE
+		/* Capture the powered mask BEFORE it is updated, so the trace
+		 * records the real transition (previously this was read after
+		 * the update below and the trace always showed no change)
+		 */
+		mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+		pmm->cores_powered &= ~(pmm->cores_pend_down);
+#if !MALI_PMM_NO_PMU
+		err = malipmm_powerdown( pmm->cores_pend_down, power_mode);
+#else
+		err = _MALI_OSK_ERR_OK;
+#endif
+
+		if( err == _MALI_OSK_ERR_OK )
+		{
+			/* Remove powered down cores from idle and powered list */
+			pmm->cores_idle &= ~(pmm->cores_pend_down);
+			/* Reset pending/acknowledged status */
+			pmm->cores_pend_down = 0;
+			pmm->cores_ack_down = 0;
+#if MALI_PMM_TRACE
+			_mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+		}
+		else
+		{
+			/* PMU refused - restore the powered mask and flag fatal error */
+			pmm->cores_powered |= pmm->cores_pend_down;
+			MALI_PRINT_ERROR( ("PMM: Failed to get PMU to power down cores - (0x%x) %s",
+					pmm->cores_pend_down, pmm_trace_get_core_name(pmm->cores_pend_down)) );
+			pmm->fatal_power_err = MALI_TRUE;
+		}
+	}
+
+	return MALI_TRUE;
+}
+
+
+/* Power up has completed only once every pending core has acknowledged */
+mali_bool pmm_power_up_okay( _mali_pmm_internal_state_t *pmm )
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	return ( pmm->cores_pend_up == pmm->cores_ack_up ? MALI_TRUE : MALI_FALSE );
+}
+
+
+/* Either complete a power up (all pending cores acknowledged: move them
+ * back into the idle queues and return MALI_TRUE) or start one (ask the
+ * PMU to power the pending cores and return MALI_FALSE - the caller is
+ * then expected to wait for the INTERNAL_POWER_UP_ACK event).
+ */
+mali_bool pmm_invoke_power_up( _mali_pmm_internal_state_t *pmm )
+{
+	_mali_osk_errcode_t err;
+
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+
+	/* Check that cores are pending power up during power up invoke */
+	MALI_DEBUG_ASSERT( pmm->cores_pend_up != 0 );
+	/* Check that cores are not pending power down during power up invoke */
+	MALI_DEBUG_ASSERT( pmm->cores_pend_down == 0 );
+
+	if( pmm_power_up_okay( pmm ) )
+	{
+		/* Power up has completed - sort out subsystem core status */
+
+		int n;
+		/* Use volatile to access, so that it is updated if any cores are unregistered */
+		volatile mali_pmm_core_mask *ppendup = &(pmm->cores_pend_up);
+#if MALI_PMM_TRACE
+		mali_pmm_core_mask old_power = pmm->cores_powered;
+#endif
+		/* Move cores into idle queues */
+		for( n = 0; n < SIZEOF_CORES_LIST; n++ )
+		{
+			if( (cores_list[n] & (*ppendup)) != 0 )
+			{
+				/* Can't hold the power lock, when accessing subsystem mutex via
+				 * the core power call.
+				 * Due to termination of driver requiring a subsystem mutex
+				 * and then power lock held to unregister a core.
+				 * This does mean that the following function could fail
+				 * as the core is unregistered before we tell it to power
+				 * up, but it does not matter as we are terminating
+				 */
+#if MALI_STATE_TRACKING
+				pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+				MALI_PMM_UNLOCK(pmm);
+				err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
+				MALI_PMM_LOCK(pmm);
+
+#if MALI_STATE_TRACKING
+				pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+
+				if( err != _MALI_OSK_ERR_OK )
+				{
+					MALI_DEBUG_PRINT(1,("In pmm_invoke_power_up:: The error and pending cores to be powered up are...%x...%x",err,*ppendup));
+					MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_FAULT &&
+										(*ppendup & cores_list[n]) == 0) );
+					/* We only expect this to fail when we are shutting down
+					 * and the core has been unregistered
+					 */
+				}
+			}
+		}
+		/* Finished power up - add cores to idle and powered list */
+		pmm->cores_powered |= (*ppendup);
+		pmm->cores_idle |= (*ppendup);
+		/* Reset pending/acknowledge status */
+		pmm->cores_pend_up = 0;
+		pmm->cores_ack_up = 0;
+
+#if MALI_PMM_TRACE
+		_mali_pmm_trace_hardware_change( old_power, pmm->cores_powered );
+#endif
+		return MALI_TRUE;
+	}
+	else
+	{
+#if !MALI_PMM_NO_PMU
+		/* Power up must now be done */
+		err = malipmm_powerup( pmm->cores_pend_up );
+#else
+		err = _MALI_OSK_ERR_OK;
+#endif
+		if( err != _MALI_OSK_ERR_OK )
+		{
+			MALI_PRINT_ERROR( ("PMM: Failed to get PMU to power up cores - (0x%x) %s",
+					pmm->cores_pend_up, pmm_trace_get_core_name(pmm->cores_pend_up)) );
+			pmm->fatal_power_err = MALI_TRUE;
+		}
+		else
+		{
+			/* TBD - Update core status immediately rather than use event message */
+			_mali_uk_pmm_message_s event = {
+				NULL,
+				MALI_PMM_EVENT_INTERNAL_POWER_UP_ACK,
+				0 };
+			/* All the cores that were pending power up, have now completed power up */
+			event.data = pmm->cores_pend_up;
+			_mali_ukk_pmm_event_message( &event );
+			MALIPMM_DEBUG_PRINT( ("PMM: Sending ACK to power up") );
+		}
+	}
+
+	/* Always return false, as we need an interrupt to acknowledge
+	 * when power up is complete
+	 */
+	return MALI_FALSE;
+}
+
+/* Mark the given cores as no longer idle; returns the remaining idle mask */
+mali_pmm_core_mask pmm_cores_set_active( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+	pmm->cores_idle &= (~cores);
+	return pmm->cores_idle;
+}
+
+/* Mark the given cores as idle; returns the updated idle mask */
+mali_pmm_core_mask pmm_cores_set_idle( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+	pmm->cores_idle |= (cores);
+	return pmm->cores_idle;
+}
+
+/* Record that the given cores have acknowledged a pending power down;
+ * returns the updated acknowledge mask.
+ */
+mali_pmm_core_mask pmm_cores_set_down_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+	/* Check core IS pending a power down (assert fires if it is not) */
+	MALI_DEBUG_ASSERT( (pmm->cores_pend_down & cores) != 0 );
+	/* Check core has not acknowledged power down more than once */
+	MALI_DEBUG_ASSERT( (pmm->cores_ack_down & cores) == 0 );
+
+	pmm->cores_ack_down |= (cores);
+
+	return pmm->cores_ack_down;
+}
+
+/* Try to reset the PMM and PMU to a known state after a fatal power
+ * error: clear all transient state, power on every registered core,
+ * unblock any OS power event that was in flight, drain both event
+ * queues and return the PMM to the IDLE status.
+ */
+void pmm_fatal_reset( _mali_pmm_internal_state_t *pmm )
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+	_mali_osk_notification_t *msg = NULL;
+	mali_pmm_status status;
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALIPMM_DEBUG_PRINT( ("PMM: Fatal Reset called") );
+
+	MALI_DEBUG_ASSERT( pmm->status != MALI_PMM_STATUS_OFF );
+
+	/* Reset the common status */
+	pmm->waiting = 0;
+	pmm->missed = 0;
+	pmm->fatal_power_err = MALI_FALSE;
+	pmm->no_events = 0;
+	pmm->check_policy = MALI_FALSE;
+	pmm->cores_pend_down = 0;
+	pmm->cores_pend_up = 0;
+	pmm->cores_ack_down = 0;
+	pmm->cores_ack_up = 0;
+	pmm->is_dvfs_active = 0;
+#if MALI_PMM_TRACE
+	pmm->messages_sent = 0;
+	pmm->messages_received = 0;
+	pmm->imessages_sent = 0;
+	pmm->imessages_received = 0;
+	MALI_PRINT( ("PMM Trace: *** Fatal reset occurred ***") );
+#endif
+
+	/* Set that we are unavailable whilst resetting */
+	pmm->state = MALI_PMM_STATE_UNAVAILABLE;
+	status = pmm->status;
+	pmm->status = MALI_PMM_STATUS_OFF;
+
+	/* We want all cores powered */
+	pmm->cores_powered = pmm->cores_registered;
+	/* The cores may not be idle, but this state will be rectified later */
+	pmm->cores_idle = pmm->cores_registered;
+
+	/* So power on any cores that are registered */
+	if( pmm->cores_registered != 0 )
+	{
+		int n;
+		volatile mali_pmm_core_mask *pregistered = &(pmm->cores_registered);
+#if !MALI_PMM_NO_PMU
+		err = malipmm_powerup( pmm->cores_registered );
+#endif
+		if( err != _MALI_OSK_ERR_OK )
+		{
+			/* This is very bad as we can't even be certain the cores are now
+			 * powered up
+			 */
+			MALI_PRINT_ERROR( ("PMM: Failed to perform PMM reset!\n") );
+			/* TBD driver exit? */
+		}
+
+		for( n = SIZEOF_CORES_LIST-1; n >= 0; n-- )
+		{
+			if( (cores_list[n] & (*pregistered)) != 0 )
+			{
+#if MALI_STATE_TRACKING
+				pmm->mali_pmm_lock_acquired = 0;
+#endif /* MALI_STATE_TRACKING */
+
+				MALI_PMM_UNLOCK(pmm);
+				/* Core is now active - so try putting it in the idle queue */
+				err = mali_core_signal_power_up( cores_list[n], MALI_FALSE );
+				MALI_PMM_LOCK(pmm);
+#if MALI_STATE_TRACKING
+				pmm->mali_pmm_lock_acquired = 1;
+#endif /* MALI_STATE_TRACKING */
+
+				/* We either succeeded, or we were not off anyway, or we have
+				 * just been deregistered
+				 */
+				MALI_DEBUG_ASSERT( (err == _MALI_OSK_ERR_OK) ||
+									(err == _MALI_OSK_ERR_BUSY) ||
+									(err == _MALI_OSK_ERR_FAULT &&
+									(*pregistered & cores_list[n]) == 0) );
+			}
+		}
+	}
+
+	/* Unblock any pending OS event */
+	if( status == MALI_PMM_STATUS_OS_POWER_UP )
+	{
+		/* Get the OS data and respond to the power up */
+		_mali_osk_pmm_power_up_done( pmm_retrieve_os_event_data( pmm ) );
+	}
+	if( status == MALI_PMM_STATUS_OS_POWER_DOWN )
+	{
+		/* Get the OS data and respond to the power down
+		 * NOTE: We are not powered down at this point due to power problems,
+		 * so we are lying to the system, but something bad has already
+		 * happened and we are trying unstick things
+		 * TBD - Add busy loop to power down cores?
+		 */
+		_mali_osk_pmm_power_down_done( pmm_retrieve_os_event_data( pmm ) );
+	}
+
+	/* Purge the event queues - drain every queued message and stop once
+	 * a queue is empty.  (The previous do/break loops deleted at most one
+	 * message and spun forever when the queue was already empty.)
+	 */
+	while( _mali_osk_notification_queue_dequeue( pmm->iqueue, &msg ) == _MALI_OSK_ERR_OK )
+	{
+		_mali_osk_notification_delete ( msg );
+	}
+
+	while( _mali_osk_notification_queue_dequeue( pmm->queue, &msg ) == _MALI_OSK_ERR_OK )
+	{
+		_mali_osk_notification_delete ( msg );
+	}
+
+	/* Return status/state to normal */
+	pmm->status = MALI_PMM_STATUS_IDLE;
+	pmm_update_system_state(pmm);
+}
+
+/* Record that the given cores have acknowledged a pending power up;
+ * returns the updated acknowledge mask.
+ */
+mali_pmm_core_mask pmm_cores_set_up_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores )
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( pmm->cores_registered, cores );
+
+	/* Check core IS pending a power up (assert fires if it is not) */
+	MALI_DEBUG_ASSERT( (pmm->cores_pend_up & cores) != 0 );
+	/* Check core has not acknowledged power up more than once */
+	MALI_DEBUG_ASSERT( (pmm->cores_ack_up & cores) == 0 );
+
+	pmm->cores_ack_up |= (cores);
+
+	return pmm->cores_ack_up;
+}
+
+/* Stash the OS-specific data for an OS power event.  Single-slot store:
+ * the debug asserts enforce that the slot is empty and the data non-zero
+ * (zero is the "empty" sentinel used by pmm_retrieve_os_event_data).
+ */
+void pmm_save_os_event_data(_mali_pmm_internal_state_t *pmm, mali_pmm_message_data data)
+{
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	/* Check that there is no saved data */
+	MALI_DEBUG_ASSERT( pmm->os_data == 0 );
+	/* Can't store zero data - as retrieve check will fail */
+	MALI_DEBUG_ASSERT( data != 0 );
+
+	pmm->os_data = data;
+}
+
+/* Fetch and clear the saved OS event data (counterpart of
+ * pmm_save_os_event_data; zero marks the slot as empty again).
+ */
+mali_pmm_message_data pmm_retrieve_os_event_data(_mali_pmm_internal_state_t *pmm)
+{
+	mali_pmm_message_data data;
+
+	MALI_DEBUG_ASSERT_POINTER(pmm);
+	/* Check that there is saved data */
+	MALI_DEBUG_ASSERT( pmm->os_data != 0 );
+
+	/* Get data, and clear the saved version */
+	data = pmm->os_data;
+	pmm->os_data = 0;
+
+	return data;
+}
+
+/* Create list of core names to look up
+ * We are doing it this way to overcome the need for
+ * either string allocation, or stack space, so we
+ * use constant strings instead
+ */
+typedef struct pmm_trace_corelist
+{
+	mali_pmm_core_mask id;   /* exact core mask this entry names */
+	const char *name;        /* human readable label for that mask */
+} pmm_trace_corelist_t;
+
+/* Lookup table used by pmm_trace_get_core_name; matching is by exact
+ * mask equality, so common multi-core combinations are listed explicitly
+ */
+static pmm_trace_corelist_t pmm_trace_cores[] = {
+	{ MALI_PMM_CORE_SYSTEM, "SYSTEM" },
+	{ MALI_PMM_CORE_GP, "GP" },
+	{ MALI_PMM_CORE_L2, "L2" },
+	{ MALI_PMM_CORE_PP0, "PP0" },
+	{ MALI_PMM_CORE_PP1, "PP1" },
+	{ MALI_PMM_CORE_PP2, "PP2" },
+	{ MALI_PMM_CORE_PP3, "PP3" },
+	{ MALI_PMM_CORE_PP_ALL, "PP (all)" },
+	{ (MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0),
+		"GP+L2+PP0" },
+	{ (MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0),
+		"GP+PP0" },
+	{ (MALI_PMM_CORE_GP | MALI_PMM_CORE_L2 | MALI_PMM_CORE_PP0 | MALI_PMM_CORE_PP1),
+		"GP+L2+PP0+PP1" },
+	{ (MALI_PMM_CORE_GP | MALI_PMM_CORE_PP0 | MALI_PMM_CORE_PP1),
+		"GP+PP0+PP1" },
+	{ 0, NULL } /* Terminator of list */
+};
+
+/* Return a constant, human readable name for the given core mask.
+ * Falls back to "[multi-core]" when the exact mask is not in the
+ * pmm_trace_cores lookup table. Never returns NULL.
+ */
+const char *pmm_trace_get_core_name( mali_pmm_core_mask cores )
+{
+	const char *dname = NULL;
+	int cl;
+
+	/* Look up name in corelist */
+	cl = 0;
+	while( pmm_trace_cores[cl].name != NULL )
+	{
+		if( pmm_trace_cores[cl].id == cores )
+		{
+			dname = pmm_trace_cores[cl].name;
+			break;
+		}
+		cl++;
+	}
+
+	if( dname == NULL )
+	{
+		/* We don't know a good short-hand for the configuration */
+		dname = "[multi-core]";
+	}
+
+	return dname;
+}
+
+#endif /* USING_MALI_PMM */
+
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_state.h b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_state.h
new file mode 100644
index 0000000..4768344
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_state.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PMM_STATE_H__
+#define __MALI_PMM_STATE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_state Power Management Module State
+ *
+ * @{
+ */
+
+/* Check that the subset is really a subset of cores */
+#define MALI_PMM_DEBUG_ASSERT_CORES_SUBSET( cores, subset ) \
+ MALI_DEBUG_ASSERT( ((~(cores)) & (subset)) == 0 )
+
+
+/* Locking macros */
+#define MALI_PMM_LOCK(pmm) \
+ _mali_osk_lock_wait( pmm->lock, _MALI_OSK_LOCKMODE_RW )
+#define MALI_PMM_UNLOCK(pmm) \
+ _mali_osk_lock_signal( pmm->lock, _MALI_OSK_LOCKMODE_RW )
+#define MALI_PMM_LOCK_TERM(pmm) \
+ _mali_osk_lock_term( pmm->lock )
+
+/* Notification type for messages */
+#define MALI_PMM_NOTIFICATION_TYPE 0
+
+/** @brief Status of the PMM state machine
+ */
+typedef enum mali_pmm_status_tag
+{
+	MALI_PMM_STATUS_IDLE, /**< PMM is waiting next event */
+	MALI_PMM_STATUS_POLICY_POWER_DOWN, /**< Policy initiated power down */
+	MALI_PMM_STATUS_POLICY_POWER_UP, /**< Policy initiated power up */
+	MALI_PMM_STATUS_OS_WAITING, /**< PMM is waiting for OS power up */
+	MALI_PMM_STATUS_OS_POWER_DOWN, /**< OS initiated power down */
+	MALI_PMM_STATUS_DVFS_PAUSE, /**< PMM DVFS Status Pause */
+	MALI_PMM_STATUS_OS_POWER_UP, /**< OS initiated power up */
+	MALI_PMM_STATUS_OFF, /**< PMM is not active */
+} mali_pmm_status;
+
+
+/** @brief Internal state of the PMM
+ *
+ * NOTE(review): the core masks and status fields appear to be protected
+ * by @c lock via the MALI_PMM_LOCK/MALI_PMM_UNLOCK macros - confirm
+ * against the callers before relying on unlocked access.
+ */
+typedef struct _mali_pmm_internal_state
+{
+	mali_pmm_status status; /**< PMM state machine */
+	mali_pmm_policy policy; /**< PMM policy */
+	mali_bool check_policy; /**< PMM policy needs checking */
+	mali_pmm_state state; /**< PMM state */
+	mali_pmm_core_mask cores_registered; /**< Bitmask of cores registered */
+	mali_pmm_core_mask cores_powered; /**< Bitmask of cores powered up */
+	mali_pmm_core_mask cores_idle; /**< Bitmask of cores idle */
+	mali_pmm_core_mask cores_pend_down; /**< Bitmask of cores pending power down */
+	mali_pmm_core_mask cores_pend_up; /**< Bitmask of cores pending power up */
+	mali_pmm_core_mask cores_ack_down; /**< Bitmask of cores acknowledged power down */
+	mali_pmm_core_mask cores_ack_up; /**< Bitmask of cores acknowledged power up */
+
+	_mali_osk_notification_queue_t *queue; /**< PMM event queue */
+	_mali_osk_notification_queue_t *iqueue; /**< PMM internal event queue */
+	_mali_osk_irq_t *irq; /**< PMM irq handler */
+	_mali_osk_lock_t *lock; /**< PMM lock - taken via MALI_PMM_LOCK/MALI_PMM_UNLOCK */
+
+	mali_pmm_message_data os_data; /**< OS data sent via the OS events */
+
+	mali_bool pmu_initialized; /**< PMU initialized */
+
+	_mali_osk_atomic_t messages_queued; /**< PMM event messages queued */
+	u32 waiting; /**< PMM waiting events - due to busy */
+	u32 no_events; /**< PMM called to process when no events */
+
+	u32 missed; /**< PMM missed events due to OOM */
+	mali_bool fatal_power_err; /**< PMM has had a fatal power error? */
+	u32 is_dvfs_active; /**< PMM DVFS activity */
+
+#if MALI_STATE_TRACKING
+	mali_pmm_status mali_last_pmm_status; /**< The previous PMM status */
+	mali_pmm_event_id mali_new_event_status;/**< The type of the last PMM event */
+	mali_bool mali_pmm_lock_acquired; /**< Is the PMM lock held somewhere or not */
+#endif
+
+#if (MALI_PMM_TRACE || MALI_STATE_TRACKING)
+	u32 messages_sent; /**< Total event messages sent */
+	u32 messages_received; /**< Total event messages received */
+	u32 imessages_sent; /**< Total event internal messages sent */
+	u32 imessages_received; /**< Total event internal messages received */
+#endif
+} _mali_pmm_internal_state_t;
+
+/** @brief Sets that a policy needs a check before processing events
+ *
+ * A timer or something has expired that needs dealing with
+ */
+void malipmm_set_policy_check(void);
+
+/** @brief Update the PMM externally viewable state depending on the current PMM internal state
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_update_system_state( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Returns the core mask from the event data - if applicable
+ *
+ * @param pmm internal PMM state
+ * @param event event message to get the core mask from
+ * @return mask of cores that is relevant to this event message
+ */
+mali_pmm_core_mask pmm_cores_from_event_data( _mali_pmm_internal_state_t *pmm, mali_pmm_message_t *event );
+
+/** @brief Sort out which cores need to be powered up from the given core mask
+ *
+ * All cores that can be powered up will be put into a pending state
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to check if they need to be powered up
+ * @return mask of cores that need to be powered up, this can be 0 if all cores
+ * are powered up already
+ */
+mali_pmm_core_mask pmm_cores_to_power_up( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Sort out which cores need to be powered down from the given core mask
+ *
+ * All cores that can be powered down will be put into a pending state. If they
+ * can be powered down immediately they will also be acknowledged that they can be
+ * powered down. If the immediate_only flag is set, then only those cores that
+ * can be acknowledged for power down will be put into a pending state.
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to check if they need to be powered down
+ * @param immediate_only MALI_TRUE means that only cores that can power down now will
+ * be put into a pending state
+ * @return mask of cores that need to be powered down, this can be 0 if all cores
+ * are powered down already
+ */
+mali_pmm_core_mask pmm_cores_to_power_down( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores, mali_bool immediate_only );
+
+/** @brief Cancel an invocation of power down (pmm_invoke_power_down)
+ *
+ * @param pmm internal PMM state
+ */
+void pmm_power_down_cancel( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Check if a call to invoke power down should succeed, or fail
+ *
+ * This will report MALI_FALSE if some of the cores are still active and need
+ * to acknowledge that they are ready to power down
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores to power down have acknowledged they
+ * can power down, else MALI_FALSE
+ */
+mali_bool pmm_power_down_okay( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Try to make all the pending cores power down
+ *
+ * If all the pending cores have acknowledged they can power down, this will call the
+ * PMU power down function to turn them off
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores have been powered down, else MALI_FALSE
+ */
+mali_bool pmm_invoke_power_down( _mali_pmm_internal_state_t *pmm, mali_power_mode power_mode );
+
+/** @brief Check if all the pending cores to power up have done so
+ *
+ * This will report MALI_FALSE if some of the cores are still powered off
+ * and have not acknowledged that they have powered up
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores to power up have acknowledged they
+ * are now powered up, else MALI_FALSE
+ */
+mali_bool pmm_power_up_okay( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Try to make all the pending cores power up
+ *
+ * If all the pending cores have acknowledged they have powered up, this will
+ * make the cores start processing jobs again, else this will call the PMU
+ * power up function to turn them on, and the PMM is then expected to wait for an
+ * interrupt to acknowledge the power up
+ *
+ * @param pmm internal PMM state
+ * @return MALI_TRUE if the pending cores have been powered up, else MALI_FALSE
+ */
+mali_bool pmm_invoke_power_up( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Set the cores that are now active in the system
+ *
+ * Updates which cores are active and returns which cores are still idle
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to set to active
+ * @return mask of all the cores that are idle
+ */
+mali_pmm_core_mask pmm_cores_set_active( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that are now idle in the system
+ *
+ * Updates which cores are idle and returns which cores are still idle
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores to set to idle
+ * @return mask of all the cores that are idle
+ */
+mali_pmm_core_mask pmm_cores_set_idle( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that have acknowledged a pending power down
+ *
+ * Updates which cores have acknowledged the pending power down and are now ready
+ * to be turned off
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores that have acknowledged the pending power down
+ * @return mask of all the cores that have acknowledged the power down
+ */
+mali_pmm_core_mask pmm_cores_set_down_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+/** @brief Set the cores that have acknowledged a pending power up
+ *
+ * Updates which cores have acknowledged the pending power up and are now
+ * fully powered and ready to run jobs
+ *
+ * @param pmm internal PMM state
+ * @param cores mask of cores that have acknowledged the pending power up
+ * @return mask of all the cores that have acknowledged the power up
+ */
+mali_pmm_core_mask pmm_cores_set_up_ack( _mali_pmm_internal_state_t *pmm, mali_pmm_core_mask cores );
+
+
+/** @brief Tries to reset the PMM and PMU hardware to a known state after any fatal issues
+ *
+ * This will try and make all the cores powered up and reset the PMM state
+ * to its initial state after core registration - all cores powered but not
+ * pending or active.
+ * All events in the event queues will be thrown away.
+ *
+ * @note: Any pending power down will be cancelled including the OS calling for power down
+ */
+void pmm_fatal_reset( _mali_pmm_internal_state_t *pmm );
+
+/** @brief Save the OS specific data for an OS power up/down event
+ *
+ * @param pmm internal PMM state
+ * @param data OS specific event data
+ */
+void pmm_save_os_event_data(_mali_pmm_internal_state_t *pmm, mali_pmm_message_data data);
+
+/** @brief Retrieve the OS specific data for an OS power up/down event
+ *
+ * This will clear the stored OS data, as well as return it.
+ *
+ * @param pmm internal PMM state
+ * @return OS specific event data that was saved previously
+ */
+mali_pmm_message_data pmm_retrieve_os_event_data(_mali_pmm_internal_state_t *pmm);
+
+
+/** @brief Get a human readable name for the cores in a core mask
+ *
+ * @param core the core mask
+ * @return string containing a name relating to the given core mask
+ */
+const char *pmm_trace_get_core_name( mali_pmm_core_mask core );
+
+/** @} */ /* End group pmmapi_state */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_STATE_H__ */
diff --git a/drivers/media/video/samsung/mali/common/pmm/mali_pmm_system.h b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_system.h
new file mode 100644
index 0000000..eccd35b
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/pmm/mali_pmm_system.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PMM_SYSTEM_H__
+#define __MALI_PMM_SYSTEM_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup pmmapi Power Management Module APIs
+ *
+ * @{
+ *
+ * @defgroup pmmapi_system Power Management Module System Functions
+ *
+ * @{
+ */
+
+extern struct mali_kernel_subsystem mali_subsystem_pmm;
+
+/** @brief Register a core with the PMM, which will power up
+ * the core
+ *
+ * @param core the core to register with the PMM
+ * @return error if the core cannot be powered up
+ */
+_mali_osk_errcode_t malipmm_core_register( mali_pmm_core_id core );
+
+/** @brief Unregister a core with the PMM
+ *
+ * @param core the core to unregister with the PMM
+ */
+void malipmm_core_unregister( mali_pmm_core_id core );
+
+/** @brief Acknowledge that a power down is okay to happen
+ *
+ * A core should not be running a job, or be in the idle queue when this
+ * is called.
+ *
+ * @param core the core that can now be powered down
+ */
+void malipmm_core_power_down_okay( mali_pmm_core_id core );
+
+/** @} */ /* End group pmmapi_system */
+/** @} */ /* End group pmmapi */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_PMM_SYSTEM_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/license/gpl/mali_kernel_license.h b/drivers/media/video/samsung/mali/linux/license/gpl/mali_kernel_license.h
new file mode 100644
index 0000000..e9e5e55
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/license/gpl/mali_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __MALI_KERNEL_LICENSE_H__
+#define __MALI_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_KERNEL_LINUX_LICENSE "GPL"
+#define MALI_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LICENSE_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/mali_device_pause_resume.c b/drivers/media/video/samsung/mali/linux/mali_device_pause_resume.c
new file mode 100644
index 0000000..04f57d9
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_device_pause_resume.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+#if USING_MALI_PMM
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+#include "mali_linux_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_pmm.h"
+#include "mali_kernel_license.h"
+#ifdef CONFIG_PM
+#if MALI_LICENSE_IS_GPL
+
+/* Mali Pause Resume APIs */
+int mali_dev_pause()
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_SUSPEND)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND) )
+ {
+ err = -EPERM;
+ }
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME) && (!err))
+ {
+ mali_device_suspend(MALI_PMM_EVENT_DVFS_PAUSE, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_SUSPEND;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_pause);
+
+int mali_dev_resume()
+{
+ int err = 0;
+ _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ if ((mali_dvfs_device_state == _MALI_DEVICE_RESUME)
+ || (mali_device_state == _MALI_DEVICE_SUSPEND) )
+ {
+ err = -EPERM;
+ }
+ if (!err)
+ {
+ mali_device_resume(MALI_PMM_EVENT_DVFS_RESUME, &dvfs_pm_thread);
+ mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+ }
+ _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ return err;
+}
+
+EXPORT_SYMBOL(mali_dev_resume);
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/media/video/samsung/mali/linux/mali_device_pause_resume.h b/drivers/media/video/samsung/mali/linux/mali_device_pause_resume.h
new file mode 100644
index 0000000..155a3e6
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_device_pause_resume.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DEVICE_PAUSE_RESUME_H__
+#define __MALI_DEVICE_PAUSE_RESUME_H__
+
+#if USING_MALI_PMM
+int mali_dev_pause(void);
+int mali_dev_resume(void);
+#endif /* USING_MALI_PMM */
+
+#endif /* __MALI_DEVICE_PAUSE_RESUME_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/mali_kernel_ioctl.h b/drivers/media/video/samsung/mali/linux/mali_kernel_ioctl.h
new file mode 100644
index 0000000..7e3a216
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_kernel_ioctl.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_IOCTL_H__
+#define __MALI_KERNEL_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h> /* file system operations */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @file mali_kernel_ioctl.h
+ * Interface to the Linux device driver.
+ * This file describes the interface needed to use the Linux device driver.
+ * Its interface is designed to be used by the HAL implementation through a thin arch layer.
+ */
+
+/**
+ * ioctl commands
+ */
+
+#define MALI_IOC_BASE 0x82
+#define MALI_IOC_CORE_BASE (_MALI_UK_CORE_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_MEMORY_BASE (_MALI_UK_MEMORY_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PP_BASE (_MALI_UK_PP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_GP_BASE (_MALI_UK_GP_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_PROFILING_BASE (_MALI_UK_PROFILING_SUBSYSTEM + MALI_IOC_BASE)
+#define MALI_IOC_VSYNC_BASE (_MALI_UK_VSYNC_SUBSYSTEM + MALI_IOC_BASE)
+
+#define MALI_IOC_GET_SYSTEM_INFO_SIZE _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO_SIZE, _mali_uk_get_system_info_s *)
+#define MALI_IOC_GET_SYSTEM_INFO _IOR (MALI_IOC_CORE_BASE, _MALI_UK_GET_SYSTEM_INFO, _mali_uk_get_system_info_s *)
+#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s *)
+#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_s *)
+#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s *)
+#define MALI_IOC_MEM_GET_BIG_BLOCK _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_GET_BIG_BLOCK, _mali_uk_get_big_block_s *)
+#define MALI_IOC_MEM_FREE_BIG_BLOCK _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_FREE_BIG_BLOCK, _mali_uk_free_big_block_s *)
+#define MALI_IOC_MEM_INIT _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_INIT_MEM, _mali_uk_init_mem_s *)
+#define MALI_IOC_MEM_TERM _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_TERM_MEM, _mali_uk_term_mem_s *)
+#define MALI_IOC_MEM_MAP_EXT _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_MAP_EXT_MEM, _mali_uk_map_external_mem_s *)
+#define MALI_IOC_MEM_UNMAP_EXT _IOW (MALI_IOC_MEMORY_BASE, _MALI_UK_UNMAP_EXT_MEM, _mali_uk_unmap_external_mem_s *)
+#define MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE _IOR (MALI_IOC_MEMORY_BASE, _MALI_UK_QUERY_MMU_PAGE_TABLE_DUMP_SIZE, _mali_uk_query_mmu_page_table_dump_size_s *)
+#define MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_DUMP_MMU_PAGE_TABLE, _mali_uk_dump_mmu_page_table_s *)
+#define MALI_IOC_MEM_ATTACH_UMP _IOWR(MALI_IOC_MEMORY_BASE, _MALI_UK_ATTACH_UMP_MEM, _mali_uk_attach_ump_mem_s *)
+#define MALI_IOC_MEM_RELEASE_UMP _IOW(MALI_IOC_MEMORY_BASE, _MALI_UK_RELEASE_UMP_MEM, _mali_uk_release_ump_mem_s *)
+#define MALI_IOC_PP_START_JOB _IOWR(MALI_IOC_PP_BASE, _MALI_UK_PP_START_JOB, _mali_uk_pp_start_job_s *)
+#define MALI_IOC_PP_NUMBER_OF_CORES_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_NUMBER_OF_CORES, _mali_uk_get_pp_number_of_cores_s *)
+#define MALI_IOC_PP_CORE_VERSION_GET _IOR (MALI_IOC_PP_BASE, _MALI_UK_GET_PP_CORE_VERSION, _mali_uk_get_pp_core_version_s * )
+#define MALI_IOC_PP_ABORT_JOB _IOW (MALI_IOC_PP_BASE, _MALI_UK_PP_ABORT_JOB, _mali_uk_pp_abort_job_s * )
+#define MALI_IOC_GP2_START_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_START_JOB, _mali_uk_gp_start_job_s *)
+#define MALI_IOC_GP2_ABORT_JOB _IOWR(MALI_IOC_GP_BASE, _MALI_UK_GP_ABORT_JOB, _mali_uk_gp_abort_job_s *)
+#define MALI_IOC_GP2_NUMBER_OF_CORES_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_NUMBER_OF_CORES, _mali_uk_get_gp_number_of_cores_s *)
+#define MALI_IOC_GP2_CORE_VERSION_GET _IOR (MALI_IOC_GP_BASE, _MALI_UK_GET_GP_CORE_VERSION, _mali_uk_get_gp_core_version_s *)
+#define MALI_IOC_GP2_SUSPEND_RESPONSE _IOW (MALI_IOC_GP_BASE, _MALI_UK_GP_SUSPEND_RESPONSE,_mali_uk_gp_suspend_response_s *)
+#define MALI_IOC_PROFILING_START _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_START, _mali_uk_profiling_start_s *)
+#define MALI_IOC_PROFILING_ADD_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_ADD_EVENT, _mali_uk_profiling_add_event_s*)
+#define MALI_IOC_PROFILING_STOP _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_STOP, _mali_uk_profiling_stop_s *)
+#define MALI_IOC_PROFILING_GET_EVENT _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_EVENT, _mali_uk_profiling_get_event_s *)
+#define MALI_IOC_PROFILING_CLEAR _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_CLEAR, _mali_uk_profiling_clear_s *)
+#define MALI_IOC_TRANSFER_SW_COUNTERS _IOW (MALI_IOC_PROFILING_BASE, _MALI_UK_TRANSFER_SW_COUNTERS, _mali_uk_sw_counters_s *)
+#define MALI_IOC_PROFILING_GET_CONFIG _IOWR(MALI_IOC_PROFILING_BASE, _MALI_UK_PROFILING_GET_CONFIG, _mali_uk_profiling_get_config_s *)
+#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s *)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_IOCTL_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/mali_kernel_linux.c b/drivers/media/video/samsung/mali/linux/mali_kernel_linux.c
new file mode 100644
index 0000000..05762ca
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_kernel_linux.c
@@ -0,0 +1,594 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_linux.c
+ * Implementation of the Linux device driver entrypoints
+ */
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/mm.h>       /* memory manager definitions */
+#include <linux/device.h>
+
+/* the mali kernel subsystem types */
+#include "mali_kernel_subsystem.h"
+
+/* A memory subsystem always exists, so no need to conditionally include it */
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_kernel_core.h"
+
+#include "mali_osk.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+#include "mali_kernel_ioctl.h"
+#include "mali_ukk_wrappers.h"
+#include "mali_kernel_pm.h"
+#include "mali_linux_pm.h"
+
+#include "mali_kernel_sysfs.h"
+
+/* */
+#include "mali_kernel_license.h"
+
+#include "mali_platform.h"
+
+/* from the __malidrv_build_info.c file that is generated during build */
+extern const char *__malidrv_build_info(void);
+
+/* Module parameter to control log level */
+int mali_debug_level = 2;
+module_param(mali_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+#if MALI_MAJOR_PREDEFINE
+int mali_major = 244;
+#else
+int mali_major = 0;
+#endif
+module_param(mali_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(mali_major, "Device major number");
+
+int mali_benchmark = 0;
+module_param(mali_benchmark, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_benchmark, "Bypass Mali hardware when non-zero");
+
+extern int mali_hang_check_interval;
+module_param(mali_hang_check_interval, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_hang_check_interval, "Interval at which to check for progress after the hw watchdog has been triggered");
+
+extern int mali_max_job_runtime;
+module_param(mali_max_job_runtime, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_max_job_runtime, "Maximum allowed job runtime in msecs.\nJobs will be killed after this no matter what");
+
+#if defined(USING_MALI400_L2_CACHE)
+extern int mali_l2_max_reads;
+module_param(mali_l2_max_reads, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_l2_max_reads, "Maximum reads for Mali L2 cache");
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+extern int mali_boot_profiling;
+module_param(mali_boot_profiling, int, S_IRUSR | S_IRGRP | S_IROTH);
+MODULE_PARM_DESC(mali_boot_profiling, "Start profiling as a part of Mali driver initialization");
+#endif
+
+#if MALI_DVFS_ENABLED
+extern int mali_dvfs_control;
+module_param(mali_dvfs_control, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(mali_dvfs_control, "Mali Current DVFS");
+#if defined(CONFIG_CPU_EXYNOS4210)
+#else
+extern int step0_clk;
+module_param(step0_clk, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step0_clk, "Mali Current step0_clk");
+#ifdef DEBUG
+extern int step0_vol;
+module_param(step0_vol, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step0_vol, "Mali Current step0_vol");
+#endif
+
+#if (MALI_DVFS_STEPS > 1)
+extern int step1_clk;
+module_param(step1_clk, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step1_clk, "Mali Current step1_clk");
+
+extern int step0_up;
+module_param(step0_up, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step0_up, "Mali Current step0_up");
+
+extern int step1_down;
+module_param(step1_down, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step1_down, "Mali Current step1_down");
+#ifdef DEBUG
+extern int step1_vol;
+module_param(step1_vol, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step1_vol, "Mali Current step1_vol");
+#endif
+
+#if (MALI_DVFS_STEPS > 2)
+extern int step2_clk;
+module_param(step2_clk, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step2_clk, "Mali Current step2_clk");
+
+extern int step1_up;
+module_param(step1_up, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step1_up, "Mali Current step1_up");
+
+extern int step2_down;
+module_param(step2_down, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step2_down, "Mali Current step2_down");
+#ifdef DEBUG
+extern int step2_vol;
+module_param(step2_vol, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step2_vol, "Mali Current step2_vol");
+#endif
+
+#if (MALI_DVFS_STEPS > 3)
+extern int step3_clk;
+module_param(step3_clk, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step3_clk, "Mali Current step3_clk");
+
+extern int step2_up;
+module_param(step2_up, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step2_up, "Mali Current step2_up");
+
+extern int step3_down;
+module_param(step3_down, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step3_down, "Mali Current step3_down");
+#ifdef DEBUG
+extern int step3_vol;
+module_param(step3_vol, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP| S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(step3_vol, "Mali Current step3_vol");
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+extern int mali_gpu_clk;
+module_param(mali_gpu_clk, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
+MODULE_PARM_DESC(mali_gpu_clk, "Mali Current Clock");
+
+extern int mali_gpu_vol;
+module_param(mali_gpu_vol, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
+MODULE_PARM_DESC(mali_gpu_vol, "Mali Current Voltage");
+
+extern int gpu_power_state;
+module_param(gpu_power_state, int, S_IRUSR | S_IRGRP | S_IROTH); /* r--r--r-- */
+MODULE_PARM_DESC(gpu_power_state, "Mali Power State");
+extern _mali_device_power_states mali_dvfs_device_state;
+
+static char mali_dev_name[] = "mali"; /* should be const, but the functions we call require non-const */
+
+/* the mali device */
+static struct mali_dev device;
+
+
+static int mali_open(struct inode *inode, struct file *filp);
+static int mali_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma);
+
+/* Linux char file operations provided by the Mali module */
+struct file_operations mali_fops =
+{
+ .owner = THIS_MODULE,
+ .open = mali_open,
+ .release = mali_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = mali_ioctl,
+#else
+ .ioctl = mali_ioctl,
+#endif
+ .mmap = mali_mmap
+};
+
+
+int mali_driver_init(void)
+{
+ int err;
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ err = _mali_dev_platform_register();
+ if (err)
+ {
+ return err;
+ }
+#endif
+#endif
+#endif
+ err = mali_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err)
+ {
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ _mali_dev_platform_unregister();
+#endif
+#endif
+#endif
+ MALI_PRINT(("Failed to initialize driver (error %d)\n", err));
+ return -EFAULT;
+ }
+
+ /* print build options */
+ MALI_DEBUG_PRINT(2, ("%s\n", __malidrv_build_info()));
+
+ return 0;
+}
+
+void mali_driver_exit(void)
+{
+ mali_kernel_destructor();
+
+#if USING_MALI_PMM
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ _mali_dev_platform_unregister();
+#endif
+#endif
+#endif
+}
+
+/* called from _mali_osk_init */
+int initialize_kernel_device(void)
+{
+ int err;
+ dev_t dev = 0;
+ if (0 == mali_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0/*first minor*/, 1/*count*/, mali_dev_name);
+ mali_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(mali_major, 0);
+ err = register_chrdev_region(dev, 1/*count*/, mali_dev_name);
+ }
+
+ if (err)
+ {
+ goto init_chrdev_err;
+ }
+
+ memset(&device, 0, sizeof(device));
+
+ /* initialize our char dev data */
+ cdev_init(&device.cdev, &mali_fops);
+ device.cdev.owner = THIS_MODULE;
+ device.cdev.ops = &mali_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&device.cdev, dev, 1/*count*/);
+ if (err)
+ {
+ goto init_cdev_err;
+ }
+
+ err = mali_sysfs_register(&device, dev, mali_dev_name);
+ if (err)
+ {
+ goto init_sysfs_err;
+ }
+
+ /* Success! */
+ return 0;
+
+init_sysfs_err:
+ cdev_del(&device.cdev);
+init_cdev_err:
+ unregister_chrdev_region(dev, 1/*count*/);
+init_chrdev_err:
+ return err;
+}
+
+/* called from _mali_osk_term */
+void terminate_kernel_device(void)
+{
+ dev_t dev = MKDEV(mali_major, 0);
+
+ mali_sysfs_unregister(&device, dev, mali_dev_name);
+
+ /* unregister char device */
+ cdev_del(&device.cdev);
+ /* free major */
+ unregister_chrdev_region(dev, 1/*count*/);
+ return;
+}
+
+/** @note munmap handler is done by vma close handler */
+static int mali_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ struct mali_session_data * session_data;
+ _mali_uk_mem_mmap_s args = {0, };
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_PRINT_ERROR(("mmap called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("MMap() handler: start=0x%08X, phys=0x%08X, size=0x%08X\n", (unsigned int)vma->vm_start, (unsigned int)(vma->vm_pgoff << PAGE_SHIFT), (unsigned int)(vma->vm_end - vma->vm_start)) );
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = vma->vm_pgoff << PAGE_SHIFT;
+ args.size = vma->vm_end - vma->vm_start;
+ args.ukk_private = vma;
+
+ /* Call the common mmap handler */
+ MALI_CHECK(_MALI_OSK_ERR_OK ==_mali_ukk_mem_mmap( &args ), -EFAULT);
+
+ return 0;
+}
+
+static int mali_open(struct inode *inode, struct file *filp)
+{
+ struct mali_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ /* allocated struct to track this session */
+ err = _mali_ukk_open((void **)&session_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* initialize file pointer */
+ filp->f_pos = 0;
+
+ /* link in our session data */
+ filp->private_data = (void*)session_data;
+
+ return 0;
+}
+
+static int mali_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ /* input validation */
+ if (0 != MINOR(inode->i_rdev)) return -ENODEV;
+
+ err = _mali_ukk_close((void **)&filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ return 0;
+}
+
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long mali_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int mali_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err;
+ struct mali_session_data *session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ /* inode not used */
+ (void)inode;
+#endif
+
+ MALI_DEBUG_PRINT(7, ("Ioctl received 0x%08X 0x%08lX\n", cmd, arg));
+
+ session_data = (struct mali_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(7, ("filp->private_data was NULL\n"));
+ return -ENOTTY;
+ }
+
+ if (NULL == (void *)arg)
+ {
+ MALI_DEBUG_PRINT(7, ("arg was NULL\n"));
+ return -ENOTTY;
+ }
+
+ if (_MALI_DEVICE_SHUTDOWN == mali_dvfs_device_state)
+ {
+ MALI_DEBUG_PRINT(7, ("system is shutdown \n"));
+ return 0;
+ }
+
+ switch(cmd)
+ {
+ case MALI_IOC_GET_SYSTEM_INFO_SIZE:
+ err = get_system_info_size_wrapper(session_data, (_mali_uk_get_system_info_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_SYSTEM_INFO:
+ err = get_system_info_wrapper(session_data, (_mali_uk_get_system_info_s __user *)arg);
+ break;
+
+ case MALI_IOC_WAIT_FOR_NOTIFICATION:
+ err = wait_for_notification_wrapper(session_data, (_mali_uk_wait_for_notification_s __user *)arg);
+ break;
+
+ case MALI_IOC_GET_API_VERSION:
+ err = get_api_version_wrapper(session_data, (_mali_uk_get_api_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_POST_NOTIFICATION:
+ err = post_notification_wrapper(session_data, (_mali_uk_post_notification_s __user *)arg);
+ break;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ case MALI_IOC_PROFILING_START:
+ err = profiling_start_wrapper(session_data, (_mali_uk_profiling_start_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_ADD_EVENT:
+ err = profiling_add_event_wrapper(session_data, (_mali_uk_profiling_add_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_STOP:
+ err = profiling_stop_wrapper(session_data, (_mali_uk_profiling_stop_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_EVENT:
+ err = profiling_get_event_wrapper(session_data, (_mali_uk_profiling_get_event_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_CLEAR:
+ err = profiling_clear_wrapper(session_data, (_mali_uk_profiling_clear_s __user *)arg);
+ break;
+
+ case MALI_IOC_PROFILING_GET_CONFIG:
+ err = profiling_get_config_wrapper(session_data, (_mali_uk_profiling_get_config_s __user *)arg);
+ break;
+#endif
+
+ case MALI_IOC_MEM_INIT:
+ err = mem_init_wrapper(session_data, (_mali_uk_init_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_TERM:
+ err = mem_term_wrapper(session_data, (_mali_uk_term_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_MAP_EXT:
+ err = mem_map_ext_wrapper(session_data, (_mali_uk_map_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_UNMAP_EXT:
+ err = mem_unmap_ext_wrapper(session_data, (_mali_uk_unmap_external_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_QUERY_MMU_PAGE_TABLE_DUMP_SIZE:
+ err = mem_query_mmu_page_table_dump_size_wrapper(session_data, (_mali_uk_query_mmu_page_table_dump_size_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_DUMP_MMU_PAGE_TABLE:
+ err = mem_dump_mmu_page_table_wrapper(session_data, (_mali_uk_dump_mmu_page_table_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_GET_BIG_BLOCK:
+ err = mem_get_big_block_wrapper(filp, (_mali_uk_get_big_block_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_FREE_BIG_BLOCK:
+ err = mem_free_big_block_wrapper(session_data, (_mali_uk_free_big_block_s __user *)arg);
+ break;
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ err = mem_attach_ump_wrapper(session_data, (_mali_uk_attach_ump_mem_s __user *)arg);
+ break;
+
+ case MALI_IOC_MEM_RELEASE_UMP:
+ err = mem_release_ump_wrapper(session_data, (_mali_uk_release_ump_mem_s __user *)arg);
+ break;
+
+#else
+
+ case MALI_IOC_MEM_ATTACH_UMP:
+ case MALI_IOC_MEM_RELEASE_UMP: /* FALL-THROUGH */
+ MALI_DEBUG_PRINT(2, ("UMP not supported\n"));
+ err = -ENOTTY;
+ break;
+#endif
+
+ case MALI_IOC_PP_START_JOB:
+ err = pp_start_job_wrapper(session_data, (_mali_uk_pp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_ABORT_JOB:
+ err = pp_abort_job_wrapper(session_data, (_mali_uk_pp_abort_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_NUMBER_OF_CORES_GET:
+ err = pp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_pp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_PP_CORE_VERSION_GET:
+ err = pp_get_core_version_wrapper(session_data, (_mali_uk_get_pp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_START_JOB:
+ err = gp_start_job_wrapper(session_data, (_mali_uk_gp_start_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_ABORT_JOB:
+ err = gp_abort_job_wrapper(session_data, (_mali_uk_gp_abort_job_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_NUMBER_OF_CORES_GET:
+ err = gp_get_number_of_cores_wrapper(session_data, (_mali_uk_get_gp_number_of_cores_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_CORE_VERSION_GET:
+ err = gp_get_core_version_wrapper(session_data, (_mali_uk_get_gp_core_version_s __user *)arg);
+ break;
+
+ case MALI_IOC_GP2_SUSPEND_RESPONSE:
+ err = gp_suspend_response_wrapper(session_data, (_mali_uk_gp_suspend_response_s __user *)arg);
+ break;
+
+ case MALI_IOC_VSYNC_EVENT_REPORT:
+ err = vsync_event_report_wrapper(session_data, (_mali_uk_vsync_event_report_s __user *)arg);
+ break;
+#if MALI_TRACEPOINTS_ENABLED
+ case MALI_IOC_TRANSFER_SW_COUNTERS:
+ err = transfer_sw_counters_wrapper(session_data, (_mali_uk_sw_counters_s __user *)arg);
+#endif
+ break;
+
+ default:
+ MALI_DEBUG_PRINT(2, ("No handler for ioctl 0x%08X 0x%08lX\n", cmd, arg));
+ err = -ENOTTY;
+ };
+
+ return err;
+}
+
+
+module_init(mali_driver_init);
+module_exit(mali_driver_exit);
+
+MODULE_LICENSE(MALI_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
+
+#if MALI_TRACEPOINTS_ENABLED
+/* Create the trace points (otherwise we just get code to call a tracepoint) */
+#define CREATE_TRACE_POINTS
+#include "mali_linux_trace.h"
+#endif
diff --git a/drivers/media/video/samsung/mali/linux/mali_kernel_linux.h b/drivers/media/video/samsung/mali/linux/mali_kernel_linux.h
new file mode 100644
index 0000000..9c7668c
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_kernel_linux.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_LINUX_H__
+#define __MALI_KERNEL_LINUX_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/cdev.h> /* character device definitions */
+#include "mali_kernel_license.h"
+#include "mali_osk.h"
+
+struct mali_dev
+{
+ struct cdev cdev;
+#if MALI_LICENSE_IS_GPL
+ struct class * mali_class;
+#endif
+};
+
+_mali_osk_errcode_t initialize_kernel_device(void);
+void terminate_kernel_device(void);
+
+void mali_osk_low_level_mem_init(void);
+void mali_osk_low_level_mem_term(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_LINUX_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/mali_kernel_pm.c b/drivers/media/video/samsung/mali/linux/mali_kernel_pm.c
new file mode 100644
index 0000000..f06ea4b
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_kernel_pm.c
@@ -0,0 +1,709 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_pm.c
+ * Implementation of the Linux Power Management for Mali GPU kernel driver
+ */
+
+#if USING_MALI_PMM
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/platform_device.h>
+#include <linux/version.h>
+#include <asm/current.h>
+#include <linux/suspend.h>
+
+#include <plat/cpu.h>
+#include <plat/pd.h>
+#include <plat/devs.h>
+
+#include "mali_platform.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_pmm.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_kernel_pm.h"
+#include "mali_device_pause_resume.h"
+#include "mali_linux_pm.h"
+
+#ifdef MALI_REBOOTNOTIFIER
+_mali_osk_atomic_t mali_shutdown_state;
+#include <linux/reboot.h>
+#endif
+
+#if MALI_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif /* MALI_GPU_UTILIZATION */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+#ifdef CONFIG_PM
+#include "mali_linux_pm_testsuite.h"
+#include "mali_platform_pmu_internal_testing.h"
+unsigned int pwr_mgmt_status_reg = 0;
+#endif /* CONFIG_PM */
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+static int is_os_pmm_thread_waiting = 0;
+
+/* kernel should be configured with power management support */
+#ifdef CONFIG_PM
+
+/* License should be GPL */
+#if MALI_LICENSE_IS_GPL
+
+/* Linux kernel major version */
+#define LINUX_KERNEL_MAJOR_VERSION 2
+
+/* Linux kernel minor version */
+#define LINUX_KERNEL_MINOR_VERSION 6
+
+/* Linux kernel development version */
+#define LINUX_KERNEL_DEVELOPMENT_VERSION 29
+
+#ifdef CONFIG_PM_DEBUG
+static const char* const mali_states[_MALI_MAX_DEBUG_OPERATIONS] = {
+ [_MALI_DEVICE_SUSPEND] = "suspend",
+ [_MALI_DEVICE_RESUME] = "resume",
+ [_MALI_DVFS_PAUSE_EVENT] = "dvfs_pause",
+ [_MALI_DVFS_RESUME_EVENT] = "dvfs_resume",
+};
+
+#endif /* CONFIG_PM_DEBUG */
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+extern void set_mali_parent_power_domain(struct platform_device* dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy);
+
+static struct notifier_block mali_pwr_notif_block = {
+ .notifier_call = mali_pwr_suspend_notifier
+};
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/* Power management thread pointer */
+struct task_struct *pm_thread;
+
+/* dvfs power management thread */
+struct task_struct *dvfs_pm_thread;
+
+/* is wake up needed */
+short is_wake_up_needed = 0;
+int timeout_fired = 2;
+unsigned int is_mali_pmm_testsuite_enabled = 0;
+
+_mali_device_power_states mali_device_state = _MALI_DEVICE_RESUME;
+_mali_device_power_states mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+_mali_osk_lock_t *lock;
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+const char* const mali_pmm_recording_events[_MALI_DEVICE_MAX_PMM_EVENTS] = {
+ [_MALI_DEVICE_PMM_TIMEOUT_EVENT] = "timeout",
+ [_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS] = "job_scheduling",
+ [_MALI_DEVICE_PMM_REGISTERED_CORES] = "cores",
+
+};
+
+unsigned int mali_timeout_event_recording_on = 0;
+unsigned int mali_job_scheduling_events_recording_on = 0;
+unsigned int is_mali_pmu_present = 0;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+/* Function prototypes */
+static int mali_pm_probe(struct platform_device *pdev);
+static int mali_pm_remove(struct platform_device *pdev);
+static void mali_pm_shutdown(struct platform_device *pdev);
+
+/* Mali device suspend function */
+static int mali_pm_suspend(struct device *dev);
+
+/* Mali device resume function */
+static int mali_pm_resume(struct device *dev);
+
+/* Run time suspend and resume functions */
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+static int mali_device_runtime_suspend(struct device *dev);
+static int mali_device_runtime_resume(struct device *dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/* OS suspend and resume callbacks */
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifndef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_suspend(struct platform_device *pdev, pm_message_t state);
+#else
+static int mali_pm_os_suspend(struct device *dev);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_resume(struct platform_device *pdev);
+#else
+static int mali_pm_os_resume(struct device *dev);
+#endif
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+
+/* OS Hibernation suspend callback */
+static int mali_pm_os_suspend_on_hibernation(struct device *dev);
+
+/* OS Hibernation resume callback */
+static int mali_pm_os_resume_on_hibernation(struct device *dev);
+
+static void _mali_release_pm(struct device* device);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static const struct dev_pm_ops mali_dev_pm_ops = {
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .runtime_suspend = mali_device_runtime_suspend,
+ .runtime_resume = mali_device_runtime_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ .suspend = mali_pm_os_suspend,
+ .resume = mali_pm_os_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+ .freeze = mali_pm_os_suspend_on_hibernation,
+ .poweroff = mali_pm_os_suspend_on_hibernation,
+ .thaw = mali_pm_os_resume_on_hibernation,
+ .restore = mali_pm_os_resume_on_hibernation,
+};
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+/* Legacy (pre-2.6.29) hibernation callbacks.  NOTE: .poweroff must point
+ * at the *suspend* handler — mirroring the dev_pm_ops table above — the
+ * original code wired it to the resume handler by copy-paste mistake,
+ * which would have powered the GPU up on poweroff. */
+struct pm_ext_ops mali_pm_operations = {
+	.base = {
+		.freeze = mali_pm_os_suspend_on_hibernation,
+		.thaw =   mali_pm_os_resume_on_hibernation,
+		.poweroff = mali_pm_os_suspend_on_hibernation,
+		.restore = mali_pm_os_resume_on_hibernation,
+	},
+};
+#endif
+
+/* Platform driver hooking Mali into the Linux device model.  Kernels older
+ * than 2.6.29 take suspend/resume directly on the driver; newer kernels
+ * use the dev_pm_ops table instead. */
+static struct platform_driver mali_plat_driver = {
+	.probe  = mali_pm_probe,
+	.remove = mali_pm_remove,
+	.shutdown = mali_pm_shutdown,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+	.suspend = mali_pm_os_suspend,
+	.resume  = mali_pm_os_resume,
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+	.pm = &mali_pm_operations,
+#endif
+
+	.driver = {
+		.name   = "mali_dev",
+		.owner  = THIS_MODULE,
+		.bus = &platform_bus_type,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+		.pm = &mali_dev_pm_ops,
+#endif
+	},
+};
+
+/* Mali GPU platform device; id 0 so the node name becomes "mali_dev.0". */
+struct platform_device mali_gpu_device = {
+	.name = "mali_dev",
+	.id = 0,
+	.dev.release = _mali_release_pm
+};
+
+/** This function is called when platform device is unregistered. This function
+ * is necessary when the platform device is unregistered.
+ * It only logs; no resources are owned by the device structure itself.
+ */
+static void _mali_release_pm(struct device *device)
+{
+	MALI_DEBUG_PRINT(4, ("OSPMM: MALI Platform device removed\n" ));
+}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+/** Probe the PMU power up/down status register to detect whether a PMU
+ * is present; the result is cached in is_mali_pmu_present (0 or 1). */
+void mali_is_pmu_present(void)
+{
+	int status = pmu_get_power_up_down_info();
+
+	/* A reading of 4095 indicates that no PMU is available. */
+	is_mali_pmu_present = (4095 == status) ? 0 : 1;
+}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+#endif /* MALI_LICENSE_IS_GPL */
+
+#if MALI_LICENSE_IS_GPL
+
+/** Block the calling OS PM thread until the power management policy
+ * signals completion of the requested transition.
+ *
+ * Sleeps interruptibly until is_wake_up_needed is set by the PMM, or a
+ * signal arrives (returns -EINTR in that case).  The wake-up flag is
+ * cleared before returning so the next wait starts fresh.
+ */
+static int mali_wait_for_power_management_policy_event(void)
+{
+	int err = 0;
+	for (; ;)
+	{
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current))
+		{
+			err = -EINTR;
+			break;
+		}
+		if (is_wake_up_needed == 1)
+		{
+			break;
+		}
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	is_wake_up_needed =0;
+	return err;
+}
+
+/** This function is invoked when mali device is suspended
+ *
+ * Posts a power-down PMM event (event_id) and blocks the calling thread
+ * until the policy acknowledges it.  *pwr_mgmt_thread is set to the
+ * current task so the PMM knows which thread to wake.
+ * Returns 0 on success or -EINTR if interrupted by a signal.
+ */
+int mali_device_suspend(unsigned int event_id, struct task_struct **pwr_mgmt_thread)
+{
+	int err = 0;
+	_mali_uk_pmm_message_s event = {
+		NULL,
+		event_id,
+		timeout_fired};
+	*pwr_mgmt_thread = current;
+	MALI_DEBUG_PRINT(4, ("OSPMM: MALI device is being suspended\n" ));
+	_mali_ukk_pmm_event_message(&event);
+	is_os_pmm_thread_waiting = 1;
+	err = mali_wait_for_power_management_policy_event();
+	is_os_pmm_thread_waiting = 0;
+	return err;
+}
+
+/** This function is called when Operating system wants to power down
+ * the mali GPU device.
+ *
+ * Serialised against resume via the OSK read/write lock; a no-op if the
+ * device is already suspended.  Utilisation sampling is paused first so
+ * DVFS does not act on stale data while the GPU is off.
+ */
+static int mali_pm_suspend(struct device *dev)
+{
+	int err = 0;
+	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+	if ((mali_device_state == _MALI_DEVICE_SUSPEND))
+	{
+		/* Already suspended: release the lock and report success. */
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		return err;
+	}
+#if MALI_DVFS_ENABLED
+	mali_utilization_suspend();
+#endif
+	err = mali_device_suspend(MALI_PMM_EVENT_OS_POWER_DOWN, &pm_thread);
+	mali_device_state = _MALI_DEVICE_SUSPEND;
+	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+	return err;
+}
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+/* Legacy OS suspend entry point (signature depends on kernel version);
+ * forwards to the common suspend path with no device pointer. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_suspend(struct platform_device *pdev, pm_message_t state)
+#else
+static int mali_pm_os_suspend(struct device *dev)
+#endif
+{
+	int err = 0;
+	err = mali_pm_suspend(NULL);
+	return err;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+/** System sleep PM notifier: suspends Mali before the system suspends and
+ * resumes it afterwards.
+ * NOTE(review): err from the suspend/resume call is discarded and 0 is
+ * always returned, so a failed GPU suspend cannot veto system suspend —
+ * confirm this is intentional (notifier_from_errno(err) would propagate it).
+ */
+static int mali_pwr_suspend_notifier(struct notifier_block *nb,unsigned long event,void* dummy)
+{
+	int err = 0;
+	switch (event)
+	{
+		case PM_SUSPEND_PREPARE:
+			err = mali_pm_suspend(NULL);
+			break;
+
+		case PM_POST_SUSPEND:
+			err = mali_pm_resume(NULL);
+			break;
+		default:
+			break;
+	}
+	return 0;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/** This function is called when mali GPU device is to be resumed.
+ *
+ * Posts a power-up PMM event (event_id) and blocks the calling thread
+ * until the policy acknowledges it.  *pwr_mgmt_thread is set to the
+ * current task so the PMM knows which thread to wake.
+ * Returns 0 on success or -EINTR if interrupted by a signal.
+ */
+int mali_device_resume(unsigned int event_id, struct task_struct **pwr_mgmt_thread)
+{
+	int err = 0;
+	_mali_uk_pmm_message_s event = {
+		NULL,
+		event_id,
+		timeout_fired};
+	*pwr_mgmt_thread = current;
+	MALI_DEBUG_PRINT(4, ("OSPMM: MALI device is being resumed\n" ));
+	_mali_ukk_pmm_event_message(&event);
+	MALI_DEBUG_PRINT(4, ("OSPMM: MALI Power up event is scheduled\n" ));
+	is_os_pmm_thread_waiting = 1;
+	err = mali_wait_for_power_management_policy_event();
+	is_os_pmm_thread_waiting = 0;
+	return err;
+}
+
+/** This function is called when mali GPU device is to be resumed
+ *
+ * Serialised against suspend via the OSK read/write lock; re-enables the
+ * regulator first (when CONFIG_REGULATOR), then a no-op if the device is
+ * already resumed.  Both the device and DVFS state are marked resumed.
+ */
+static int mali_pm_resume(struct device *dev)
+{
+	int err = 0;
+
+	_mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+
+#ifdef CONFIG_REGULATOR
+	mali_regulator_enable();
+#endif
+
+	if (mali_device_state == _MALI_DEVICE_RESUME)
+	{
+		/* Already resumed: release the lock and report success. */
+		_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+		return err;
+	}
+	err = mali_device_resume(MALI_PMM_EVENT_OS_POWER_UP, &pm_thread);
+	mali_device_state = _MALI_DEVICE_RESUME;
+	mali_dvfs_device_state = _MALI_DEVICE_RESUME;
+	_mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+	return err;
+}
+
+#ifndef CONFIG_PM_RUNTIME
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+/* Legacy OS resume entry point (signature depends on kernel version);
+ * forwards to the common resume path with no device pointer. */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(LINUX_KERNEL_MAJOR_VERSION,LINUX_KERNEL_MINOR_VERSION,LINUX_KERNEL_DEVELOPMENT_VERSION))
+static int mali_pm_os_resume(struct platform_device *pdev)
+#else
+static int mali_pm_os_resume(struct device *dev)
+#endif
+{
+	int err = 0;
+	err = mali_pm_resume(NULL);
+	return err;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+/* Hibernation entry point (freeze/poweroff): forwards straight to the
+ * common suspend path. */
+static int mali_pm_os_suspend_on_hibernation(struct device *dev)
+{
+	return mali_pm_suspend(NULL);
+}
+
+/* Hibernation exit point (thaw/restore): forwards straight to the common
+ * resume path. */
+static int mali_pm_os_resume_on_hibernation(struct device *dev)
+{
+	return mali_pm_resume(NULL);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+/** This function is called when runtime suspend of mali device is required.
+ * Only logs here; actual power handling is driven by the PMM policy. */
+static int mali_device_runtime_suspend(struct device *dev)
+{
+	MALI_DEBUG_PRINT(4, ("PMMDEBUG: Mali device Run time suspended \n" ));
+	return 0;
+}
+
+/** This function is called when runtime resume of mali device is required.
+ * Only logs here; actual power handling is driven by the PMM policy. */
+static int mali_device_runtime_resume(struct device *dev)
+{
+	MALI_DEBUG_PRINT(4, ("PMMDEBUG: Mali device Run time Resumed \n" ));
+	return 0;
+}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_DEBUG
+
+/** This function is used for debugging purposes when the user want to see
+ * which power management operations are supported for
+ * mali device.
+ *
+ * Sysfs "show" callback: lists the accepted state strings (or, in test
+ * suite builds, the last PMU status value).  The final trailing space is
+ * replaced with a newline before the length is returned.
+ */
+static ssize_t show_file(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	char *str = buf;
+#if !MALI_POWER_MGMT_TEST_SUITE
+	int pm_counter = 0;
+	for (pm_counter = 0; pm_counter<_MALI_MAX_DEBUG_OPERATIONS; pm_counter++)
+	{
+		str += sprintf(str, "%s ", mali_states[pm_counter]);
+	}
+#else
+	str += sprintf(str, "%d ",pwr_mgmt_status_reg);
+#endif
+	if (str != buf)
+	{
+		*(str-1) = '\n';
+	}
+	return (str-buf);
+}
+
+/** This function is called when user wants to suspend the mali GPU device in order
+ * to simulate the power up and power down events.
+ *
+ * Sysfs "store" callback: matches the written string against the known
+ * state names (suspend/resume/dvfs_pause/dvfs_resume, plus the test-suite
+ * recording commands) and triggers the corresponding transition.  In test
+ * suite builds the outcome is reflected in pwr_mgmt_status_reg.
+ * Always returns count so the write is consumed in one go.
+ */
+static ssize_t store_file(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+	int err = 0;
+
+#if MALI_POWER_MGMT_TEST_SUITE
+	int test_flag_dvfs = 0;
+	pwr_mgmt_status_reg = 0;
+	mali_is_pmu_present();
+
+#endif
+	if (!strncmp(buf,mali_states[_MALI_DEVICE_SUSPEND],strlen(mali_states[_MALI_DEVICE_SUSPEND])))
+	{
+		MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI suspend Power operation is scheduled\n" ));
+		err = mali_pm_suspend(NULL);
+	}
+
+#if MALI_POWER_MGMT_TEST_SUITE
+	else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_REGISTERED_CORES],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_REGISTERED_CORES])))
+	{
+		MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Device get number of registerd cores\n" ));
+		pwr_mgmt_status_reg = _mali_pmm_cores_list();
+		return count;
+	}
+	else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_TIMEOUT_EVENT],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_TIMEOUT_EVENT])))
+	{
+		MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI timeout event recording is enabled\n" ));
+		mali_timeout_event_recording_on = 1;
+	}
+	else if (!strncmp(buf,mali_pmm_recording_events[_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS],strlen(mali_pmm_recording_events[_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS])))
+	{
+		MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Job scheduling events recording is enabled\n" ));
+		mali_job_scheduling_events_recording_on = 1;
+	}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+	else if (!strncmp(buf,mali_states[_MALI_DEVICE_RESUME],strlen(mali_states[_MALI_DEVICE_RESUME])))
+	{
+		MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI Resume Power operation is scheduled\n" ));
+		err = mali_pm_resume(NULL);
+	}
+	else if (!strncmp(buf,mali_states[_MALI_DVFS_PAUSE_EVENT],strlen(mali_states[_MALI_DVFS_PAUSE_EVENT])))
+	{
+		MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI DVFS Pause Power operation is scheduled\n" ));
+		err = mali_dev_pause();
+#if MALI_POWER_MGMT_TEST_SUITE
+		test_flag_dvfs = 1;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+	}
+	else if (!strncmp(buf,mali_states[_MALI_DVFS_RESUME_EVENT],strlen(mali_states[_MALI_DVFS_RESUME_EVENT])))
+	{
+		MALI_DEBUG_PRINT(4, ("PMMDEBUG: MALI DVFS Resume Power operation is scheduled\n" ));
+		err = mali_dev_resume();
+#if MALI_POWER_MGMT_TEST_SUITE
+		test_flag_dvfs = 1;
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+	}
+	else
+	{
+		MALI_DEBUG_PRINT(4, ("PMMDEBUG: Invalid Power Mode Operation selected\n" ));
+	}
+#if MALI_POWER_MGMT_TEST_SUITE
+	/* Publish the result of the operation for the test suite to read back:
+	 * DVFS operations report 1 (ok) / 2 (failed); power operations report
+	 * the PMU status register when a PMU is present. */
+	if (test_flag_dvfs == 1)
+	{
+		if (err)
+		{
+			pwr_mgmt_status_reg = 2;
+		}
+		else
+		{
+			pwr_mgmt_status_reg = 1;
+		}
+	}
+	else
+	{
+		if (1 == is_mali_pmu_present)
+		{
+			pwr_mgmt_status_reg = pmu_get_power_up_down_info();
+		}
+	}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+	return count;
+}
+
+/* Device attribute file */
+static DEVICE_ATTR(file, 0644, show_file, store_file);
+#endif /* CONFIG_PM_DEBUG */
+
+/** Platform driver remove: removes the debug sysfs attribute and disables
+ * runtime PM for the device.  Always returns 0. */
+static int mali_pm_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_DEBUG
+	device_remove_file(&mali_gpu_device.dev, &dev_attr_file);
+#endif /* CONFIG_PM_DEBUG */
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+	pm_runtime_disable(&pdev->dev);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+	return 0;
+}
+
+/** Powers on the Exynos G3D power domain; returns exynos_pd_enable()'s
+ * result (0 on success). */
+int mali_pd_enable(void)
+{
+	return exynos_pd_enable(&exynos4_device_pd[PD_G3D].dev);
+}
+
+/** Platform shutdown hook: marks the DVFS state machine as shut down and
+ * forces the G3D power domain on so the GPU is in a known state at
+ * power-off. */
+static void mali_pm_shutdown(struct platform_device *pdev)
+{
+	MALI_PRINT(("Mali shutdown!!\n"));
+	mali_dvfs_device_state =_MALI_DEVICE_SHUTDOWN;
+	exynos_pd_enable(&exynos4_device_pd[PD_G3D].dev);
+	return;
+}
+
+/** This function is called when the device is probed.
+ * Only creates the CONFIG_PM_DEBUG sysfs attribute; a failure there is
+ * logged but deliberately not treated as fatal.  Always returns 0. */
+static int mali_pm_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_PM_DEBUG
+	int err;
+	err = device_create_file(&mali_gpu_device.dev, &dev_attr_file);
+	if (err)
+	{
+		MALI_DEBUG_PRINT(4, ("PMMDEBUG: Error in creating device file\n" ));
+	}
+#endif /* CONFIG_PM_DEBUG */
+	return 0;
+}
+#ifdef MALI_REBOOTNOTIFIER
+/** Reboot notifier: bumps mali_shutdown_state and marks the DVFS state
+ * machine shut down so no further power transitions are attempted while
+ * the system reboots. */
+static int mali_reboot_notify(struct notifier_block *this,
+		unsigned long code, void *unused)
+{
+	_mali_osk_atomic_inc_return(&mali_shutdown_state);
+	mali_dvfs_device_state = _MALI_DEVICE_SHUTDOWN;
+	MALI_PRINT(("REBOOT Notifier for mali\n"));
+	return NOTIFY_DONE;
+}
+static struct notifier_block mali_reboot_notifier = {
+	.notifier_call = mali_reboot_notify,
+};
+#endif
+
+/** This function is called when Mali GPU device is initialized.
+ *
+ * Registers the PM/reboot notifiers, the Mali platform device, the OSK
+ * lock, and the platform driver — in that order — and unwinds everything
+ * acquired so far on any failure.  (The original code leaked the OSK lock
+ * and left both notifiers registered when platform_device_register()
+ * failed, and never checked the lock allocation.)
+ * Returns 0 on success or a negative errno.
+ */
+int _mali_dev_platform_register(void)
+{
+	int err;
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+	set_mali_parent_power_domain((void *)&mali_gpu_device);
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+	err = register_pm_notifier(&mali_pwr_notif_block);
+	if (err)
+	{
+		return err;
+	}
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef MALI_REBOOTNOTIFIER
+	_mali_osk_atomic_init(&mali_shutdown_state, 0);
+	err = register_reboot_notifier(&mali_reboot_notifier);
+	if (err) {
+		MALI_PRINT(("Failed to setup reboot notifier\n"));
+		goto err_unwind_notifiers;
+	}
+#endif
+
+	err = platform_device_register(&mali_gpu_device);
+	if (err)
+	{
+		goto err_unwind_notifiers;
+	}
+
+	lock = _mali_osk_lock_init((_mali_osk_lock_flags_t)( _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED), 0, 0);
+	if (NULL == lock)
+	{
+		err = -ENOMEM;
+		goto err_unregister_device;
+	}
+
+	err = platform_driver_register(&mali_plat_driver);
+	if (err)
+	{
+		_mali_osk_lock_term(lock);
+		goto err_unregister_device;
+	}
+
+	return 0;
+
+	/* Error unwinding: release in the reverse order of acquisition. */
+err_unregister_device:
+	platform_device_unregister(&mali_gpu_device);
+err_unwind_notifiers:
+#ifdef MALI_REBOOTNOTIFIER
+	unregister_reboot_notifier(&mali_reboot_notifier);
+#endif
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+	unregister_pm_notifier(&mali_pwr_notif_block);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+	return err;
+}
+
+/** This function is called when Mali GPU device is unloaded.
+ * Releases the OSK lock, both notifiers, and the platform driver/device
+ * registered by _mali_dev_platform_register(). */
+void _mali_dev_platform_unregister(void)
+{
+	_mali_osk_lock_term(lock);
+
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+	unregister_pm_notifier(&mali_pwr_notif_block);
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef MALI_REBOOTNOTIFIER
+	unregister_reboot_notifier(&mali_reboot_notifier);
+#endif
+	platform_driver_unregister(&mali_plat_driver);
+	platform_device_unregister(&mali_gpu_device);
+}
+
+/** Returns non-zero while an OS PM thread is blocked waiting for the
+ * power management policy to complete a transition. */
+int mali_get_ospmm_thread_state(void)
+{
+	return is_os_pmm_thread_waiting;
+}
+
+#endif /* MALI_LICENSE_IS_GPL */
+#endif /* CONFIG_PM */
+
+#if MALI_STATE_TRACKING
+/** Appends the OS PMM thread wait state to a state-tracking dump buffer;
+ * returns the number of characters snprintf reports. */
+u32 mali_pmm_dump_os_thread_state( char *buf, u32 size )
+{
+	return snprintf(buf, size, "OSPMM: OS PMM thread is waiting: %s\n", is_os_pmm_thread_waiting ? "true" : "false");
+}
+#endif /* MALI_STATE_TRACKING */
+#endif /* USING_MALI_PMM */
diff --git a/drivers/media/video/samsung/mali/linux/mali_kernel_pm.h b/drivers/media/video/samsung/mali/linux/mali_kernel_pm.h
new file mode 100644
index 0000000..1c44439
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_kernel_pm.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_PM_H__
+#define __MALI_KERNEL_PM_H__
+
+#ifdef USING_MALI_PMM
+int _mali_dev_platform_register(void);
+void _mali_dev_platform_unregister(void);
+#endif /* USING_MALI_PMM */
+int mali_pd_enable(void);
+
+#endif /* __MALI_KERNEL_PM_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/mali_kernel_sysfs.c b/drivers/media/video/samsung/mali/linux/mali_kernel_sysfs.c
new file mode 100644
index 0000000..6dcf052
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_kernel_sysfs.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+/**
+ * @file mali_kernel_sysfs.c
+ * Implementation of some sysfs data exports
+ */
+
+#include <linux/fs.h>
+#include <linux/device.h>
+#include "mali_kernel_license.h"
+#include "mali_kernel_linux.h"
+#include "mali_ukk.h"
+
+#if MALI_LICENSE_IS_GPL
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+#include "mali_kernel_subsystem.h"
+#include "mali_kernel_sysfs.h"
+#include "mali_kernel_profiling.h"
+
+static struct dentry *mali_debugfs_dir = NULL;
+
+#if MALI_STATE_TRACKING
+/** seq_file "show": writes driver version/licence and the full internal
+ * core state dump directly into the seq_file buffer. */
+static int mali_seq_internal_state_show(struct seq_file *seq_file, void *v)
+{
+	u32 len = 0;
+	u32 size;
+	char *buf;
+
+	size = seq_get_buf(seq_file, &buf);
+
+	if(!size)
+	{
+		return -ENOMEM;
+	}
+
+	/* Create the internal state dump. */
+	len = snprintf(buf+len, size-len, "Mali device driver %s\n", SVN_REV_STRING);
+	len += snprintf(buf+len, size-len, "License: %s\n\n", MALI_KERNEL_LINUX_LICENSE);
+
+	len += _mali_kernel_core_dump_state(buf + len, size - len);
+
+	seq_commit(seq_file, len);
+
+	return 0;
+}
+
+/* Open handler wiring the state dump into a single-shot seq_file. */
+static int mali_seq_internal_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mali_seq_internal_state_show, NULL);
+}
+
+static const struct file_operations mali_seq_internal_state_fops = {
+	.owner = THIS_MODULE,
+	.open = mali_seq_internal_state_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+#endif /* MALI_STATE_TRACKING */
+
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+/** debugfs "record" read: reports 1 when profiling is recording, else 0. */
+static ssize_t profiling_record_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	r = sprintf(buf, "%u\n", _mali_profiling_is_recording() ? 1 : 0);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/** debugfs "record" write: non-zero starts a profiling recording (clearing
+ * any completed one first), zero stops the current recording.  Input is a
+ * decimal number; oversized or non-numeric writes are rejected. */
+static ssize_t profiling_record_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	unsigned long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+	{
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt))
+	{
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+	{
+		return ret;
+	}
+
+	if (val != 0)
+	{
+		u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES; /* This can be made configurable at a later stage if we need to */
+
+		/* check if we are already recording */
+		if (MALI_TRUE == _mali_profiling_is_recording())
+		{
+			MALI_DEBUG_PRINT(3, ("Recording of profiling events already in progress\n"));
+			return -EFAULT;
+		}
+
+		/* check if we need to clear out an old recording first */
+		if (MALI_TRUE == _mali_profiling_have_recording())
+		{
+			if (_MALI_OSK_ERR_OK != _mali_profiling_clear())
+			{
+				MALI_DEBUG_PRINT(3, ("Failed to clear existing recording of profiling events\n"));
+				return -EFAULT;
+			}
+		}
+
+		/* start recording profiling data */
+		if (_MALI_OSK_ERR_OK != _mali_profiling_start(&limit))
+		{
+			MALI_DEBUG_PRINT(3, ("Failed to start recording of profiling events\n"));
+			return -EFAULT;
+		}
+
+		MALI_DEBUG_PRINT(3, ("Profiling recording started (max %u events)\n", limit));
+	}
+	else
+	{
+		/* stop recording profiling data */
+		u32 count = 0;
+		if (_MALI_OSK_ERR_OK != _mali_profiling_stop(&count))
+		{
+			MALI_DEBUG_PRINT(2, ("Failed to stop recording of profiling events\n"));
+			return -EFAULT;
+		}
+
+		MALI_DEBUG_PRINT(2, ("Profiling recording stopped (recorded %u events)\n", count));
+	}
+
+	*ppos += cnt;
+	return cnt;
+}
+
+static const struct file_operations profiling_record_fops = {
+	.owner = THIS_MODULE,
+	.read = profiling_record_read,
+	.write = profiling_record_write,
+};
+
+/** seq_file "start": allocates a cursor holding the current position, or
+ * returns NULL (ends iteration) when no recording is available. */
+static void *profiling_events_start(struct seq_file *s, loff_t *pos)
+{
+	loff_t *spos;
+
+	/* check if we have data avaiable */
+	if (MALI_TRUE != _mali_profiling_have_recording())
+	{
+		return NULL;
+	}
+
+	spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
+	if (NULL == spos)
+	{
+		return NULL;
+	}
+
+	*spos = *pos;
+	return spos;
+}
+
+/** seq_file "next": advances the cursor; ends iteration when the next
+ * event index is beyond the recorded count. */
+static void *profiling_events_next(struct seq_file *s, void *v, loff_t *pos)
+{
+	loff_t *spos = v;
+
+	/* check if we have data avaiable */
+	if (MALI_TRUE != _mali_profiling_have_recording())
+	{
+		return NULL;
+	}
+
+	/* check if the next entry actually is avaiable */
+	if (_mali_profiling_get_count() <= (u32)(*spos + 1))
+	{
+		return NULL;
+	}
+
+	*pos = ++*spos;
+	return spos;
+}
+
+/* seq_file "stop": frees the cursor allocated by start. */
+static void profiling_events_stop(struct seq_file *s, void *v)
+{
+	kfree(v);
+}
+
+/** seq_file "show": prints one recorded event as
+ * "timestamp event_id d0 d1 d2 d3 d4"; silently skips indices the
+ * profiling core cannot return. */
+static int profiling_events_show(struct seq_file *seq_file, void *v)
+{
+	loff_t *spos = v;
+	u32 index;
+	u64 timestamp;
+	u32 event_id;
+	u32 data[5];
+
+	index = (u32)*spos;
+
+	/* Retrieve all events */
+	if (_MALI_OSK_ERR_OK == _mali_profiling_get_event(index, &timestamp, &event_id, data))
+	{
+		seq_printf(seq_file, "%llu %u %u %u %u %u %u\n", timestamp, event_id, data[0], data[1], data[2], data[3], data[4]);
+		return 0;
+	}
+
+	return 0;
+}
+
+static const struct seq_operations profiling_events_seq_ops = {
+	.start = profiling_events_start,
+	.next  = profiling_events_next,
+	.stop  = profiling_events_stop,
+	.show  = profiling_events_show
+};
+
+static int profiling_events_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &profiling_events_seq_ops);
+}
+
+static const struct file_operations profiling_events_fops = {
+	.owner = THIS_MODULE,
+	.open = profiling_events_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
+
+/** debugfs read: reports the default profiling enable state (0/1). */
+static ssize_t profiling_proc_default_enable_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	r = sprintf(buf, "%u\n", _mali_profiling_get_default_enable_state() ? 1 : 0);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/** debugfs write: sets the default profiling enable state from a decimal
+ * value (non-zero => enabled). */
+static ssize_t profiling_proc_default_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	unsigned long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+	{
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, cnt))
+	{
+		return -EFAULT;
+	}
+
+	buf[cnt] = 0;
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret < 0)
+	{
+		return ret;
+	}
+
+	_mali_profiling_set_default_enable_state(val != 0 ? MALI_TRUE : MALI_FALSE);
+
+	*ppos += cnt;
+	return cnt;
+}
+
+static const struct file_operations profiling_proc_default_enable_fops = {
+	.owner = THIS_MODULE,
+	.read  = profiling_proc_default_enable_read,
+	.write = profiling_proc_default_enable_write,
+};
+#endif
+
+/** debugfs read: reports the driver's current memory usage (bytes, as
+ * returned by _mali_ukk_report_memory_usage) as a decimal line. */
+static ssize_t memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	size_t r;
+	u32 mem = _mali_ukk_report_memory_usage();
+
+	r = snprintf(buf, 64, "%u\n", mem);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static const struct file_operations memory_usage_fops = {
+	.owner = THIS_MODULE,
+	.read = memory_used_read,
+};
+
+/** Creates the Mali device class/node and the debugfs tree.
+ *
+ * Class/device creation failures are fatal (unwound and returned as a
+ * negative errno); debugfs failures are tolerated — the driver simply
+ * runs without debug files when debugfs is unsupported or the directory
+ * could not be created.
+ */
+int mali_sysfs_register(struct mali_dev *device, dev_t dev, const char *mali_dev_name)
+{
+	int err = 0;
+	struct device * mdev;
+
+	device->mali_class = class_create(THIS_MODULE, mali_dev_name);
+	if (IS_ERR(device->mali_class))
+	{
+		err = PTR_ERR(device->mali_class);
+		goto init_class_err;
+	}
+	mdev = device_create(device->mali_class, NULL, dev, NULL, mali_dev_name);
+	if (IS_ERR(mdev))
+	{
+		err = PTR_ERR(mdev);
+		goto init_mdev_err;
+	}
+
+	mali_debugfs_dir = debugfs_create_dir(mali_dev_name, NULL);
+	if(ERR_PTR(-ENODEV) == mali_debugfs_dir)
+	{
+		/* Debugfs not supported. */
+		mali_debugfs_dir = NULL;
+	}
+	else
+	{
+		if(NULL != mali_debugfs_dir)
+		{
+			/* Debugfs directory created successfully; create files now */
+#if MALI_TIMELINE_PROFILING_ENABLED
+			struct dentry *mali_profiling_dir = debugfs_create_dir("profiling", mali_debugfs_dir);
+			if (mali_profiling_dir != NULL)
+			{
+				struct dentry *mali_profiling_proc_dir = debugfs_create_dir("proc", mali_profiling_dir);
+				if (mali_profiling_proc_dir != NULL)
+				{
+					struct dentry *mali_profiling_proc_default_dir = debugfs_create_dir("default", mali_profiling_proc_dir);
+					if (mali_profiling_proc_default_dir != NULL)
+					{
+						debugfs_create_file("enable", 0600, mali_profiling_proc_default_dir, NULL, &profiling_proc_default_enable_fops);
+					}
+				}
+				debugfs_create_file("record", 0600, mali_profiling_dir, NULL, &profiling_record_fops);
+				debugfs_create_file("events", 0400, mali_profiling_dir, NULL, &profiling_events_fops);
+			}
+#endif
+
+#if MALI_STATE_TRACKING
+			debugfs_create_file("state_dump", 0400, mali_debugfs_dir, NULL, &mali_seq_internal_state_fops);
+#endif
+
+			debugfs_create_file("memory_usage", 0400, mali_debugfs_dir, NULL, &memory_usage_fops);
+		}
+	}
+
+	/* Success! */
+	return 0;
+
+	/* Error handling */
+init_mdev_err:
+	class_destroy(device->mali_class);
+init_class_err:
+
+	return err;
+}
+
+/** Removes the debugfs tree (if it was created) and destroys the device
+ * node and class made by mali_sysfs_register().  Always returns 0. */
+int mali_sysfs_unregister(struct mali_dev *device, dev_t dev, const char *mali_dev_name)
+{
+	if(NULL != mali_debugfs_dir)
+	{
+		debugfs_remove_recursive(mali_debugfs_dir);
+	}
+	device_destroy(device->mali_class, dev);
+	class_destroy(device->mali_class);
+
+	return 0;
+}
+
+#else
+
+/* Dummy implementations for when the sysfs API isn't available. */
+
+/* No-op: non-GPL builds expose no sysfs/debugfs interface. */
+int mali_sysfs_register(struct mali_dev *device, dev_t dev, const char *mali_dev_name)
+{
+	return 0;
+}
+
+/* No-op counterpart of the dummy register above. */
+int mali_sysfs_unregister(struct mali_dev *device, dev_t dev, const char *mali_dev_name)
+{
+	return 0;
+}
+
+
+#endif
diff --git a/drivers/media/video/samsung/mali/linux/mali_kernel_sysfs.h b/drivers/media/video/samsung/mali/linux/mali_kernel_sysfs.h
new file mode 100644
index 0000000..f68b4e1
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_kernel_sysfs.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_SYSFS_H__
+#define __MALI_KERNEL_SYSFS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_PROC_DIR "driver/mali"
+
+/* Create the Mali device class/node and (optionally) debugfs entries.
+ * NOTE(review): first parameter is named 'mali_class' here but the
+ * implementation treats it as the whole device object — confirm/rename. */
+int mali_sysfs_register(struct mali_dev *mali_class, dev_t dev, const char *mali_dev_name);
+
+/* Tear down everything created by mali_sysfs_register(). */
+int mali_sysfs_unregister(struct mali_dev *mali_class, dev_t dev, const char *mali_dev_name);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_KERNEL_SYSFS_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/mali_linux_pm.h b/drivers/media/video/samsung/mali/linux/mali_linux_pm.h
new file mode 100644
index 0000000..a8c0c52
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_linux_pm.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_LINUX_PM_H__
+#define __MALI_LINUX_PM_H__
+
+#if USING_MALI_PMM
+
+#ifdef CONFIG_PM
+/* Number of power states supported for making power up and down */
+typedef enum
+{
+	_MALI_DEVICE_SUSPEND,              /* Suspend */
+	_MALI_DEVICE_RESUME,               /* Resume */
+	_MALI_DEVICE_MAX_POWER_STATES,     /* Maximum power states */
+	_MALI_DEVICE_SHUTDOWN,             /* Power off states*/
+	/* NOTE(review): _MALI_DEVICE_SHUTDOWN (3) numerically collides with
+	 * _MALI_DVFS_RESUME_EVENT below, since the DVFS enum starts at
+	 * _MALI_DEVICE_MAX_POWER_STATES (2) — confirm this is intentional. */
+} _mali_device_power_states;
+
+/* Number of DVFS events */
+typedef enum
+{
+	_MALI_DVFS_PAUSE_EVENT = _MALI_DEVICE_MAX_POWER_STATES,	/* DVFS Pause event */
+	_MALI_DVFS_RESUME_EVENT,                               	/* DVFS Resume event */
+	_MALI_MAX_DEBUG_OPERATIONS,
+} _mali_device_dvfs_events;
+
+/* Shared PM state; defined in the PM implementation file. */
+extern _mali_device_power_states mali_device_state;
+extern _mali_device_power_states mali_dvfs_device_state;
+extern _mali_osk_lock_t *lock;
+extern short is_wake_up_needed;
+extern int timeout_fired;
+extern struct platform_device mali_gpu_device;
+
+/* dvfs pm thread */
+extern struct task_struct *dvfs_pm_thread;
+
+/* Power management thread */
+extern struct task_struct *pm_thread;
+
+int mali_device_suspend(u32 event_id, struct task_struct **pwr_mgmt_thread);
+int mali_device_resume(u32 event_id, struct task_struct **pwr_mgmt_thread);
+int mali_get_ospmm_thread_state(void);
+
+#endif /* CONFIG_PM */
+#endif /* USING_MALI_PMM */
+#endif /* __MALI_LINUX_PM_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/mali_linux_pm_testsuite.h b/drivers/media/video/samsung/mali/linux/mali_linux_pm_testsuite.h
new file mode 100644
index 0000000..7b1bff9
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_linux_pm_testsuite.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __MALI_LINUX_PM_TESTSUITE_H__
+#define __MALI_LINUX_PM_TESTSUITE_H__
+
+/* Only compiled when the PMM power-management test suite is enabled. */
+#if USING_MALI_PMM
+#if MALI_POWER_MGMT_TEST_SUITE
+#ifdef CONFIG_PM
+
+/* Event categories the PMM test suite can record. */
+typedef enum
+{
+	_MALI_DEVICE_PMM_TIMEOUT_EVENT,
+	_MALI_DEVICE_PMM_JOB_SCHEDULING_EVENTS,
+	_MALI_DEVICE_PMM_REGISTERED_CORES,
+	_MALI_DEVICE_MAX_PMM_EVENTS
+
+} _mali_device_pmm_recording_events;
+
+/* Test-suite recording flags/status; defined in the PMM test code. */
+extern unsigned int mali_timeout_event_recording_on;
+extern unsigned int mali_job_scheduling_events_recording_on;
+extern unsigned int pwr_mgmt_status_reg;
+extern unsigned int is_mali_pmm_testsuite_enabled;
+extern unsigned int is_mali_pmu_present;
+
+#endif /* CONFIG_PM */
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+#endif /* USING_MALI_PMM */
+#endif /* __MALI_LINUX_PM_TESTSUITE_H__ */
+
+
diff --git a/drivers/media/video/samsung/mali/linux/mali_linux_trace.h b/drivers/media/video/samsung/mali/linux/mali_linux_trace.h
new file mode 100644
index 0000000..3ce1e50
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_linux_trace.h
@@ -0,0 +1,93 @@
+#if !defined(_TRACE_MALI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MALI_H
+
+#include <linux/stringify.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mali
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE mali_linux_trace
+
+/**
+ * mali_timeline_event - called from the central collection point (_mali_profiling_add_event)
+ * @event_id: ORed together bitfields representing a type of event
+ * In the future we might add
+ * @timestamp
+ * @data[5] - this currently includes thread and process id's - we should have EGLConfig or similar too
+ *
+ * Just make a record of the event_id, we'll decode it elsewhere
+ */
+TRACE_EVENT(mali_timeline_event,
+
+	TP_PROTO(unsigned int event_id),
+
+	TP_ARGS(event_id),
+
+	/* Only the raw event_id is stored; decoding happens in userspace. */
+	TP_STRUCT__entry(
+		__field( int, event_id )
+	),
+
+	TP_fast_assign(
+		__entry->event_id = event_id;
+	),
+
+	TP_printk("event=%d", __entry->event_id)
+);
+
+/**
+ * mali_hw_counter - called from the ????
+ * @event_id: event being counted
+ * In the future we might add
+ * @timestamp ??
+ *
+ * Just make a record of the event_id and value
+ */
+/* Records a hardware-counter sample: both the counter id and its value
+ * are stored in the trace entry. */
+TRACE_EVENT(mali_hw_counter,
+
+	TP_PROTO(unsigned int event_id, unsigned int value),
+
+	TP_ARGS(event_id, value),
+
+	TP_STRUCT__entry(
+		__field( int, event_id )
+		__field( int, value )
+	),
+
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		/* Fix: 'value' was never assigned, so TP_printk below emitted
+		 * uninitialized trace-buffer data. */
+		__entry->value = value;
+	),
+
+	TP_printk("event %d = %d", __entry->event_id, __entry->value)
+);
+
+/**
+ * mali_sw_counter
+ * @event_id: counter id
+ */
+/* Records a software-counter sample (64-bit value). */
+TRACE_EVENT(mali_sw_counter,
+
+	TP_PROTO(unsigned int event_id, signed long long value),
+
+	TP_ARGS(event_id, value),
+
+	TP_STRUCT__entry(
+		__field( int, event_id )
+		__field( long long, value )
+	),
+
+	TP_fast_assign(
+		__entry->event_id = event_id;
+		/* Fix: 'value' was never assigned, so TP_printk below emitted
+		 * uninitialized trace-buffer data. */
+		__entry->value = value;
+	),
+
+	TP_printk("event %d = %lld", __entry->event_id, __entry->value)
+);
+
+#endif /* _TRACE_MALI_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef linux
+#define TRACE_INCLUDE_PATH .
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_atomics.c b/drivers/media/video/samsung/mali/linux/mali_osk_atomics.c
new file mode 100644
index 0000000..05831c5
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_atomics.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_atomics.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <asm/atomic.h>
+#include "mali_kernel_common.h"
+
+/* Atomically decrement the counter wrapped in @atom. */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom )
+{
+	atomic_dec((atomic_t *)&atom->u.val);
+}
+
+/* Atomically decrement @atom and return the new (post-decrement) value. */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom )
+{
+	return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+/* Atomically increment the counter wrapped in @atom. */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom )
+{
+	atomic_inc((atomic_t *)&atom->u.val);
+}
+
+/* Atomically increment @atom and return the new (post-increment) value. */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom )
+{
+	return atomic_inc_return((atomic_t *)&atom->u.val);
+}
+
+/* Initialize @atom to @val.
+ * Returns _MALI_OSK_ERR_INVALID_ARGS if @atom is NULL (via MALI_CHECK_NON_NULL),
+ * _MALI_OSK_ERR_OK otherwise. */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val )
+{
+	MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+	atomic_set((atomic_t *)&atom->u.val, val);
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Return the current value of @atom (atomic read, no ordering implied). */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom )
+{
+	return atomic_read((atomic_t *)&atom->u.val);
+}
+
+/* No teardown needed for Linux atomics; kept for OSK API symmetry. */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom )
+{
+	MALI_IGNORE(atom);
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_indir_mmap.c b/drivers/media/video/samsung/mali/linux/mali_osk_indir_mmap.c
new file mode 100644
index 0000000..7297218
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_indir_mmap.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * @file mali_osk_specific.c
+ * Implementation of per-OS Kernel level specifics
+ */
+
+/* Map Mali memory into the current process via do_mmap() on the device file
+ * stored in args->ukk_private.  On success args->mapping holds the user
+ * address; returns _MALI_OSK_ERR_FAULT on failure.
+ * NOTE(review): args->phys_addr is passed as do_mmap()'s offset argument
+ * directly — confirm it is page-aligned / in the units do_mmap expects. */
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args )
+{
+	/* args->ctx ignored here; args->ukk_private required instead */
+	/* we need to lock the mmap semaphore before calling the do_mmap function */
+	down_write(&current->mm->mmap_sem);
+
+	args->mapping = (void __user *)do_mmap(
+		(struct file *)args->ukk_private,
+		0, /* start mapping from any address after NULL */
+		args->size,
+		PROT_READ | PROT_WRITE,
+		MAP_SHARED,
+		args->phys_addr
+		);
+
+	/* and unlock it after the call */
+	up_write(&current->mm->mmap_sem);
+
+	/* No cookie required here */
+	args->cookie = 0;
+	/* uku_private meaningless, so zero */
+	args->uku_private = NULL;
+
+	/* do_mmap returns either a valid address or an ERR_PTR-encoded error. */
+	if ( (NULL == args->mapping) || IS_ERR((void *)args->mapping) )
+	{
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	/* Success */
+	return _MALI_OSK_ERR_OK;
+}
+
+
+/* Remove a mapping previously created by _mali_osk_specific_indirect_mmap()
+ * from the current process' address space.  Always returns _MALI_OSK_ERR_OK. */
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args )
+{
+	/* args->ctx and args->cookie ignored here */
+
+	if ((NULL == current) || (NULL == current->mm))
+	{
+		/* No user process context to unmap from. */
+		MALI_DEBUG_PRINT(2, ("Freeing of a big block while no user process attached, assuming crash cleanup in progress\n"));
+		return _MALI_OSK_ERR_OK; /* always succeeds */
+	}
+
+	/* mmap_sem must be held for writing around do_munmap(). */
+	down_write(&current->mm->mmap_sem);
+	do_munmap(
+		current->mm,
+		(unsigned long)args->mapping,
+		args->size
+		);
+	up_write(&current->mm->mmap_sem);
+	MALI_DEBUG_PRINT(5, ("unmapped\n"));
+
+	return _MALI_OSK_ERR_OK; /* always succeeds */
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_indir_mmap.h b/drivers/media/video/samsung/mali/linux/mali_osk_indir_mmap.h
new file mode 100644
index 0000000..41cb462
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_indir_mmap.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_INDIR_MMAP_H__
+#define __MALI_OSK_INDIR_MMAP_H__
+
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Linux specific means for calling _mali_ukk_mem_mmap/munmap
+ *
+ * The presence of _MALI_OSK_SPECIFIC_INDIRECT_MMAP indicates that
+ * _mali_osk_specific_indirect_mmap and _mali_osk_specific_indirect_munmap
+ * should be used instead of _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * The arguments are the same as _mali_ukk_mem_mmap/_mali_ukk_mem_munmap.
+ *
+ * In ALL operating system other than Linux, it is expected that common code
+ * should be able to call _mali_ukk_mem_mmap/_mali_ukk_mem_munmap directly.
+ * Such systems should NOT define _MALI_OSK_SPECIFIC_INDIRECT_MMAP.
+ */
+/* Both always return _MALI_OSK_ERR_OK or _MALI_OSK_ERR_FAULT (mmap only). */
+_mali_osk_errcode_t _mali_osk_specific_indirect_mmap( _mali_uk_mem_mmap_s *args );
+_mali_osk_errcode_t _mali_osk_specific_indirect_munmap( _mali_uk_mem_munmap_s *args );
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_INDIR_MMAP_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_irq.c b/drivers/media/video/samsung/mali/linux/mali_osk_irq.c
new file mode 100644
index 0000000..c597b9e
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_irq.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_irq.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/slab.h> /* For memory allocation */
+#include <linux/workqueue.h>
+
+#include "mali_osk.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "linux/interrupt.h"
+
+/* Per-core IRQ bookkeeping: the IRQ number, the caller-supplied upper/bottom
+ * half handlers with their context pointer, and the work item used to defer
+ * the bottom half to the workqueue. */
+typedef struct _mali_osk_irq_t_struct
+{
+	u32 irqnum;
+	void *data;
+	_mali_osk_irq_uhandler_t uhandler;
+	_mali_osk_irq_bhandler_t bhandler;
+	struct work_struct work_queue_irq_handle; /* Workqueue for the bottom half of the IRQ-handling. This job is activated when this core gets an IRQ.*/
+} mali_osk_irq_object_t;
+
+static struct workqueue_struct *mali_irq_wq=NULL;
+
+typedef void (*workqueue_func_t)(void *);
+typedef irqreturn_t (*irq_handler_func_t)(int, void *, struct pt_regs *);
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ); /* , struct pt_regs *regs*/
+
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work );
+#else
+static void irq_handler_bottom_half ( void * input );
+#endif
+
+/**
+ * Linux kernel version has marked SA_SHIRQ as deprecated, IRQF_SHARED should be used.
+ * This is to handle older kernels which haven't done this swap.
+ */
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif /* IRQF_SHARED */
+
+/* Set up IRQ handling for one core: allocate the tracking object, initialise
+ * the bottom-half work item, optionally probe for the IRQ number (when
+ * irqnum == -1 and trigger/ack callbacks are supplied), request the IRQ from
+ * the kernel, and lazily create the shared bottom-half workqueue.
+ * Returns NULL on allocation failure, failed probe, or failed request_irq. */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description )
+{
+	mali_osk_irq_object_t *irq_object;
+
+	irq_object = kmalloc(sizeof(mali_osk_irq_object_t), GFP_KERNEL);
+	if (NULL == irq_object) return NULL;
+
+	/* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+	/* New syntax: INIT_WORK( struct work_struct *work, void (*function)(struct work_struct *)) */
+	INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half);
+#else
+	/* Old syntax: INIT_WORK( struct work_struct *work, void (*function)(void *), void *data) */
+	INIT_WORK( &irq_object->work_queue_irq_handle, irq_handler_bottom_half, irq_object);
+#endif /* defined(INIT_DELAYED_WORK) */
+
+	if (-1 == irqnum)
+	{
+		/* Probe for IRQ */
+		if ( (NULL != trigger_func) && (NULL != ack_func) )
+		{
+			unsigned long probe_count = 3;
+			_mali_osk_errcode_t err;
+			int irq;
+
+			MALI_DEBUG_PRINT(2, ("Probing for irq\n"));
+
+			/* Trigger a Mali interrupt while probe_irq_on/off watch which
+			 * line fires; retry up to probe_count times. */
+			do
+			{
+				unsigned long mask;
+
+				mask = probe_irq_on();
+				trigger_func(data);
+
+				_mali_osk_time_ubusydelay(5);
+
+				irq = probe_irq_off(mask);
+				err = ack_func(data);
+			}
+			while (irq < 0 && (err == _MALI_OSK_ERR_OK) && probe_count--);
+
+			if (irq < 0 || (_MALI_OSK_ERR_OK != err)) irqnum = -1;
+			else irqnum = irq;
+		}
+		else irqnum = -1; /* no probe functions, fault */
+
+		if (-1 != irqnum)
+		{
+			/* found an irq */
+			MALI_DEBUG_PRINT(2, ("Found irq %d\n", irqnum));
+		}
+		else
+		{
+			MALI_DEBUG_PRINT(2, ("Probe for irq failed\n"));
+		}
+	}
+
+	irq_object->irqnum = irqnum;
+	irq_object->uhandler = uhandler;
+	irq_object->bhandler = bhandler;
+	irq_object->data = data;
+
+	/* Is this a real IRQ handler we need? */
+	/* Benchmark mode and the FAKE/PMM pseudo-IRQ numbers skip request_irq. */
+	if (!mali_benchmark && irqnum != _MALI_OSK_IRQ_NUMBER_FAKE && irqnum != _MALI_OSK_IRQ_NUMBER_PMM)
+	{
+		if (-1 == irqnum)
+		{
+			MALI_DEBUG_PRINT(2, ("No IRQ for core '%s' found during probe\n", description));
+			kfree(irq_object);
+			return NULL;
+		}
+
+		if (0 != request_irq(irqnum, irq_handler_upper_half, IRQF_SHARED, description, irq_object))
+		{
+			MALI_DEBUG_PRINT(2, ("Unable to install IRQ handler for core '%s'\n", description));
+			kfree(irq_object);
+			return NULL;
+		}
+	}
+
+	/* Single shared workqueue for all cores' bottom halves, created on
+	 * first use. */
+	if (mali_irq_wq == NULL)
+	{
+		mali_irq_wq = create_singlethread_workqueue("mali-pmm-wq");
+	}
+
+	return irq_object;
+}
+
+/* Queue the bottom-half work item for @irq on CPU 0 of the shared Mali
+ * workqueue. */
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq )
+{
+	mali_osk_irq_object_t *obj = (mali_osk_irq_object_t *)irq;
+
+	queue_work_on(0, mali_irq_wq, &obj->work_queue_irq_handle);
+}
+
+/* Wait until all queued bottom-half work has finished.  Flushes the single
+ * shared Mali workqueue, so the @irq argument is currently unused. */
+void _mali_osk_flush_workqueue( _mali_osk_irq_t *irq )
+{
+	flush_workqueue(mali_irq_wq );
+}
+
+/* Tear down IRQ handling for one core: drain and destroy the shared
+ * workqueue, release the kernel IRQ (if one was requested), and free the
+ * tracking object. */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq )
+{
+	mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)irq;
+
+	if(mali_irq_wq != NULL)
+	{
+		flush_workqueue(mali_irq_wq);
+		destroy_workqueue(mali_irq_wq);
+		mali_irq_wq = NULL;
+	}
+
+	/* Fix: only free the IRQ when one was actually requested.
+	 * _mali_osk_irq_init() skips request_irq() in benchmark mode and for
+	 * the FAKE/PMM pseudo-IRQ numbers; the old code checked only
+	 * !mali_benchmark and so called free_irq() on never-requested IRQs. */
+	if (!mali_benchmark && irq_object->irqnum != _MALI_OSK_IRQ_NUMBER_FAKE && irq_object->irqnum != _MALI_OSK_IRQ_NUMBER_PMM)
+	{
+		free_irq(irq_object->irqnum, irq_object);
+	}
+	kfree(irq_object);
+	flush_scheduled_work();
+}
+
+
+/** This function is called directly in interrupt context from the OS just after
+ * the CPU get the hw-irq from mali, or other devices on the same IRQ-channel.
+ * It is registered one of these function for each mali core. When an interrupt
+ * arrives this function will be called equal times as registered mali cores.
+ * That means that we only check one mali core in one function call, and the
+ * core we check for each turn is given by the \a dev_id variable.
+ * If we detect an pending interrupt on the given core, we mask the interrupt
+ * out by settging the core's IRQ_MASK register to zero.
+ * Then we schedule the mali_core_irq_handler_bottom_half to run as high priority
+ * work queue job.
+ */
+/* Top half: dev_id identifies the core this registration is for.  The
+ * caller-supplied uhandler checks (and masks) the core's pending interrupt;
+ * report IRQ_HANDLED only when it claims the interrupt, so sharing works. */
+static irqreturn_t irq_handler_upper_half (int port_name, void* dev_id ) /* , struct pt_regs *regs*/
+{
+	mali_osk_irq_object_t *irq_object = (mali_osk_irq_object_t *)dev_id;
+
+	if (irq_object->uhandler(irq_object->data) == _MALI_OSK_ERR_OK)
+	{
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+/* Is executed when an interrupt occur on one core */
+/* workqueue API changed in 2.6.20, support both versions: */
+#if defined(INIT_DELAYED_WORK)
+static void irq_handler_bottom_half ( struct work_struct *work )
+#else
+static void irq_handler_bottom_half ( void * input )
+#endif
+{
+	mali_osk_irq_object_t *irq_object;
+
+	/* Recover the owning irq_object: new workqueue API embeds the work
+	 * item in the object (container_of); old API passed it directly. */
+#if defined(INIT_DELAYED_WORK)
+	irq_object = _MALI_OSK_CONTAINER_OF(work, mali_osk_irq_object_t, work_queue_irq_handle);
+#else
+	if ( NULL == input )
+	{
+		MALI_PRINT_ERROR(("IRQ: Null pointer! Illegal!"));
+		return; /* Error */
+	}
+	irq_object = (mali_osk_irq_object_t *) input;
+#endif
+
+	/* Run the caller-supplied bottom-half handler in process context. */
+	irq_object->bhandler(irq_object->data);
+}
+
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_locks.c b/drivers/media/video/samsung/mali/linux/mali_osk_locks.c
new file mode 100644
index 0000000..aad6fc6
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_locks.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_locks.c
+ * Implemenation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* These are all the locks we implement: */
+/* These are all the locks we implement — the concrete Linux primitive each
+ * _mali_osk_lock_t is backed by, selected in _mali_osk_lock_init(). */
+typedef enum
+{
+	_MALI_OSK_INTERNAL_LOCKTYPE_SPIN,            /* Mutex, implicitly non-interruptable, use spin_lock/spin_unlock */
+	_MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ,        /* Mutex, IRQ version of spinlock, use spin_lock_irqsave/spin_unlock_irqrestore */
+	_MALI_OSK_INTERNAL_LOCKTYPE_MUTEX,           /* Interruptable, use up()/down_interruptable() */
+	_MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT,    /* Non-Interruptable, use up()/down() */
+	_MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW, /* Non-interruptable, Reader/Writer, use {up,down}{read,write}() */
+
+	/* Linux supports, but we do not support:
+	 * Non-Interruptable Reader/Writer spinlock mutexes - RW optimization will be switched off
+	 */
+
+	/* Linux does not support:
+	 * One-locks, of any sort - no optimization for this fact will be made.
+	 */
+
+} _mali_osk_internal_locktype;
+
+/* One OSK lock: the chosen backing primitive lives in the union, selected by
+ * 'type'.  'flags' stores the saved IRQ flags for the SPIN_IRQ variant
+ * between _mali_osk_lock_wait() and _mali_osk_lock_signal(). */
+struct _mali_osk_lock_t_struct
+{
+	_mali_osk_internal_locktype type;
+	unsigned long flags;
+	union
+	{
+		spinlock_t spinlock;
+		struct semaphore sema;
+		struct rw_semaphore rw_sema;
+	} obj;
+	MALI_DEBUG_CODE(
+		/** original flags for debug checking */
+		_mali_osk_lock_flags_t orig_flags;
+	); /* MALI_DEBUG_CODE */
+};
+
+/* Allocate a lock and pick its backing primitive from @flags:
+ * spinlock > IRQ-spinlock > non-interruptible rw-semaphore > semaphore
+ * (non-interruptible or interruptible).  @initial must be 0; @order is
+ * unused here.  Returns NULL on allocation failure. */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order )
+{
+	_mali_osk_lock_t *lock = NULL;
+
+	/* Validate parameters: */
+	/* Flags acceptable */
+	MALI_DEBUG_ASSERT( 0 == ( flags & ~(_MALI_OSK_LOCKFLAG_SPINLOCK
+				| _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ
+				| _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+				| _MALI_OSK_LOCKFLAG_READERWRITER
+				| _MALI_OSK_LOCKFLAG_ORDERED
+				| _MALI_OSK_LOCKFLAG_ONELOCK )) );
+	/* Spinlocks are always non-interruptable */
+	MALI_DEBUG_ASSERT( (((flags & _MALI_OSK_LOCKFLAG_SPINLOCK) || (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ)) && (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE))
+						|| !(flags & _MALI_OSK_LOCKFLAG_SPINLOCK));
+	/* Parameter initial SBZ - for future expansion */
+	MALI_DEBUG_ASSERT( 0 == initial );
+
+	lock = kmalloc(sizeof(_mali_osk_lock_t), GFP_KERNEL);
+
+	if ( NULL == lock )
+	{
+		return lock;
+	}
+
+	/* Determine type of mutex: */
+	/* defaults to interruptable mutex if no flags are specified */
+
+	if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK) )
+	{
+		/* Non-interruptable Spinlocks override all others */
+		lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN;
+		spin_lock_init( &lock->obj.spinlock );
+	}
+	else if ( (flags & _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ ) )
+	{
+		lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ;
+		lock->flags = 0;
+		spin_lock_init( &lock->obj.spinlock );
+	}
+	else if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE)
+			  && (flags & _MALI_OSK_LOCKFLAG_READERWRITER) )
+	{
+		lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW;
+		init_rwsem( &lock->obj.rw_sema );
+	}
+	else
+	{
+		/* Usual mutex types */
+		if ( (flags & _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE) )
+		{
+			lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT;
+		}
+		else
+		{
+			lock->type = _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX;
+		}
+
+		/* Initially unlocked */
+		sema_init( &lock->obj.sema, 1 );
+	}
+
+	MALI_DEBUG_CODE(
+		/* Debug tracking of flags */
+		lock->orig_flags = flags;
+	); /* MALI_DEBUG_CODE */
+
+	return lock;
+}
+
+/* Acquire @lock.  @mode is RW or RO; RO is only valid for locks created with
+ * the READERWRITER flag.  Returns _MALI_OSK_ERR_RESTARTSYSCALL if an
+ * interruptible wait was interrupted by a signal, _MALI_OSK_ERR_OK otherwise.
+ * For SPIN_IRQ locks the saved IRQ flags are stashed in lock->flags for the
+ * matching _mali_osk_lock_signal(). */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode)
+{
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+	/* Parameter validation */
+	MALI_DEBUG_ASSERT_POINTER( lock );
+
+	MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+					   || _MALI_OSK_LOCKMODE_RO == mode );
+
+	/* Only allow RO locks when the initial object was a Reader/Writer lock
+	 * Since information is lost on the internal locktype, we use the original
+	 * information, which is only stored when built for DEBUG */
+	MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+					   || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+	switch ( lock->type )
+	{
+	case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+		spin_lock(&lock->obj.spinlock);
+		break;
+	case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+		spin_lock_irqsave(&lock->obj.spinlock, lock->flags);
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+		/* Interruptible wait: propagate signal delivery to the caller. */
+		if ( down_interruptible(&lock->obj.sema) )
+		{
+			err = _MALI_OSK_ERR_RESTARTSYSCALL;
+		}
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+		down(&lock->obj.sema);
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+		if (mode == _MALI_OSK_LOCKMODE_RO)
+		{
+			down_read(&lock->obj.rw_sema);
+		}
+		else
+		{
+			down_write(&lock->obj.rw_sema);
+		}
+		break;
+
+	default:
+		/* Reaching here indicates a programming error, so you will not get here
+		 * on non-DEBUG builds */
+		MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+		break;
+	}
+
+	return err;
+}
+
+/* Release @lock.  @mode must match the mode used in the corresponding
+ * _mali_osk_lock_wait() call; for SPIN_IRQ locks the IRQ flags saved by
+ * the wait are restored here. */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode )
+{
+	/* Parameter validation */
+	MALI_DEBUG_ASSERT_POINTER( lock );
+
+	MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+					   || _MALI_OSK_LOCKMODE_RO == mode );
+
+	/* Only allow RO locks when the initial object was a Reader/Writer lock
+	 * Since information is lost on the internal locktype, we use the original
+	 * information, which is only stored when built for DEBUG */
+	MALI_DEBUG_ASSERT( _MALI_OSK_LOCKMODE_RW == mode
+					   || (_MALI_OSK_LOCKMODE_RO == mode && (_MALI_OSK_LOCKFLAG_READERWRITER & lock->orig_flags)) );
+
+	switch ( lock->type )
+	{
+	case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN:
+		spin_unlock(&lock->obj.spinlock);
+		break;
+	case _MALI_OSK_INTERNAL_LOCKTYPE_SPIN_IRQ:
+		spin_unlock_irqrestore(&lock->obj.spinlock, lock->flags);
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX:
+		/* FALLTHROUGH */
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT:
+		up(&lock->obj.sema);
+		break;
+
+	case _MALI_OSK_INTERNAL_LOCKTYPE_MUTEX_NONINT_RW:
+		if (mode == _MALI_OSK_LOCKMODE_RO)
+		{
+			up_read(&lock->obj.rw_sema);
+		}
+		else
+		{
+			up_write(&lock->obj.rw_sema);
+		}
+		break;
+
+	default:
+		/* Reaching here indicates a programming error, so you will not get here
+		 * on non-DEBUG builds */
+		MALI_DEBUG_PRINT_ERROR( ("Invalid internal lock type: %.8X", lock->type ) );
+		break;
+	}
+}
+
+/* Destroy a lock created by _mali_osk_lock_init(). */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock )
+{
+	MALI_DEBUG_ASSERT_POINTER( lock );
+
+	/* Spinlocks, semaphores and rw-semaphores require no explicit teardown
+	 * on Linux, so releasing the backing allocation is sufficient. */
+	kfree(lock);
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_low_level_mem.c b/drivers/media/video/samsung/mali/linux/mali_osk_low_level_mem.c
new file mode 100644
index 0000000..c0aecb8
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_low_level_mem.c
@@ -0,0 +1,599 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_low_level_mem.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include "mali_osk.h"
+#include "mali_ukk.h" /* required to hook in _mali_ukk_mem_mmap handling */
+#include "mali_kernel_common.h"
+#include "mali_kernel_linux.h"
+
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma);
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma);
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+
+/* Reference counter attached to each Mali vma via vm_private_data;
+ * cookie holds the owning mali_memory_allocation descriptor. */
+typedef struct mali_vma_usage_tracker
+{
+	int references;  /* open mmap references; last close triggers munmap */
+	u32 cookie;      /* (mali_memory_allocation *) cast to u32 */
+} mali_vma_usage_tracker;
+
+
+/* Linked list structure to hold details of all OS allocations in a particular
+ * mapping
+ */
+struct AllocationList
+{
+	struct AllocationList *next;
+	u32 offset;    /* page offset inside the user mapping */
+	u32 physaddr;  /* DMA/physical address of the backing page */
+};
+
+typedef struct AllocationList AllocationList;
+
+/* Private structure to store details of a mapping region returned
+ * from _mali_osk_mem_mapregion_init
+ */
+struct MappingInfo
+{
+	struct vm_area_struct *vma;
+	struct AllocationList *list;  /* pages handed out for this mapping */
+};
+
+typedef struct MappingInfo Ma
ppingInfo;
+
+
+static u32 _kernel_page_allocate(void);
+static void _kernel_page_release(u32 physical_address);
+static AllocationList * _allocation_list_item_get(void);
+static void _allocation_list_item_release(AllocationList * item);
+
+
+/* Variable declarations */
+/* Protects the pre_allocated_memory pool below.
+ * NOTE(review): not declared static, so it pollutes the kernel global
+ * namespace -- presumably unintentional; confirm no external users. */
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
+spinlock_t allocation_list_spinlock = SPIN_LOCK_UNLOCKED;
+#else
+DEFINE_SPINLOCK(allocation_list_spinlock);
+#endif
+
+/* Pool of released pages kept for fast re-use, capped at ..._size_max bytes. */
+static AllocationList * pre_allocated_memory = (AllocationList*) NULL ;
+static int pre_allocated_memory_size_current = 0;
+#ifdef MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB
+ static int pre_allocated_memory_size_max = MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 1024 * 1024;
+#else
+ static int pre_allocated_memory_size_max = 6 * 1024 * 1024; /* 6 MiB */
+#endif
+
+/* vma callbacks for Mali user-space mappings; the fault hook always
+ * raises SIGBUS since Mali memory is fully pre-faulted. */
+static struct vm_operations_struct mali_kernel_vm_ops =
+{
+	.open = mali_kernel_memory_vma_open,
+	.close = mali_kernel_memory_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	.fault = mali_kernel_memory_cpu_page_fault_handler
+#else
+	.nopfn = mali_kernel_memory_cpu_page_fault_handler
+#endif
+};
+
+
+/** Initialise the low level memory subsystem: start with an empty
+ * pre-allocated page pool. */
+void mali_osk_low_level_mem_init(void)
+{
+	pre_allocated_memory = (AllocationList*) NULL ;
+}
+
+/** Tear down the low level memory subsystem: release every page still
+ * held in the pre-allocated pool and reset the pool accounting. */
+void mali_osk_low_level_mem_term(void)
+{
+	while ( NULL != pre_allocated_memory )
+	{
+		AllocationList *item;
+		item = pre_allocated_memory;
+		pre_allocated_memory = item->next;
+		_kernel_page_release(item->physaddr);
+		_mali_osk_free( item );
+	}
+	pre_allocated_memory_size_current = 0;
+}
+
+/** Allocate one zeroed highmem page for Mali use and flush it from the
+ * CPU caches by establishing a bidirectional DMA mapping.
+ * @return the DMA/physical address of the page, or 0 on failure. */
+static u32 _kernel_page_allocate(void)
+{
+	struct page *new_page;
+	u32 linux_phys_addr;
+
+	new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
+
+	if ( NULL == new_page )
+	{
+		return 0;
+	}
+
+	/* Ensure page is flushed from CPU caches. */
+	linux_phys_addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	/* FIX: dma_map_page() can fail; previously its error cookie was
+	 * handed to callers as if it were a valid physical address. */
+	if (dma_mapping_error(NULL, linux_phys_addr))
+	{
+		__free_page(new_page);
+		return 0;
+	}
+
+	return linux_phys_addr;
+}
+
+/** Release a page obtained from _kernel_page_allocate(): tear down its
+ * DMA mapping and return the page to the kernel allocator.
+ * @param physical_address DMA address returned by _kernel_page_allocate() */
+static void _kernel_page_release(u32 physical_address)
+{
+	struct page *unmap_page;
+
+	/* Removed dead "#if 1" wrapper around this call -- it was always on. */
+	dma_unmap_page(NULL, physical_address, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	unmap_page = pfn_to_page( physical_address >> PAGE_SHIFT );
+	MALI_DEBUG_ASSERT_POINTER( unmap_page );
+	__free_page( unmap_page );
+}
+
+/** Obtain an AllocationList item with a backing page.
+ * Fast path: pop a recycled item from the pre-allocated pool (under the
+ * pool spinlock). Slow path: allocate a fresh item and page.
+ * @return item with valid physaddr, or NULL on out-of-memory. */
+static AllocationList * _allocation_list_item_get(void)
+{
+	AllocationList *item = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&allocation_list_spinlock,flags);
+	if ( pre_allocated_memory )
+	{
+		item = pre_allocated_memory;
+		pre_allocated_memory = pre_allocated_memory->next;
+		pre_allocated_memory_size_current -= PAGE_SIZE;
+
+		spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+		return item;
+	}
+	spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+	/* Pool empty: allocate outside the spinlock (may sleep). */
+	item = _mali_osk_malloc( sizeof(AllocationList) );
+	if ( NULL == item)
+	{
+		return NULL;
+	}
+
+	item->physaddr = _kernel_page_allocate();
+	if ( 0 == item->physaddr )
+	{
+		/* Non-fatal error condition, out of memory. Upper levels will handle this. */
+		_mali_osk_free( item );
+		return NULL;
+	}
+	return item;
+}
+
+/** Return @item to the pool (if below the size cap) or free its page and
+ * the item itself.  Counterpart of _allocation_list_item_get(). */
+static void _allocation_list_item_release(AllocationList * item)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&allocation_list_spinlock,flags);
+	if ( pre_allocated_memory_size_current < pre_allocated_memory_size_max)
+	{
+		item->next = pre_allocated_memory;
+		pre_allocated_memory = item;
+		pre_allocated_memory_size_current += PAGE_SIZE;
+		spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+		return;
+	}
+	spin_unlock_irqrestore(&allocation_list_spinlock,flags);
+
+	/* Pool full: actually release the page. */
+	_kernel_page_release(item->physaddr);
+	_mali_osk_free( item );
+}
+
+
+/** CPU fault handler for Mali vmas.  Always fails (SIGBUS/NOPFN_SIGBUS):
+ * all Mali memory is pre-faulted when mapped, so any CPU fault here means
+ * the process touched an unmapped part of the region. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	void __user * address;
+	address = vmf->virtual_address;
+#endif
+	/*
+	 * We always fail the call since all memory is pre-faulted when assigned to the process.
+	 * Only the Mali cores can use page faults to extend buffers.
+	 */
+
+	MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
+	MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+	return VM_FAULT_SIGBUS;
+#else
+	return NOPFN_SIGBUS;
+#endif
+}
+
+/** vma open callback: bump the reference count of the usage tracker so a
+ * forked/duplicated vma keeps the mapping alive. */
+static void mali_kernel_memory_vma_open(struct vm_area_struct * vma)
+{
+	mali_vma_usage_tracker * vma_usage_tracker;
+	MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
+
+	vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+	vma_usage_tracker->references++;
+
+	return;
+}
+
+/** vma close callback: drop one reference; when the last reference goes
+ * away, issue the _mali_ukk_mem_munmap() for the descriptor stored in the
+ * tracker cookie.  The tracker itself is freed later by
+ * _mali_osk_mem_mapregion_term(). */
+static void mali_kernel_memory_vma_close(struct vm_area_struct * vma)
+{
+	_mali_uk_mem_munmap_s args = {0, };
+	mali_memory_allocation * descriptor;
+	mali_vma_usage_tracker * vma_usage_tracker;
+	MappingInfo *mappingInfo;
+	MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
+
+	vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
+
+	BUG_ON(!vma_usage_tracker);
+	BUG_ON(0 == vma_usage_tracker->references);
+
+	vma_usage_tracker->references--;
+
+	/* cookie was stored as the descriptor pointer at mapregion_init time */
+	descriptor = (mali_memory_allocation *)vma_usage_tracker->cookie;
+
+	/* keep MappingInfo->vma current in case the vma was duplicated */
+	mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+	mappingInfo->vma = vma;
+
+	if (0 != vma_usage_tracker->references)
+	{
+		MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", vma_usage_tracker->references));
+		return;
+	}
+
+	/** @note args->context unused, initialized to 0.
+	 * Instead, we use the memory_session from the cookie */
+
+	args.cookie = (u32)descriptor;
+	args.mapping = descriptor->mapping;
+	args.size = descriptor->size;
+
+	_mali_ukk_mem_munmap( &args );
+
+	/* vma_usage_tracker is free()d by _mali_osk_mem_mapregion_term().
+	 * In the case of the memory engine, it is called as the release function that has been registered with the engine*/
+}
+
+
+/** Full CPU memory barrier (read+write ordering). */
+void _mali_osk_mem_barrier( void )
+{
+	mb();
+}
+
+/** Write-only CPU memory barrier. */
+void _mali_osk_write_mem_barrier( void )
+{
+	wmb();
+}
+
+/** Map a physical register/IO region uncached into kernel space.
+ * @param description unused on Linux; kept for the OSK interface. */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
+{
+	return (mali_io_address)ioremap_nocache(phys, size);
+}
+
+/** Undo _mali_osk_mem_mapioregion(); phys/size unused on Linux. */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
+{
+	iounmap((void*)virt);
+}
+
+/** Allocate a page-aligned, physically contiguous, coherent region
+ * (used for Mali page tables).
+ * @param[out] phys receives the physical address of the region
+ * @param size region size in bytes; must be page aligned and non-zero
+ * @return kernel virtual address, or 0 on failure */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size )
+{
+	void * virt;
+	MALI_DEBUG_ASSERT_POINTER( phys );
+	MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+	MALI_DEBUG_ASSERT( 0 != size );
+
+	/* dma_alloc_* uses a limited region of address space. On most arch/marchs
+	 * 2 to 14 MiB is available. This should be enough for the page tables, which
+	 * currently is the only user of this function. */
+	virt = dma_alloc_coherent(NULL, size, phys, GFP_KERNEL | GFP_DMA );
+
+	if ( NULL == virt )
+	{
+		MALI_DEBUG_PRINT(5, ("allocioregion: Failed to allocate Pagetable memory, size=0x%.8X\n", size ));
+		return 0;
+	}
+
+	/* FIX: the debug print passed the pointer 'phys' instead of the returned
+	 * physical address, and formatted pointers with %x.  Print the actual
+	 * values with matching specifiers, after the NULL check so *phys is valid. */
+	MALI_DEBUG_PRINT(3, ("Page table virt: %p = dma_alloc_coherent(size:%d, phys:0x%x, )\n", virt, size, *phys));
+
+	MALI_DEBUG_ASSERT( 0 == (*phys & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+	return (mali_io_address)virt;
+}
+
+/** Free a region obtained from _mali_osk_mem_allocioregion(). */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address virt )
+{
+	MALI_DEBUG_ASSERT_POINTER( (void*)virt );
+	MALI_DEBUG_ASSERT( 0 != size );
+	MALI_DEBUG_ASSERT( 0 == (phys & ( (1 << PAGE_SHIFT) - 1 )) );
+
+	dma_free_coherent(NULL, size, virt, phys);
+}
+
+/** Reserve an MMIO range with the kernel resource tracker.
+ * @return _MALI_OSK_ERR_NOMEM if the range is already claimed. */
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
+{
+	return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
+}
+
+/** Release an MMIO range reserved by _mali_osk_mem_reqregion(). */
+void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
+{
+	release_mem_region(phys, size);
+}
+
+/** Relaxed (unordered) 32-bit register write at addr+offset.
+ * Value is converted to little-endian before the raw write. */
+void inline _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val )
+{
+	__raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
+}
+
+/** Ordered 32-bit register read from addr+offset. */
+u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
+{
+	return ioread32(((u8*)addr) + offset);
+}
+
+/** Ordered 32-bit register write at addr+offset. */
+void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
+{
+	iowrite32(val, ((u8*)addr) + offset);
+}
+
+/** No-op: cached Mali memory is not supported by this implementation. */
+void _mali_osk_cache_flushall( void )
+{
+	/** @note Cached memory is not currently supported in this implementation */
+}
+
+/** Ensure writes to an uncached mapping have reached memory; a write
+ * barrier is sufficient here since the mapping is uncached. */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
+{
+	_mali_osk_write_mem_barrier();
+}
+
+/** Prepare a user-space mapping region for @descriptor.
+ * On entry process_addr_mapping_info holds the vma from the mmap call;
+ * this is replaced by a MappingInfo, a usage tracker is attached to the
+ * vma, and the vma is flagged IO/RESERVED/DONTCOPY with write-combined
+ * page protection.
+ * @return _MALI_OSK_ERR_OK, or _MALI_OSK_ERR_FAULT on bad input / OOM */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor )
+{
+	struct vm_area_struct *vma;
+	mali_vma_usage_tracker * vma_usage_tracker;
+	MappingInfo *mappingInfo;
+
+	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+	MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+	vma = (struct vm_area_struct*)descriptor->process_addr_mapping_info;
+
+	if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+	/* Re-write the process_addr_mapping_info */
+	mappingInfo = _mali_osk_calloc( 1, sizeof(MappingInfo) );
+
+	if ( NULL == mappingInfo ) return _MALI_OSK_ERR_FAULT;
+
+	vma_usage_tracker = _mali_osk_calloc( 1, sizeof(mali_vma_usage_tracker) );
+
+	if (NULL == vma_usage_tracker)
+	{
+		MALI_DEBUG_PRINT(2, ("Failed to allocate memory to track memory usage\n"));
+		_mali_osk_free( mappingInfo );
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	mappingInfo->vma = vma;
+	descriptor->process_addr_mapping_info = mappingInfo;
+
+	/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+	descriptor->mapping = (void __user*)vma->vm_start;
+	/* list member is already NULL */
+
+	/*
+	  set some bits which indicate that:
+	  The memory is IO memory, meaning that no paging is to be performed and the memory should not be included in crash dumps
+	  The memory is reserved, meaning that it's present and can never be paged out (see also previous entry)
+	*/
+	vma->vm_flags |= VM_IO;
+	vma->vm_flags |= VM_RESERVED;
+	vma->vm_flags |= VM_DONTCOPY;
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
+
+	vma_usage_tracker->references = 1; /* set initial reference count to be 1 as vma_open won't be called for the first mmap call */
+	vma_usage_tracker->cookie = (u32)descriptor; /* cookie for munmap */
+
+	vma->vm_private_data = vma_usage_tracker;
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/** Tear down the state created by _mali_osk_mem_mapregion_init():
+ * free the vma usage tracker and the MappingInfo.  Linux removes the
+ * actual mapping as part of munmap; only our bookkeeping is released.
+ * Only called after a successful mapregion_init. */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor )
+{
+	struct vm_area_struct* vma;
+	mali_vma_usage_tracker * vma_usage_tracker;
+	MappingInfo *mappingInfo;
+
+	if (NULL == descriptor) return;
+
+	MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+	mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+	MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+	/* Linux does the right thing as part of munmap to remove the mapping
+	 * All that remains is that we remove the vma_usage_tracker setup in init() */
+	vma = mappingInfo->vma;
+
+	MALI_DEBUG_ASSERT_POINTER( vma );
+
+	/* ASSERT that there are no allocations on the list. Unmap should've been
+	 * called on all OS allocations. */
+	MALI_DEBUG_ASSERT( NULL == mappingInfo->list );
+
+	vma_usage_tracker = vma->vm_private_data;
+
+	/* We only get called if mem_mapregion_init succeeded */
+	_mali_osk_free(vma_usage_tracker);
+
+	_mali_osk_free( mappingInfo );
+	return;
+}
+
+/** Map physical memory into the user-space region of @descriptor at
+ * @offset.
+ * If *phys_addr is MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC a
+ * fresh OS page is allocated, remapped into the vma, recorded on the
+ * MappingInfo list and its address written back through *phys_addr.
+ * Otherwise the supplied (page-aligned) physical address is remapped.
+ * NOTE(review): the OS-allocated branch obtains exactly one page but
+ * passes the full @size to remap_pfn_range -- presumably callers always
+ * request one page at a time for the magic case; confirm at call sites.
+ * @return _MALI_OSK_ERR_OK, _MALI_OSK_ERR_NOMEM, _MALI_OSK_ERR_FAULT or
+ *         _MALI_OSK_ERR_INVALID_ARGS */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size )
+{
+	struct vm_area_struct *vma;
+	MappingInfo *mappingInfo;
+
+	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+	MALI_DEBUG_ASSERT_POINTER( phys_addr );
+
+	MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+	MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+	MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK));
+
+	if (NULL == descriptor->mapping) return _MALI_OSK_ERR_INVALID_ARGS;
+
+	if (size > (descriptor->size - offset))
+	{
+		MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_map: virtual memory area not large enough to map physical 0x%x size %x into area 0x%x at offset 0x%xr\n",
+		                   *phys_addr, size, descriptor->mapping, offset));
+		return _MALI_OSK_ERR_FAULT;
+	}
+
+	mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+	MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+	vma = mappingInfo->vma;
+
+	if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+	MALI_DEBUG_PRINT(7, ("Process map: mapping 0x%08X to process address 0x%08lX length 0x%08X\n", *phys_addr, (long unsigned int)(descriptor->mapping + offset), size));
+
+	if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr )
+	{
+		_mali_osk_errcode_t ret;
+		AllocationList *alloc_item;
+		u32 linux_phys_frame_num;
+
+		/* OS-allocated case: take a page from the pool / allocator */
+		alloc_item = _allocation_list_item_get();
+		if (NULL == alloc_item)
+		{
+			MALI_DEBUG_PRINT(1, ("Failed to allocate list item\n"));
+			return _MALI_OSK_ERR_NOMEM;
+		}
+
+		linux_phys_frame_num = alloc_item->physaddr >> PAGE_SHIFT;
+
+		ret = ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, linux_phys_frame_num, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+		if ( ret != _MALI_OSK_ERR_OK)
+		{
+			/* return the page to the pool on remap failure */
+			_allocation_list_item_release(alloc_item);
+			return ret;
+		}
+
+		/* Put our alloc_item into the list of allocations on success */
+		alloc_item->next = mappingInfo->list;
+		alloc_item->offset = offset;
+
+		/*alloc_item->physaddr = linux_phys_addr;*/
+		mappingInfo->list = alloc_item;
+
+		/* Write out new physical address on success */
+		*phys_addr = alloc_item->physaddr;
+
+		return ret;
+	}
+
+	/* Otherwise, Use the supplied physical address */
+
+	/* ASSERT that supplied phys_addr is page aligned */
+	MALI_DEBUG_ASSERT( 0 == ((*phys_addr) & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+	return ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, *phys_addr >> PAGE_SHIFT, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
+
+}
+
+/** Undo _mali_osk_mem_mapregion_map() for [offset, offset+size).
+ * If the range was OS-allocated (flag set), each page is looked up on the
+ * MappingInfo list by offset, released back to the kernel and unlinked;
+ * pages not found are skipped with a diagnostic.  The user-space mapping
+ * itself is removed by Linux during munmap. */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags )
+{
+	MappingInfo *mappingInfo;
+
+	if (NULL == descriptor) return;
+
+	MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
+
+	MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+	MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK) );
+
+	if (NULL == descriptor->mapping) return;
+
+	if (size > (descriptor->size - offset))
+	{
+		MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_unmap: virtual memory area not large enough to unmap size %x from area 0x%x at offset 0x%x\n",
+		                    size, descriptor->mapping, offset));
+		return;
+	}
+	mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
+
+	MALI_DEBUG_ASSERT_POINTER( mappingInfo );
+
+	if ( 0 != (flags & _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR) )
+	{
+		/* This physical RAM was allocated in _mali_osk_mem_mapregion_map and
+		 * so needs to be unmapped
+		 */
+		while (size)
+		{
+			/* First find the allocation in the list of allocations */
+			AllocationList *alloc = mappingInfo->list;
+			AllocationList **prev = &(mappingInfo->list);
+			while (NULL != alloc && alloc->offset != offset)
+			{
+				prev = &(alloc->next);
+				alloc = alloc->next;
+			}
+			if (alloc == NULL) {
+				/* best effort: skip this page and keep unmapping the rest */
+				MALI_DEBUG_PRINT(1, ("Unmapping memory that isn't mapped\n"));
+				size -= _MALI_OSK_CPU_PAGE_SIZE;
+				offset += _MALI_OSK_CPU_PAGE_SIZE;
+				continue;
+			}
+
+			_kernel_page_release(alloc->physaddr);
+
+			/* Remove the allocation from the list */
+			*prev = alloc->next;
+			_mali_osk_free( alloc );
+
+			/* Move onto the next allocation */
+			size -= _MALI_OSK_CPU_PAGE_SIZE;
+			offset += _MALI_OSK_CPU_PAGE_SIZE;
+		}
+	}
+
+	/* Linux does the right thing as part of munmap to remove the mapping */
+
+	return;
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_mali.c b/drivers/media/video/samsung/mali/linux/mali_osk_mali.c
new file mode 100644
index 0000000..ab571c1
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_mali.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.c
+ * Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+
+#include "mali_kernel_common.h" /* MALI_xxx macros */
+#include "mali_osk.h" /* kernel side OS functions */
+#include "mali_uk_types.h"
+#include "mali_kernel_linux.h" /* exports initialize/terminate_kernel_device() definition of mali_osk_low_level_mem_init() and term */
+#include <mach/irqs.h>
+#include "arch/config.h" /* contains the configuration of the arch we are compiling for */
+
+/* is called from mali_kernel_constructor in common code */
+/* is called from mali_kernel_constructor in common code */
+/** Initialise the Linux OSK layer: register the kernel device and
+ * prepare the low level memory subsystem.
+ * @return _MALI_OSK_ERR_FAULT if device registration fails */
+_mali_osk_errcode_t _mali_osk_init( void )
+{
+	if (0 != initialize_kernel_device()) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+	mali_osk_low_level_mem_init();
+
+	MALI_SUCCESS;
+}
+
+/* is called from mali_kernel_deconstructor in common code */
+/** Reverse of _mali_osk_init(): drain the low level memory pool and
+ * unregister the kernel device. */
+void _mali_osk_term( void )
+{
+	mali_osk_low_level_mem_term();
+	terminate_kernel_device();
+}
+
+/** Hand out the static resource table from arch/config.h.
+ * Returned pointers reference static data; nothing is allocated. */
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources )
+{
+	*num_resources = sizeof(arch_configuration) / sizeof(arch_configuration[0]);
+	*arch_config = arch_configuration;
+	return _MALI_OSK_ERR_OK;
+}
+
+/** No-op: the resource table is static and owns no memory. */
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources )
+{
+	/* Nothing to do */
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_math.c b/drivers/media/video/samsung/mali/linux/mali_osk_math.c
new file mode 100644
index 0000000..3e62e51
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_math.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_math.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/bitops.h>
+
+/** Count leading zero bits of a 32-bit value.
+ * fls() returns the index of the highest set bit (1-based, 0 for input 0),
+ * so this yields 32 for input 0 and 0 for input with bit 31 set. */
+u32 inline _mali_osk_clz( u32 input )
+{
+	return 32-fls(input);
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_memory.c b/drivers/media/video/samsung/mali/linux/mali_osk_memory.c
new file mode 100644
index 0000000..871505a
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_memory.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/** Zeroed heap allocation of n*size bytes (GFP_KERNEL; may sleep). */
+void inline *_mali_osk_calloc( u32 n, u32 size )
+{
+	return kcalloc(n, size, GFP_KERNEL);
+}
+
+/** Uninitialised heap allocation (GFP_KERNEL; may sleep). */
+void inline *_mali_osk_malloc( u32 size )
+{
+	return kmalloc(size, GFP_KERNEL);
+}
+
+/** Free memory from _mali_osk_malloc/_mali_osk_calloc; NULL is a no-op. */
+void inline _mali_osk_free( void *ptr )
+{
+	kfree(ptr);
+}
+
+/** Virtually contiguous allocation for large buffers (may sleep). */
+void inline *_mali_osk_valloc( u32 size )
+{
+	return vmalloc(size);
+}
+
+/** Free memory from _mali_osk_valloc. */
+void inline _mali_osk_vfree( void *ptr )
+{
+	vfree(ptr);
+}
+
+/** memcpy wrapper; src and dst must not overlap. @return dst */
+void inline *_mali_osk_memcpy( void *dst, const void *src, u32 len )
+{
+	return memcpy(dst, src, len);
+}
+
+/** memset wrapper; fills n bytes of s with (unsigned char)c. @return s */
+void inline *_mali_osk_memset( void *s, u32 c, u32 n )
+{
+	return memset(s, c, n);
+}
+
+/** Always permits further allocation on Linux; the max_allocated
+ * threshold is ignored here by design. */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated )
+{
+	/* No need to prevent an out-of-memory dialogue appearing on Linux,
+	 * so we always return MALI_TRUE.
+	 */
+	return MALI_TRUE;
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_misc.c b/drivers/media/video/samsung/mali/linux/mali_osk_misc.c
new file mode 100644
index 0000000..e37e8c0
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_misc.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_misc.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include "mali_osk.h"
+
+/** printf-style debug message routed to the kernel log via vprintk. */
+void _mali_osk_dbgmsg( const char *fmt, ... )
+{
+	va_list args;
+	va_start(args, fmt);
+	vprintk(fmt, args);
+	va_end(args);
+}
+
+/** snprintf wrapper.
+ * @return number of characters that would have been written (vsnprintf
+ * semantics).  NOTE(review): a negative vsnprintf error would be cast to
+ * a huge u32 here -- presumably callers use well-formed formats only. */
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... )
+{
+	int res;
+	va_list args;
+	va_start(args, fmt);
+
+	res = vsnprintf(buf, (size_t)size, fmt, args);
+
+	va_end(args);
+	return res;
+}
+
+/** Deliberately crash the current context to obtain a kernel oops and
+ * stack dump.  The store must be volatile: a plain NULL-pointer write is
+ * undefined behaviour in C and the compiler may delete it, silently
+ * turning the abort into a fall-through. */
+void _mali_osk_abort(void)
+{
+	/* make a simple fault by dereferencing a NULL pointer */
+	*(volatile int *)0 = 0;
+}
+
+/** Breakpoint hook; no debugger trap is wired up, so this simply
+ * aborts (crashes) like _mali_osk_abort(). */
+void _mali_osk_break(void)
+{
+	_mali_osk_abort();
+}
+
+/** @return process identifier of the calling context. */
+u32 _mali_osk_get_pid(void)
+{
+	/* Thread group ID is the process ID on Linux */
+	return (u32)current->tgid;
+}
+
+/** @return thread identifier of the calling context. */
+u32 _mali_osk_get_tid(void)
+{
+	/* pid is actually identifying the thread on Linux */
+	return (u32)current->pid;
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_notification.c b/drivers/media/video/samsung/mali/linux/mali_osk_notification.c
new file mode 100644
index 0000000..74a18e8
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_notification.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_notification.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+/**
+ * Declaration of the notification queue object type
+ * Contains a linked list of notification pending delivery to user space.
+ * It also contains a wait queue of exclusive waiters blocked in the ioctl
+ * When a new notification is posted a single thread is resumed.
+ */
+struct _mali_osk_notification_queue_t_struct
+{
+	struct semaphore mutex; /**< Mutex protecting the list */
+	wait_queue_head_t receive_queue; /**< Threads waiting for new entries to the queue */
+	struct list_head head; /**< List of notifications waiting to be picked up */
+};
+
+/* Wraps the public notification payload with the list linkage used to
+ * queue it; recovered from the payload via container_of(). */
+typedef struct _mali_osk_notification_wrapper_t_struct
+{
+	struct list_head list; /**< Internal linked list variable */
+	_mali_osk_notification_t data; /**< Notification data */
+} _mali_osk_notification_wrapper_t;
+
+/** Create an empty notification queue (binary-semaphore mutex, empty
+ * list, empty wait queue).
+ * @return new queue, or NULL on out-of-memory */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void )
+{
+	_mali_osk_notification_queue_t *	result;
+
+	result = (_mali_osk_notification_queue_t *)kmalloc(sizeof(_mali_osk_notification_queue_t), GFP_KERNEL);
+	if (NULL == result) return NULL;
+
+	sema_init(&result->mutex, 1);
+	init_waitqueue_head(&result->receive_queue);
+	INIT_LIST_HEAD(&result->head);
+
+	return result;
+}
+
+/** Create a notification object of @type with a @size-byte result buffer
+ * allocated inline after the wrapper (single kmalloc).
+ * The magic value 0x31415926 is stamped in so the dequeue path can detect
+ * corrupted entries.
+ * @return the public payload part, or NULL on out-of-memory */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size )
+{
+	/* OPT Recycling of notification objects */
+	_mali_osk_notification_wrapper_t *notification;
+
+	notification = (_mali_osk_notification_wrapper_t *)kmalloc( sizeof(_mali_osk_notification_wrapper_t) + size, GFP_KERNEL );
+	if (NULL == notification)
+	{
+		MALI_DEBUG_PRINT(1, ("Failed to create a notification object\n"));
+		return NULL;
+	}
+
+	/* Init the list */
+	INIT_LIST_HEAD(&notification->list);
+
+	if (0 != size)
+	{
+		/* buffer lives immediately after the wrapper in the same block */
+		notification->data.result_buffer = ((u8*)notification) + sizeof(_mali_osk_notification_wrapper_t);
+	}
+	else
+	{
+		notification->data.result_buffer = NULL;
+	}
+
+	/* set up the non-allocating fields */
+	notification->data.magic_code = 0x31415926;
+	notification->data.notification_type = type;
+	notification->data.result_buffer_size = size;
+
+	/* all ok */
+	return &(notification->data);
+}
+
+/** Destroy a notification created by _mali_osk_notification_create(),
+ * unlinking it from any queue first.  @object is the public payload;
+ * the enclosing wrapper is what actually gets freed. */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object )
+{
+	_mali_osk_notification_wrapper_t *notification;
+	MALI_DEBUG_ASSERT_POINTER( object );
+
+	notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+	/* Remove from the list */
+	list_del(&notification->list);
+	/* Free the container */
+	kfree(notification);
+}
+
+/** Destroy a queue created by _mali_osk_notification_queue_init().
+ * NOTE(review): pending notifications are NOT freed here -- presumably
+ * the caller must drain the queue first or the entries leak; confirm
+ * against the common-layer callers. */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue )
+{
+	MALI_DEBUG_ASSERT_POINTER( queue );
+
+	/* not much to do, just free the memory */
+	kfree(queue);
+}
+
+/** Append @object to @queue (under the queue mutex) and wake one
+ * exclusive waiter blocked in _mali_osk_notification_queue_receive(). */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object )
+{
+	_mali_osk_notification_wrapper_t *notification;
+	MALI_DEBUG_ASSERT_POINTER( queue );
+	MALI_DEBUG_ASSERT_POINTER( object );
+
+	notification = container_of( object, _mali_osk_notification_wrapper_t, data );
+
+	/* lock queue access */
+	down(&queue->mutex);
+	/* add to list */
+	list_add_tail(&notification->list, &queue->head);
+	/* unlock the queue */
+	up(&queue->mutex);
+
+	/* and wake up one possible exclusive waiter */
+	wake_up(&queue->receive_queue);
+}
+
+/** @return non-zero when @queue holds no notifications (mutex-protected
+ * snapshot; may be stale by the time the caller acts on it). */
+static int _mali_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+	int ret;
+
+	down(&queue->mutex);
+	ret = list_empty(&queue->head);
+	up(&queue->mutex);
+	return ret;
+}
+
+#if MALI_STATE_TRACKING
+/** Boolean wrapper of _mali_notification_queue_is_empty() for the state
+ * tracking build. */
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue )
+{
+	return _mali_notification_queue_is_empty(queue) ? MALI_TRUE : MALI_FALSE;
+}
+#endif
+
+/** Non-blocking dequeue of the oldest notification.
+ * On success *result points at the payload and _MALI_OSK_ERR_OK is
+ * returned; _MALI_OSK_ERR_ITEM_NOT_FOUND when the queue is empty.
+ * NOTE(review): when the magic check fails the entry has already been
+ * unlinked but is neither freed nor returned (ITEM_NOT_FOUND), so a
+ * corrupted entry is leaked -- apparently deliberate damage containment;
+ * confirm intent. */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+	_mali_osk_errcode_t ret = _MALI_OSK_ERR_ITEM_NOT_FOUND;
+	_mali_osk_notification_wrapper_t *wrapper_object;
+
+	down(&queue->mutex);
+
+	if (!list_empty(&queue->head))
+	{
+		wrapper_object = list_entry(queue->head.next, _mali_osk_notification_wrapper_t, list);
+		*result = &(wrapper_object->data);
+		list_del_init(&wrapper_object->list);
+
+		if (wrapper_object->data.magic_code != 0x31415926) {
+			MALI_PRINT(("SEC WARNING : list entry magic_code not match : %x\n", wrapper_object->data.magic_code));
+			MALI_PRINT(("SEC WARNING : list entry notification type : %x\n", wrapper_object->data.notification_type));
+			MALI_PRINT(("SEC WARNING : list entry result buffer size : %x\n", wrapper_object->data.result_buffer_size));
+			MALI_PRINT(("SEC WARNING : list entry result buffer : %x\n", wrapper_object->data.result_buffer));
+		} else {
+			ret = _MALI_OSK_ERR_OK;
+		}
+	}
+
+	up(&queue->mutex);
+
+	return ret;
+}
+
+/** Blocking receive: wait (interruptibly) until a notification can be
+ * dequeued from @queue.
+ * @return _MALI_OSK_ERR_OK with *result set, or
+ *         _MALI_OSK_ERR_RESTARTSYSCALL if interrupted by a signal */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result )
+{
+	/* check input */
+	MALI_DEBUG_ASSERT_POINTER( queue );
+	MALI_DEBUG_ASSERT_POINTER( result );
+
+	/* default result */
+	*result = NULL;
+
+	/* loop: another consumer may win the race after the wakeup */
+	while (_MALI_OSK_ERR_OK != _mali_osk_notification_queue_dequeue(queue, result))
+	{
+		if (wait_event_interruptible(queue->receive_queue, !_mali_notification_queue_is_empty(queue)))
+		{
+			return _MALI_OSK_ERR_RESTARTSYSCALL;
+		}
+	}
+
+	return _MALI_OSK_ERR_OK; /* all ok */
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_pm.c b/drivers/media/video/samsung/mali/linux/mali_osk_pm.c
new file mode 100644
index 0000000..2438cbc
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_pm.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_pm.c
+ * Implementation of the callback functions from common power management
+ */
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif /* CONFIG_PM_RUNTIME */
+
+#include <linux/platform_device.h>
+
+#include "mali_platform.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_pmm.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_license.h"
+#include "mali_linux_pm.h"
+#include "mali_linux_pm_testsuite.h"
+
+#if MALI_LICENSE_IS_GPL
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#ifdef CONFIG_PM_RUNTIME
+static int is_runtime =0;
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* MALI_LICENSE_IS_GPL */
+
+#if MALI_POWER_MGMT_TEST_SUITE
+
+#ifdef CONFIG_PM
+unsigned int mali_pmm_events_triggered_mask = 0;
+#endif /* CONFIG_PM */
+
+/* Power-management test-suite hook: records PMM scheduling/timeout events
+ * as bits in mali_pmm_events_triggered_mask / pwr_mgmt_status_reg while the
+ * corresponding *_recording_on flag is set.  Compiled only when
+ * MALI_POWER_MGMT_TEST_SUITE is enabled. */
+void _mali_osk_pmm_policy_events_notifications(mali_pmm_event_id mali_pmm_event)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+
+ switch (mali_pmm_event)
+ {
+ case MALI_PMM_EVENT_JOB_QUEUED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<0);
+ }
+ break;
+
+ case MALI_PMM_EVENT_JOB_SCHEDULED:
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<1);
+ }
+ break;
+
+ case MALI_PMM_EVENT_JOB_FINISHED:
+ /* JOB_FINISHED completes the queued->scheduled->finished sequence:
+ * publish the accumulated mask and stop recording. */
+ if (mali_job_scheduling_events_recording_on == 1)
+ {
+ mali_pmm_events_triggered_mask |= (1<<2);
+ mali_job_scheduling_events_recording_on = 0;
+ pwr_mgmt_status_reg = mali_pmm_events_triggered_mask;
+ }
+ break;
+
+ case MALI_PMM_EVENT_TIMEOUT:
+ if (mali_timeout_event_recording_on == 1)
+ {
+ pwr_mgmt_status_reg = (1<<3);
+ mali_timeout_event_recording_on = 0;
+ }
+ break;
+
+ default:
+
+ break;
+
+ }
+#endif /* CONFIG_PM */
+
+#endif /* MALI_LICENSE_IS_GPL */
+}
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+
+/** Called when the Mali device has completed a power-up operation.
+ * Sets the shared is_wake_up_needed flag and wakes the PM kernel thread
+ * (pm_thread, defined elsewhere).  @data is unused here.
+ */
+void _mali_osk_pmm_power_up_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+ wake_up_process(pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI OSK Power up Done\n" ));
+ return;
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+/** Called when the Mali device has completed a power-down operation.
+ * Wakes the PM kernel thread; under the power-management test suite it also
+ * publishes the PMM core list when no PMU is present.  @data is unused here.
+ */
+void _mali_osk_pmm_power_down_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+#if MALI_POWER_MGMT_TEST_SUITE
+ if (is_mali_pmu_present == 0)
+ {
+ pwr_mgmt_status_reg = _mali_pmm_cores_list();
+ }
+#endif /* MALI_POWER_MGMT_TEST_SUITE */
+ wake_up_process(pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI Power down Done\n" ));
+ return;
+
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+/** Invoked when the Mali device is idle: drops a runtime-PM usage reference.
+ * Returns 0 on success.
+ * NOTE(review): err is typed _mali_osk_errcode_t but receives the int return
+ * of pm_runtime_put_sync() (0 or negative errno) -- the two value spaces are
+ * conflated; callers appear to treat non-zero as failure, confirm.
+ */
+_mali_osk_errcode_t _mali_osk_pmm_dev_idle(void)
+{
+ _mali_osk_errcode_t err = 0;
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+
+ err = pm_runtime_put_sync(&(mali_gpu_device.dev));
+ if(err)
+ {
+ MALI_DEBUG_PRINT(4, ("OSPMM: Error in _mali_osk_pmm_dev_idle\n" ));
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_LICENSE_IS_GPL */
+ return err;
+}
+
+/** Invoked when the Mali device needs to be activated.
+ *
+ * On the first activation this enables runtime PM on the GPU platform
+ * device (ignoring child suspend state) and takes a usage reference; on
+ * subsequent calls it only takes the reference.
+ * Returns 0 on success or the negative error from pm_runtime_get_sync().
+ */
+int _mali_osk_pmm_dev_activate(void)
+{
+ /* Declared at function scope: the original declared 'err' inside the
+ * MALI_PMM_RUNTIME_JOB_CONTROL_ON block below, which made 'return err'
+ * reference an undeclared identifier (build failure) whenever
+ * CONFIG_PM_RUNTIME or either Mali option was disabled. */
+ int err = 0;
+
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM_RUNTIME
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ if(is_runtime == 0)
+ {
+ pm_suspend_ignore_children(&(mali_gpu_device.dev), true);
+ pm_runtime_enable(&(mali_gpu_device.dev));
+ err = pm_runtime_get_sync(&(mali_gpu_device.dev));
+ is_runtime = 1;
+ }
+ else
+ {
+ err = pm_runtime_get_sync(&(mali_gpu_device.dev));
+ }
+ if(err < 0)
+ {
+ MALI_PRINT(("OSPMM: Error in _mali_osk_pmm_dev_activate, err : %d\n",err ));
+ }
+#endif /* MALI_PMM_RUNTIME_JOB_CONTROL_ON */
+#endif /* CONFIG_PM_RUNTIME */
+#endif /* MALI_LICENSE_IS_GPL */
+
+ return err;
+}
+
+/* OS PM cleanup: if the OSPMM thread is still active, signal DVFS-operation
+ * completion so it is not left sleeping at shutdown. */
+void _mali_osk_pmm_ospmm_cleanup( void )
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ int thread_state;
+ thread_state = mali_get_ospmm_thread_state();
+ if (thread_state)
+ {
+ _mali_osk_pmm_dvfs_operation_done(0);
+ }
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+/* Signals completion of a DVFS operation by waking dvfs_pm_thread
+ * (defined elsewhere).  @data is unused here. */
+void _mali_osk_pmm_dvfs_operation_done(mali_pmm_message_data data)
+{
+#if MALI_LICENSE_IS_GPL
+#ifdef CONFIG_PM
+ is_wake_up_needed = 1;
+ wake_up_process(dvfs_pm_thread);
+ MALI_DEBUG_PRINT(4, ("OSPMM: MALI OSK DVFS Operation done\n" ));
+ return;
+#endif /* CONFIG_PM */
+#endif /* MALI_LICENSE_IS_GPL */
+}
+
+
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_profiling.c b/drivers/media/video/samsung/mali/linux/mali_osk_profiling.c
new file mode 100644
index 0000000..98d3937
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_profiling.c
@@ -0,0 +1,47 @@
+#include <linux/module.h>
+#include "mali_linux_trace.h"
+#include "mali_osk.h"
+
+/* The Linux trace point for hardware activity (idle vs running).
+ * Note: data0 is accepted for interface compatibility but not forwarded. */
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0)
+{
+ trace_mali_timeline_event(event_id);
+}
+
+/* The Linux trace point for hardware counters: emits the counter id and its
+ * sampled value. */
+void _mali_osk_profiling_add_counter(u32 event_id, u32 data0)
+{
+ trace_mali_hw_counter(event_id, data0);
+}
+
+/* This table stores the event to be counted by each counter.
+ * 0xFFFFFFFF is a special value which means "counter disabled"; all 17
+ * slots start disabled until gator.ko configures them via
+ * _mali_osk_counter_event() below.
+ */
+//TODO at the moment this table is indexed by the magic numbers
+//listed in gator_events_mali.c. In future these numbers should
+//be shared through the mali_linux_trace.h header
+u32 counter_table[17] = {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
+ 0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,
+ 0xFFFFFFFF};
+
+/* Called by gator.ko to populate counter_table above.
+ * @counter: hardware counter index to configure (valid range: 5..16)
+ * @event:   event id the counter should count
+ * Returns 1 when the request is recorded, 0 when @counter is out of range.
+ */
+int _mali_osk_counter_event(u32 counter, u32 event)
+{
+ /* Remember what has been set, and that a change has occured
+ * When a job actually starts the code will program the registers
+ */
+ //TODO as above these magic numbers need to be moved to a header file
+ if( counter >=5 && counter < 17 ) {
+ counter_table[counter] = event;
+
+ return 1;
+ } else {
+ /* Fixes: the original printk had no KERN_ level and used %d for
+ * unsigned u32 arguments; %u matches the argument types. */
+ printk(KERN_WARNING "mali rjc: counter out of range (%u,%u)\n", counter, event);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(_mali_osk_counter_event);
+
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_specific.h b/drivers/media/video/samsung/mali/linux/mali_osk_specific.h
new file mode 100644
index 0000000..6aacf17
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_specific.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_specific.h
+ * Defines per-OS Kernel level specifics, such as unusual workarounds for
+ * certain OSs.
+ */
+
+#ifndef __MALI_OSK_SPECIFIC_H__
+#define __MALI_OSK_SPECIFIC_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define MALI_STATIC_INLINE static inline
+#define MALI_NON_STATIC_INLINE inline
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_SPECIFIC_H__ */
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_time.c b/drivers/media/video/samsung/mali/linux/mali_osk_time.c
new file mode 100644
index 0000000..da9b865
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_time.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_time.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include "mali_osk.h"
+#include <linux/jiffies.h>
+#include <linux/time.h>
+#include <asm/delay.h>
+
+/* Non-zero when tick @ticka is after @tickb; wrap-safe via time_after().
+ * Note: ticks are truncated to 32 bits by the OSK interface. */
+int _mali_osk_time_after( u32 ticka, u32 tickb )
+{
+ return time_after((unsigned long)ticka, (unsigned long)tickb);
+}
+
+/* Convert milliseconds to OSK ticks (jiffies). */
+u32 _mali_osk_time_mstoticks( u32 ms )
+{
+ return msecs_to_jiffies(ms);
+}
+
+/* Convert OSK ticks (jiffies) to milliseconds. */
+u32 _mali_osk_time_tickstoms( u32 ticks )
+{
+ return jiffies_to_msecs(ticks);
+}
+
+/* Current tick count (jiffies, truncated to 32 bits). */
+u32 _mali_osk_time_tickcount( void )
+{
+ return jiffies;
+}
+
+/* Busy-wait for @usecs microseconds (does not sleep). */
+void _mali_osk_time_ubusydelay( u32 usecs )
+{
+ udelay(usecs);
+}
+
+/* Wall-clock time in nanoseconds since the epoch (getnstimeofday-based,
+ * so not monotonic: subject to clock adjustments). */
+u64 _mali_osk_time_get_ns( void )
+{
+ struct timespec tsval;
+ getnstimeofday(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_osk_timers.c b/drivers/media/video/samsung/mali/linux/mali_osk_timers.c
new file mode 100644
index 0000000..0454756
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_osk_timers.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_timers.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/* Opaque OSK timer: a thin wrapper around a kernel timer_list. */
+struct _mali_osk_timer_t_struct
+{
+ struct timer_list timer;
+};
+
+/* Signature expected by timer_list.function (pre-4.15 kernel API). */
+typedef void (*timer_timeout_function_t)(unsigned long);
+
+/* Allocate and initialise a timer.  Returns NULL on allocation failure.
+ * The caller must release it with _mali_osk_timer_term(). */
+_mali_osk_timer_t *_mali_osk_timer_init(void)
+{
+ _mali_osk_timer_t *t = (_mali_osk_timer_t*)kmalloc(sizeof(_mali_osk_timer_t), GFP_KERNEL);
+ if (NULL != t) init_timer(&t->timer);
+ return t;
+}
+
+/* Arm @tim to fire @ticks_to_expire ticks from now.  The callback must have
+ * been set with _mali_osk_timer_setcallback() first. */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.expires = _mali_osk_time_tickcount() + ticks_to_expire;
+ add_timer(&(tim->timer));
+}
+
+/* (Re)arm @tim to fire at absolute tick @expiry_tick (mod_timer semantics:
+ * works whether or not the timer is currently pending). */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick)
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ mod_timer(&(tim->timer), expiry_tick);
+}
+
+/* Cancel @tim, waiting for a running callback to finish (del_timer_sync);
+ * must not be called from the timer's own callback. */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ del_timer_sync(&(tim->timer));
+}
+
+/* Install @callback (invoked with @data) for @tim.  The OSK callback type is
+ * cast to the kernel's unsigned-long-argument timer signature. */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ tim->timer.data = (unsigned long)data;
+ tim->timer.function = (timer_timeout_function_t)callback;
+}
+
+/* Free @tim.  NOTE(review): does not cancel a pending timer first -- callers
+ * are presumably required to call _mali_osk_timer_del() before term; confirm. */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim )
+{
+ MALI_DEBUG_ASSERT_POINTER(tim);
+ kfree(tim);
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_ukk_core.c b/drivers/media/video/samsung/mali/linux/mali_ukk_core.c
new file mode 100644
index 0000000..59eafe2
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_ukk_core.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <linux/slab.h> /* memort allocation functions */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+/* ioctl wrapper: negotiate the U/K API version.  Copies the requested
+ * version in, calls _mali_ukk_get_api_version(), and copies the negotiated
+ * version and compatibility flag back to user space.
+ * Returns 0, -EINVAL, -EFAULT, or a mapped _mali_osk_errcode_t. */
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs)
+{
+ _mali_uk_get_api_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_api_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ if (0 != put_user(kargs.compatible, &uargs->compatible)) return -EFAULT;
+
+ return 0;
+}
+
+/* ioctl wrapper: report the buffer size user space must allocate before
+ * calling get_system_info_wrapper().  Returns 0 or a negative errno. */
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs)
+{
+ _mali_uk_get_system_info_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_system_info_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+/* ioctl wrapper: fill a user-supplied buffer with the Mali system info.
+ * A kernel-side bounce buffer is used so _mali_ukk_get_system_info() can
+ * fix up internal pointers for the user-space destination (passed through
+ * ukk_private).  Returns 0 or a negative errno.
+ * NOTE(review): kargs.size comes straight from user space and is passed to
+ * kmalloc() unbounded -- a hostile caller can request huge allocations;
+ * consider clamping against the size reported by
+ * _mali_ukk_get_system_info_size(). */
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs)
+{
+ _mali_uk_get_system_info_s kargs;
+ _mali_osk_errcode_t err;
+ _mali_system_info *system_info_user;
+ _mali_system_info *system_info_kernel;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ if (0 != get_user(kargs.system_info, &uargs->system_info)) return -EFAULT;
+ if (0 != get_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ /* A temporary kernel buffer for the system_info datastructure is passed through the system_info
+ * member. The ukk_private member will point to the user space destination of this buffer so
+ * that _mali_ukk_get_system_info() can correct the pointers in the system_info correctly
+ * for user space.
+ */
+ system_info_kernel = kmalloc(kargs.size, GFP_KERNEL);
+ if (NULL == system_info_kernel) return -EFAULT;
+
+ system_info_user = kargs.system_info;
+ kargs.system_info = system_info_kernel;
+ kargs.ukk_private = (u32)system_info_user;
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_get_system_info(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ kfree(system_info_kernel);
+ return map_errcode(err);
+ }
+
+ if (0 != copy_to_user(system_info_user, system_info_kernel, kargs.size))
+ {
+ kfree(system_info_kernel);
+ return -EFAULT;
+ }
+
+ kfree(system_info_kernel);
+ return 0;
+}
+
+/* ioctl wrapper: block until a notification arrives for this session, then
+ * copy it to user space.  On CORE_SHUTDOWN_IN_PROGRESS only the type field
+ * is returned, so the rest of the user struct is left untouched.
+ * Returns 0 or a negative errno. */
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
+{
+ _mali_uk_wait_for_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_wait_for_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if(_MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS != kargs.type)
+ {
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_wait_for_notification_s))) return -EFAULT;
+ }
+ else
+ {
+ if (0 != put_user(kargs.type, &uargs->type)) return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: let user space post a notification of the given type onto
+ * its own session queue (e.g. to unblock a waiter).
+ * Returns 0 or a negative errno. */
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs)
+{
+ _mali_uk_post_notification_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ if (0 != get_user(kargs.type, &uargs->type))
+ {
+ return -EFAULT;
+ }
+
+ err = _mali_ukk_post_notification(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_ukk_gp.c b/drivers/media/video/samsung/mali/linux/mali_ukk_gp.c
new file mode 100644
index 0000000..58ff1de
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_ukk_gp.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+/* ioctl wrapper: submit a GP (vertex processor) job.  Copies the whole job
+ * descriptor in, starts the job, and copies the (possibly updated)
+ * descriptor back so user space learns the job's status.
+ * Returns 0 or a negative errno. */
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs)
+{
+ _mali_uk_gp_start_job_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ /* up-front writability check so the copy-back below is expected to succeed */
+ if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+ return -EFAULT;
+ }
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_start_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_start_job(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+ if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_gp_start_job_s)))
+ {
+ /*
+ * If this happens, then user space will not know that the job was actually started,
+ * and if we return a queued job, then user space will still think that one is still queued.
+ * This will typically lead to a deadlock in user space.
+ * This could however only happen if user space deliberately passes a user buffer which
+ * passes the access_ok(VERIFY_WRITE) check, but isn't fully writable at the time of copy_to_user().
+ * The official Mali driver will never attempt to do that, and kernel space should not be affected.
+ * That is why we do not bother to do a complex rollback in this very very very rare case.
+ */
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: abort a previously submitted GP job.  The abort itself
+ * returns no status, so only copy-in errors are reported.
+ * Returns 0 or a negative errno. */
+int gp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_abort_job_s __user *uargs)
+{
+ _mali_uk_gp_abort_job_s kargs;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_abort_job_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ _mali_ukk_gp_abort_job(&kargs);
+
+ return 0;
+}
+
+
+/* ioctl wrapper: report the GP core hardware version to user space.
+ * Returns 0 or a negative errno. */
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs)
+{
+ _mali_uk_get_gp_core_version_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_core_version(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+ return 0;
+}
+
+/* ioctl wrapper: deliver user space's response to a GP job suspension and
+ * return the cookie identifying the suspended job.
+ * Returns 0 or a negative errno. */
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs)
+{
+ _mali_uk_gp_suspend_response_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_gp_suspend_response_s))) return -EFAULT;
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_gp_suspend_response(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.cookie, &uargs->cookie)) return -EFAULT;
+
+ /* no known transactions to roll-back */
+ return 0;
+}
+
+/* ioctl wrapper: report the number of GP cores to user space.
+ * Returns 0 or a negative errno. */
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs)
+{
+ _mali_uk_get_gp_number_of_cores_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_get_gp_number_of_cores(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ /* no known transactions to roll-back */
+
+ if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+ return 0;
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_ukk_mem.c b/drivers/media/video/samsung/mali/linux/mali_ukk_mem.c
new file mode 100644
index 0000000..0b98e41
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_ukk_mem.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+/* ioctl wrapper: initialise the session's Mali memory system and return the
+ * Mali address base and size to user space.  If either put_user fails the
+ * memory system is torn down again so kernel and user state stay in sync.
+ * Returns 0 or a negative errno. */
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs)
+{
+ _mali_uk_init_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_init_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ if (0 != put_user(kargs.mali_address_base, &uargs->mali_address_base)) goto mem_init_rollback;
+ if (0 != put_user(kargs.memory_size, &uargs->memory_size)) goto mem_init_rollback;
+
+ return 0;
+
+mem_init_rollback:
+ {
+ /* inner scope deliberately shadows kargs with the term-mem argument */
+ _mali_uk_term_mem_s kargs;
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_init_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+}
+
+/* ioctl wrapper: tear down the session's Mali memory system.  @uargs is only
+ * NULL-checked; no fields are copied either way.
+ * Returns 0 or a negative errno. */
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs)
+{
+ _mali_uk_term_mem_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_term_mem(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ return map_errcode(err);
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: map externally allocated physical memory into the session's
+ * Mali address space and return a cookie identifying the mapping.  If the
+ * cookie cannot be written back, a successful mapping is unmapped again so
+ * no mapping is leaked.  Returns 0 or a negative errno. */
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument)
+{
+ _mali_uk_map_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_map_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_map_external_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_unmap_external_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_unmap_external_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_map_external_mem produced */
+ return map_errcode(err_code);
+}
+
+/* ioctl wrapper: unmap a previously mapped external memory region,
+ * identified by the cookie in the user argument struct.
+ * Returns 0 or a negative errno. */
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument)
+{
+ _mali_uk_unmap_external_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_unmap_external_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_unmap_external_mem( &uk_args );
+
+ /* Return the error that _mali_ukk_unmap_external_mem produced */
+ return map_errcode(err_code);
+}
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/* ioctl wrapper (UMP builds only): release an attached UMP memory region,
+ * identified by the cookie in the user argument struct.
+ * Returns 0 or a negative errno. */
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument)
+{
+ _mali_uk_release_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_release_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_release_ump_mem( &uk_args );
+
+ /* Return the error that _mali_ukk_release_ump_mem produced */
+ return map_errcode(err_code);
+}
+
+/* ioctl wrapper (UMP builds only): attach a UMP memory region to the session
+ * and return a cookie for it.  If the cookie cannot be written back, a
+ * successful attach is released again so nothing is leaked.
+ * Returns 0 or a negative errno. */
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument)
+{
+ _mali_uk_attach_ump_mem_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_attach_ump_mem_s)) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_attach_ump_mem( &uk_args );
+
+ if (0 != put_user(uk_args.cookie, &argument->cookie))
+ {
+ if (_MALI_OSK_ERR_OK == err_code)
+ {
+ /* Rollback */
+ _mali_uk_release_ump_mem_s uk_args_unmap;
+
+ uk_args_unmap.ctx = session_data;
+ uk_args_unmap.cookie = uk_args.cookie;
+ err_code = _mali_ukk_release_ump_mem( &uk_args_unmap );
+ if (_MALI_OSK_ERR_OK != err_code)
+ {
+ MALI_DEBUG_PRINT(4, ("reverting _mali_ukk_attach_mem, as a result of failing put_user(), failed\n"));
+ }
+ }
+ return -EFAULT;
+ }
+
+ /* Return the error that _mali_ukk_attach_ump_mem produced */
+ return map_errcode(err_code);
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+/* ioctl wrapper: report the buffer size user space must allocate before
+ * requesting an MMU page-table dump.  Returns 0 or a negative errno. */
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs)
+{
+ _mali_uk_query_mmu_page_table_dump_size_s kargs;
+ _mali_osk_errcode_t err;
+
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+ kargs.ctx = session_data;
+
+ err = _mali_ukk_query_mmu_page_table_dump_size(&kargs);
+ if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+ if (0 != put_user(kargs.size, &uargs->size)) return -EFAULT;
+
+ return 0;
+}
+
+/* ioctl wrapper: dump the session's MMU page tables into a user buffer.
+ * The dump is built in a kernel vmalloc bounce buffer, copied out, and the
+ * internal register_writes / page_table_dump pointers are rebased from
+ * kernel to user addresses before being returned.
+ * Returns 0, -EFAULT, -ENOMEM, or a mapped _mali_osk_errcode_t.
+ * NOTE(review): kargs.size is user-controlled and passed to
+ * _mali_osk_valloc() unbounded -- consider clamping against the size from
+ * mem_query_mmu_page_table_dump_size_wrapper(). */
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs)
+{
+ _mali_uk_dump_mmu_page_table_s kargs;
+ _mali_osk_errcode_t err;
+ void *buffer;
+ int rc = -EFAULT;
+
+ /* validate input */
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ /* the session_data pointer was validated by caller */
+
+ /* NULL so the err_exit path can tell whether the valloc happened */
+ kargs.buffer = NULL;
+
+ /* get location of user buffer */
+ if (0 != get_user(buffer, &uargs->buffer)) goto err_exit;
+ /* get size of mmu page table info buffer from user space */
+ if ( 0 != get_user(kargs.size, &uargs->size) ) goto err_exit;
+ /* verify we can access the whole of the user buffer */
+ if (!access_ok(VERIFY_WRITE, buffer, kargs.size)) goto err_exit;
+
+ /* allocate temporary buffer (kernel side) to store mmu page table info */
+ kargs.buffer = _mali_osk_valloc(kargs.size);
+ if (NULL == kargs.buffer)
+ {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ kargs.ctx = session_data;
+ err = _mali_ukk_dump_mmu_page_table(&kargs);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ rc = map_errcode(err);
+ goto err_exit;
+ }
+
+ /* copy mmu page table info back to user space and update pointers */
+ if (0 != copy_to_user(uargs->buffer, kargs.buffer, kargs.size) ) goto err_exit;
+ if (0 != put_user((kargs.register_writes - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->register_writes)) goto err_exit;
+ if (0 != put_user((kargs.page_table_dump - (u32 *)kargs.buffer) + (u32 *)uargs->buffer, &uargs->page_table_dump)) goto err_exit;
+ if (0 != put_user(kargs.register_writes_size, &uargs->register_writes_size)) goto err_exit;
+ if (0 != put_user(kargs.page_table_dump_size, &uargs->page_table_dump_size)) goto err_exit;
+ rc = 0;
+
+err_exit:
+ if (kargs.buffer) _mali_osk_vfree(kargs.buffer);
+ return rc;
+}
+
+
+
+/* ioctl wrapper: allocate a "big block" of Mali memory for the session owning
+ * @filp.  The struct file pointer is smuggled through ukk_private (and
+ * cleared again before copy-out).  On a failed copy-back the allocation is
+ * freed so kernel and user views stay consistent.
+ * Returns 0 or a negative errno. */
+int mem_get_big_block_wrapper( struct file * filp, _mali_uk_get_big_block_s __user * argument )
+{
+ _mali_uk_get_big_block_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL);
+
+ /* get call arguments from user space. copy_from_user returns how many bytes which where NOT copied */
+ if ( 0 != copy_from_user(&uk_args, (void __user *)argument, sizeof(_mali_uk_get_big_block_s)) )
+ {
+ return -EFAULT;
+ }
+
+ /* This interface inserts something into the ukk_private word */
+ uk_args.ukk_private = (u32)filp;
+ uk_args.ctx = filp->private_data;
+ err_code = _mali_ukk_get_big_block( &uk_args );
+
+ /* Do not leak the private word back into user space */
+ uk_args.ukk_private = 0;
+
+ if ( _MALI_OSK_ERR_OK != err_code )
+ {
+ return map_errcode(err_code);
+ }
+
+ /* From this point on, we must roll-back any failing action to preserve the
+ * meaning of the U/K interface (e.g. when excluded) */
+
+ /* transfer response back to user space */
+ if ( 0 != copy_to_user(argument, &uk_args, sizeof(_mali_uk_get_big_block_s)) )
+ {
+ /* Roll-back - the _mali_uk_get_big_block call succeeded, so all
+ * values in uk_args will be correct */
+ _mali_uk_free_big_block_s uk_args_rollback = {0, };
+
+ uk_args_rollback.ctx = uk_args.ctx;
+ uk_args_rollback.cookie = uk_args.cookie;
+ err_code = _mali_ukk_free_big_block( &uk_args_rollback );
+
+ if ( _MALI_OSK_ERR_OK != err_code )
+ {
+ /* error in DEBUG and RELEASE */
+ MALI_PRINT_ERROR( ("Failed to rollback get_big_block: %.8X\n", (u32)err_code) );
+ }
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ioctl wrapper: free a "big block" allocation identified by the cookie in
+ * the user argument struct.  Returns 0 or a negative errno. */
+int mem_free_big_block_wrapper(struct mali_session_data *session_data, _mali_uk_free_big_block_s __user * argument)
+{
+ _mali_uk_free_big_block_s uk_args;
+ _mali_osk_errcode_t err_code;
+
+ /* validate input */
+ /* the session_data pointer was validated by caller */
+ MALI_CHECK_NON_NULL( argument, -EINVAL );
+
+ /* get call arguments from user space. get_user returns 0 on success */
+ if ( 0 != get_user(uk_args.cookie, &argument->cookie) )
+ {
+ return -EFAULT;
+ }
+
+ uk_args.ctx = session_data;
+ err_code = _mali_ukk_free_big_block( &uk_args );
+
+ /* Return the error that _mali_ukk_free_big_block produced */
+ return map_errcode(err_code);
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_ukk_pp.c b/drivers/media/video/samsung/mali/linux/mali_ukk_pp.c
new file mode 100644
index 0000000..31e2a6a
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_ukk_pp.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+/**
+ * Wrapper for the PP (pixel processor) start-job u/k call.
+ *
+ * Copies the whole argument struct in, dispatches the job, then writes
+ * back only the two output fields user space needs.  The up-front
+ * access_ok(VERIFY_WRITE) check is what makes a later put_user()
+ * failure the "can't happen in practice" case described below.
+ */
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs)
+{
+	_mali_uk_pp_start_job_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	/* Results are written back on success, so require a writable user
+	 * buffer before doing any work. */
+	if (!access_ok(VERIFY_WRITE, uargs, sizeof(_mali_uk_pp_start_job_s)))
+	{
+		return -EFAULT;
+	}
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_start_job_s))) return -EFAULT;
+
+	kargs.ctx = session_data;	/* kernel-private context, set after the copy */
+	err = _mali_ukk_pp_start_job(&kargs);
+	if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+	if (0 != put_user(kargs.returned_user_job_ptr, &uargs->returned_user_job_ptr) ||
+	    0 != put_user(kargs.status, &uargs->status))
+	{
+		/*
+		 * If this happens, then user space will not know that the job was actually started,
+		 * and if we return a queued job, then user space will still think that one is still queued.
+		 * This will typically lead to a deadlock in user space.
+		 * This could however only happen if user space deliberately passes a user buffer which
+		 * passes the access_ok(VERIFY_WRITE) check, but isn't fully writable at the time of copy_to_user().
+		 * The official Mali driver will never attempt to do that, and kernel space should not be affected.
+		 * That is why we do not bother to do a complex rollback in this very very very rare case.
+		 */
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * Wrapper for the PP job abort u/k call.
+ *
+ * _mali_ukk_pp_abort_job() returns nothing to propagate, so once the
+ * arguments are safely copied in this always succeeds.
+ */
+int pp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_abort_job_s __user *uargs)
+{
+	_mali_uk_pp_abort_job_s kargs;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_pp_abort_job_s))) return -EFAULT;
+
+	kargs.ctx = session_data;	/* kernel-private context, set after the copy */
+	_mali_ukk_pp_abort_job(&kargs);
+
+	return 0;
+}
+
+/**
+ * Wrapper for the "how many PP cores" query.
+ *
+ * Pure output call: nothing is copied in; only the number_of_cores
+ * field is written back to user space.
+ */
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs)
+{
+	_mali_uk_get_pp_number_of_cores_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_get_pp_number_of_cores(&kargs);
+	if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+	if (0 != put_user(kargs.number_of_cores, &uargs->number_of_cores)) return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * Wrapper for the PP core-version query.
+ *
+ * Pure output call: only the version field is written back.
+ */
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs)
+{
+	_mali_uk_get_pp_core_version_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+	MALI_CHECK_NON_NULL(session_data, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_get_pp_core_version(&kargs);
+	if (_MALI_OSK_ERR_OK != err) return map_errcode(err);
+
+	if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+
+	return 0;
+}
diff --git a/drivers/media/video/samsung/mali/linux/mali_ukk_profiling.c b/drivers/media/video/samsung/mali/linux/mali_ukk_profiling.c
new file mode 100644
index 0000000..17366be
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_ukk_profiling.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+/**
+ * Wrapper for the profiling-start u/k call.
+ *
+ * Copies the full request in, starts profiling, and writes the
+ * effective event limit back to user space.
+ */
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs)
+{
+	_mali_uk_profiling_start_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_start_s)))
+	{
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;	/* kernel-private context, set after the copy */
+	err = _mali_ukk_profiling_start(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	if (0 != put_user(kargs.limit, &uargs->limit))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * Wrapper for adding a single profiling event.
+ *
+ * Input-only call: the event record is copied in and handed to the
+ * common layer; nothing is written back.
+ */
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs)
+{
+	_mali_uk_profiling_add_event_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_profiling_add_event_s)))
+	{
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;	/* kernel-private context, set after the copy */
+	err = _mali_ukk_profiling_add_event(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	return 0;
+}
+
+/**
+ * Wrapper for the profiling-stop u/k call.
+ *
+ * Nothing is copied in; the number of captured events is written back
+ * via the count field.
+ */
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs)
+{
+	_mali_uk_profiling_stop_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_profiling_stop(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	if (0 != put_user(kargs.count, &uargs->count))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * Wrapper that fetches one recorded profiling event by index.
+ *
+ * Only 'index' is read from user space; the whole struct is copied
+ * back, so ctx is explicitly cleared first to avoid leaking a kernel
+ * pointer to user space.
+ */
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs)
+{
+	_mali_uk_profiling_get_event_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != get_user(kargs.index, &uargs->index))
+	{
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;
+
+	err = _mali_ukk_profiling_get_event(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	kargs.ctx = NULL; /* prevent kernel address to be returned to user space */
+	if (0 != copy_to_user(uargs, &kargs, sizeof(_mali_uk_profiling_get_event_s)))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * Wrapper for clearing the recorded profiling data.
+ *
+ * No user data is transferred in either direction; only the session
+ * context is passed down.
+ */
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs)
+{
+	_mali_uk_profiling_clear_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_profiling_clear(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	return 0;
+}
+
+/**
+ * Wrapper for querying the profiling configuration.
+ *
+ * Output-only call: writes back the enable_events flag.
+ */
+int profiling_get_config_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_config_s __user *uargs)
+{
+	_mali_uk_profiling_get_config_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	kargs.ctx = session_data;
+	err = _mali_ukk_profiling_get_config(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	if (0 != put_user(kargs.enable_events, &uargs->enable_events))
+	{
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+#if MALI_TRACEPOINTS_ENABLED
+/**
+ * Wrapper for transferring software counters from user space.
+ *
+ * BUG FIX: the original assigned kargs.ctx = session_data *before*
+ * copy_from_user(), so the copy immediately overwrote the kernel-side
+ * context with user-controlled data.  The assignment now happens after
+ * the copy, matching every other wrapper in these files.
+ */
+int transfer_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_s __user *uargs)
+{
+	_mali_uk_sw_counters_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_sw_counters_s)))
+	{
+		return -EFAULT;
+	}
+
+	/* Set the kernel-private context only after the user copy so it
+	 * cannot be clobbered by user-supplied bytes. */
+	kargs.ctx = session_data;
+
+	err = _mali_ukk_transfer_sw_counters(&kargs);
+
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	return 0;
+}
+#endif
+
diff --git a/drivers/media/video/samsung/mali/linux/mali_ukk_vsync.c b/drivers/media/video/samsung/mali/linux/mali_ukk_vsync.c
new file mode 100644
index 0000000..80a6afd
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_ukk_vsync.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/fs.h> /* file system operations */
+#include <asm/uaccess.h> /* user space access */
+
+#include "mali_ukk.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_session_manager.h"
+#include "mali_ukk_wrappers.h"
+
+
+/**
+ * Wrapper for reporting a vsync event to the driver.
+ *
+ * Input-only call: the event record is copied in and forwarded;
+ * nothing is written back.
+ */
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs)
+{
+	_mali_uk_vsync_event_report_s kargs;
+	_mali_osk_errcode_t err;
+
+	MALI_CHECK_NON_NULL(uargs, -EINVAL);
+
+	if (0 != copy_from_user(&kargs, uargs, sizeof(_mali_uk_vsync_event_report_s)))
+	{
+		return -EFAULT;
+	}
+
+	kargs.ctx = session_data;	/* kernel-private context, set after the copy */
+	err = _mali_ukk_vsync_event_report(&kargs);
+	if (_MALI_OSK_ERR_OK != err)
+	{
+		return map_errcode(err);
+	}
+
+	return 0;
+}
+
diff --git a/drivers/media/video/samsung/mali/linux/mali_ukk_wrappers.h b/drivers/media/video/samsung/mali/linux/mali_ukk_wrappers.h
new file mode 100644
index 0000000..184ce8d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/linux/mali_ukk_wrappers.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk_wrappers.h
+ * Defines the wrapper functions for each user-kernel function
+ */
+
+#ifndef __MALI_UKK_WRAPPERS_H__
+#define __MALI_UKK_WRAPPERS_H__
+
+#include "mali_uk_types.h"
+#include "mali_osk.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+int get_system_info_size_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_size_s __user *uargs);
+int get_system_info_wrapper(struct mali_session_data *session_data, _mali_uk_get_system_info_s __user *uargs);
+int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs);
+int get_api_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_s __user *uargs);
+int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
+int mem_init_wrapper(struct mali_session_data *session_data, _mali_uk_init_mem_s __user *uargs);
+int mem_term_wrapper(struct mali_session_data *session_data, _mali_uk_term_mem_s __user *uargs);
+int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user * argument);
+int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user * argument);
+int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user * uargs);
+int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user * uargs);
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+int mem_attach_ump_wrapper(struct mali_session_data *session_data, _mali_uk_attach_ump_mem_s __user * argument);
+int mem_release_ump_wrapper(struct mali_session_data *session_data, _mali_uk_release_ump_mem_s __user * argument);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+int mem_get_big_block_wrapper( struct file * filp, _mali_uk_get_big_block_s __user * argument );
+int mem_free_big_block_wrapper( struct mali_session_data *session_data, _mali_uk_free_big_block_s __user * argument);
+int pp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_start_job_s __user *uargs);
+int pp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_pp_abort_job_s __user *uargs);
+int pp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_number_of_cores_s __user *uargs);
+int pp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_pp_core_version_s __user *uargs);
+int gp_start_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_start_job_s __user *uargs);
+int gp_abort_job_wrapper(struct mali_session_data *session_data, _mali_uk_gp_abort_job_s __user *uargs);
+int gp_get_number_of_cores_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_number_of_cores_s __user *uargs);
+int gp_get_core_version_wrapper(struct mali_session_data *session_data, _mali_uk_get_gp_core_version_s __user *uargs);
+int gp_suspend_response_wrapper(struct mali_session_data *session_data, _mali_uk_gp_suspend_response_s __user *uargs);
+
+int profiling_start_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_start_s __user *uargs);
+int profiling_add_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_add_event_s __user *uargs);
+int profiling_stop_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_stop_s __user *uargs);
+int profiling_get_event_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_event_s __user *uargs);
+int profiling_clear_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_clear_s __user *uargs);
+int profiling_get_config_wrapper(struct mali_session_data *session_data, _mali_uk_profiling_get_config_s __user *uargs);
+
+int vsync_event_report_wrapper(struct mali_session_data *session_data, _mali_uk_vsync_event_report_s __user *uargs);
+
+#if MALI_TRACEPOINTS_ENABLED
+int transfer_sw_counters_wrapper(struct mali_session_data *session_data, _mali_uk_sw_counters_s __user *uargs);
+#endif
+
+int map_errcode( _mali_osk_errcode_t err );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_WRAPPERS_H__ */
diff --git a/drivers/media/video/samsung/mali/platform/default/mali_platform.c b/drivers/media/video/samsung/mali/platform/default/mali_platform.c
new file mode 100644
index 0000000..44f877e
--- /dev/null
+++ b/drivers/media/video/samsung/mali/platform/default/mali_platform.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+
+/* Default platform: no platform-specific setup; report success. */
+_mali_osk_errcode_t mali_platform_init(void)
+{
+	MALI_SUCCESS;
+}
+
+/* Default platform: nothing to tear down; report success. */
+_mali_osk_errcode_t mali_platform_deinit(void)
+{
+	MALI_SUCCESS;
+}
+
+/* Default platform: power-mode transitions are a no-op; report success. */
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode)
+{
+	MALI_SUCCESS;
+}
+
+/* Default platform: GPU utilization data is ignored (no DVFS hook). */
+void mali_gpu_utilization_handler(u32 utilization)
+{
+}
+
+/* Default platform: no parent power domain to attach; intentional stub. */
+void set_mali_parent_power_domain(void* dev)
+{
+}
diff --git a/drivers/media/video/samsung/mali/platform/mali_platform.h b/drivers/media/video/samsung/mali/platform/mali_platform.h
new file mode 100644
index 0000000..70cfa14
--- /dev/null
+++ b/drivers/media/video/samsung/mali/platform/mali_platform.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.h
+ * Platform specific Mali driver functions
+ */
+
+#ifndef __MALI_PLATFORM_H__
+#define __MALI_PLATFORM_H__
+
+#include "mali_osk.h"
+
+#ifdef CONFIG_CPU_EXYNOS4210
+#define MALI_DVFS_STEPS 2
+#else
+#define MALI_DVFS_STEPS 4
+#endif
+
+#if !USING_MALI_PMM
+/* @brief System power up/down cores that can be passed into mali_platform_powerdown/up() */
+#define MALI_PLATFORM_SYSTEM 0
+#endif
+
+/* @Enable or Disable Mali GPU Bottom Lock feature */
+#define MALI_GPU_BOTTOM_LOCK 1
+
+#define MALI_VOLTAGE_LOCK 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief description of power change reasons
+ */
+typedef enum mali_power_mode_tag
+{
+ MALI_POWER_MODE_ON,
+ MALI_POWER_MODE_LIGHT_SLEEP,
+ MALI_POWER_MODE_DEEP_SLEEP,
+} mali_power_mode;
+
+/** @brief Platform specific setup and initialisation of MALI
+ *
+ * This is called from the entrypoint of the driver to initialize the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_init(void);
+
+/** @brief Platform specific deinitialisation of MALI
+ *
+ * This is called on the exit of the driver to terminate the platform
+ *
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_deinit(void);
+
+/** @brief Platform specific powerdown sequence of MALI
+ *
+ * Call as part of platform init if there is no PMM support, else the
+ * PMM will call it.
+ * There are three power modes defined:
+ * 1) MALI_POWER_MODE_ON
+ * 2) MALI_POWER_MODE_LIGHT_SLEEP
+ * 3) MALI_POWER_MODE_DEEP_SLEEP
+ * MALI power management module transitions to MALI_POWER_MODE_LIGHT_SLEEP mode when MALI is idle
+ * for idle timer (software timer defined in mali_pmm_policy_jobcontrol.h) duration, MALI transitions
+ * to MALI_POWER_MODE_LIGHT_SLEEP mode during timeout if there are no more jobs queued.
+ * MALI power management module transitions to MALI_POWER_MODE_DEEP_SLEEP mode when OS does system power
+ * off.
+ * Customer has to add power down code when MALI transitions to MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP
+ * mode.
+ * MALI_POWER_MODE_ON mode is entered when the MALI is to powered up. Some customers want to control voltage regulators during
+ * the whole system powers on/off. Customer can track in this function whether the MALI is powered up from
+ * MALI_POWER_MODE_LIGHT_SLEEP or MALI_POWER_MODE_DEEP_SLEEP mode and manage the voltage regulators as well.
+ * @param power_mode defines the power modes
+ * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode);
+
+
+/** @brief Platform specific handling of GPU utilization data
+ *
+ * When GPU utilization data is enabled, this function will be
+ * periodically called.
+ *
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+void mali_gpu_utilization_handler(u32 utilization);
+
+/** @brief Setting the power domain of MALI
+ *
+ * This function sets the power domain of MALI if Linux run time power management is enabled
+ *
+ * @param dev Reference to struct platform_device (defined in linux) used by MALI GPU
+ */
+//void set_mali_parent_power_domain(void* dev);
+void mali_utilization_suspend(void);
+
+#ifdef CONFIG_REGULATOR
+int mali_regulator_get_usecount(void);
+void mali_regulator_disable(void);
+void mali_regulator_enable(void);
+void mali_regulator_set_voltage(int min_uV, int max_uV);
+#endif
+mali_bool mali_clk_set_rate(unsigned int clk, unsigned int mhz);
+unsigned long mali_clk_get_rate(void);
+void mali_clk_put(mali_bool binc_mali_clk);
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores);
+_mali_osk_errcode_t mali_platform_powerup(u32 cores);
+#endif
+
+
+#if USING_MALI_PMM
+#if MALI_POWER_MGMT_TEST_SUITE
+/** @brief function to get status of individual cores
+ *
+ * This function is used by power management test suite to get the status of powered up/down the number
+ * of cores
+ * @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
+ */
+u32 pmu_get_power_up_down_info(void);
+#endif
+#endif
+
+#if MALI_DVFS_ENABLED
+mali_bool init_mali_dvfs_status(int step);
+void deinit_mali_dvfs_status(void);
+mali_bool mali_dvfs_handler(u32 utilization);
+int mali_dvfs_is_running(void);
+void mali_dvfs_late_resume(void);
+int get_mali_dvfs_control_status(void);
+mali_bool set_mali_dvfs_current_step(unsigned int step);
+void mali_default_step_set(int step, mali_bool boostup);
+int change_dvfs_tableset(int change_clk, int change_step);
+#ifdef CONFIG_CPU_EXYNOS4210
+#if MALI_GPU_BOTTOM_LOCK
+int mali_dvfs_bottom_lock_push(void);
+int mali_dvfs_bottom_lock_pop(void);
+#endif
+#else
+int mali_dvfs_bottom_lock_push(int lock_step);
+int mali_dvfs_bottom_lock_pop(void);
+#endif
+#endif
+
+#if MALI_VOLTAGE_LOCK
+int mali_voltage_lock_push(int lock_vol);
+int mali_voltage_lock_pop(void);
+int mali_voltage_lock_init(void);
+int mali_vol_get_from_table(int vol);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/media/video/samsung/mali/platform/orion-m400/mali_platform.c b/drivers/media/video/samsung/mali/platform/orion-m400/mali_platform.c
new file mode 100644
index 0000000..0fc4503
--- /dev/null
+++ b/drivers/media/video/samsung/mali/platform/orion-m400/mali_platform.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include <linux/version.h>
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+#include "mali_linux_pm.h"
+
+#if USING_MALI_PMM
+#include "mali_pmm.h"
+#endif
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#include <plat/pd.h>
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+
+#include <asm/io.h>
+#include <mach/regs-pmu.h>
+
+#define EXTXTALCLK_NAME "ext_xtal"
+#define VPLLSRCCLK_NAME "vpll_src"
+#define FOUTVPLLCLK_NAME "fout_vpll"
+#define SCLVPLLCLK_NAME "sclk_vpll"
+#define GPUMOUT1CLK_NAME "mout_g3d1"
+
+#define MPLLCLK_NAME "mout_mpll"
+#define GPUMOUT0CLK_NAME "mout_g3d0"
+#define GPUCLK_NAME "sclk_g3d"
+#define CLK_DIV_STAT_G3D 0x1003C62C
+#define CLK_DESC "clk-divider-status"
+
+static struct clk *ext_xtal_clock = 0;
+static struct clk *vpll_src_clock = 0;
+static struct clk *fout_vpll_clock = 0;
+static struct clk *sclk_vpll_clock = 0;
+
+static struct clk *mpll_clock = 0;
+static struct clk *mali_parent_clock = 0;
+static struct clk *mali_clock = 0;
+
+int mali_gpu_clk = 160;
+static unsigned int GPU_MHZ = 1000000;
+#ifdef CONFIG_S5PV310_ASV
+int mali_gpu_vol = 1100000; /* 1.10V for ASV */
+#else
+int mali_gpu_vol = 1100000; /* 1.10V */
+#endif
+
+#if MALI_DVFS_ENABLED
+#define MALI_DVFS_DEFAULT_STEP 0 // 134Mhz default
+#endif
+
+int gpu_power_state;
+static int bPoweroff;
+
+#ifdef CONFIG_REGULATOR
+/*
+ * NOTE(review): this is a local mirror of the regulator core's *private*
+ * struct regulator (normally hidden inside drivers/regulator/core.c),
+ * re-declared here only so mali_regulator_get_usecount() can reach
+ * g3d_regulator->rdev->use_count.  It silently breaks if the core's
+ * layout changes - confirm it matches the kernel version in use.
+ */
+struct regulator {
+	struct device *dev;
+	struct list_head list;
+	int uA_load;
+	int min_uV;
+	int max_uV;
+	char *supply_name;
+	struct device_attribute dev_attr;
+	struct regulator_dev *rdev;
+};
+
+/* Consumer handle for the "vdd_g3d" GPU supply; set in init_mali_clock(). */
+struct regulator *g3d_regulator=NULL;
+#endif
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
+extern struct platform_device s5pv310_device_pd[];
+#else
+extern struct platform_device exynos4_device_pd[];
+#endif
+#endif
+
+mali_io_address clk_register_map=0;
+
+#if MALI_GPU_BOTTOM_LOCK
+_mali_osk_lock_t *mali_dvfs_lock;
+#else
+static _mali_osk_lock_t *mali_dvfs_lock;
+#endif
+
+#ifdef CONFIG_REGULATOR
+/**
+ * Report the regulator core's use count for the G3D supply.
+ * Reaches into struct regulator_dev through the locally re-declared
+ * struct regulator above; returns 0 when the regulator is absent.
+ */
+int mali_regulator_get_usecount(void)
+{
+	struct regulator_dev *rdev;
+
+	if( IS_ERR_OR_NULL(g3d_regulator) )
+	{
+		MALI_DEBUG_PRINT(1, ("error on mali_regulator_get_usecount : g3d_regulator is null\n"));
+		return 0;
+	}
+	rdev = g3d_regulator->rdev;
+	return rdev->use_count;
+}
+
+/**
+ * Disable the G3D supply and record the power-off state in bPoweroff.
+ * NOTE(review): bPoweroff is set even when g3d_regulator is NULL and
+ * the actual disable is skipped - confirm that is intentional.
+ */
+void mali_regulator_disable(void)
+{
+	bPoweroff = 1;
+	if( IS_ERR_OR_NULL(g3d_regulator) )
+	{
+		MALI_DEBUG_PRINT(1, ("error on mali_regulator_disable : g3d_regulator is null\n"));
+		return;
+	}
+	regulator_disable(g3d_regulator);
+	MALI_DEBUG_PRINT(1, ("regulator_disable -> use cnt: %d \n",mali_regulator_get_usecount()));
+}
+
+/**
+ * Enable the G3D supply and clear the power-off state.
+ * NOTE(review): the return value of regulator_enable() is ignored, so
+ * an enable failure is silent here - only the use-count print hints at it.
+ */
+void mali_regulator_enable(void)
+{
+	bPoweroff = 0;
+	if( IS_ERR_OR_NULL(g3d_regulator) )
+	{
+		MALI_DEBUG_PRINT(1, ("error on mali_regulator_enable : g3d_regulator is null\n"));
+		return;
+	}
+	regulator_enable(g3d_regulator);
+	MALI_DEBUG_PRINT(1, ("regulator_enable -> use cnt: %d \n",mali_regulator_get_usecount()));
+}
+
+/**
+ * Set the G3D supply voltage under the DVFS lock and cache the voltage
+ * actually reached in mali_gpu_vol.
+ *
+ * BUG FIX: the original early-returned on a NULL/error regulator while
+ * still holding mali_dvfs_lock, deadlocking every subsequent DVFS or
+ * clock operation.  The lock is now released before bailing out.
+ */
+void mali_regulator_set_voltage(int min_uV, int max_uV)
+{
+	int voltage;
+
+	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+
+	if( IS_ERR_OR_NULL(g3d_regulator) )
+	{
+		MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
+		/* must drop the DVFS lock before returning (was leaked here) */
+		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+		return;
+	}
+	MALI_DEBUG_PRINT(2, ("= regulator_set_voltage: %d, %d \n",min_uV, max_uV));
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+	_mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+	                           MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+	                           MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_VOLTS,
+	                           min_uV, max_uV, 0, 0, 0);
+#endif
+
+	regulator_set_voltage(g3d_regulator,min_uV,max_uV);
+	voltage = regulator_get_voltage(g3d_regulator);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+	_mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+	                           MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+	                           MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_VOLTS,
+	                           voltage, 0, 1, 0, 0);
+#endif
+	mali_gpu_vol = voltage;	/* cache the voltage the regulator reports */
+	MALI_DEBUG_PRINT(1, ("= regulator_get_voltage: %d \n",mali_gpu_vol));
+
+	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+}
+#endif
+
+/* Return the current rate (Hz) of the Mali core clock ("sclk_g3d"). */
+unsigned long mali_clk_get_rate(void)
+{
+	return clk_get_rate(mali_clock);
+}
+
+/**
+ * Look up (and cache in file-scope statics) every clock handle needed
+ * for the selected clock topology.
+ *
+ * @param bis_vpll MALI_TRUE: source the GPU from VPLL
+ *        (ext_xtal -> vpll_src -> fout_vpll -> sclk_vpll -> mout_g3d1);
+ *        MALI_FALSE: source it from MPLL (mout_mpll -> mout_g3d0).
+ * The core clock ("sclk_g3d") is always acquired.
+ * @return MALI_TRUE on success, MALI_FALSE if any clk_get() fails.
+ *         Handles obtained before a failure are kept cached; callers
+ *         release them via mali_clk_put().
+ */
+mali_bool mali_clk_get(mali_bool bis_vpll)
+{
+	if (bis_vpll == MALI_TRUE)
+	{
+		if (ext_xtal_clock == NULL)
+		{
+			ext_xtal_clock = clk_get(NULL,EXTXTALCLK_NAME);
+			if (IS_ERR(ext_xtal_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source ext_xtal_clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (vpll_src_clock == NULL)
+		{
+			vpll_src_clock = clk_get(NULL,VPLLSRCCLK_NAME);
+			if (IS_ERR(vpll_src_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source vpll_src_clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (fout_vpll_clock == NULL)
+		{
+			fout_vpll_clock = clk_get(NULL,FOUTVPLLCLK_NAME);
+			if (IS_ERR(fout_vpll_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source fout_vpll_clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (sclk_vpll_clock == NULL)
+		{
+			sclk_vpll_clock = clk_get(NULL,SCLVPLLCLK_NAME);
+			if (IS_ERR(sclk_vpll_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source sclk_vpll_clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (mali_parent_clock == NULL)
+		{
+			mali_parent_clock = clk_get(NULL, GPUMOUT1CLK_NAME);
+
+			if (IS_ERR(mali_parent_clock)) {
+				MALI_PRINT( ( "MALI Error : failed to get source mali parent clock\n"));
+				return MALI_FALSE;
+			}
+		}
+	}
+	else // mpll
+	{
+		if (mpll_clock == NULL)
+		{
+			mpll_clock = clk_get(NULL,MPLLCLK_NAME);
+
+			if (IS_ERR(mpll_clock)) {
+				MALI_PRINT( ("MALI Error : failed to get source mpll clock\n"));
+				return MALI_FALSE;
+			}
+		}
+
+		if (mali_parent_clock == NULL)
+		{
+			mali_parent_clock = clk_get(NULL, GPUMOUT0CLK_NAME);
+
+			if (IS_ERR(mali_parent_clock)) {
+				MALI_PRINT( ( "MALI Error : failed to get source mali parent clock\n"));
+				return MALI_FALSE;
+			}
+		}
+	}
+
+	// mali clock get always.
+	if (mali_clock == NULL)
+	{
+		mali_clock = clk_get(NULL, GPUCLK_NAME);
+
+		if (IS_ERR(mali_clock)) {
+			MALI_PRINT( ("MALI Error : failed to get source mali clock\n"));
+			return MALI_FALSE;
+		}
+	}
+
+	return MALI_TRUE;
+}
+
+/**
+ * Release every cached clock handle and NULL the statics so a later
+ * mali_clk_get() re-acquires them.  The Mali core clock itself is only
+ * released when binc_mali_clock is MALI_TRUE (it is normally kept for
+ * the lifetime of the driver).
+ */
+void mali_clk_put(mali_bool binc_mali_clock)
+{
+	if (mali_parent_clock)
+	{
+		clk_put(mali_parent_clock);
+		mali_parent_clock = 0;
+	}
+
+	if (mpll_clock)
+	{
+		clk_put(mpll_clock);
+		mpll_clock = 0;
+	}
+
+	if (sclk_vpll_clock)
+	{
+		clk_put(sclk_vpll_clock);
+		sclk_vpll_clock = 0;
+	}
+
+	if (fout_vpll_clock)
+	{
+		clk_put(fout_vpll_clock);
+		fout_vpll_clock = 0;
+	}
+
+	if (vpll_src_clock)
+	{
+		clk_put(vpll_src_clock);
+		vpll_src_clock = 0;
+	}
+
+	if (ext_xtal_clock)
+	{
+		clk_put(ext_xtal_clock);
+		ext_xtal_clock = 0;
+	}
+
+	if (binc_mali_clock == MALI_TRUE && mali_clock)
+	{
+		clk_put(mali_clock);
+		mali_clock = 0;
+	}
+
+}
+
+
+/**
+ * Re-parent and set the Mali core clock to (clk * mhz) Hz under the
+ * DVFS lock, updating mali_gpu_clk / GPU_MHZ with the rate actually
+ * achieved.
+ *
+ * BUG FIX: the original returned from both failure paths while still
+ * holding mali_dvfs_lock, deadlocking all later DVFS/clock operations,
+ * and also leaked the clock references taken by mali_clk_get().  Both
+ * paths now drop the references and release the lock before returning.
+ */
+mali_bool mali_clk_set_rate(unsigned int clk, unsigned int mhz)
+{
+	unsigned long rate = 0;
+	mali_bool bis_vpll = MALI_FALSE;
+
+#ifdef CONFIG_VPLL_USE_FOR_TVENC
+	bis_vpll = MALI_TRUE;
+#endif
+
+	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (mali_clk_get(bis_vpll) == MALI_FALSE)
+	{
+		/* drop whatever handles mali_clk_get did acquire, then unlock */
+		mali_clk_put(MALI_FALSE);
+		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+		return MALI_FALSE;
+	}
+
+	rate = (unsigned long)clk * (unsigned long)mhz;
+	MALI_DEBUG_PRINT(3,("= clk_set_rate : %d , %d \n",clk, mhz ));
+
+	if (bis_vpll)
+	{
+		clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
+		clk_set_parent(vpll_src_clock, ext_xtal_clock);
+		clk_set_parent(sclk_vpll_clock, fout_vpll_clock);
+
+		clk_set_parent(mali_parent_clock, sclk_vpll_clock);
+		clk_set_parent(mali_clock, mali_parent_clock);
+	}
+	else
+	{
+		clk_set_parent(mali_parent_clock, mpll_clock);
+		clk_set_parent(mali_clock, mali_parent_clock);
+	}
+
+	if (clk_enable(mali_clock) < 0)
+	{
+		/* release references and the DVFS lock on this path too */
+		mali_clk_put(MALI_FALSE);
+		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+		return MALI_FALSE;
+	}
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+	_mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+	                           MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+	                           MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_FREQ,
+	                           rate, 0, 0, 0, 0);
+#endif
+
+	clk_set_rate(mali_clock, rate);
+	rate = clk_get_rate(mali_clock);	/* re-read: the clk framework may round */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+	_mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+	                           MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+	                           MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_FREQ,
+	                           rate, 0, 0, 0, 0);
+#endif
+
+	if (bis_vpll)
+		mali_gpu_clk = (int)(rate / mhz);
+	else
+		mali_gpu_clk = (int)((rate + 500000) / mhz);	/* round to nearest MHz */
+
+	GPU_MHZ = mhz;
+	MALI_DEBUG_PRINT(3,("= clk_get_rate: %d \n",mali_gpu_clk));
+
+	mali_clk_put(MALI_FALSE);
+
+	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+
+	return MALI_TRUE;
+}
+
+/**
+ * One-time clock/regulator bring-up for the Mali block: creates the
+ * DVFS lock, programs the initial clock rate and (with CONFIG_REGULATOR)
+ * acquires and enables the "vdd_g3d" supply at mali_gpu_vol.
+ *
+ * BUG FIX: on _mali_osk_lock_init() failure the original returned
+ * _MALI_OSK_ERR_FAULT from a function whose return type is mali_bool;
+ * it now returns MALI_FALSE as callers expect.
+ */
+static mali_bool init_mali_clock(void)
+{
+	mali_bool ret = MALI_TRUE;
+
+	gpu_power_state = 0;
+
+	if (mali_clock != 0)
+		return ret; // already initialized
+
+	mali_dvfs_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+			| _MALI_OSK_LOCKFLAG_ONELOCK, 0, 0);
+	if (mali_dvfs_lock == NULL)
+		return MALI_FALSE; /* was: _MALI_OSK_ERR_FAULT (wrong type) */
+
+	if (mali_clk_set_rate(mali_gpu_clk, GPU_MHZ) == MALI_FALSE)
+	{
+		ret = MALI_FALSE;
+		goto err_clock_get;
+	}
+
+	MALI_PRINT(("init_mali_clock mali_clock %p \n", mali_clock));
+
+
+#ifdef CONFIG_REGULATOR
+#if USING_MALI_PMM
+	g3d_regulator = regulator_get(&mali_gpu_device.dev, "vdd_g3d");
+#else
+	g3d_regulator = regulator_get(NULL, "vdd_g3d");
+#endif
+
+	if (IS_ERR(g3d_regulator))
+	{
+		MALI_PRINT( ("MALI Error : failed to get vdd_g3d\n"));
+		ret = MALI_FALSE;
+		goto err_regulator;
+	}
+
+	regulator_enable(g3d_regulator);
+	MALI_DEBUG_PRINT(1, ("= regulator_enable -> use cnt: %d \n",mali_regulator_get_usecount()));
+	mali_regulator_set_voltage(mali_gpu_vol, mali_gpu_vol);
+#endif
+
+	MALI_DEBUG_PRINT(2, ("MALI Clock is set at mali driver\n"));
+
+
+	MALI_DEBUG_PRINT(3,("::clk_put:: %s mali_parent_clock - normal\n", __FUNCTION__));
+	MALI_DEBUG_PRINT(3,("::clk_put:: %s mpll_clock - normal\n", __FUNCTION__));
+
+	mali_clk_put(MALI_FALSE);
+
+	return MALI_TRUE;
+
+
+#ifdef CONFIG_REGULATOR
+err_regulator:
+	regulator_put(g3d_regulator);	/* falls through into clock cleanup */
+#endif
+
+err_clock_get:
+	mali_clk_put(MALI_TRUE);
+
+	return ret;
+}
+
+static mali_bool deinit_mali_clock(void)
+{
+ if (mali_clock == 0)
+ return MALI_TRUE;
+
+#ifdef CONFIG_REGULATOR
+ if (g3d_regulator)
+ {
+ regulator_put(g3d_regulator);
+ g3d_regulator=NULL;
+ }
+#endif
+
+ mali_clk_put(MALI_TRUE);
+
+ return MALI_TRUE;
+}
+
+
/*
 * enable_mali_clocks() - ungate the GPU clock and re-program its rate.
 * NOTE(review): the clk_enable() error is printed but otherwise ignored;
 * the function always reports success.
 */
static _mali_osk_errcode_t enable_mali_clocks(void)
{
	int err;
	err = clk_enable(mali_clock);
	MALI_DEBUG_PRINT(3,("enable_mali_clocks mali_clock %p error %d \n", mali_clock, err));

	// set clock rate
	mali_clk_set_rate(mali_gpu_clk, GPU_MHZ);

	MALI_SUCCESS;
}

/* disable_mali_clocks() - gate the GPU clock; always reports success. */
static _mali_osk_errcode_t disable_mali_clocks(void)
{
	clk_disable(mali_clock);
	MALI_DEBUG_PRINT(3,("disable_mali_clocks mali_clock %p \n", mali_clock));

	MALI_SUCCESS;
}

/*
 * set_mali_parent_power_domain() - parent the Mali platform device under
 * the G3D power domain device so runtime PM propagates.  No-op unless
 * MALI_PMM_RUNTIME_JOB_CONTROL_ON; the array name changed after 2.6.36.
 */
void set_mali_parent_power_domain(struct platform_device* dev)
{
#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
	dev->dev.parent = &s5pv310_device_pd[PD_G3D].dev;
#else
	dev->dev.parent = &exynos4_device_pd[PD_G3D].dev;
#endif

#endif
}
+
/*
 * g3d_power_domain_control() - switch the G3D power domain on/off.
 *
 * With MALI_PMM_RUNTIME_JOB_CONTROL_ON this defers to the PMM
 * activate/idle hooks; otherwise it pokes the PMU configuration register
 * directly and polls the status register (CONF + 0x4) for up to
 * 10 * 100us.
 *
 * NOTE(review): the timeout paths return -ETIMEDOUT from a function
 * typed _mali_osk_errcode_t - presumably compatible, but worth
 * confirming against the OSK error-code definitions.
 */
_mali_osk_errcode_t g3d_power_domain_control(int bpower_on)
{
	if (bpower_on)
	{
#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
		MALI_DEBUG_PRINT(3,("_mali_osk_pmm_dev_activate \n"));
		_mali_osk_pmm_dev_activate();
#else //MALI_PMM_RUNTIME_JOB_CONTROL_ON
		void __iomem *status;
		u32 timeout;
		/* Request power-up, then poll until the PMU reports it. */
		__raw_writel(S5P_INT_LOCAL_PWR_EN, S5P_PMU_G3D_CONF);
		status = S5P_PMU_G3D_CONF + 0x4;

		timeout = 10;
		while ((__raw_readl(status) & S5P_INT_LOCAL_PWR_EN)
			!= S5P_INT_LOCAL_PWR_EN) {
			if (timeout == 0) {
				MALI_PRINTF(("Power domain enable failed.\n"));
				return -ETIMEDOUT;
			}
			timeout--;
			_mali_osk_time_ubusydelay(100);
		}
#endif //MALI_PMM_RUNTIME_JOB_CONTROL_ON
	}
	else
	{
#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
		MALI_DEBUG_PRINT( 4,("_mali_osk_pmm_dev_idle\n"));
		_mali_osk_pmm_dev_idle();

#else //MALI_PMM_RUNTIME_JOB_CONTROL_ON
		void __iomem *status;
		u32 timeout;
		/* Request power-down, then poll until the enable bit clears. */
		__raw_writel(0, S5P_PMU_G3D_CONF);

		status = S5P_PMU_G3D_CONF + 0x4;
		/* Wait max 1ms */
		timeout = 10;
		while (__raw_readl(status) & S5P_INT_LOCAL_PWR_EN)
		{
			if (timeout == 0) {
				MALI_PRINTF(("Power domain disable failed.\n" ));
				return -ETIMEDOUT;
			}
			timeout--;
			_mali_osk_time_ubusydelay( 100);
		}
#endif //MALI_PMM_RUNTIME_JOB_CONTROL_ON
	}

	MALI_SUCCESS;
}
+
/*
 * mali_platform_init() - platform entry point: bring up clocks and,
 * when DVFS is compiled in, map the clock-divider status register and
 * start the DVFS machinery.  Fails only if clock init fails; a DVFS
 * init failure is merely logged.
 */
_mali_osk_errcode_t mali_platform_init()
{
	MALI_CHECK(init_mali_clock(), _MALI_OSK_ERR_FAULT);
#if MALI_DVFS_ENABLED
	if (!clk_register_map) clk_register_map = _mali_osk_mem_mapioregion( CLK_DIV_STAT_G3D, 0x20, CLK_DESC );
	if(!init_mali_dvfs_status(MALI_DVFS_DEFAULT_STEP))
		MALI_DEBUG_PRINT(1, ("mali_platform_init failed\n"));
#endif

	MALI_SUCCESS;
}

/*
 * mali_platform_deinit() - tear down in reverse: clocks first, then the
 * DVFS state and the mapped register window.
 */
_mali_osk_errcode_t mali_platform_deinit()
{
	deinit_mali_clock();

#if MALI_DVFS_ENABLED
	deinit_mali_dvfs_status();
	if (clk_register_map )
	{
		_mali_osk_mem_unmapioregion(CLK_DIV_STAT_G3D, 0x20, clk_register_map);
		clk_register_map=0;
	}
#endif

	MALI_SUCCESS;
}
+
+_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
+{
+ MALI_DEBUG_PRINT(3,("power down is called in mali_platform_powerdown state %x core %x \n", gpu_power_state, cores));
+
+ if (gpu_power_state != 0) // power down after state is 0
+ {
+ gpu_power_state = gpu_power_state & (~cores);
+ if (gpu_power_state == 0)
+ {
+ MALI_DEBUG_PRINT( 3,("disable clock\n"));
+ disable_mali_clocks();
+ }
+ }
+ else
+ {
+ MALI_PRINT(("mali_platform_powerdown gpu_power_state == 0 and cores %x \n", cores));
+ }
+
+ bPoweroff=1;
+
+
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_platform_powerup(u32 cores)
+{
+ MALI_DEBUG_PRINT(3,("power up is called in mali_platform_powerup state %x core %x \n", gpu_power_state, cores));
+
+ if (gpu_power_state == 0) // power up only before state is 0
+ {
+ gpu_power_state = gpu_power_state | cores;
+
+ if (gpu_power_state != 0)
+ {
+ MALI_DEBUG_PRINT(4,("enable clock \n"));
+ enable_mali_clocks();
+ }
+ }
+ else
+ {
+ gpu_power_state = gpu_power_state | cores;
+ }
+
+ bPoweroff=0;
+
+
+ MALI_SUCCESS;
+}
+
/*
 * mali_gpu_utilization_handler() - periodic utilization callback from the
 * Mali core; feeds the DVFS governor unless the GPU is powered off.
 */
void mali_gpu_utilization_handler(u32 utilization)
{
	if (bPoweroff==0)
	{
#if MALI_DVFS_ENABLED
		if(!mali_dvfs_handler(utilization))
			MALI_DEBUG_PRINT(1,( "error on mali dvfs status in utilization\n"));
#endif
	}
}

#if MALI_POWER_MGMT_TEST_SUITE
/* Test-suite stub: report all 12 power-up/down bits as set. */
u32 pmu_get_power_up_down_info(void)
{
	return 4095;
}

#endif
/* No-op on this platform; power modes are handled via powerup/powerdown. */
_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode)
{
	MALI_SUCCESS;
}
+
diff --git a/drivers/media/video/samsung/mali/platform/orion-m400/mali_platform_dvfs.c b/drivers/media/video/samsung/mali/platform/orion-m400/mali_platform_dvfs.c
new file mode 100644
index 0000000..9e6edba
--- /dev/null
+++ b/drivers/media/video/samsung/mali/platform/orion-m400/mali_platform_dvfs.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform_dvfs.c
+ * Platform specific Mali driver dvfs functions
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+
+#include <asm/io.h>
+
+#ifdef CONFIG_CPU_FREQ
+#include <mach/asv.h>
+#include <mach/regs-pmu.h>
+#define EXYNOS4_ASV_ENABLED
+#endif
+
+#include "mali_device_pause_resume.h"
+#include <linux/workqueue.h>
+
+#define MALI_DVFS_WATING 10 // msec
+
+static int bMaliDvfsRun=0;
+
+#if MALI_GPU_BOTTOM_LOCK
+static _mali_osk_atomic_t bottomlock_status;
+#endif
+
+typedef struct mali_dvfs_tableTag{
+ unsigned int clock;
+ unsigned int freq;
+ unsigned int vol;
+}mali_dvfs_table;
+
+typedef struct mali_dvfs_statusTag{
+ unsigned int currentStep;
+ mali_dvfs_table * pCurrentDvfs;
+
+}mali_dvfs_currentstatus;
+
+typedef struct mali_dvfs_thresholdTag{
+ unsigned int downthreshold;
+ unsigned int upthreshold;
+}mali_dvfs_threshold_table;
+
+typedef struct mali_dvfs_staycount{
+ unsigned int staycount;
+}mali_dvfs_staycount_table;
+
+mali_dvfs_staycount_table mali_dvfs_staycount[MALI_DVFS_STEPS]={
+ /*step 0*/{1},
+ /*step 1*/{1},};
+
+/*dvfs threshold*/
+mali_dvfs_threshold_table mali_dvfs_threshold[MALI_DVFS_STEPS]={
+ /*step 0*/{((int)((255*0)/100)), ((int)((255*85)/100))},
+ /*step 1*/{((int)((255*75)/100)), ((int)((255*100)/100))} };
+
+/*dvfs status*/
+mali_dvfs_currentstatus maliDvfsStatus;
+int mali_dvfs_control=0;
+
+/*dvfs table*/
+mali_dvfs_table mali_dvfs[MALI_DVFS_STEPS]={
+ /*step 0*/{160 ,1000000 , 950000},
+ /*step 1*/{267 ,1000000 ,1000000} };
+
+#ifdef EXYNOS4_ASV_ENABLED
+
+#define ASV_8_LEVEL 8
+#define ASV_5_LEVEL 5
+
+static unsigned int asv_3d_volt_5_table[ASV_5_LEVEL][MALI_DVFS_STEPS] = {
+ /* L3(160MHz), L2(266MHz) */
+ {1000000, 1100000}, /* S */
+ {1000000, 1100000}, /* A */
+ { 950000, 1000000}, /* B */
+ { 950000, 1000000}, /* C */
+ { 950000, 950000}, /* D */
+};
+
+static unsigned int asv_3d_volt_8_table[ASV_8_LEVEL][MALI_DVFS_STEPS] = {
+ /* L3(160MHz), L2(266MHz)) */
+ {1000000, 1100000}, /* SS */
+ {1000000, 1100000}, /* A1 */
+ {1000000, 1100000}, /* A2 */
+ { 950000, 1000000}, /* B1 */
+ { 950000, 1000000}, /* B2 */
+ { 950000, 1000000}, /* C1 */
+ { 950000, 1000000}, /* C2 */
+ { 950000, 950000}, /* D1 */
+};
+#endif
+
+static u32 mali_dvfs_utilization = 255;
+
+static void mali_dvfs_work_handler(struct work_struct *w);
+
+static struct workqueue_struct *mali_dvfs_wq = 0;
+extern mali_io_address clk_register_map;
+
+#if MALI_GPU_BOTTOM_LOCK
+extern _mali_osk_lock_t *mali_dvfs_lock;
+#endif
+
+static DECLARE_WORK(mali_dvfs_work, mali_dvfs_work_handler);
+
/* Return the DVFS step the governor currently believes is active. */
static unsigned int get_mali_dvfs_status(void)
{
	return maliDvfsStatus.currentStep;
}

#if MALI_GPU_BOTTOM_LOCK
#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
/* Expose the manual DVFS override value (0 = automatic governor). */
int get_mali_dvfs_control_status(void)
{
	return mali_dvfs_control;
}

/* Set the current DVFS step under the DVFS lock; always succeeds. */
mali_bool set_mali_dvfs_current_step(unsigned int step)
{
	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
	maliDvfsStatus.currentStep = step;
	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
	return MALI_TRUE;
}
#endif
#endif
+
/*
 * set_mali_dvfs_status() - apply DVFS step 'step'.
 *
 * Ordering matters: when boosting, raise the voltage before the clock;
 * when down-clocking, lower the clock first so the rail always supports
 * the active frequency.  Fails only if the regulator is not enabled.
 */
static mali_bool set_mali_dvfs_status(u32 step,mali_bool boostup)
{
	u32 validatedStep=step;

#ifdef CONFIG_REGULATOR
	if (mali_regulator_get_usecount()==0) {
		MALI_DEBUG_PRINT(1, ("regulator use_count is 0 \n"));
		return MALI_FALSE;
	}
#endif

	if (boostup) {
#ifdef CONFIG_REGULATOR
		/*change the voltage*/
		mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
#endif
		/*change the clock*/
		mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
	} else {
		/*change the clock*/
		mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
#ifdef CONFIG_REGULATOR
		/*change the voltage*/
		mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
#endif
	}

	/* Record the step we just applied ("validated" is currently a
	 * straight copy - no range check is performed on 'step'). */
	maliDvfsStatus.currentStep = validatedStep;
	/*for future use*/
	maliDvfsStatus.pCurrentDvfs = &mali_dvfs[validatedStep];

	return MALI_TRUE;
}
+
+static void mali_platform_wating(u32 msec)
+{
+ /*sample wating
+ change this in the future with proper check routine.
+ */
+ unsigned int read_val;
+ while(1) {
+ read_val = _mali_osk_mem_ioread32(clk_register_map, 0x00);
+ if ((read_val & 0x8000)==0x0000) break;
+
+ _mali_osk_time_ubusydelay(100); // 1000 -> 100 : 20101218
+ }
+ /* _mali_osk_time_ubusydelay(msec*1000);*/
+}
+
/*
 * change_mali_dvfs_status() - apply a DVFS step and wait for the clock
 * divider to stabilize before returning.
 */
static mali_bool change_mali_dvfs_status(u32 step, mali_bool boostup )
{

	MALI_DEBUG_PRINT(1, ("> change_mali_dvfs_status: %d, %d \n",step, boostup));

	if (!set_mali_dvfs_status(step, boostup)) {
		MALI_DEBUG_PRINT(1, ("error on set_mali_dvfs_status: %d, %d \n",step, boostup));
		return MALI_FALSE;
	}

	/*wait until clock and voltage is stablized*/
	mali_platform_wating(MALI_DVFS_WATING); /*msec*/

	return MALI_TRUE;
}

/*
 * decideNextStatus() - choose the next DVFS step from utilization.
 *
 * NOTE(review): the header comment says "0:stay, 1:up", yet the
 * in-between branch returns maliDvfsStatus.currentStep - the value is
 * really the target step index.  With only two steps (MALI_DVFS_STEPS=2)
 * the two interpretations coincide, but this would need revisiting if
 * more steps were added.
 */
static unsigned int decideNextStatus(unsigned int utilization)
{
	unsigned int level=0; // 0:stay, 1:up

	if (!mali_dvfs_control) {
#if MALI_GPU_BOTTOM_LOCK
		/* A held bottom lock forces the high step regardless of load. */
		if (_mali_osk_atomic_read(&bottomlock_status) > 0)
			level = 1; /* or bigger */
		else if (utilization > mali_dvfs_threshold[maliDvfsStatus.currentStep].upthreshold)
#else
		if (utilization > mali_dvfs_threshold[maliDvfsStatus.currentStep].upthreshold)
#endif
			level=1;
		else if (utilization < mali_dvfs_threshold[maliDvfsStatus.currentStep].downthreshold)
			level=0;
		else
			level = maliDvfsStatus.currentStep;
	} else {
		/* Manual override: pick step by the requested clock value. */
		if ((mali_dvfs_control > 0) && (mali_dvfs_control < mali_dvfs[1].clock))
			level=0;
		else
			level=1;
	}

	return level;
}
+
#ifdef EXYNOS4_ASV_ENABLED
/*
 * mali_dvfs_table_update() - overwrite the per-step voltages from the
 * chip's ASV (Adaptive Supply Voltage) grading.
 *
 * The low nibble of exynos_result_of_asv selects the ASV group; the top
 * nibble selects which table applies (0x8 -> 5-level table, 0x4 ->
 * 8-level table).  Any other target leaves the defaults untouched.
 */
static mali_bool mali_dvfs_table_update(void)
{
	unsigned int exynos_result_of_asv_group;
	unsigned int target_asv;
	unsigned int i;
	exynos_result_of_asv_group = exynos_result_of_asv & 0xf;
	target_asv = exynos_result_of_asv >> 28;
	MALI_PRINT(("exynos_result_of_asv_group = 0x%x, target_asv = 0x%x\n", exynos_result_of_asv_group, target_asv));

	if (target_asv == 0x8) { //SUPPORT_1400MHZ
		for (i = 0; i < MALI_DVFS_STEPS; i++) {
			mali_dvfs[i].vol = asv_3d_volt_5_table[exynos_result_of_asv_group][i];
			MALI_PRINT(("mali_dvfs[%d].vol = %d\n", i, mali_dvfs[i].vol));
		}
	} else if (target_asv == 0x4){ //SUPPORT_1200MHZ
		for (i = 0; i < MALI_DVFS_STEPS; i++) {
			mali_dvfs[i].vol = asv_3d_volt_8_table[exynos_result_of_asv_group][i];
			MALI_PRINT(("mali_dvfs[%d].vol = %d\n", i, mali_dvfs[i].vol));
		}
	}

	return MALI_TRUE;

}
#endif
+
/*
 * mali_dvfs_status() - governor main loop body, run from the workqueue.
 *
 * On the first invocation (ASV builds) it patches the voltage table and
 * drops to step 0.  Afterwards it compares the decided step with the
 * current one and switches when they differ, using a stay counter to
 * damp rapid back-and-forth switching.
 */
static mali_bool mali_dvfs_status(u32 utilization)
{
	unsigned int nextStatus = 0;
	unsigned int curStatus = 0;
	mali_bool boostup = MALI_FALSE;
#ifdef EXYNOS4_ASV_ENABLED
	static mali_bool asv_applied = MALI_FALSE;
#endif
	static int stay_count = 0; // to prevent frequent switch

	MALI_DEBUG_PRINT(1, ("> mali_dvfs_status: %d \n",utilization));
#ifdef EXYNOS4_ASV_ENABLED
	/* One-shot: apply ASV voltages, then start from the lowest step. */
	if (asv_applied == MALI_FALSE) {
		mali_dvfs_table_update();
		change_mali_dvfs_status(0,0);
		asv_applied = MALI_TRUE;

		return MALI_TRUE;
	}
#endif

	/*decide next step*/
	curStatus = get_mali_dvfs_status();
	nextStatus = decideNextStatus(utilization);

	MALI_DEBUG_PRINT(1, ("= curStatus %d, nextStatus %d, maliDvfsStatus.currentStep %d \n", curStatus, nextStatus, maliDvfsStatus.currentStep));

	/*if next status is same with current status, don't change anything*/
	if ((curStatus!=nextStatus && stay_count==0)) {
		/*check if boost up or not*/
		if (nextStatus > maliDvfsStatus.currentStep)
			boostup = 1;

		/*change mali dvfs status*/
		if (!change_mali_dvfs_status(nextStatus,boostup)) {
			MALI_DEBUG_PRINT(1, ("error on change_mali_dvfs_status \n"));
			return MALI_FALSE;
		}
		/* Re-arm the damping counter for the step just entered. */
		stay_count = mali_dvfs_staycount[maliDvfsStatus.currentStep].staycount;
	} else {
		if (stay_count>0)
			stay_count--;
	}

	return MALI_TRUE;
}
+
+
+
/* Return non-zero while the DVFS work handler is executing. */
int mali_dvfs_is_running(void)
{
	return bMaliDvfsRun;
}



/* Drop to the lowest DVFS step when the system resumes from sleep. */
void mali_dvfs_late_resume(void)
{
	// set the init clock as low when resume
	set_mali_dvfs_status(0,0);
}


/*
 * Workqueue body: run one governor iteration on the last reported
 * utilization, flagging bMaliDvfsRun around it.
 */
static void mali_dvfs_work_handler(struct work_struct *w)
{
	bMaliDvfsRun=1;

	MALI_DEBUG_PRINT(3, ("=== mali_dvfs_work_handler\n"));

	if (!mali_dvfs_status(mali_dvfs_utilization))
		MALI_DEBUG_PRINT(1,( "error on mali dvfs status in mali_dvfs_work_handler"));

	bMaliDvfsRun=0;
}
+
+
+mali_bool init_mali_dvfs_status(int step)
+{
+ /*default status
+ add here with the right function to get initilization value.
+ */
+ if (!mali_dvfs_wq)
+ mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs");
+
+#if MALI_GPU_BOTTOM_LOCK
+ _mali_osk_atomic_init(&bottomlock_status, 0);
+#endif
+
+ /*add a error handling here*/
+ maliDvfsStatus.currentStep = step;
+
+ return MALI_TRUE;
+}
+
/* Tear down the governor: release the bottom-lock atomic and the wq. */
void deinit_mali_dvfs_status(void)
{
#if MALI_GPU_BOTTOM_LOCK
	_mali_osk_atomic_term(&bottomlock_status);
#endif

	if (mali_dvfs_wq)
		destroy_workqueue(mali_dvfs_wq);
	mali_dvfs_wq = NULL;
}

/*
 * mali_dvfs_handler() - record the latest utilization sample and kick
 * the governor work item on CPU 0.  Always reports success.
 */
mali_bool mali_dvfs_handler(u32 utilization)
{
	mali_dvfs_utilization = utilization;
	queue_work_on(0, mali_dvfs_wq,&mali_dvfs_work);

	/*add error handle here*/
	return MALI_TRUE;
}

/*
 * mali_default_step_set() - force a given clock and, when coming from
 * step 1, a full DVFS status change.
 * NOTE(review): the "currentStep == 1" guard looks asymmetric - it only
 * re-applies voltage when leaving the high step; confirm intent before
 * touching.
 */
void mali_default_step_set(int step, mali_bool boostup)
{
	mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);

	if (maliDvfsStatus.currentStep == 1)
		set_mali_dvfs_status(step, boostup);
}
+
#if MALI_GPU_BOTTOM_LOCK
/*
 * mali_dvfs_bottom_lock_push() - take a reference on the "bottom lock"
 * that pins the GPU at the high DVFS step.  The first holder performs
 * the actual voltage/clock boost.  Returns the new reference count, or
 * -1 if the counter is in an invalid (negative) state.
 */
int mali_dvfs_bottom_lock_push(void)
{
	int prev_status = _mali_osk_atomic_read(&bottomlock_status);

	if (prev_status < 0) {
		MALI_PRINT(("gpu bottom lock status is not valid for push"));
		return -1;
	}

	if (prev_status == 0) {
		/* First holder: boost to step 1 (voltage before clock). */
		mali_regulator_set_voltage(mali_dvfs[1].vol, mali_dvfs[1].vol);
		mali_clk_set_rate(mali_dvfs[1].clock, mali_dvfs[1].freq);
		set_mali_dvfs_current_step(1);
	}

	return _mali_osk_atomic_inc_return(&bottomlock_status);
}

/*
 * mali_dvfs_bottom_lock_pop() - drop one bottom-lock reference.  The
 * governor resumes normal scaling once the count reaches zero.  Returns
 * the new count, or -1 when called with no lock held.
 */
int mali_dvfs_bottom_lock_pop(void)
{
	if (_mali_osk_atomic_read(&bottomlock_status) <= 0) {
		MALI_PRINT(("gpu bottom lock status is not valid for pop"));
		return -1;
	}

	return _mali_osk_atomic_dec_return(&bottomlock_status);
}
#endif
diff --git a/drivers/media/video/samsung/mali/platform/pegasus-m400/mali_platform.c b/drivers/media/video/samsung/mali/platform/pegasus-m400/mali_platform.c
new file mode 100644
index 0000000..397ba942
--- /dev/null
+++ b/drivers/media/video/samsung/mali/platform/pegasus-m400/mali_platform.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform.c
+ * Platform specific Mali driver functions for a default platform
+ */
+#include <linux/version.h>
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+#include "mali_linux_pm.h"
+
+#if USING_MALI_PMM
+#include "mali_pmm.h"
+#endif
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#include <plat/pd.h>
+#endif
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_kernel_profiling.h"
+#endif
+
+#include <asm/io.h>
+#include <mach/regs-pmu.h>
+
+#define EXTXTALCLK_NAME "ext_xtal"
+#define VPLLSRCCLK_NAME "vpll_src"
+#define FOUTVPLLCLK_NAME "fout_vpll"
+#define SCLVPLLCLK_NAME "sclk_vpll"
+#define GPUMOUT1CLK_NAME "mout_g3d1"
+
+#define MPLLCLK_NAME "mout_mpll"
+#define GPUMOUT0CLK_NAME "mout_g3d0"
+#define GPUCLK_NAME "sclk_g3d"
+#define CLK_DIV_STAT_G3D 0x1003C62C
+#define CLK_DESC "clk-divider-status"
+
+#define MALI_BOTTOMLOCK_VOL 900000
+
+typedef struct mali_runtime_resumeTag{
+ int clk;
+ int vol;
+}mali_runtime_resume_table;
+
+mali_runtime_resume_table mali_runtime_resume = {266, 900000};
+
+/* lock/unlock CPU freq by Mali */
+extern int cpufreq_lock_by_mali(unsigned int freq);
+extern void cpufreq_unlock_by_mali(void);
+
+static struct clk *ext_xtal_clock = 0;
+static struct clk *vpll_src_clock = 0;
+static struct clk *fout_vpll_clock = 0;
+static struct clk *sclk_vpll_clock = 0;
+
+static struct clk *mpll_clock = 0;
+static struct clk *mali_parent_clock = 0;
+static struct clk *mali_clock = 0;
+
+
+static unsigned int GPU_MHZ = 1000000;
+
+int mali_gpu_clk = 266;
+int mali_gpu_vol = 900000;
+
+#if MALI_DVFS_ENABLED
+#define MALI_DVFS_DEFAULT_STEP 0
+#endif
+#if MALI_VOLTAGE_LOCK
+int mali_lock_vol = 0;
+static _mali_osk_atomic_t voltage_lock_status;
+static mali_bool mali_vol_lock_flag = 0;
+#endif
+
+int gpu_power_state;
+static int bPoweroff;
+
+#ifdef CONFIG_REGULATOR
+struct regulator {
+ struct device *dev;
+ struct list_head list;
+ int uA_load;
+ int min_uV;
+ int max_uV;
+ char *supply_name;
+ struct device_attribute dev_attr;
+ struct regulator_dev *rdev;
+};
+
+struct regulator *g3d_regulator=NULL;
+#endif
+
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
+extern struct platform_device s5pv310_device_pd[];
+#else
+extern struct platform_device exynos4_device_pd[];
+#endif
+#endif
+
+mali_io_address clk_register_map=0;
+
+_mali_osk_lock_t *mali_dvfs_lock = 0;
+
+#ifdef CONFIG_REGULATOR
/*
 * mali_regulator_get_usecount() - read the vdd_g3d enable count.
 * NOTE(review): this reaches into rdev->use_count through a locally
 * redefined 'struct regulator' (see top of file) because the consumer
 * API exposes no such getter - fragile against regulator core changes.
 */
int mali_regulator_get_usecount(void)
{
	struct regulator_dev *rdev;

	if( IS_ERR_OR_NULL(g3d_regulator) )
	{
		MALI_DEBUG_PRINT(1, ("error on mali_regulator_get_usecount : g3d_regulator is null\n"));
		return 0;
	}
	rdev = g3d_regulator->rdev;
	return rdev->use_count;
}

/* Disable vdd_g3d and mark the GPU as powered off (bPoweroff = 1). */
void mali_regulator_disable(void)
{
	bPoweroff = 1;
	if( IS_ERR_OR_NULL(g3d_regulator) )
	{
		MALI_DEBUG_PRINT(1, ("error on mali_regulator_disable : g3d_regulator is null\n"));
		return;
	}
	regulator_disable(g3d_regulator);
	MALI_DEBUG_PRINT(1, ("regulator_disable -> use cnt: %d \n",mali_regulator_get_usecount()));
}

/* Enable vdd_g3d and mark the GPU as powered on (bPoweroff = 0). */
void mali_regulator_enable(void)
{
	bPoweroff = 0;
	if( IS_ERR_OR_NULL(g3d_regulator) )
	{
		MALI_DEBUG_PRINT(1, ("error on mali_regulator_enable : g3d_regulator is null\n"));
		return;
	}
	regulator_enable(g3d_regulator);
	MALI_DEBUG_PRINT(1, ("regulator_enable -> use cnt: %d \n",mali_regulator_get_usecount()));
}
+
+void mali_regulator_set_voltage(int min_uV, int max_uV)
+{
+ int voltage;
+#if !MALI_DVFS_ENABLED
+ min_uV = mali_gpu_vol;
+ max_uV = mali_gpu_vol;
+#endif
+#if MALI_VOLTAGE_LOCK
+ if (mali_vol_lock_flag == MALI_FALSE) {
+ if (min_uV < MALI_BOTTOMLOCK_VOL || max_uV < MALI_BOTTOMLOCK_VOL) {
+ min_uV = MALI_BOTTOMLOCK_VOL;
+ max_uV = MALI_BOTTOMLOCK_VOL;
+ }
+ } else if (_mali_osk_atomic_read(&voltage_lock_status) > 0 ) {
+ if (min_uV < mali_lock_vol || max_uV < mali_lock_vol) {
+#if MALI_DVFS_ENABLED
+ int mali_vol_get;
+ mali_vol_get = mali_vol_get_from_table(mali_lock_vol);
+ if (mali_vol_get) {
+ min_uV = mali_vol_get;
+ max_uV = mali_vol_get;
+ }
+#else
+ min_uV = mali_lock_vol;
+ max_uV = mali_lock_vol;
+#endif
+ }
+ }
+#endif
+
+ _mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if( IS_ERR_OR_NULL(g3d_regulator) )
+ {
+ MALI_DEBUG_PRINT(1, ("error on mali_regulator_set_voltage : g3d_regulator is null\n"));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(2, ("= regulator_set_voltage: %d, %d \n",min_uV, max_uV));
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_VOLTS,
+ min_uV, max_uV, 1, 0, 0);
+#endif
+
+ regulator_set_voltage(g3d_regulator,min_uV,max_uV);
+ voltage = regulator_get_voltage(g3d_regulator);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_VOLTS,
+ voltage, 0, 2, 0, 0);
+#endif
+
+ mali_gpu_vol = voltage;
+ MALI_DEBUG_PRINT(1, ("= regulator_get_voltage: %d \n",mali_gpu_vol));
+
+ _mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+}
+#endif
+
/* Return the current sclk_g3d rate in Hz. */
unsigned long mali_clk_get_rate(void)
{
	return clk_get_rate(mali_clock);
}
+
/*
 * mali_clk_get() - look up every clock needed for the selected source.
 *
 * bis_vpll == MALI_TRUE selects the VPLL chain (ext_xtal -> vpll_src ->
 * fout_vpll -> sclk_vpll -> mout_g3d1); MALI_FALSE selects the MPLL
 * chain (mout_mpll -> mout_g3d0).  sclk_g3d (mali_clock) is fetched in
 * either case.  Each clk_get is skipped if the handle is already held,
 * so the function is safe to call repeatedly; handles are released by
 * mali_clk_put().  Returns MALI_FALSE on the first lookup failure.
 */
mali_bool mali_clk_get(mali_bool bis_vpll)
{
	if (bis_vpll == MALI_TRUE)
	{
		if (ext_xtal_clock == NULL)
		{
			ext_xtal_clock = clk_get(NULL,EXTXTALCLK_NAME);
			if (IS_ERR(ext_xtal_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source ext_xtal_clock\n"));
				return MALI_FALSE;
			}
		}

		if (vpll_src_clock == NULL)
		{
			vpll_src_clock = clk_get(NULL,VPLLSRCCLK_NAME);
			if (IS_ERR(vpll_src_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source vpll_src_clock\n"));
				return MALI_FALSE;
			}
		}

		if (fout_vpll_clock == NULL)
		{
			fout_vpll_clock = clk_get(NULL,FOUTVPLLCLK_NAME);
			if (IS_ERR(fout_vpll_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source fout_vpll_clock\n"));
				return MALI_FALSE;
			}
		}

		if (sclk_vpll_clock == NULL)
		{
			sclk_vpll_clock = clk_get(NULL,SCLVPLLCLK_NAME);
			if (IS_ERR(sclk_vpll_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source sclk_vpll_clock\n"));
				return MALI_FALSE;
			}
		}

		if (mali_parent_clock == NULL)
		{
			mali_parent_clock = clk_get(NULL, GPUMOUT1CLK_NAME);

			if (IS_ERR(mali_parent_clock)) {
				MALI_PRINT( ( "MALI Error : failed to get source mali parent clock\n"));
				return MALI_FALSE;
			}
		}
	}
	else // mpll
	{
		if (mpll_clock == NULL)
		{
			mpll_clock = clk_get(NULL,MPLLCLK_NAME);

			if (IS_ERR(mpll_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source mpll clock\n"));
				return MALI_FALSE;
			}
		}

		if (mali_parent_clock == NULL)
		{
			mali_parent_clock = clk_get(NULL, GPUMOUT0CLK_NAME);

			if (IS_ERR(mali_parent_clock)) {
				MALI_PRINT( ( "MALI Error : failed to get source mali parent clock\n"));
				return MALI_FALSE;
			}
		}
	}

	// mali clock get always.
	if (mali_clock == NULL)
	{
		mali_clock = clk_get(NULL, GPUCLK_NAME);

		if (IS_ERR(mali_clock)) {
			MALI_PRINT( ("MALI Error : failed to get source mali clock\n"));
			return MALI_FALSE;
		}
	}

	return MALI_TRUE;
}
+
+void mali_clk_put(mali_bool binc_mali_clock)
+{
+ if (mali_parent_clock)
+ {
+ clk_put(mali_parent_clock);
+ mali_parent_clock = 0;
+ }
+
+ if (mpll_clock)
+ {
+ clk_put(mpll_clock);
+ mpll_clock = 0;
+ }
+
+ if (sclk_vpll_clock)
+ {
+ clk_put(sclk_vpll_clock);
+ sclk_vpll_clock = 0;
+ }
+
+ if (fout_vpll_clock)
+ {
+ clk_put(fout_vpll_clock);
+ fout_vpll_clock = 0;
+ }
+
+ if (vpll_src_clock)
+ {
+ clk_put(vpll_src_clock);
+ vpll_src_clock = 0;
+ }
+
+ if (ext_xtal_clock)
+ {
+ clk_put(ext_xtal_clock);
+ ext_xtal_clock = 0;
+ }
+
+ if (binc_mali_clock == MALI_TRUE && mali_clock)
+ {
+ clk_put(mali_clock);
+ mali_clock = 0;
+ }
+
+}
+
+
+mali_bool mali_clk_set_rate(unsigned int clk, unsigned int mhz)
+{
+ unsigned long rate = 0;
+ mali_bool bis_vpll = MALI_TRUE;
+
+#ifndef CONFIG_VPLL_USE_FOR_TVENC
+ bis_vpll = MALI_TRUE;
+#endif
+
+#if !MALI_DVFS_ENABLED
+ clk = mali_gpu_clk;
+#endif
+
+ _mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (mali_clk_get(bis_vpll) == MALI_FALSE)
+ return MALI_FALSE;
+
+ rate = (unsigned long)clk * (unsigned long)mhz;
+ MALI_DEBUG_PRINT(3,("= clk_set_rate : %d , %d \n",clk, mhz ));
+
+ if (bis_vpll)
+ {
+ clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
+ clk_set_parent(vpll_src_clock, ext_xtal_clock);
+ clk_set_parent(sclk_vpll_clock, fout_vpll_clock);
+
+ clk_set_parent(mali_parent_clock, sclk_vpll_clock);
+ clk_set_parent(mali_clock, mali_parent_clock);
+ }
+ else
+ {
+ clk_set_parent(mali_parent_clock, mpll_clock);
+ clk_set_parent(mali_clock, mali_parent_clock);
+ }
+
+ if (clk_enable(mali_clock) < 0)
+ return MALI_FALSE;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_FREQ,
+ rate, 0, 0, 0, 0);
+#endif
+
+ clk_set_rate(mali_clock, rate);
+ rate = clk_get_rate(mali_clock);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_FREQ,
+ rate, 1, 0, 0, 0);
+#endif
+
+ if (bis_vpll)
+ mali_gpu_clk = (int)(rate / mhz);
+ else
+ mali_gpu_clk = (int)((rate + 500000) / mhz);
+
+ GPU_MHZ = mhz;
+ MALI_DEBUG_PRINT(3,("= clk_get_rate: %d \n",mali_gpu_clk));
+
+ mali_clk_put(MALI_FALSE);
+
+ _mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+
+ return MALI_TRUE;
+}
+
+static mali_bool init_mali_clock(void)
+{
+ mali_bool ret = MALI_TRUE;
+
+ gpu_power_state = 0;
+
+ if (mali_clock != 0)
+ return ret; // already initialized
+
+ mali_dvfs_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE
+ | _MALI_OSK_LOCKFLAG_ONELOCK, 0, 0);
+ if (mali_dvfs_lock == NULL)
+ return _MALI_OSK_ERR_FAULT;
+
+ if (mali_clk_set_rate(mali_gpu_clk, GPU_MHZ) == MALI_FALSE)
+ {
+ ret = MALI_FALSE;
+ goto err_clock_get;
+ }
+
+ MALI_PRINT(("init_mali_clock mali_clock %p \n", mali_clock));
+
+
+#ifdef CONFIG_REGULATOR
+#if USING_MALI_PMM
+ g3d_regulator = regulator_get(&mali_gpu_device.dev, "vdd_g3d");
+#else
+ g3d_regulator = regulator_get(NULL, "vdd_g3d");
+#endif
+
+ if (IS_ERR(g3d_regulator))
+ {
+ MALI_PRINT( ("MALI Error : failed to get vdd_g3d\n"));
+ ret = MALI_FALSE;
+ goto err_regulator;
+ }
+
+ regulator_enable(g3d_regulator);
+
+ MALI_DEBUG_PRINT(1, ("= regulator_enable -> use cnt: %d \n",mali_regulator_get_usecount()));
+ mali_regulator_set_voltage(mali_gpu_vol, mali_gpu_vol);
+#endif
+
+ MALI_DEBUG_PRINT(2, ("MALI Clock is set at mali driver\n"));
+
+
+ MALI_DEBUG_PRINT(3,("::clk_put:: %s mali_parent_clock - normal\n", __FUNCTION__));
+ MALI_DEBUG_PRINT(3,("::clk_put:: %s mpll_clock - normal\n", __FUNCTION__));
+
+ mali_clk_put(MALI_FALSE);
+
+ return MALI_TRUE;
+
+
+#ifdef CONFIG_REGULATOR
+err_regulator:
+ regulator_put(g3d_regulator);
+#endif
+
+err_clock_get:
+ mali_clk_put(MALI_TRUE);
+
+ return ret;
+}
+
+static mali_bool deinit_mali_clock(void)
+{
+ if (mali_clock == 0)
+ return MALI_TRUE;
+
+#ifdef CONFIG_REGULATOR
+ if (g3d_regulator)
+ {
+ regulator_put(g3d_regulator);
+ g3d_regulator=NULL;
+ }
+#endif
+
+ mali_clk_put(MALI_TRUE);
+
+ return MALI_TRUE;
+}
/*
 * enable_mali_clocks() - ungate the GPU clock on runtime resume and
 * restore an appropriate frequency/voltage.
 *
 * With runtime PM + DVFS: keep the pre-suspend frequency if DVFS is
 * under manual control or the cached clock is at least the resume
 * default; otherwise resume at the mali_runtime_resume operating point.
 * NOTE(review): clk_enable() result is printed but ignored, and
 * set_mali_dvfs_current_step(5) exceeds MAX_MALI_DVFS_STEPS (4) -
 * presumably a sentinel value understood by the governor; confirm
 * against the pegasus DVFS code before changing.
 */
static _mali_osk_errcode_t enable_mali_clocks(void)
{
	int err;
	err = clk_enable(mali_clock);
	MALI_DEBUG_PRINT(3,("enable_mali_clocks mali_clock %p error %d \n", mali_clock, err));

#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
#if MALI_DVFS_ENABLED
	// set clock rate
	if (get_mali_dvfs_control_status() != 0 || mali_gpu_clk >= mali_runtime_resume.clk)
		mali_clk_set_rate(mali_gpu_clk, GPU_MHZ);
	else {
		mali_regulator_set_voltage(mali_runtime_resume.vol, mali_runtime_resume.vol);
		mali_clk_set_rate(mali_runtime_resume.clk, GPU_MHZ);
	}
	if (mali_gpu_clk <= mali_runtime_resume.clk)
		set_mali_dvfs_current_step(5);
	/* lock/unlock CPU freq by Mali */
	if (mali_gpu_clk == 440)
		err = cpufreq_lock_by_mali(1200);
#else
	mali_regulator_set_voltage(mali_runtime_resume.vol, mali_runtime_resume.vol);
	mali_clk_set_rate(mali_runtime_resume.clk, GPU_MHZ);
#endif
#else
	mali_clk_set_rate(mali_gpu_clk, GPU_MHZ);
#endif
	MALI_SUCCESS;
}

/*
 * disable_mali_clocks() - gate the GPU clock on runtime suspend and
 * release any CPU-frequency lock Mali was holding.
 */
static _mali_osk_errcode_t disable_mali_clocks(void)
{
	clk_disable(mali_clock);
	MALI_DEBUG_PRINT(3,("disable_mali_clocks mali_clock %p \n", mali_clock));

	/* lock/unlock CPU freq by Mali */
	cpufreq_unlock_by_mali();
	MALI_SUCCESS;
}
+
/*
 * set_mali_parent_power_domain() - parent the Mali platform device under
 * the G3D power domain device so runtime PM propagates; no-op unless
 * MALI_PMM_RUNTIME_JOB_CONTROL_ON.
 */
void set_mali_parent_power_domain(struct platform_device* dev)
{
#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
	dev->dev.parent = &s5pv310_device_pd[PD_G3D].dev;
#else
	dev->dev.parent = &exynos4_device_pd[PD_G3D].dev;
#endif
#endif
}

/*
 * g3d_power_domain_control() - switch the G3D power domain on/off.
 * Uses the PMM activate/idle hooks when runtime job control is on;
 * otherwise writes the PMU configuration register directly and polls
 * its status register for up to 10 * 100us.
 * NOTE(review): timeout paths return -ETIMEDOUT from an
 * _mali_osk_errcode_t function - confirm against OSK error codes.
 */
_mali_osk_errcode_t g3d_power_domain_control(int bpower_on)
{
	if (bpower_on)
	{
#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
		MALI_DEBUG_PRINT(3,("_mali_osk_pmm_dev_activate \n"));
		_mali_osk_pmm_dev_activate();
#else //MALI_PMM_RUNTIME_JOB_CONTROL_ON
		void __iomem *status;
		u32 timeout;
		__raw_writel(S5P_INT_LOCAL_PWR_EN, S5P_PMU_G3D_CONF);
		status = S5P_PMU_G3D_CONF + 0x4;

		timeout = 10;
		while ((__raw_readl(status) & S5P_INT_LOCAL_PWR_EN)
			!= S5P_INT_LOCAL_PWR_EN) {
			if (timeout == 0) {
				MALI_PRINTF(("Power domain enable failed.\n"));
				return -ETIMEDOUT;
			}
			timeout--;
			_mali_osk_time_ubusydelay(100);
		}
#endif //MALI_PMM_RUNTIME_JOB_CONTROL_ON
	}
	else
	{
#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
		MALI_DEBUG_PRINT( 4,("_mali_osk_pmm_dev_idle\n"));
		_mali_osk_pmm_dev_idle();

#else //MALI_PMM_RUNTIME_JOB_CONTROL_ON
		void __iomem *status;
		u32 timeout;
		__raw_writel(0, S5P_PMU_G3D_CONF);

		status = S5P_PMU_G3D_CONF + 0x4;
		/* Wait max 1ms */
		timeout = 10;
		while (__raw_readl(status) & S5P_INT_LOCAL_PWR_EN)
		{
			if (timeout == 0) {
				MALI_PRINTF(("Power domain disable failed.\n" ));
				return -ETIMEDOUT;
			}
			timeout--;
			_mali_osk_time_ubusydelay( 100);
		}
#endif //MALI_PMM_RUNTIME_JOB_CONTROL_ON
	}

	MALI_SUCCESS;
}
+
/*
 * mali_platform_init() - platform entry point: bring up clocks, the
 * voltage-lock counter, and (when compiled in) the DVFS governor with
 * its mapped clock-divider status register.  Fails only on clock init;
 * a DVFS init failure is merely logged.
 */
_mali_osk_errcode_t mali_platform_init()
{
	MALI_CHECK(init_mali_clock(), _MALI_OSK_ERR_FAULT);
#if MALI_VOLTAGE_LOCK
	_mali_osk_atomic_init(&voltage_lock_status, 0);
#endif
#if MALI_DVFS_ENABLED
	if (!clk_register_map) clk_register_map = _mali_osk_mem_mapioregion( CLK_DIV_STAT_G3D, 0x20, CLK_DESC );
	if(!init_mali_dvfs_status(MALI_DVFS_DEFAULT_STEP))
		MALI_DEBUG_PRINT(1, ("mali_platform_init failed\n"));
#endif

	MALI_SUCCESS;
}

/*
 * mali_platform_deinit() - tear down clocks, the voltage-lock counter,
 * the DVFS governor and the mapped register window.
 */
_mali_osk_errcode_t mali_platform_deinit()
{
	deinit_mali_clock();
#if MALI_VOLTAGE_LOCK
	_mali_osk_atomic_term(&voltage_lock_status);
#endif
#if MALI_DVFS_ENABLED
	deinit_mali_dvfs_status();
	if (clk_register_map )
	{
		_mali_osk_mem_unmapioregion(CLK_DIV_STAT_G3D, 0x20, clk_register_map);
		clk_register_map=0;
	}
#endif

	MALI_SUCCESS;
}
+
/*
 * mali_platform_powerdown() - clear the given cores from the power-state
 * bitmask and gate the GPU clocks once no core remains powered.
 */
_mali_osk_errcode_t mali_platform_powerdown(u32 cores)
{
	MALI_DEBUG_PRINT(3,("power down is called in mali_platform_powerdown state %x core %x \n", gpu_power_state, cores));

	if (gpu_power_state != 0) // power down after state is 0
	{
		gpu_power_state = gpu_power_state & (~cores);
		if (gpu_power_state == 0)
		{
			MALI_DEBUG_PRINT( 3,("disable clock\n"));
			disable_mali_clocks();
		}
	}
	else
	{
		/* Already fully off: nothing to gate, just log. */
		MALI_PRINT(("mali_platform_powerdown gpu_power_state == 0 and cores %x \n", cores));
	}

	MALI_SUCCESS;
}

/*
 * mali_platform_powerup() - add the given cores to the power-state
 * bitmask; when going from fully-off to on, re-enable the GPU clocks.
 */
_mali_osk_errcode_t mali_platform_powerup(u32 cores)
{
	MALI_DEBUG_PRINT(3,("power up is called in mali_platform_powerup state %x core %x \n", gpu_power_state, cores));

	if (gpu_power_state == 0) // power up only before state is 0
	{
		gpu_power_state = gpu_power_state | cores;

		if (gpu_power_state != 0)
		{
			MALI_DEBUG_PRINT(4,("enable clock \n"));
			enable_mali_clocks();
		}
	}
	else
	{
		/* Some cores already powered: just record the new ones. */
		gpu_power_state = gpu_power_state | cores;
	}

	MALI_SUCCESS;
}
+
/*
 * mali_gpu_utilization_handler() - periodic utilization callback from
 * the Mali core; feeds the DVFS governor unless the GPU is powered off.
 */
void mali_gpu_utilization_handler(u32 utilization)
{
	if (bPoweroff==0)
	{
#if MALI_DVFS_ENABLED
		if(!mali_dvfs_handler(utilization))
			MALI_DEBUG_PRINT(1,( "error on mali dvfs status in utilization\n"));
#endif
	}
}

#if MALI_POWER_MGMT_TEST_SUITE
/* Test-suite stub: report all 12 power-up/down bits as set. */
u32 pmu_get_power_up_down_info(void)
{
	return 4095;
}

#endif

/* No-op on this platform; power handled via powerup/powerdown. */
_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode)
{
	MALI_SUCCESS;
}
+
#if MALI_VOLTAGE_LOCK
/*
 * mali_voltage_lock_push() - impose a minimum GPU voltage (lock_vol).
 * Only a single lock may be held at a time; returns the new count or -1
 * when the counter is invalid or a lock is already pushed.
 */
int mali_voltage_lock_push(int lock_vol)
{
	int prev_status = _mali_osk_atomic_read(&voltage_lock_status);

	if (prev_status < 0) {
		MALI_PRINT(("gpu voltage lock status is not valid for push\n"));
		return -1;
	}
	if (prev_status == 0) {
		mali_lock_vol = lock_vol;
		/* Raise immediately if the GPU is currently below the floor. */
		if (mali_gpu_vol < mali_lock_vol)
			mali_regulator_set_voltage(mali_lock_vol, mali_lock_vol);
	} else {
		MALI_PRINT(("gpu voltage lock status is already pushed, current lock voltage : %d\n", mali_lock_vol));
		return -1;
	}

	return _mali_osk_atomic_inc_return(&voltage_lock_status);
}

/*
 * mali_voltage_lock_pop() - release the voltage lock; returns the new
 * count or -1 when no lock is held.
 */
int mali_voltage_lock_pop(void)
{
	if (_mali_osk_atomic_read(&voltage_lock_status) <= 0) {
		MALI_PRINT(("gpu voltage lock status is not valid for pop\n"));
		return -1;
	}
	return _mali_osk_atomic_dec_return(&voltage_lock_status);
}

/* Arm the voltage-lock mechanism (checked in mali_regulator_set_voltage). */
int mali_voltage_lock_init(void)
{
	mali_vol_lock_flag = MALI_TRUE;

	MALI_SUCCESS;
}
#endif
diff --git a/drivers/media/video/samsung/mali/platform/pegasus-m400/mali_platform_dvfs.c b/drivers/media/video/samsung/mali/platform/pegasus-m400/mali_platform_dvfs.c
new file mode 100644
index 0000000..8293d6e
--- /dev/null
+++ b/drivers/media/video/samsung/mali/platform/pegasus-m400/mali_platform_dvfs.c
@@ -0,0 +1,722 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_platform_dvfs.c
+ * Platform specific Mali driver dvfs functions
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+
+#include <asm/io.h>
+
+#include "mali_device_pause_resume.h"
+#include <linux/workqueue.h>
+
+#define MAX_MALI_DVFS_STEPS 4
+#define MALI_DVFS_WATING 10 // msec
+
+#ifdef CONFIG_CPU_FREQ
+#include <mach/asv.h>
+#define EXYNOS4_ASV_ENABLED
+#endif
+
+static int bMaliDvfsRun=0;
+
+static _mali_osk_atomic_t bottomlock_status;
+static int bottom_lock_step;
+
+/* One DVFS operating point: GPU clock in MHz, source clock rate
+ * (unit unclear from here — TODO confirm) and core voltage in uV. */
+typedef struct mali_dvfs_tableTag{
+	unsigned int clock;
+	unsigned int freq;
+	unsigned int vol;
+}mali_dvfs_table;
+
+/* Which step is active, plus a shortcut to its table entry. */
+typedef struct mali_dvfs_statusTag{
+	unsigned int currentStep;
+	mali_dvfs_table * pCurrentDvfs;
+
+}mali_dvfs_currentstatus;
+
+/* Utilization thresholds (percent of full load) that trigger a
+ * step-down / step-up decision for one step. */
+typedef struct mali_dvfs_thresholdTag{
+	unsigned int downthreshold;
+	unsigned int upthreshold;
+}mali_dvfs_threshold_table;
+
+/* Number of governor ticks a step is held before the next change. */
+typedef struct mali_dvfs_staycount{
+	unsigned int staycount;
+}mali_dvfs_staycount_table;
+
+/* Externally tunable clock/voltage pair per step (see step0_clk etc.). */
+typedef struct mali_dvfs_stepTag{
+	int clk;
+	int vol;
+}mali_dvfs_step;
+
+mali_dvfs_step step[MALI_DVFS_STEPS]={
+ /*step 0 clk*/ {160, 875000},
+#if (MALI_DVFS_STEPS > 1)
+ /*step 1 clk*/ {266, 900000},
+#if (MALI_DVFS_STEPS > 2)
+ /*step 2 clk*/ {350, 950000},
+#if (MALI_DVFS_STEPS > 3)
+ /*step 3 clk*/ {440, 1025000}
+#endif
+#endif
+#endif
+};
+
+mali_dvfs_staycount_table mali_dvfs_staycount[MALI_DVFS_STEPS]={
+ /*step 0*/{0},
+#if (MALI_DVFS_STEPS > 1)
+ /*step 1*/{0},
+#if (MALI_DVFS_STEPS > 2)
+ /*step 2*/{0},
+#if (MALI_DVFS_STEPS > 3)
+ /*step 3*/{0}
+#endif
+#endif
+#endif
+};
+
+/* dvfs information */
+// L0 = 440Mhz, 1.025V
+// L1 = 350Mhz, 0.95V
+// L2 = 266Mhz, 0.90V
+// L3 = 160Mhz, 0.875V
+
+int step0_clk = 160;
+int step0_vol = 875000;
+#if (MALI_DVFS_STEPS > 1)
+int step1_clk = 266;
+int step1_vol = 900000;
+int step0_up = 70;
+int step1_down = 62;
+#if (MALI_DVFS_STEPS > 2)
+int step2_clk = 350;
+int step2_vol = 950000;
+int step1_up = 90;
+int step2_down = 85;
+#if (MALI_DVFS_STEPS > 3)
+int step3_clk = 440;
+int step3_vol = 1025000;
+int step2_up = 90;
+int step3_down = 90;
+#endif
+#endif
+#endif
+
+mali_dvfs_table mali_dvfs_all[MAX_MALI_DVFS_STEPS]={
+ {160 ,1000000 , 875000},
+ {266 ,1000000 , 900000},
+ {350 ,1000000 , 950000},
+ {440 ,1000000 , 1025000} };
+
+mali_dvfs_table mali_dvfs[MALI_DVFS_STEPS]={
+ {160 ,1000000 , 875000},
+#if (MALI_DVFS_STEPS > 1)
+ {266 ,1000000 , 900000},
+#if (MALI_DVFS_STEPS > 2)
+ {350 ,1000000 , 950000},
+#if (MALI_DVFS_STEPS > 3)
+ {440 ,1000000 ,1025000}
+#endif
+#endif
+#endif
+};
+
+mali_dvfs_threshold_table mali_dvfs_threshold[MALI_DVFS_STEPS]={
+ {0 , 70},
+#if (MALI_DVFS_STEPS > 1)
+ {62 , 90},
+#if (MALI_DVFS_STEPS > 2)
+ {85 , 90},
+#if (MALI_DVFS_STEPS > 3)
+ {90 ,100}
+#endif
+#endif
+#endif
+};
+
+#ifdef EXYNOS4_ASV_ENABLED
+#define ASV_LEVEL 12 /* ASV0, 1, 11 is reserved */
+
+static unsigned int asv_3d_volt_9_table[MALI_DVFS_STEPS][ASV_LEVEL] = {
+ { 950000, 925000, 900000, 900000, 875000, 875000, 875000, 875000, 850000, 850000, 850000, 850000}, /* L3(160Mhz) */
+#if (MALI_DVFS_STEPS > 1)
+ { 975000, 950000, 925000, 925000, 925000, 900000, 900000, 875000, 875000, 875000, 875000, 850000}, /* L2(266Mhz) */
+#if (MALI_DVFS_STEPS > 2)
+ { 1050000, 1025000, 1000000, 1000000, 975000, 950000, 950000, 950000, 925000, 925000, 925000, 900000}, /* L1(350Mhz) */
+#if (MALI_DVFS_STEPS > 3)
+ { 1100000, 1075000, 1050000, 1050000, 1050000, 1025000, 1025000, 1000000, 1000000, 1000000, 975000, 950000}, /* L0(440Mhz) */
+#endif
+#endif
+#endif
+};
+#endif
+
+/*dvfs status*/
+mali_dvfs_currentstatus maliDvfsStatus;
+int mali_dvfs_control=0;
+
+static u32 mali_dvfs_utilization = 255;
+
+static void mali_dvfs_work_handler(struct work_struct *w);
+
+static struct workqueue_struct *mali_dvfs_wq = 0;
+extern mali_io_address clk_register_map;
+extern _mali_osk_lock_t *mali_dvfs_lock;
+
+int mali_runtime_resumed = -1;
+
+static DECLARE_WORK(mali_dvfs_work, mali_dvfs_work_handler);
+
+/* lock/unlock CPU freq by Mali */
+#include <linux/types.h>
+#include <mach/cpufreq.h>
+
+atomic_t mali_cpufreq_lock;
+
+/*
+ * Pin the CPU frequency to at least @freq MHz on behalf of the GPU,
+ * so CPU/memory throughput keeps up while the GPU runs at top clock.
+ * Idempotent: does nothing if the lock is already held.
+ * Returns 0 on success (or when cpufreq locking is compiled out),
+ * -EINVAL when the level lookup or the lock itself fails.
+ */
+int cpufreq_lock_by_mali(unsigned int freq)
+{
+#ifdef CONFIG_EXYNOS4_CPUFREQ
+	unsigned int level;
+
+	if (atomic_read(&mali_cpufreq_lock) == 0) {
+		/* exynos_cpufreq_get_level() takes kHz */
+		if (exynos_cpufreq_get_level(freq * 1000, &level)) {
+			/* fix: messages were missing the terminating newline,
+			 * which merges them with the next printk line */
+			printk(KERN_ERR
+			       "Mali: failed to get cpufreq level for %dMHz\n",
+			       freq);
+			return -EINVAL;
+		}
+
+		if (exynos_cpufreq_lock(DVFS_LOCK_ID_G3D, level)) {
+			printk(KERN_ERR
+			       "Mali: failed to cpufreq lock for L%d\n", level);
+			return -EINVAL;
+		}
+
+		atomic_set(&mali_cpufreq_lock, 1);
+		printk(KERN_DEBUG "Mali: cpufreq locked on <%d>%dMHz\n", level,
+		       freq);
+	}
+#endif
+	return 0;
+}
+
+/* Drop the CPU frequency floor taken by cpufreq_lock_by_mali(), if any. */
+void cpufreq_unlock_by_mali(void)
+{
+#ifdef CONFIG_EXYNOS4_CPUFREQ
+/* #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_ARCH_EXYNOS4) */
+	if (atomic_read(&mali_cpufreq_lock) == 1) {
+		exynos_cpufreq_lock_free(DVFS_LOCK_ID_G3D);
+		atomic_set(&mali_cpufreq_lock, 0);
+		printk(KERN_DEBUG "Mali: cpufreq locked off\n");
+	}
+#endif
+}
+
+/* Read the currently applied DVFS step index (0..MALI_DVFS_STEPS-1). */
+static unsigned int get_mali_dvfs_status(void)
+{
+	return maliDvfsStatus.currentStep;
+}
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+/* Expose the external DVFS override value (0 = governor in control). */
+int get_mali_dvfs_control_status(void)
+{
+	return mali_dvfs_control;
+}
+
+/*
+ * Record @step as the active DVFS step, under the dvfs lock.
+ * Values >= MAX_MALI_DVFS_STEPS wrap (modulo) and additionally stash
+ * the wrapped step in mali_runtime_resumed so the next governor
+ * decision restores it after a runtime resume.  Always MALI_TRUE.
+ */
+mali_bool set_mali_dvfs_current_step(unsigned int step)
+{
+	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+	maliDvfsStatus.currentStep = step % MAX_MALI_DVFS_STEPS;
+	if (step >= MAX_MALI_DVFS_STEPS)
+		mali_runtime_resumed = maliDvfsStatus.currentStep;
+	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
+	return MALI_TRUE;
+}
+#endif
+/*
+ * Apply DVFS step @step: program regulator voltage and GPU clock.
+ * On a boost the voltage is raised before the clock; on a drop the
+ * clock is lowered first — so the core never runs faster than the
+ * current voltage supports.  Returns MALI_FALSE only when the
+ * regulator is not held (CONFIG_REGULATOR builds), MALI_TRUE otherwise.
+ */
+static mali_bool set_mali_dvfs_status(u32 step,mali_bool boostup)
+{
+	u32 validatedStep=step;
+	int err;
+
+#ifdef CONFIG_REGULATOR
+	if (mali_regulator_get_usecount() == 0) {
+		MALI_DEBUG_PRINT(1, ("regulator use_count is 0 \n"));
+		return MALI_FALSE;
+	}
+#endif
+
+	if (boostup) {
+#ifdef CONFIG_REGULATOR
+		/* raise the voltage before speeding the clock up */
+		mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+		mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+	} else {
+		/* slow the clock down before dropping the voltage */
+		mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+#ifdef CONFIG_REGULATOR
+		mali_regulator_set_voltage(mali_dvfs[step].vol, mali_dvfs[step].vol);
+#endif
+	}
+
+#ifdef EXYNOS4_ASV_ENABLED
+	/* adaptive body bias: low-voltage ABB mode only at the 160MHz step */
+	if (mali_dvfs[step].clock == 160)
+		exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_100V);
+	else
+		exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_130V);
+#endif
+
+	set_mali_dvfs_current_step(validatedStep);
+	/*for future use*/
+	maliDvfsStatus.pCurrentDvfs = &mali_dvfs[validatedStep];
+
+	/* pin the CPU at 1200MHz while the GPU runs its top (440MHz) step */
+	if (mali_dvfs[step].clock == 440) {
+		/* fix: the lock result was assigned but never examined */
+		err = cpufreq_lock_by_mali(1200);
+		if (err)
+			MALI_DEBUG_PRINT(1, ("failed to lock cpufreq for mali (%d)\n", err));
+	} else {
+		cpufreq_unlock_by_mali();
+	}
+
+	return MALI_TRUE;
+}
+
+/*
+ * Busy-wait until the clock/voltage transition has settled: polls bit
+ * 15 of the register at clk_register_map offset 0 until it reads 0
+ * (presumably a PLL/mux "in transition" flag — TODO confirm).
+ * NOTE(review): the @msec argument is unused and the loop has no
+ * timeout; if the hardware bit never clears this spins forever.
+ */
+static void mali_platform_wating(u32 msec)
+{
+	/*sample wating
+	change this in the future with proper check routine.
+	*/
+	unsigned int read_val;
+	while(1) {
+		read_val = _mali_osk_mem_ioread32(clk_register_map, 0x00);
+		if ((read_val & 0x8000)==0x0000) break;
+		_mali_osk_time_ubusydelay(100); // 1000 -> 100 : 20101218
+	}
+	/* _mali_osk_time_ubusydelay(msec*1000);*/
+}
+
+/* Switch to DVFS step @step, then wait for clock/voltage to settle. */
+static mali_bool change_mali_dvfs_status(u32 step, mali_bool boostup )
+{
+	mali_bool applied;
+
+	MALI_DEBUG_PRINT(1, ("> change_mali_dvfs_status: %d, %d \n",step, boostup));
+
+	applied = set_mali_dvfs_status(step, boostup);
+	if (applied == MALI_FALSE) {
+		MALI_DEBUG_PRINT(1, ("error on set_mali_dvfs_status: %d, %d \n",step, boostup));
+		return MALI_FALSE;
+	}
+
+	/* wait until the new clock and voltage are stable */
+	mali_platform_wating(MALI_DVFS_WATING); /* msec */
+
+	return MALI_TRUE;
+}
+
+#ifdef EXYNOS4_ASV_ENABLED
+extern unsigned int exynos_result_of_asv;
+
+/*
+ * Rewrite the per-step voltages from the ASV (Adaptive Supply Voltage)
+ * table for this chip's fused speed group.  Always returns MALI_TRUE.
+ */
+static mali_bool mali_dvfs_table_update(void)
+{
+	unsigned int i;
+	unsigned int asv_group = exynos_result_of_asv;
+
+	/* fix: guard against an out-of-range fused ASV group —
+	 * asv_3d_volt_9_table has only ASV_LEVEL columns, so an
+	 * unchecked index would read past the row */
+	if (asv_group >= ASV_LEVEL) {
+		MALI_PRINT(("invalid exynos_result_of_asv %d, using group 0\n", asv_group));
+		asv_group = 0;
+	}
+
+	for (i = 0; i < MALI_DVFS_STEPS; i++) {
+		MALI_PRINT((":::exynos_result_of_asv : %d\n", exynos_result_of_asv));
+		mali_dvfs[i].vol = asv_3d_volt_9_table[i][asv_group];
+		MALI_PRINT(("mali_dvfs[%d].vol = %d\n", i, mali_dvfs[i].vol));
+	}
+
+	return MALI_TRUE;
+}
+#endif
+
+/*
+ * DVFS governor decision: map the last utilization sample (0..255) to
+ * the next step index.  Three modes:
+ *  - normal (mali_dvfs_control == 0): hysteresis step up/down against
+ *    the per-step thresholds;
+ *  - mali_dvfs_control == 999: reset the table to defaults (re-applying
+ *    ASV voltages) and fall back to step 0;
+ *  - any other nonzero value: pin every step to the quantized clock the
+ *    user requested.
+ * Finally the result is clamped up to any active bottom-lock floor.
+ * NOTE(review): 'level' and 'mali_dvfs_clk' are function-static, so
+ * this must only ever be called from one context (the DVFS worker).
+ */
+static unsigned int decideNextStatus(unsigned int utilization)
+{
+	static unsigned int level = 0; // 0:stay, 1:up
+	static int mali_dvfs_clk = 0;
+
+	/* a runtime resume recorded a step to restore; consume it */
+	if (mali_runtime_resumed >= 0) {
+		level = mali_runtime_resumed;
+		mali_runtime_resumed = -1;
+		return level;
+	}
+
+	/* sanity: misordered thresholds would make the hysteresis oscillate */
+	if (mali_dvfs_threshold[maliDvfsStatus.currentStep].upthreshold
+			<= mali_dvfs_threshold[maliDvfsStatus.currentStep].downthreshold) {
+		MALI_PRINT(("upthreadshold is smaller than downthreshold: %d < %d\n",
+				mali_dvfs_threshold[maliDvfsStatus.currentStep].upthreshold,
+				mali_dvfs_threshold[maliDvfsStatus.currentStep].downthreshold));
+		return level;
+	}
+
+	if (!mali_dvfs_control && level == maliDvfsStatus.currentStep) {
+		/* thresholds are percentages; utilization is scaled 0..255 */
+		if (utilization > (int)(255 * mali_dvfs_threshold[maliDvfsStatus.currentStep].upthreshold / 100) &&
+				level < MALI_DVFS_STEPS - 1) {
+			level++;
+		}
+		if (utilization < (int)(255 * mali_dvfs_threshold[maliDvfsStatus.currentStep].downthreshold / 100) &&
+				level > 0) {
+			level--;
+		}
+	} else if (mali_dvfs_control == 999) {
+		/* magic value: restore the default table and drop to step 0 */
+		int i = 0;
+		for (i = 0; i < MALI_DVFS_STEPS; i++) {
+			step[i].clk = mali_dvfs_all[i].clock;
+		}
+#ifdef EXYNOS4_ASV_ENABLED
+		mali_dvfs_table_update();
+#endif
+		i = 0;
+		for (i = 0; i < MALI_DVFS_STEPS; i++) {
+			mali_dvfs[i].clock = step[i].clk;
+		}
+		mali_dvfs_control = 0;
+		level = 0;
+
+		step0_clk = step[0].clk;
+		change_dvfs_tableset(step0_clk, 0);
+#if (MALI_DVFS_STEPS > 1)
+		step1_clk = step[1].clk;
+		change_dvfs_tableset(step1_clk, 1);
+#if (MALI_DVFS_STEPS > 2)
+		step2_clk = step[2].clk;
+		change_dvfs_tableset(step2_clk, 2);
+#if (MALI_DVFS_STEPS > 3)
+		step3_clk = step[3].clk;
+		change_dvfs_tableset(step3_clk, 3);
+#endif
+#endif
+#endif
+	} else if (mali_dvfs_control != mali_dvfs_clk && mali_dvfs_control != 999) {
+		/* user pinned a clock: quantize it to one of the four steps
+		 * and force every step entry to that clock */
+		if (mali_dvfs_control < mali_dvfs_all[1].clock && mali_dvfs_control > 0) {
+			int i = 0;
+			for (i = 0; i < MALI_DVFS_STEPS; i++) {
+				step[i].clk = mali_dvfs_all[0].clock;
+			}
+			maliDvfsStatus.currentStep = 0;
+		} else if (mali_dvfs_control < mali_dvfs_all[2].clock && mali_dvfs_control >= mali_dvfs_all[1].clock) {
+			int i = 0;
+			for (i = 0; i < MALI_DVFS_STEPS; i++) {
+				step[i].clk = mali_dvfs_all[1].clock;
+			}
+			maliDvfsStatus.currentStep = 1;
+		} else if (mali_dvfs_control < mali_dvfs_all[3].clock && mali_dvfs_control >= mali_dvfs_all[2].clock) {
+			int i = 0;
+			for (i = 0; i < MALI_DVFS_STEPS; i++) {
+				step[i].clk = mali_dvfs_all[2].clock;
+			}
+			maliDvfsStatus.currentStep = 2;
+		} else {
+			int i = 0;
+			for (i = 0; i < MALI_DVFS_STEPS; i++) {
+				step[i].clk = mali_dvfs_all[3].clock;
+			}
+			maliDvfsStatus.currentStep = 3;
+		}
+		step0_clk = step[0].clk;
+		change_dvfs_tableset(step0_clk, 0);
+#if (MALI_DVFS_STEPS > 1)
+		step1_clk = step[1].clk;
+		change_dvfs_tableset(step1_clk, 1);
+#if (MALI_DVFS_STEPS > 2)
+		step2_clk = step[2].clk;
+		change_dvfs_tableset(step2_clk, 2);
+#if (MALI_DVFS_STEPS > 3)
+		step3_clk = step[3].clk;
+		change_dvfs_tableset(step3_clk, 3);
+#endif
+#endif
+#endif
+		level = maliDvfsStatus.currentStep;
+	}
+
+	/* remember the override so repeated values don't re-trigger above */
+	mali_dvfs_clk = mali_dvfs_control;
+
+	/* honour any bottom-lock floor */
+	if (_mali_osk_atomic_read(&bottomlock_status) > 0) {
+		if (level < bottom_lock_step)
+			level = bottom_lock_step;
+	}
+
+	return level;
+}
+
+/*
+ * One DVFS governor tick: decide the next step from @utilization and
+ * apply it if it differs from the current step and the stay-count for
+ * the current step has expired.  On the very first call (ASV builds)
+ * the voltage table is patched and step 1 is forced instead.
+ * Returns MALI_FALSE only when applying the new step failed.
+ */
+static mali_bool mali_dvfs_status(u32 utilization)
+{
+	unsigned int nextStatus = 0;
+	unsigned int curStatus = 0;
+	mali_bool boostup = MALI_FALSE;
+	static int stay_count = 0;
+#ifdef EXYNOS4_ASV_ENABLED
+	static mali_bool asv_applied = MALI_FALSE;
+#endif
+
+	MALI_DEBUG_PRINT(1, ("> mali_dvfs_status: %d \n",utilization));
+#ifdef EXYNOS4_ASV_ENABLED
+	/* one-time: load ASV voltages and start from step 1 */
+	if (asv_applied == MALI_FALSE) {
+		mali_dvfs_table_update();
+		change_mali_dvfs_status(1, 0);
+		asv_applied = MALI_TRUE;
+
+		return MALI_TRUE;
+	}
+#endif
+
+	/*decide next step*/
+	curStatus = get_mali_dvfs_status();
+	nextStatus = decideNextStatus(utilization);
+
+	MALI_DEBUG_PRINT(1, ("= curStatus %d, nextStatus %d, maliDvfsStatus.currentStep %d \n", curStatus, nextStatus, maliDvfsStatus.currentStep));
+
+	/*if next status is same with current status, don't change anything*/
+	if ((curStatus != nextStatus && stay_count == 0)) {
+		/*check if boost up or not*/
+		if (nextStatus > maliDvfsStatus.currentStep) boostup = 1;
+
+		/*change mali dvfs status*/
+		if (!change_mali_dvfs_status(nextStatus,boostup)) {
+			MALI_DEBUG_PRINT(1, ("error on change_mali_dvfs_status \n"));
+			return MALI_FALSE;
+		}
+		/* hold the new step for its configured stay-count */
+		stay_count = mali_dvfs_staycount[maliDvfsStatus.currentStep].staycount;
+	} else {
+		if (stay_count > 0)
+			stay_count--;
+	}
+
+	return MALI_TRUE;
+}
+
+
+
+/* Nonzero while the DVFS work handler is executing. */
+int mali_dvfs_is_running(void)
+{
+	return bMaliDvfsRun;
+}
+
+
+
+/* Late-resume hook: drop the GPU to the lowest step so the governor
+ * ramps up again from a known-low clock/voltage. */
+void mali_dvfs_late_resume(void)
+{
+	// set the init clock as low when resume
+	set_mali_dvfs_status(0,0);
+}
+
+
+/*
+ * Deferred DVFS worker: pick up any external overrides of the per-step
+ * clocks, thresholds and (DEBUG builds) voltages, then run one governor
+ * decision on the most recently reported utilization.
+ * Runs in process context so clock/regulator calls may sleep.
+ */
+static void mali_dvfs_work_handler(struct work_struct *w)
+{
+	int change_clk = 0;
+	int change_step = 0;
+	bMaliDvfsRun=1;
+
+	/* dvfs table change when clock was changed */
+	if (step0_clk != mali_dvfs[0].clock) {
+		MALI_PRINT(("::: step0_clk change to %d Mhz\n", step0_clk));
+		change_clk = step0_clk;
+		change_step = 0;
+		step0_clk = change_dvfs_tableset(change_clk, change_step);
+	}
+#if (MALI_DVFS_STEPS > 1)
+	if (step1_clk != mali_dvfs[1].clock) {
+		MALI_PRINT(("::: step1_clk change to %d Mhz\n", step1_clk));
+		change_clk = step1_clk;
+		change_step = 1;
+		step1_clk = change_dvfs_tableset(change_clk, change_step);
+	}
+	/* fix: the following format strings had a stray '%' before '\n'
+	 * ("%d %\n"), an invalid conversion; escaped as "%%" throughout */
+	if (step0_up != mali_dvfs_threshold[0].upthreshold) {
+		MALI_PRINT(("::: step0_up change to %d%%\n", step0_up));
+		mali_dvfs_threshold[0].upthreshold = step0_up;
+	}
+	if (step1_down != mali_dvfs_threshold[1].downthreshold) {
+		MALI_PRINT((":::step1_down change to %d%%\n", step1_down));
+		mali_dvfs_threshold[1].downthreshold = step1_down;
+	}
+#if (MALI_DVFS_STEPS > 2)
+	if (step2_clk != mali_dvfs[2].clock) {
+		MALI_PRINT(("::: step2_clk change to %d Mhz\n", step2_clk));
+		change_clk = step2_clk;
+		change_step = 2;
+		step2_clk = change_dvfs_tableset(change_clk, change_step);
+	}
+	if (step1_up != mali_dvfs_threshold[1].upthreshold) {
+		MALI_PRINT((":::step1_up change to %d%%\n", step1_up));
+		mali_dvfs_threshold[1].upthreshold = step1_up;
+	}
+	if (step2_down != mali_dvfs_threshold[2].downthreshold) {
+		MALI_PRINT((":::step2_down change to %d%%\n", step2_down));
+		mali_dvfs_threshold[2].downthreshold = step2_down;
+	}
+#if (MALI_DVFS_STEPS > 3)
+	if (step3_clk != mali_dvfs[3].clock) {
+		MALI_PRINT(("::: step3_clk change to %d Mhz\n", step3_clk));
+		change_clk = step3_clk;
+		change_step = 3;
+		step3_clk = change_dvfs_tableset(change_clk, change_step);
+	}
+	if (step2_up != mali_dvfs_threshold[2].upthreshold) {
+		MALI_PRINT((":::step2_up change to %d%%\n", step2_up));
+		mali_dvfs_threshold[2].upthreshold = step2_up;
+	}
+	if (step3_down != mali_dvfs_threshold[3].downthreshold) {
+		MALI_PRINT((":::step3_down change to %d%%\n", step3_down));
+		mali_dvfs_threshold[3].downthreshold = step3_down;
+	}
+#endif
+#endif
+#endif
+
+#ifdef DEBUG
+	mali_dvfs[0].vol = step0_vol;
+	mali_dvfs[1].vol = step1_vol;
+	mali_dvfs[2].vol = step2_vol;
+	mali_dvfs[3].vol = step3_vol;
+#endif
+	MALI_DEBUG_PRINT(3, ("=== mali_dvfs_work_handler\n"));
+
+	if (!mali_dvfs_status(mali_dvfs_utilization))
+		MALI_DEBUG_PRINT(1,( "error on mali dvfs status in mali_dvfs_work_handler"));
+
+	bMaliDvfsRun=0;
+}
+
+/*
+ * Initialise DVFS state: create the single-threaded worker (once),
+ * reset the bottom-lock counter and record the starting step.
+ * Returns MALI_FALSE if the workqueue could not be created.
+ */
+mali_bool init_mali_dvfs_status(int step)
+{
+	if (!mali_dvfs_wq)
+		mali_dvfs_wq = create_singlethread_workqueue("mali_dvfs");
+	/* fix: an unchecked allocation here caused a NULL dereference
+	 * later in queue_work_on() (the original's "add a error handling
+	 * here" note) */
+	if (!mali_dvfs_wq) {
+		MALI_PRINT(("failed to create mali_dvfs workqueue\n"));
+		return MALI_FALSE;
+	}
+
+	_mali_osk_atomic_init(&bottomlock_status, 0);
+
+	set_mali_dvfs_current_step(step);
+
+	return MALI_TRUE;
+}
+
+/* Tear down DVFS state: destroy the worker and release the
+ * bottom-lock counter. */
+void deinit_mali_dvfs_status(void)
+{
+	struct workqueue_struct *wq = mali_dvfs_wq;
+
+	mali_dvfs_wq = NULL;
+	if (wq)
+		destroy_workqueue(wq);
+
+	_mali_osk_atomic_term(&bottomlock_status);
+}
+
+/*
+ * Entry point from the utilization callback: record the latest sample
+ * (0..255) and kick the governor decision onto CPU0's workqueue so the
+ * clock/regulator work runs in process context.  Always MALI_TRUE.
+ */
+mali_bool mali_dvfs_handler(u32 utilization)
+{
+	mali_dvfs_utilization = utilization;
+	queue_work_on(0, mali_dvfs_wq,&mali_dvfs_work);
+
+	/*add error handle here*/
+	return MALI_TRUE;
+}
+
+/*
+ * Quantize @change_clk to the nearest supported step frequency, store
+ * it into mali_dvfs[@change_step], and — if that step is the active
+ * one — apply the new voltage and clock immediately.
+ * Returns the clock (MHz) actually stored for the step.
+ */
+int change_dvfs_tableset(int change_clk, int change_step)
+{
+	int err;
+
+	if (change_clk < mali_dvfs_all[1].clock) {
+		mali_dvfs[change_step].clock = mali_dvfs_all[0].clock;
+	} else if (change_clk < mali_dvfs_all[2].clock && change_clk >= mali_dvfs_all[1].clock) {
+		mali_dvfs[change_step].clock = mali_dvfs_all[1].clock;
+	} else if (change_clk < mali_dvfs_all[3].clock && change_clk >= mali_dvfs_all[2].clock) {
+		mali_dvfs[change_step].clock = mali_dvfs_all[2].clock;
+	} else {
+		mali_dvfs[change_step].clock = mali_dvfs_all[3].clock;
+	}
+
+	MALI_PRINT((":::mali dvfs step %d clock and voltage = %d Mhz, %d V\n",change_step, mali_dvfs[change_step].clock, mali_dvfs[change_step].vol));
+
+	if (maliDvfsStatus.currentStep == change_step) {
+#ifdef CONFIG_REGULATOR
+		/*change the voltage*/
+		mali_regulator_set_voltage(mali_dvfs[change_step].vol, mali_dvfs[change_step].vol);
+#endif
+		/*change the clock*/
+		mali_clk_set_rate(mali_dvfs[change_step].clock, mali_dvfs[change_step].freq);
+
+		/* pin the CPU while the GPU runs its top (440MHz) step */
+		if (mali_dvfs[change_step].clock == 440) {
+			/* fix: the lock result was assigned but never examined */
+			err = cpufreq_lock_by_mali(1200);
+			if (err)
+				MALI_DEBUG_PRINT(1, ("failed to lock cpufreq for mali (%d)\n", err));
+		} else {
+			cpufreq_unlock_by_mali();
+		}
+	}
+
+	return mali_dvfs[change_step].clock;
+}
+
+/* Force the clock for @step; additionally run the full DVFS status
+ * update, but only when the current step is 1 — presumably the
+ * post-ASV boot default — TODO confirm why other steps are excluded. */
+void mali_default_step_set(int step, mali_bool boostup)
+{
+	mali_clk_set_rate(mali_dvfs[step].clock, mali_dvfs[step].freq);
+
+	if (maliDvfsStatus.currentStep == 1)
+		set_mali_dvfs_status(step, boostup);
+}
+
+/*
+ * Impose a DVFS floor: keep the governor from choosing a step below
+ * @lock_step.  If the GPU currently sits below the floor, raise the
+ * voltage and then the clock immediately.  Reference counted: returns
+ * the new holder count, or -1 on an invalid counter state.
+ * NOTE(review): the read here and the increment at the end are not one
+ * atomic operation — concurrent pushers could race on bottom_lock_step.
+ */
+int mali_dvfs_bottom_lock_push(int lock_step)
+{
+	int prev_status = _mali_osk_atomic_read(&bottomlock_status);
+
+	if (prev_status < 0) {
+		MALI_PRINT(("gpu bottom lock status is not valid for push\n"));
+		return -1;
+	}
+	if (bottom_lock_step < lock_step) {
+		bottom_lock_step = lock_step;
+		if (get_mali_dvfs_status() < lock_step) {
+			/* voltage first, then clock: never overclock the rail */
+			mali_regulator_set_voltage(mali_dvfs[lock_step].vol,
+						   mali_dvfs[lock_step].vol);
+			mali_clk_set_rate(mali_dvfs[lock_step].clock,
+					  mali_dvfs[lock_step].freq);
+			set_mali_dvfs_current_step(lock_step);
+		}
+	}
+	return _mali_osk_atomic_inc_return(&bottomlock_status);
+}
+
+/* Release one bottom-lock reference; when the last holder drops,
+ * the step floor is cleared.  Returns the new count, or -1 on error. */
+int mali_dvfs_bottom_lock_pop(void)
+{
+	int holders = _mali_osk_atomic_read(&bottomlock_status);
+
+	if (holders <= 0) {
+		MALI_PRINT(("gpu bottom lock status is not valid for pop\n"));
+		return -1;
+	}
+
+	if (holders == 1) {
+		bottom_lock_step = 0;
+		MALI_PRINT(("gpu bottom lock release\n"));
+	}
+
+	return _mali_osk_atomic_dec_return(&bottomlock_status);
+}
+
+#if MALI_VOLTAGE_LOCK
+/* Return the smallest table voltage >= @vol (uV), or 0 when even the
+ * highest step cannot satisfy the request. */
+int mali_vol_get_from_table(int vol)
+{
+	int idx;
+
+	/* mali_dvfs[] is ordered by ascending voltage, so the first entry
+	 * at or above the request is the best match */
+	for (idx = 0; idx < MALI_DVFS_STEPS; idx++)
+		if (mali_dvfs[idx].vol >= vol)
+			return mali_dvfs[idx].vol;
+
+	MALI_PRINT(("Failed to get voltage from mali_dvfs table, maximum voltage is %d uV\n", mali_dvfs[MALI_DVFS_STEPS-1].vol));
+	return 0;
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/regs/mali_200_regs.h b/drivers/media/video/samsung/mali/regs/mali_200_regs.h
new file mode 100644
index 0000000..e9da7ab
--- /dev/null
+++ b/drivers/media/video/samsung/mali/regs/mali_200_regs.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALI200_REGS_H_
+#define _MALI200_REGS_H_
+
+/**
+ * Enum for management register addresses.
+ */
+enum mali200_mgmt_reg
+{
+	/* identification / status / control */
+	MALI200_REG_ADDR_MGMT_VERSION = 0x1000,
+	MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x1004,
+	MALI200_REG_ADDR_MGMT_STATUS = 0x1008,
+	MALI200_REG_ADDR_MGMT_CTRL_MGMT = 0x100c,
+
+	/* interrupt raw-status / clear / mask / masked-status bank */
+	MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x1020,
+	MALI200_REG_ADDR_MGMT_INT_CLEAR = 0x1024,
+	MALI200_REG_ADDR_MGMT_INT_MASK = 0x1028,
+	MALI200_REG_ADDR_MGMT_INT_STATUS = 0x102c,
+
+	MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW = 0x1044,
+
+	MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x1050,
+
+	/* two hardware performance counters */
+	MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x1080,
+	MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x1084,
+	MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x108c,
+
+	MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x10a0,
+	MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x10a4,
+	MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x10ac,
+
+	MALI200_REG_SIZEOF_REGISTER_BANK = 0x10f0
+
+};
+
+#define MALI200_REG_VAL_PERF_CNT_ENABLE 1
+
+/* Command bits written to the CTRL_MGMT register. */
+enum mali200_mgmt_ctrl_mgmt {
+	MALI200_REG_VAL_CTRL_MGMT_STOP_BUS = (1<<0),
+#if defined(USING_MALI200)
+	MALI200_REG_VAL_CTRL_MGMT_FLUSH_CACHES = (1<<3),
+#endif
+	MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET = (1<<5),
+	MALI200_REG_VAL_CTRL_MGMT_START_RENDERING = (1<<6),
+#if defined(USING_MALI400)
+	MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET = (1<<7),
+#endif
+};
+
+/* Interrupt bits in the PP INT_RAWSTAT/CLEAR/MASK/STATUS registers;
+ * bits 9..12 exist on Mali-400 PP only. */
+enum mali200_mgmt_irq {
+	MALI200_REG_VAL_IRQ_END_OF_FRAME = (1<<0),
+	MALI200_REG_VAL_IRQ_END_OF_TILE = (1<<1),
+	MALI200_REG_VAL_IRQ_HANG = (1<<2),
+	MALI200_REG_VAL_IRQ_FORCE_HANG = (1<<3),
+	MALI200_REG_VAL_IRQ_BUS_ERROR = (1<<4),
+	MALI200_REG_VAL_IRQ_BUS_STOP = (1<<5),
+	MALI200_REG_VAL_IRQ_CNT_0_LIMIT = (1<<6),
+	MALI200_REG_VAL_IRQ_CNT_1_LIMIT = (1<<7),
+	MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR = (1<<8),
+	MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND = (1<<9),
+	MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW = (1<<10),
+	MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW = (1<<11),
+	MALI400PP_REG_VAL_IRQ_RESET_COMPLETED = (1<<12),
+};
+
+#if defined USING_MALI200
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined USING_MALI400
+#define MALI200_REG_VAL_IRQ_MASK_ALL ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_END_OF_TILE |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_CNT_0_LIMIT |\
+ MALI200_REG_VAL_IRQ_CNT_1_LIMIT |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW |\
+ MALI400PP_REG_VAL_IRQ_RESET_COMPLETED))
+#else
+#error "No supported mali core defined"
+#endif
+
+#if defined USING_MALI200
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR))
+#elif defined USING_MALI400
+#define MALI200_REG_VAL_IRQ_MASK_USED ((enum mali200_mgmt_irq) (\
+ MALI200_REG_VAL_IRQ_END_OF_FRAME |\
+ MALI200_REG_VAL_IRQ_HANG |\
+ MALI200_REG_VAL_IRQ_FORCE_HANG |\
+ MALI200_REG_VAL_IRQ_BUS_ERROR |\
+ MALI200_REG_VAL_IRQ_BUS_STOP |\
+ MALI200_REG_VAL_IRQ_WRITE_BOUNDARY_ERROR |\
+ MALI400PP_REG_VAL_IRQ_INVALID_PLIST_COMMAND |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_UNDERFLOW |\
+ MALI400PP_REG_VAL_IRQ_CALL_STACK_OVERFLOW))
+#else
+#error "No supported mali core defined"
+#endif
+
+#define MALI200_REG_VAL_IRQ_MASK_NONE ((enum mali200_mgmt_irq)(0))
+
+/* Bits read back from the MGMT_STATUS register. */
+enum mali200_mgmt_status {
+	MALI200_REG_VAL_STATUS_RENDERING_ACTIVE = (1<<0),
+	MALI200_REG_VAL_STATUS_BUS_STOPPED = (1<<4),
+};
+
+/* Base offset of the frame-registers bank. */
+enum mali200_render_unit
+{
+	MALI200_REG_ADDR_FRAME = 0x0000,
+};
+
+#if defined USING_MALI200
+#define MALI200_NUM_REGS_FRAME ((0x04C/4)+1)
+#elif defined USING_MALI400
+#define MALI200_NUM_REGS_FRAME ((0x058/4)+1)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Base offsets of the three writeback units. */
+enum mali200_wb_unit {
+	MALI200_REG_ADDR_WB0 = 0x0100,
+	MALI200_REG_ADDR_WB1 = 0x0200,
+	MALI200_REG_ADDR_WB2 = 0x0300
+};
+
+/** The number of registers in one single writeback unit */
+#ifndef MALI200_NUM_REGS_WBx
+#define MALI200_NUM_REGS_WBx ((0x02C/4)+1)
+#endif
+
+/* This should be in the top 16 bit of the version register of Mali PP */
+#if defined USING_MALI200
+#define MALI_PP_PRODUCT_ID 0xC807
+#elif defined USING_MALI400
+#define MALI300_PP_PRODUCT_ID 0xCE07
+#define MALI400_PP_PRODUCT_ID 0xCD07
+#define MALI_PP_PRODUCT_ID MALI400_PP_PRODUCT_ID
+#else
+#error "No supported mali core defined"
+#endif
+
+
+#endif /* _MALI200_REGS_H_ */
diff --git a/drivers/media/video/samsung/mali/regs/mali_gp_regs.h b/drivers/media/video/samsung/mali/regs/mali_gp_regs.h
new file mode 100644
index 0000000..14719a3
--- /dev/null
+++ b/drivers/media/video/samsung/mali/regs/mali_gp_regs.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _MALIGP2_CONROL_REGS_H_
+#define _MALIGP2_CONROL_REGS_H_
+
+/**
+ * These are the different geometry processor control registers.
+ * Their usage is to control and monitor the operation of the
+ * Vertex Shader and the Polygon List Builder in the geometry processor.
+ * Addresses are in 32-bit word relative sizes.
+ * @see [P0081] "Geometry Processor Data Structures" for details
+ */
+
+/* Byte offsets of the GP management registers from the core base. */
+typedef enum {
+	/* vertex-shader and PLBU command-list windows */
+	MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR = 0x00,
+	MALIGP2_REG_ADDR_MGMT_VSCL_END_ADDR = 0x04,
+	MALIGP2_REG_ADDR_MGMT_PLBUCL_START_ADDR = 0x08,
+	MALIGP2_REG_ADDR_MGMT_PLBUCL_END_ADDR = 0x0c,
+	MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR = 0x10,
+	MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR = 0x14,
+	MALIGP2_REG_ADDR_MGMT_CMD = 0x20,
+	/* interrupt raw-status / clear / mask / masked-status bank */
+	MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT = 0x24,
+	MALIGP2_REG_ADDR_MGMT_INT_CLEAR = 0x28,
+	MALIGP2_REG_ADDR_MGMT_INT_MASK = 0x2C,
+	MALIGP2_REG_ADDR_MGMT_INT_STAT = 0x30,
+	MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW = 0x34,
+	/* two hardware performance counters */
+	MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x3C,
+	MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x40,
+	MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x44,
+	MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x48,
+	MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x4C,
+	MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x50,
+	MALIGP2_REG_ADDR_MGMT_STATUS = 0x68,
+	MALIGP2_REG_ADDR_MGMT_VERSION = 0x6C,
+	MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR_READ = 0x80,
+	MALIGP2_REG_ADDR_MGMT_PLBCL_START_ADDR_READ = 0x84,
+	MALIGP2_CONTR_AXI_BUS_ERROR_STAT = 0x94,
+	MALIGP2_REGISTER_ADDRESS_SPACE_SIZE = 0x98,
+} maligp_reg_addr_mgmt_addr;
+
+#define MALIGP2_REG_VAL_PERF_CNT_ENABLE 1
+
+/**
+ * Commands to geometry processor.
+ * @see MALIGP2_CTRL_REG_CMD
+ */
+/* Write-one-to-trigger command bits for MALIGP2_REG_ADDR_MGMT_CMD. */
+typedef enum
+{
+	MALIGP2_REG_VAL_CMD_START_VS = (1<< 0),
+	MALIGP2_REG_VAL_CMD_START_PLBU = (1<< 1),
+	MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC = (1<< 4),
+	MALIGP2_REG_VAL_CMD_RESET = (1<< 5),
+	MALIGP2_REG_VAL_CMD_FORCE_HANG = (1<< 6),
+	MALIGP2_REG_VAL_CMD_STOP_BUS = (1<< 9),
+#if defined(USING_MALI400)
+	MALI400GP_REG_VAL_CMD_SOFT_RESET = (1<<10),
+#endif
+} mgp_contr_reg_val_cmd;
+
+
+/** @defgroup MALIGP2_IRQ
+ * Interrupt status of geometry processor.
+ * @see MALIGP2_CTRL_REG_INT_RAWSTAT, MALIGP2_REG_ADDR_MGMT_INT_CLEAR,
+ * MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_ADDR_MGMT_INT_STAT
+ * @{
+ */
+#define MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST (1 << 0)
+#define MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST (1 << 1)
+#define MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM (1 << 2)
+#define MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ (1 << 3)
+#define MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ (1 << 4)
+#define MALIGP2_REG_VAL_IRQ_HANG (1 << 5)
+#define MALIGP2_REG_VAL_IRQ_FORCE_HANG (1 << 6)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT (1 << 7)
+#define MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT (1 << 8)
+#define MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR (1 << 9)
+#define MALIGP2_REG_VAL_IRQ_SYNC_ERROR (1 << 10)
+#define MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR (1 << 11)
+#if defined USING_MALI400
+#define MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED (1 << 12)
+#define MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD (1 << 13)
+#define MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD (1 << 14)
+#define MALI400GP_REG_VAL_IRQ_RESET_COMPLETED (1 << 19)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW (1 << 20)
+#define MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW (1 << 21)
+#define MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS (1 << 22)
+#elif !defined USING_MALI200
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining all IRQs in MaliGP2 */
+#if defined USING_MALI200
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined USING_MALI400
+#define MALIGP2_REG_VAL_IRQ_MASK_ALL \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_VS_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_PLBU_SEM_IRQ | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_0_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_PERF_CNT_1_LIMIT | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_AXI_BUS_STOPPED | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_RESET_COMPLETED | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining the IRQs in MaliGP2 which we use*/
+#if defined USING_MALI200
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR)
+#elif defined USING_MALI400
+#define MALIGP2_REG_VAL_IRQ_MASK_USED \
+ (\
+ MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | \
+ MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | \
+ MALIGP2_REG_VAL_IRQ_HANG | \
+ MALIGP2_REG_VAL_IRQ_FORCE_HANG | \
+ MALIGP2_REG_VAL_IRQ_WRITE_BOUND_ERR | \
+ MALIGP2_REG_VAL_IRQ_SYNC_ERROR | \
+ MALIGP2_REG_VAL_IRQ_AXI_BUS_ERROR | \
+ MALI400GP_REG_VAL_IRQ_VS_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_PLB_INVALID_CMD | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_UNDERFLOW | \
+ MALI400GP_REG_VAL_IRQ_SEMAPHORE_OVERFLOW | \
+ MALI400GP_REG_VAL_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+#else
+#error "No supported mali core defined"
+#endif
+
+/* Mask defining non IRQs on MaliGP2*/
+#define MALIGP2_REG_VAL_IRQ_MASK_NONE 0
+
+/** }@ defgroup MALIGP2_IRQ*/
+
+/** @defgroup MALIGP2_STATUS
+ * The different Status values to the geometry processor.
+ * @see MALIGP2_CTRL_REG_STATUS
+ * @{
+ */
+#define MALIGP2_REG_VAL_STATUS_VS_ACTIVE 0x0002
+#define MALIGP2_REG_VAL_STATUS_BUS_STOPPED 0x0004
+#define MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE 0x0008
+#define MALIGP2_REG_VAL_STATUS_BUS_ERROR 0x0040
+#define MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR 0x0100
+/** }@ defgroup MALIGP2_STATUS*/
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ACTIVE (\
+ MALIGP2_REG_VAL_STATUS_VS_ACTIVE|\
+ MALIGP2_REG_VAL_STATUS_PLBU_ACTIVE)
+
+
+#define MALIGP2_REG_VAL_STATUS_MASK_ERROR (\
+ MALIGP2_REG_VAL_STATUS_BUS_ERROR |\
+ MALIGP2_REG_VAL_STATUS_WRITE_BOUND_ERR )
+
+/* This should be in the top 16 bit of the version register of gp.*/
+#if defined(USING_MALI200)
+#define MALI_GP_PRODUCT_ID 0xA07
+#elif defined(USING_MALI400)
+#define MALI300_GP_PRODUCT_ID 0xC07
+#define MALI400_GP_PRODUCT_ID 0xB07
+#define MALI_GP_PRODUCT_ID MALI400_GP_PRODUCT_ID
+#else
+#error "No supported mali core defined"
+#endif
+
+/**
+ * The different sources for instrumented on the geometry processor.
+ * @see MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC
+ */
+
+/* Selector values for the GP performance-counter SRC registers. */
+enum MALIGP2_cont_reg_perf_cnt_src {
+	MALIGP2_REG_VAL_PERF_CNT1_SRC_NUMBER_OF_VERTICES_PROCESSED = 0x0a,
+};
+
+#endif
diff --git a/drivers/media/video/samsung/mali/timestamp-arm11-cc/mali_timestamp.c b/drivers/media/video/samsung/mali/timestamp-arm11-cc/mali_timestamp.c
new file mode 100644
index 0000000..cddfa58
--- /dev/null
+++ b/drivers/media/video/samsung/mali/timestamp-arm11-cc/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/media/video/samsung/mali/timestamp-arm11-cc/mali_timestamp.h b/drivers/media/video/samsung/mali/timestamp-arm11-cc/mali_timestamp.h
new file mode 100644
index 0000000..442c6e0
--- /dev/null
+++ b/drivers/media/video/samsung/mali/timestamp-arm11-cc/mali_timestamp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ /*
+ * reset counters and overflow flags
+ */
+
+ u32 mask = (1 << 0) | /* enable all three counters */
+ (0 << 1) | /* reset both Count Registers to 0x0 */
+ (1 << 2) | /* reset the Cycle Counter Register to 0x0 */
+ (0 << 3) | /* 1 = Cycle Counter Register counts every 64th processor clock cycle */
+ (0 << 4) | /* Count Register 0 interrupt enable */
+ (0 << 5) | /* Count Register 1 interrupt enable */
+ (0 << 6) | /* Cycle Counter interrupt enable */
+ (0 << 8) | /* Count Register 0 overflow flag (clear or write, flag on read) */
+ (0 << 9) | /* Count Register 1 overflow flag (clear or write, flag on read) */
+ (1 << 10); /* Cycle Counter Register overflow flag (clear or write, flag on read) */
+
+ __asm__ __volatile__ ("MCR p15, 0, %0, c15, c12, 0" : : "r" (mask) );
+
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ u32 result;
+
+ /* this is for the clock cycles */
+ __asm__ __volatile__ ("MRC p15, 0, %0, c15, c12, 1" : "=r" (result));
+
+ return (u64)result;
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/media/video/samsung/mali/timestamp-default/mali_timestamp.c b/drivers/media/video/samsung/mali/timestamp-default/mali_timestamp.c
new file mode 100644
index 0000000..cddfa58
--- /dev/null
+++ b/drivers/media/video/samsung/mali/timestamp-default/mali_timestamp.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_timestamp.h"
+
+/* This file is intentionally left empty, as all functions are inlined in mali_profiling_sampler.h */
diff --git a/drivers/media/video/samsung/mali/timestamp-default/mali_timestamp.h b/drivers/media/video/samsung/mali/timestamp-default/mali_timestamp.h
new file mode 100644
index 0000000..470eac9
--- /dev/null
+++ b/drivers/media/video/samsung/mali/timestamp-default/mali_timestamp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_TIMESTAMP_H__
+#define __MALI_TIMESTAMP_H__
+
+#include "mali_osk.h"
+
+MALI_STATIC_INLINE _mali_osk_errcode_t _mali_timestamp_reset(void)
+{
+ return _MALI_OSK_ERR_OK;
+}
+
+MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
+{
+ return _mali_osk_time_get_ns();
+}
+
+#endif /* __MALI_TIMESTAMP_H__ */
diff --git a/drivers/media/video/samsung/mfc5x/Kconfig b/drivers/media/video/samsung/mfc5x/Kconfig
new file mode 100644
index 0000000..0ff8a60
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/Kconfig
@@ -0,0 +1,39 @@
+#
+# Configuration for Multi Format Codecs (MFC)
+#
+config VIDEO_MFC5X
+ bool "Samsung MFC (Multi Format Codec - FIMV 5.x) Driver"
+ depends on USE_LEGACY_MFC
+ default n
+ ---help---
+ This is a Samsung Multi Format Codecs (MFC) FIMV V5.x
+
+config VIDEO_MFC_MAX_INSTANCE
+ int "Maximum size of MFC instance (1-4)"
+ range 1 4
+ depends on VIDEO_MFC5X
+ default 4
+
+config VIDEO_MFC_MEM_PORT_COUNT
+ int "Default number of MFC memory ports (1-2)"
+ range 1 2
+ depends on VIDEO_MFC5X && (!EXYNOS_CONTENT_PATH_PROTECTION)
+ default 2
+
+config VIDEO_MFC_VCM_UMP
+ bool "Support UMP over VCM for MFC"
+ depends on VIDEO_MFC5X && VCM_MMU && VIDEO_UMP
+ default y
+
+config VIDEO_MFC5X_DEBUG
+ bool "MFC driver debug message"
+ depends on VIDEO_MFC5X
+ default n
+
+config VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN
+ bool "4k align for chroma and luma size in dec"
+ depends on VIDEO_MFC5X && SLP
+ ---help---
+ To use physical address on the gem interface.
+
+
diff --git a/drivers/media/video/samsung/mfc5x/Makefile b/drivers/media/video/samsung/mfc5x/Makefile
new file mode 100644
index 0000000..eee420e
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/Makefile
@@ -0,0 +1,19 @@
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_dev.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_dec.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_enc.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_inst.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_cmd.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_shm.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_reg.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_buf.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_pm.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_ctrl.o
+obj-$(CONFIG_VIDEO_MFC5X) += mfc_mem.o
+
+ifeq ($(CONFIG_VIDEO_MFC5X_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+ifeq ($(CONFIG_VIDEO_MFC_VCM_UMP),y)
+EXTRA_CFLAGS += -Idrivers/media/video/samsung/ump/include
+endif
diff --git a/drivers/media/video/samsung/mfc5x/SsbSipMfcApi.h b/drivers/media/video/samsung/mfc5x/SsbSipMfcApi.h
new file mode 100644
index 0000000..495f1ba
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/SsbSipMfcApi.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * user interface header for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Alternatively, Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _SSBSIP_MFC_API_H_
+#define _SSBSIP_MFC_API_H_
+
+/*--------------------------------------------------------------------------------*/
+/* Definition */
+/*--------------------------------------------------------------------------------*/
+#define MAX_DECODER_INPUT_BUFFER_SIZE (1024 * 3072)
+#define MAX_ENCODER_OUTPUT_BUFFER_SIZE (1024 * 3072)
+
+#define SUPPORT_1080P 1
+
+#if SUPPORT_1080P
+#define MMAP_BUFFER_SIZE_MMAP (70*1024*1024) /* only C110 use this value. in C210, memory size is decided in menuconfig*/
+#else
+#define MMAP_BUFFER_SIZE_MMAP (62*1024*1024)
+#endif
+
+#define SAMSUNG_MFC_DEV_NAME "/dev/s3c-mfc"
+
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+#define SUPPORT_SLICE_ENCODING 1
+#else
+#define SUPPORT_SLICE_ENCODING 0
+#endif
+
+/*--------------------------------------------------------------------------------*/
+/* Structure and Type */
+/*--------------------------------------------------------------------------------*/
+typedef enum {
+ H264_DEC,
+ VC1_DEC, /* VC1 advaced Profile decoding */
+ MPEG4_DEC,
+ XVID_DEC,
+ MPEG1_DEC,
+ MPEG2_DEC,
+ H263_DEC,
+ VC1RCV_DEC, /* VC1 simple/main profile decoding */
+ FIMV1_DEC,
+ FIMV2_DEC,
+ FIMV3_DEC,
+ FIMV4_DEC,
+ H264_ENC,
+ MPEG4_ENC,
+ H263_ENC,
+ UNKNOWN_TYPE
+} SSBSIP_MFC_CODEC_TYPE;
+
+typedef enum {
+ DONT_CARE = 0,
+ I_FRAME = 1,
+ NOT_CODED = 2
+} SSBSIP_MFC_FORCE_SET_FRAME_TYPE;
+
+typedef enum {
+ NV12_LINEAR = 0,
+ NV12_TILE,
+ NV21_LINEAR,
+} SSBSIP_MFC_INSTRM_MODE_TYPE;
+
+#if SUPPORT_SLICE_ENCODING
+typedef enum {
+ FRAME = 0,
+ SLICE = 1,
+} SSBSIP_MFC_OUTSTRM_MODE_TYPE;
+#endif
+
+typedef enum {
+ NO_CACHE = 0,
+ CACHE = 1
+} SSBIP_MFC_BUFFER_TYPE;
+
+typedef enum {
+ MFC_DEC_SETCONF_POST_ENABLE = 1,
+ MFC_DEC_SETCONF_EXTRA_BUFFER_NUM,
+ MFC_DEC_SETCONF_DISPLAY_DELAY,
+ MFC_DEC_SETCONF_IS_LAST_FRAME,
+ MFC_DEC_SETCONF_SLICE_ENABLE,
+ MFC_DEC_SETCONF_CRC_ENABLE,
+ MFC_DEC_SETCONF_FIMV1_WIDTH_HEIGHT,
+ MFC_DEC_SETCONF_FRAME_TAG,
+ MFC_DEC_GETCONF_CRC_DATA,
+ MFC_DEC_GETCONF_BUF_WIDTH_HEIGHT,
+ MFC_DEC_GETCONF_CROP_INFO,
+ MFC_DEC_GETCONF_FRAME_TAG,
+
+ /* C210 specific feature */
+ MFC_DEC_SETCONF_IMMEDIATELY_DISPLAY,
+ MFC_DEC_SETCONF_DPB_FLUSH,
+ MFC_DEC_SETCONF_PIXEL_CACHE,
+ MFC_DEC_GETCONF_WIDTH_HEIGHT,
+
+ MFC_DEC_SETCONF_SEI_PARSE,
+ MFC_DEC_GETCONF_FRAME_PACKING,
+} SSBSIP_MFC_DEC_CONF;
+
+typedef enum {
+ MFC_ENC_SETCONF_FRAME_TYPE = 100,
+ MFC_ENC_SETCONF_CHANGE_FRAME_RATE,
+ MFC_ENC_SETCONF_CHANGE_BIT_RATE,
+ MFC_ENC_SETCONF_FRAME_TAG,
+ MFC_ENC_SETCONF_ALLOW_FRAME_SKIP,
+ MFC_ENC_GETCONF_FRAME_TAG,
+
+ /* C210 specific feature */
+ MFC_ENC_SETCONF_VUI_INFO,
+ MFC_ENC_SETCONF_I_PERIOD,
+ MFC_ENC_SETCONF_HIER_P,
+
+ MFC_ENC_SETCONF_SEI_GEN,
+ MFC_ENC_SETCONF_FRAME_PACKING,
+} SSBSIP_MFC_ENC_CONF;
+
+typedef enum {
+ MFC_GETOUTBUF_STATUS_NULL = 0,
+ MFC_GETOUTBUF_DECODING_ONLY = 1,
+ MFC_GETOUTBUF_DISPLAY_DECODING,
+ MFC_GETOUTBUF_DISPLAY_ONLY,
+ MFC_GETOUTBUF_DISPLAY_END,
+ MFC_GETOUTBUF_CHANGE_RESOL
+} SSBSIP_MFC_DEC_OUTBUF_STATUS;
+
+typedef enum {
+ MFC_FRAME_TYPE_NOT_CODED,
+ MFC_FRAME_TYPE_I_FRAME,
+ MFC_FRAME_TYPE_P_FRAME,
+ MFC_FRAME_TYPE_B_FRAME,
+ MFC_FRAME_TYPE_OTHERS
+} SSBSIP_MFC_FRAME_TYPE;
+
+typedef enum {
+ MFC_RET_OK = 1,
+ MFC_RET_FAIL = -1000,
+ MFC_RET_OPEN_FAIL = -1001,
+ MFC_RET_CLOSE_FAIL = -1002,
+
+ MFC_RET_DEC_INIT_FAIL = -2000,
+ MFC_RET_DEC_EXE_TIME_OUT = -2001,
+ MFC_RET_DEC_EXE_ERR = -2002,
+ MFC_RET_DEC_GET_INBUF_FAIL = -2003,
+ MFC_RET_DEC_SET_INBUF_FAIL = -2004,
+ MFC_RET_DEC_GET_OUTBUF_FAIL = -2005,
+ MFC_RET_DEC_GET_CONF_FAIL = -2006,
+ MFC_RET_DEC_SET_CONF_FAIL = -2007,
+
+ MFC_RET_ENC_INIT_FAIL = -3000,
+ MFC_RET_ENC_EXE_TIME_OUT = -3001,
+ MFC_RET_ENC_EXE_ERR = -3002,
+ MFC_RET_ENC_GET_INBUF_FAIL = -3003,
+ MFC_RET_ENC_SET_INBUF_FAIL = -3004,
+ MFC_RET_ENC_GET_OUTBUF_FAIL = -3005,
+ MFC_RET_ENC_SET_OUTBUF_FAIL = -3006,
+ MFC_RET_ENC_GET_CONF_FAIL = -3007,
+ MFC_RET_ENC_SET_CONF_FAIL = -3008,
+
+ MFC_RET_INVALID_PARAM = -4000
+} SSBSIP_MFC_ERROR_CODE;
+
+typedef struct {
+ void *YPhyAddr; /* [OUT] physical address of Y */
+ void *CPhyAddr; /* [OUT] physical address of CbCr */
+ void *YVirAddr; /* [OUT] virtual address of Y */
+ void *CVirAddr; /* [OUT] virtual address of CbCr */
+
+ int img_width; /* [OUT] width of real image */
+ int img_height; /* [OUT] height of real image */
+ int buf_width; /* [OUT] width aligned to 16 */
+ int buf_height; /* [OUT] height alighed to 16 */
+
+ int timestamp_top; /* [OUT] timestamp of top filed(This is used for interlaced stream) */
+ int timestamp_bottom; /* [OUT] timestamp of bottom filed(This is used for interlaced stream) */
+ int consumedByte; /* [OUT] the number of byte consumed during decoding */
+ int res_change; /* [OUT] whether resolution is changed or not. 0: not change, 1: increased, 2: decreased */
+ int crop_top_offset; /* [OUT] crop information, top_offset */
+ int crop_bottom_offset; /* [OUT] crop information, bottom_offset */
+ int crop_left_offset; /* [OUT] crop information, left_offset */
+ int crop_right_offset; /* [OUT] crop information, right_offset */
+ int disp_pic_frame_type; /* [OUT] display picture frame type information */
+
+ /* C210 UMP feature */
+ unsigned int y_cookie; /* [OUT] cookie for Y address */
+ unsigned int c_cookie; /* [OUT] cookie for CbCr address, If it is 0, Y and CbCr is in continous memory */
+} SSBSIP_MFC_DEC_OUTPUT_INFO;
+
+typedef struct {
+ void *YPhyAddr; /* [IN/OUT] physical address of Y */
+ void *CPhyAddr; /* [IN/OUT] physical address of CbCr */
+ void *YVirAddr; /* [IN/OUT] virtual address of Y */
+ void *CVirAddr; /* [IN/OUT] virtual address of CbCr */
+ int YSize; /* [IN/OUT] input size of Y data */
+ int CSize; /* [IN/OUT] input size of CbCr data */
+
+ /* C210 UMP feature */
+ unsigned int y_cookie; /* [OUT] cookie for Y address */
+ unsigned int c_cookie; /* [OUT] cookie for CbCr address, If it is 0, Y and CbCr is in continous memory */
+} SSBSIP_MFC_ENC_INPUT_INFO;
+
+typedef struct {
+ unsigned int dataSize; /* [OUT] encoded data size(without header) */
+ unsigned int headerSize; /* [OUT] encoded header size */
+ unsigned int frameType; /* [OUT] frame type of encoded stream */
+ void *StrmPhyAddr; /* [OUT] physical address of Y */
+ void *StrmVirAddr; /* [OUT] virtual address of Y */
+ void *encodedYPhyAddr; /* [OUT] physical address of Y which is flushed */
+ void *encodedCPhyAddr; /* [OUT] physical address of C which is flushed */
+
+ /* C210 UMP feature */
+ unsigned int strm_cookie; /* [OUT] cooke for stream buffer */
+ unsigned int y_encoded_cookie; /* [OUT] cookie for Y address */
+ unsigned int c_encoded_cookie; /* [OUT] cookie for CbCr address, If it is 0, Y and CbCr is in continous memory */
+} SSBSIP_MFC_ENC_OUTPUT_INFO;
+
+typedef struct {
+ /* common parameters */
+ SSBSIP_MFC_CODEC_TYPE codecType; /* [IN] codec type */
+ int SourceWidth; /* [IN] width of video to be encoded */
+ int SourceHeight; /* [IN] height of video to be encoded */
+ int IDRPeriod; /* [IN] GOP number(interval of I-frame) */
+ int SliceMode; /* [IN] Multi slice mode */
+ int RandomIntraMBRefresh; /* [IN] cyclic intra refresh */
+ int EnableFRMRateControl; /* [IN] frame based rate control enable */
+ int Bitrate; /* [IN] rate control parameter(bit rate) */
+ int FrameQp; /* [IN] The quantization parameter of the frame */
+ int FrameQp_P; /* [IN] The quantization parameter of the P frame */
+ int QSCodeMax; /* [IN] Maximum Quantization value */
+ int QSCodeMin; /* [IN] Minimum Quantization value */
+ int CBRPeriodRf; /* [IN] Reaction coefficient parameter for rate control */
+ int PadControlOn; /* [IN] Enable padding control */
+ int LumaPadVal; /* [IN] Luma pel value used to fill padding area */
+ int CbPadVal; /* [IN] CB pel value used to fill padding area */
+ int CrPadVal; /* [IN] CR pel value used to fill padding area */
+ int FrameMap; /* [IN] Encoding input mode(tile mode or linear mode) */
+#if SUPPORT_SLICE_ENCODING
+ int OutputMode; /* [IN] Output mode: Frame/Slice */
+#endif
+
+ /* H.264 specific parameters */
+ int ProfileIDC; /* [IN] profile */
+ int LevelIDC; /* [IN] level */
+ int FrameQp_B; /* [IN] The quantization parameter of the B frame */
+ int FrameRate; /* [IN] rate control parameter(frame rate) */
+ int SliceArgument; /* [IN] MB number or byte number */
+ int NumberBFrames; /* [IN] The number of consecutive B frame inserted */
+ int NumberReferenceFrames; /* [IN] The number of reference pictures used */
+ int NumberRefForPframes; /* [IN] The number of reference pictures used for encoding P pictures */
+ int LoopFilterDisable; /* [IN] disable the loop filter */
+ int LoopFilterAlphaC0Offset; /* [IN] Alpha & C0 offset for H.264 loop filter */
+ int LoopFilterBetaOffset; /* [IN] Beta offset for H.264 loop filter */
+ int SymbolMode; /* [IN] The mode of entropy coding(CABAC, CAVLC) */
+ int PictureInterlace; /* [IN] Enables the interlace mode */
+ int Transform8x8Mode; /* [IN] Allow 8x8 transform(This is allowed only for high profile) */
+ int EnableMBRateControl; /* [IN] Enable macroblock-level rate control */
+ int DarkDisable; /* [IN] Disable adaptive rate control on dark region */
+ int SmoothDisable; /* [IN] Disable adaptive rate control on smooth region */
+ int StaticDisable; /* [IN] Disable adaptive rate control on static region */
+ int ActivityDisable; /* [IN] Disable adaptive rate control on high activity region */
+} SSBSIP_MFC_ENC_H264_PARAM;
+
+typedef struct {
+ /* common parameters */
+ SSBSIP_MFC_CODEC_TYPE codecType; /* [IN] codec type */
+ int SourceWidth; /* [IN] width of video to be encoded */
+ int SourceHeight; /* [IN] height of video to be encoded */
+ int IDRPeriod; /* [IN] GOP number(interval of I-frame) */
+ int SliceMode; /* [IN] Multi slice mode */
+ int RandomIntraMBRefresh; /* [IN] cyclic intra refresh */
+ int EnableFRMRateControl; /* [IN] frame based rate control enable */
+ int Bitrate; /* [IN] rate control parameter(bit rate) */
+ int FrameQp; /* [IN] The quantization parameter of the frame */
+ int FrameQp_P; /* [IN] The quantization parameter of the P frame */
+ int QSCodeMax; /* [IN] Maximum Quantization value */
+ int QSCodeMin; /* [IN] Minimum Quantization value */
+ int CBRPeriodRf; /* [IN] Reaction coefficient parameter for rate control */
+ int PadControlOn; /* [IN] Enable padding control */
+ int LumaPadVal; /* [IN] Luma pel value used to fill padding area */
+ int CbPadVal; /* [IN] CB pel value used to fill padding area */
+ int CrPadVal; /* [IN] CR pel value used to fill padding area */
+ int FrameMap; /* [IN] Encoding input mode(tile mode or linear mode) */
+#if SUPPORT_SLICE_ENCODING
+ int OutputMode; /* [IN] Output mode: Frame/Slice */
+#endif
+
+ /* MPEG4 specific parameters */
+ int ProfileIDC; /* [IN] profile */
+ int LevelIDC; /* [IN] level */
+ int FrameQp_B; /* [IN] The quantization parameter of the B frame */
+ int TimeIncreamentRes; /* [IN] frame rate */
+ int VopTimeIncreament; /* [IN] frame rate */
+ int SliceArgument; /* [IN] MB number or byte number */
+ int NumberBFrames; /* [IN] The number of consecutive B frame inserted */
+ int DisableQpelME; /* [IN] disable quarter-pixel motion estimation */
+} SSBSIP_MFC_ENC_MPEG4_PARAM;
+
+typedef struct {
+ /* common parameters */
+ SSBSIP_MFC_CODEC_TYPE codecType; /* [IN] codec type */
+ int SourceWidth; /* [IN] width of video to be encoded */
+ int SourceHeight; /* [IN] height of video to be encoded */
+ int IDRPeriod; /* [IN] GOP number(interval of I-frame) */
+ int SliceMode; /* [IN] Multi slice mode */
+ int RandomIntraMBRefresh; /* [IN] cyclic intra refresh */
+ int EnableFRMRateControl; /* [IN] frame based rate control enable */
+ int Bitrate; /* [IN] rate control parameter(bit rate) */
+ int FrameQp; /* [IN] The quantization parameter of the frame */
+ int FrameQp_P; /* [IN] The quantization parameter of the P frame */
+ int QSCodeMax; /* [IN] Maximum Quantization value */
+ int QSCodeMin; /* [IN] Minimum Quantization value */
+ int CBRPeriodRf; /* [IN] Reaction coefficient parameter for rate control */
+ int PadControlOn; /* [IN] Enable padding control */
+ int LumaPadVal; /* [IN] Luma pel value used to fill padding area */
+ int CbPadVal; /* [IN] CB pel value used to fill padding area */
+ int CrPadVal; /* [IN] CR pel value used to fill padding area */
+ int FrameMap; /* [IN] Encoding input mode(tile mode or linear mode) */
+#if SUPPORT_SLICE_ENCODING
+ int OutputMode; /* [IN] Output mode: Frame/Slice */
+#endif
+
+ /* H.263 specific parameters */
+ int FrameRate; /* [IN] rate control parameter(frame rate) */
+} SSBSIP_MFC_ENC_H263_PARAM;
+
+typedef struct {
+ int width;
+ int height;
+ int buf_width;
+ int buf_height;
+} SSBSIP_MFC_IMG_RESOLUTION;
+
+typedef struct {
+ int crop_top_offset;
+ int crop_bottom_offset;
+ int crop_left_offset;
+ int crop_right_offset;
+} SSBSIP_MFC_CROP_INFORMATION;
+
+typedef struct {
+ int available;
+ unsigned int arrangement_id;
+ int arrangement_cancel_flag;
+ unsigned char arrangement_type;
+ int quincunx_sampling_flag;
+ unsigned char content_interpretation_type;
+ int spatial_flipping_flag;
+ int frame0_flipped_flag;
+ int field_views_flag;
+ int current_frame_is_frame0_flag;
+ unsigned char frame0_grid_pos_x;
+ unsigned char frame0_grid_pos_y;
+ unsigned char frame1_grid_pos_x;
+ unsigned char frame1_grid_pos_y;
+} SSBSIP_MFC_FRAME_PACKING;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*--------------------------------------------------------------------------------*/
+/* Format Conversion API */
+/*--------------------------------------------------------------------------------*/
+/* Format Conversion API */
+void Y_tile_to_linear_4x2(unsigned char *p_linear_addr, unsigned char *p_tiled_addr, unsigned int x_size, unsigned int y_size);
+void CbCr_tile_to_linear_4x2(unsigned char *p_linear_addr, unsigned char *p_tiled_addr, unsigned int x_size, unsigned int y_size);
+
+/* C210 specific feature */
+void tile_to_linear_64x32_4x2_neon(unsigned char *p_linear_addr, unsigned char *p_tiled_addr, unsigned int x_size, unsigned int y_size);
+void tile_to_linear_64x32_4x2_uv_neon(unsigned char *p_linear_addr, unsigned char *p_tiled_addr, unsigned int x_size, unsigned int y_size);
+void Convert_NV12_to_I420_NEON(unsigned char *YUV420p, unsigned char *YVU420sp, unsigned int YSize, unsigned int UVPlaneSize);
+
+/*--------------------------------------------------------------------------------*/
+/* Decoding APIs */
+/*--------------------------------------------------------------------------------*/
+void *SsbSipMfcDecOpen(void);
+void *SsbSipMfcDecOpenExt(void *value);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcDecInit(void *openHandle, SSBSIP_MFC_CODEC_TYPE codec_type, int Frameleng);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcDecExe(void *openHandle, int lengthBufFill);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcDecClose(void *openHandle);
+void *SsbSipMfcDecGetInBuf(void *openHandle, void **phyInBuf, int inputBufferSize);
+
+
+#if (defined(CONFIG_VIDEO_MFC_VCM_UMP) || defined(USE_UMP))
+SSBSIP_MFC_ERROR_CODE SsbSipMfcDecSetInBuf(void *openHandle, unsigned int secure_id, int size);
+#else
+SSBSIP_MFC_ERROR_CODE SsbSipMfcDecSetInBuf(void *openHandle, void *phyInBuf, void *virInBuf, int size);
+#endif
+
+SSBSIP_MFC_DEC_OUTBUF_STATUS SsbSipMfcDecGetOutBuf(void *openHandle, SSBSIP_MFC_DEC_OUTPUT_INFO *output_info);
+
+SSBSIP_MFC_ERROR_CODE SsbSipMfcDecSetConfig(void *openHandle, SSBSIP_MFC_DEC_CONF conf_type, void *value);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcDecGetConfig(void *openHandle, SSBSIP_MFC_DEC_CONF conf_type, void *value);
+
+/*--------------------------------------------------------------------------------*/
+/* Encoding APIs */
+/*--------------------------------------------------------------------------------*/
+void *SsbSipMfcEncOpen(void);
+void *SsbSipMfcEncOpenExt(void *value);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcEncInit(void *openHandle, void *param);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcEncExe(void *openHandle);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcEncClose(void *openHandle);
+
+SSBSIP_MFC_ERROR_CODE SsbSipMfcEncGetInBuf(void *openHandle, SSBSIP_MFC_ENC_INPUT_INFO *input_info);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcEncSetInBuf(void *openHandle, SSBSIP_MFC_ENC_INPUT_INFO *input_info);
+
+SSBSIP_MFC_ERROR_CODE SsbSipMfcEncGetOutBuf(void *openHandle, SSBSIP_MFC_ENC_OUTPUT_INFO *output_info);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcEncSetOutBuf (void *openHandle, void *phyOutbuf, void *virOutbuf, int outputBufferSize);
+
+SSBSIP_MFC_ERROR_CODE SsbSipMfcEncSetConfig(void *openHandle, SSBSIP_MFC_ENC_CONF conf_type, void *value);
+SSBSIP_MFC_ERROR_CODE SsbSipMfcEncGetConfig(void *openHandle, SSBSIP_MFC_ENC_CONF conf_type, void *value);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SSBSIP_MFC_API_H_ */
diff --git a/drivers/media/video/samsung/mfc5x/mfc.h b/drivers/media/video/samsung/mfc5x/mfc.h
new file mode 100644
index 0000000..de1849c
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc.h
@@ -0,0 +1,101 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Global header for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_H_
+#define __MFC_H_ __FILE__
+
+#define MAX_HOR_SIZE 1920
+#define MAX_VER_SIZE 1088
+#define MAX_HOR_RES 1920
+#define MAX_VER_RES 1080
+
+#define MAX_MEM_OFFSET 0x10000000
+
+#ifdef CONFIG_VIDEO_MFC_MAX_INSTANCE
+#define MFC_MAX_INSTANCE_NUM (CONFIG_VIDEO_MFC_MAX_INSTANCE)
+#else
+#define MFC_MAX_INSTANCE_NUM (1)
+#endif
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+
+#define MFC_MAX_MEM_CHUNK_NUM (2)
+
+#define MFC_MAX_MEM_PORT_NUM (1)
+
+#define MFC_MEMSIZE_PORT_A (CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC_SECURE << 10)
+#define MFC_MEMSIZE_PORT_B (CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC_NORMAL << 10)
+
+#define MFC_MEMSIZE_DRM 0x20000
+#define MFC_SHM_OFS_DRM 0x400
+
+#else
+
+#ifdef CONFIG_VIDEO_MFC_MEM_PORT_COUNT
+#define MFC_MAX_MEM_PORT_NUM (CONFIG_VIDEO_MFC_MEM_PORT_COUNT)
+#else
+#define MFC_MAX_MEM_PORT_NUM (2)
+#endif
+
+#if (CONFIG_VIDEO_MFC_MEM_PORT_COUNT == 1)
+
+#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC
+#define MFC_MEMSIZE_PORT_A (CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC << 10)
+#define MFC_MEMSIZE_PORT_B 0
+#else
+#define MFC_MEMSIZE_PORT_A 0x4000000
+#define MFC_MEMSIZE_PORT_B 0
+#endif
+
+#else
+
+#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC0
+#define MFC_MEMSIZE_PORT_A (CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC0 << 10)
+#else
+#define MFC_MEMSIZE_PORT_A 0x2000000
+#endif
+
+#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC1
+#define MFC_MEMSIZE_PORT_B (CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC1 << 10)
+#else
+#define MFC_MEMSIZE_PORT_B 0x2000000
+#endif
+
+#endif
+
+#endif
+
+#if defined(CONFIG_S5P_SYSMMU_MFC_L) && defined(CONFIG_S5P_SYSMMU_MFC_R)
+#define SYSMMU_MFC_ON
+#endif
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP) && !defined(SYSMMU_MFC_ON)
+#error In order to use UMP over VCM, you must configure System MMU for MFC_L and MFC_R!
+#endif
+
+#if defined(CONFIG_S5P_VMEM) && !defined(SYSMMU_MFC_ON)
+#error In order to use S5PVEM, you must configure System MMU for MFC_L and MFC_R!
+#endif
+
+/* if possible, the free virtual addr. for MFC be aligned with 128KB */
+#if defined(CONFIG_S5P_VMEM)
+#if defined(CONFIG_VMSPLIT_3G)
+#define MFC_FREEBASE 0xF0000000
+#elif defined(CONFIG_VMSPLIT_2G)
+#define MFC_FREEBASE 0xE0000000
+#else
+#error Not support current memory split configuration
+#endif
+#endif
+
+#endif /* __MFC_H_ */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_buf.c b/drivers/media/video/samsung/mfc5x/mfc_buf.c
new file mode 100644
index 0000000..e0e243d
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_buf.c
@@ -0,0 +1,1037 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_buf.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Buffer manager for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+
+#include "mfc.h"
+#include "mfc_mem.h"
+#include "mfc_buf.h"
+#include "mfc_log.h"
+#include "mfc_errno.h"
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+#include <plat/s5p-vcm.h>
+
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_interface_vcm.h"
+#endif
+
+#define PRINT_BUF
+#undef DEBUG_ALLOC_FREE
+
+static struct list_head mfc_alloc_head[MFC_MAX_MEM_PORT_NUM];
+/* The free node list sorted by real address */
+static struct list_head mfc_free_head[MFC_MAX_MEM_PORT_NUM];
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+static enum MFC_BUF_ALLOC_SCHEME buf_alloc_scheme = MBS_FIRST_FIT;
+#else
+static enum MFC_BUF_ALLOC_SCHEME buf_alloc_scheme = MBS_BEST_FIT;
+#endif
+
+/* FIXME: test locking, add locking mechanisim */
+/*
+static spinlock_t lock;
+*/
+
+
+/*
+ * Dump every allocated and free buffer node on each memory port through
+ * mfc_dbg().  Per-allocation mapping details depend on the memory
+ * configuration (VCM/UMP, S5P VMEM, or plain contiguous memory).
+ * Compiled to a no-op when PRINT_BUF is not defined.
+ */
+void mfc_print_buf(void)
+{
+#ifdef PRINT_BUF
+	struct list_head *pos;
+	struct mfc_alloc_buffer *alloc = NULL;
+	struct mfc_free_buffer *free = NULL;
+	int port, i;
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		mfc_dbg("---- port %d buffer list ----", port);
+
+		/* walk the allocated list: [A #nnnn] entries */
+		i = 0;
+		list_for_each(pos, &mfc_alloc_head[port]) {
+			alloc = list_entry(pos, struct mfc_alloc_buffer, list);
+			mfc_dbg("[A #%04d] addr: 0x%08x, size: %d",
+				i, (unsigned int)alloc->addr, alloc->size);
+			mfc_dbg("\t real: 0x%08lx", alloc->real);
+			mfc_dbg("\t type: 0x%08x, owner: %d",
+				alloc->type, alloc->owner);
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+			mfc_dbg("\t* vcm sysmmu");
+			if (alloc->vcm_s) {
+				mfc_dbg("\t start: 0x%08x, res_size : 0x%08x\n",
+					(unsigned int)alloc->vcm_s->res.start,
+					(unsigned int)alloc->vcm_s->res.res_size);
+				mfc_dbg("\t bound_size: 0x%08x\n",
+					(unsigned int)alloc->vcm_s->res.bound_size);
+			}
+
+			mfc_dbg("\t* vcm kernel");
+			if (alloc->vcm_k) {
+				mfc_dbg("\t start: 0x%08x, res_size : 0x%08x\n",
+					(unsigned int)alloc->vcm_k->start,
+					(unsigned int)alloc->vcm_k->res_size);
+				mfc_dbg("\t bound_size: 0x%08x\n",
+					(unsigned int)alloc->vcm_k->bound_size);
+			}
+
+			mfc_dbg("\t* ump");
+			if (alloc->ump_handle) {
+				mfc_dbg("\t secure id: 0x%08x",
+					mfc_ump_get_id(alloc->ump_handle));
+			}
+#elif defined(CONFIG_S5P_VMEM)
+			mfc_dbg("\t vmem cookie: 0x%08x addr: 0x%08lx, size: %d",
+				alloc->vmem_cookie, alloc->vmem_addr,
+				alloc->vmem_size);
+#else
+			mfc_dbg("\t offset: 0x%08x", alloc->ofs);
+#endif
+			i++;
+		}
+
+		/* walk the free list: [F #nnnn] entries */
+		i = 0;
+		list_for_each(pos, &mfc_free_head[port]) {
+			free = list_entry(pos, struct mfc_free_buffer, list);
+			mfc_dbg("[F #%04d] addr: 0x%08lx, size: %d",
+				i, free->real, free->size);
+			i++;
+		}
+	}
+#endif
+}
+
+/*
+ * Return the region [addr, addr + size) to port's free list, which is
+ * kept sorted by real address.  The region is merged with the previous
+ * and/or next free node when they are physically adjacent; otherwise a
+ * new node is inserted in front of the first node at or above addr.
+ *
+ * Returns 0 on success, -EINVAL for a zero size, -ENOMEM if a new free
+ * node cannot be allocated.
+ */
+static int mfc_put_free_buf(unsigned long addr, unsigned int size, int port)
+{
+	struct list_head *pos, *nxt;
+	struct mfc_free_buffer *free;
+	struct mfc_free_buffer *next = NULL;
+	struct mfc_free_buffer *prev = NULL;
+	/* 0x00: not merged, 0x01: prev merged, 0x02: next merged */
+	int merged = 0x00;
+
+	if (!size)
+		return -EINVAL;
+
+	mfc_dbg("addr: 0x%08lx, size: %d, port: %d\n", addr, size, port);
+
+	list_for_each_safe(pos, nxt, &mfc_free_head[port]) {
+		next = list_entry(pos, struct mfc_free_buffer, list);
+
+		/*
+		 * When the allocated address must be align without VMEM,
+		 * the free buffer can be overlap
+		 * previous free buffer temporarily
+		 * Target buffer will be shrink after this operation
+		 */
+		if (addr <= next->real) {
+			/*
+			 * FIX: only look at the previous node when one
+			 * exists.  list_entry() never returns NULL, so the
+			 * former "if (prev && ...)" test always passed and,
+			 * when pos was the first node, dereferenced the
+			 * list-head sentinel as a struct mfc_free_buffer.
+			 */
+			if (pos->prev != &mfc_free_head[port]) {
+				prev = list_entry(pos->prev,
+					struct mfc_free_buffer, list);
+
+				mfc_dbg("prev->addr: 0x%08lx, size: %d",
+					prev->real, prev->size);
+				/* merge previous free buffer */
+				if ((prev->real + prev->size) == addr) {
+					addr = prev->real;
+					size += prev->size;
+
+					prev->size = size;
+
+					merged |= 0x01;
+					mfc_dbg("auto merge free buffer[p]: addr: 0x%08lx, size: %d",
+						prev->real, prev->size);
+				}
+			}
+
+			mfc_dbg("next->addr: 0x%08lx, size: %d", next->real, next->size);
+			/* merge next free buffer */
+			if ((addr + size) == next->real) {
+				next->real = addr;
+				next->size += size;
+
+				/* already merged into prev: keep both in sync */
+				if (merged)
+					prev->size = next->size;
+
+				merged |= 0x02;
+				mfc_dbg("auto merge free buffer[n]: addr: 0x%08lx, size: %d",
+					next->real, next->size);
+			}
+
+			break;
+		}
+	}
+
+	if (!merged) {
+		/*
+		 * No adjacent neighbour: insert a fresh node before pos.
+		 * If the loop ran to completion (or the list was empty),
+		 * pos is the list head and this appends at the tail.
+		 */
+		free = (struct mfc_free_buffer *)
+			kzalloc(sizeof(struct mfc_free_buffer), GFP_KERNEL);
+
+		if (unlikely(free == NULL))
+			return -ENOMEM;
+
+		free->real = addr;
+		free->size = size;
+
+		list_add_tail(&free->list, pos);
+	}
+
+	/* bi-directional merged: prev already covers everything, drop next */
+	else if ((merged & 0x03) == 0x03) {
+		list_del(&next->list);
+		kfree(next);
+	}
+
+	return 0;
+}
+
+/*
+ * Carve a region of 'size' bytes with alignment 'align' out of port's
+ * free list, using best-fit or first-fit depending on buf_alloc_scheme.
+ * Returns the (unaligned) start address of the chosen free node, or 0
+ * when no node is large enough.  Without VCM/VMEM, the alignment gap in
+ * front of the returned address is pushed back to the free list.
+ */
+static unsigned long mfc_get_free_buf(unsigned int size, int align, int port)
+{
+	struct list_head *pos, *nxt;
+	struct mfc_free_buffer *free;
+	struct mfc_free_buffer *match = NULL;
+	int align_size = 0;
+	unsigned long addr = 0;
+
+	mfc_dbg("size: %d, align: %d, port: %d\n",
+		size, align, port);
+
+	if (list_empty(&mfc_free_head[port])) {
+		mfc_err("no free node in mfc buffer\n");
+
+		return 0;
+	}
+
+	/* find best fit area */
+	list_for_each_safe(pos, nxt, &mfc_free_head[port]) {
+		free = list_entry(pos, struct mfc_free_buffer, list);
+
+#if (defined(CONFIG_VIDEO_MFC_VCM_UMP) || defined(CONFIG_S5P_VMEM))
+		/*
+		 * Align the start address.
+		 * We assume the start address of free buffer aligned with 4KB
+		 */
+		/*
+		 * NOTE(review): this assignment is immediately overwritten
+		 * by both branches of the if/else below — dead code.
+		 */
+		align_size = ALIGN(align_size + size, PAGE_SIZE) - size;
+
+		if (align > PAGE_SIZE) {
+			align_size = ALIGN(free->real, align) - free->real;
+			align_size += ALIGN(align_size + size, PAGE_SIZE) - size;
+		} else {
+			align_size = ALIGN(align_size + size, PAGE_SIZE) - size;
+		}
+#else
+		/* padding needed to bring free->real up to 'align' */
+		align_size = ALIGN(free->real, align) - free->real;
+#endif
+		if (free->size >= (size + align_size)) {
+			if (buf_alloc_scheme == MBS_BEST_FIT) {
+				/* keep the smallest node that still fits */
+				if (match != NULL) {
+					if (free->size < match->size)
+						match = free;
+				} else {
+					match = free;
+				}
+			} else if (buf_alloc_scheme == MBS_FIRST_FIT) {
+				match = free;
+				break;
+			}
+		}
+	}
+
+	if (match != NULL) {
+		addr = match->real;
+		/*
+		 * NOTE(review): recomputed without the PAGE_SIZE rounding
+		 * used in the fit test above — confirm this is intended
+		 * for the VCM/VMEM configurations.
+		 */
+		align_size = ALIGN(addr, align) - addr;
+
+#if !(defined(CONFIG_VIDEO_MFC_VCM_UMP) || defined(CONFIG_S5P_VMEM))
+		if (align_size > 0) {
+			/*
+			 * When the allocated address must be align without VMEM,
+			 * the free buffer can be overlap
+			 * previous free buffer temporarily
+			 */
+			if (mfc_put_free_buf(match->real, align_size, port) < 0)
+				return 0;
+		}
+#endif
+		/* change allocated buffer address & size */
+		match->real += (size + align_size);
+		match->size -= (size + align_size);
+
+		/* node fully consumed: remove it */
+		if (match->size == 0) {
+			list_del(&match->list);
+			kfree(match);
+		}
+	} else {
+		mfc_err("no suitable free node in mfc buffer\n");
+
+		return 0;
+	}
+
+	return addr;
+}
+
+/*
+ * Initialise the per-port alloc/free lists and seed each free list with
+ * the port's whole data area.  With content-path protection both memory
+ * regions are chained onto a single port-0 free list.  Returns 0 on
+ * success, -1 if any free list ends up empty.
+ */
+int mfc_init_buf(void)
+{
+#ifndef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+	int port;
+#endif
+	int ret = 0;
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+	INIT_LIST_HEAD(&mfc_alloc_head[0]);
+	INIT_LIST_HEAD(&mfc_free_head[0]);
+
+	if (mfc_put_free_buf(mfc_mem_data_base(0),
+		mfc_mem_data_size(0), 0) < 0)
+		mfc_err("failed to add free buffer: [0x%08lx: %d]\n",
+			mfc_mem_data_base(0), mfc_mem_data_size(0));
+
+	/* region 1 is optional here, hence only a debug message */
+	if (mfc_put_free_buf(mfc_mem_data_base(1),
+		mfc_mem_data_size(1), 0) < 0)
+		mfc_dbg("failed to add free buffer: [0x%08lx: %d]\n",
+			mfc_mem_data_base(1), mfc_mem_data_size(1));
+
+	if (list_empty(&mfc_free_head[0]))
+		ret = -1;
+
+#else
+	for (port = 0; port < mfc_mem_count(); port++) {
+		INIT_LIST_HEAD(&mfc_alloc_head[port]);
+		INIT_LIST_HEAD(&mfc_free_head[port]);
+
+		if (mfc_put_free_buf(mfc_mem_data_base(port),
+			mfc_mem_data_size(port), port) < 0)
+			mfc_err("failed to add free buffer: [0x%08lx: %d]\n",
+				mfc_mem_data_base(port),
+				mfc_mem_data_size(port));
+	}
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		if (list_empty(&mfc_free_head[port]))
+			ret = -1;
+	}
+#endif
+
+	/*
+	spin_lock_init(&lock);
+	*/
+
+	mfc_print_buf();
+
+	return ret;
+}
+
+/*
+ * Driver-teardown cleanup: unmap and release every remaining allocation
+ * on all ports, then destroy the free lists themselves.  After this both
+ * lists are empty.  The locking is stubbed out (see the FIXME above).
+ */
+void mfc_final_buf(void)
+{
+	struct list_head *pos, *nxt;
+	struct mfc_alloc_buffer *alloc;
+	struct mfc_free_buffer *free;
+	int port;
+	/*
+	unsigned long flags;
+	*/
+
+	/*
+	spin_lock_irqsave(&lock, flags);
+	*/
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_alloc_head[port]) {
+			alloc = list_entry(pos, struct mfc_alloc_buffer, list);
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+			/* undo UMP map, VCM kernel map, then the binding */
+			if (alloc->ump_handle)
+				mfc_ump_unmap(alloc->ump_handle);
+
+			if (alloc->vcm_k)
+				mfc_vcm_unmap(alloc->vcm_k);
+
+			if (alloc->vcm_s)
+				mfc_vcm_unbind(alloc->vcm_s,
+					alloc->type & MBT_OTHER);
+
+			if (mfc_put_free_buf(alloc->vcm_addr,
+				alloc->vcm_size, port) < 0) {
+
+				mfc_err("failed to add free buffer\n");
+			} else {
+				list_del(&alloc->list);
+				kfree(alloc);
+			}
+#elif defined(CONFIG_S5P_VMEM)
+			if (alloc->vmem_cookie)
+				s5p_vfree(alloc->vmem_cookie);
+
+			if (mfc_put_free_buf(alloc->vmem_addr,
+				alloc->vmem_size, port) < 0) {
+
+				mfc_err("failed to add free buffer\n");
+			} else {
+				list_del(&alloc->list);
+				kfree(alloc);
+			}
+#else
+			if (mfc_put_free_buf(alloc->real,
+				alloc->size, port) < 0) {
+
+				mfc_err("failed to add free buffer\n");
+			} else {
+				list_del(&alloc->list);
+				kfree(alloc);
+			}
+#endif
+		}
+	}
+
+	/*
+	spin_unlock_irqrestore(&lock, flags);
+	*/
+
+	mfc_print_buf();
+
+	/*
+	spin_lock_irqsave(&lock, flags);
+	*/
+
+	/* finally drop the free-list nodes themselves */
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_free_head[port]) {
+			free = list_entry(pos, struct mfc_free_buffer, list);
+			list_del(&free->list);
+			kfree(free);
+		}
+	}
+
+	/*
+	spin_unlock_irqrestore(&lock, flags);
+	*/
+
+	mfc_print_buf();
+}
+
+/* Select best-fit vs first-fit for subsequent mfc_get_free_buf() calls. */
+void mfc_set_buf_alloc_scheme(enum MFC_BUF_ALLOC_SCHEME scheme)
+{
+	buf_alloc_scheme = scheme;
+}
+
+/*
+ * Coalesce physically adjacent nodes on every port's free list.  The
+ * list is address-sorted, so only each pair of consecutive nodes needs
+ * to be checked; the earlier node is absorbed into the later one.
+ */
+void mfc_merge_buf(void)
+{
+	struct list_head *pos, *nxt;
+	struct mfc_free_buffer *n1;
+	struct mfc_free_buffer *n2;
+	int port;
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_free_head[port]) {
+			n1 = list_entry(pos, struct mfc_free_buffer, list);
+			/*
+			 * NOTE(review): on the last iteration nxt is the
+			 * list head, so n2 aliases the sentinel; it is only
+			 * dereferenced by the mfc_dbg() prints below, but
+			 * that still reads non-node memory — verify.
+			 */
+			n2 = list_entry(nxt, struct mfc_free_buffer, list);
+
+			mfc_dbg("merge pre: n1: 0x%08lx, n2: 0x%08lx",
+				n1->real, n2->real);
+
+			if (!list_is_last(pos, &mfc_free_head[port])) {
+				/* adjacent: fold n1 into n2 and drop n1 */
+				if ((n1->real + n1->size) == n2->real) {
+					n2->real = n1->real;
+					n2->size += n1->size;
+					list_del(&n1->list);
+					kfree(n1);
+				}
+			}
+
+			mfc_dbg("merge aft: n1: 0x%08lx, n2: 0x%08lx, last: %d",
+				n1->real, n2->real,
+				list_is_last(pos, &mfc_free_head[port]));
+		}
+	}
+
+#ifdef DEBUG_ALLOC_FREE
+	mfc_print_buf();
+#endif
+}
+
+/* FIXME: port auto select, return values */
+/*
+ * Allocate a buffer of 'size' bytes with alignment 'align' for instance
+ * ctx.  The low 16 bits of 'flag' select the memory port (clamped to the
+ * available port count); the high 16 bits carry the MBT_* buffer type.
+ * Depending on the configuration the region is additionally bound/mapped
+ * through VCM+UMP or S5P VMEM.  Returns the tracking node, or NULL on
+ * any failure (the reserved region is returned to the free list first).
+ */
+struct mfc_alloc_buffer *_mfc_alloc_buf(
+	struct mfc_inst_ctx *ctx, unsigned int size, int align, int flag)
+{
+	unsigned long addr;
+	struct mfc_alloc_buffer *alloc;
+	int port = flag & 0xFFFF;
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+	int align_size = 0;
+	struct ump_vcm ump_vcm;
+#elif defined(CONFIG_S5P_VMEM)
+	int align_size = 0;
+#endif
+	/*
+	unsigned long flags;
+	*/
+
+	if (!size)
+		return NULL;
+
+	alloc = (struct mfc_alloc_buffer *)
+		kzalloc(sizeof(struct mfc_alloc_buffer), GFP_KERNEL);
+
+	if (unlikely(alloc == NULL))
+		return NULL;
+
+	/* FIXME: right position? */
+	if (port > (mfc_mem_count() - 1))
+		port = mfc_mem_count() - 1;
+
+	/*
+	spin_lock_irqsave(&lock, flags);
+	*/
+
+	addr = mfc_get_free_buf(size, align, port);
+
+	mfc_dbg("mfc_get_free_buf: 0x%08lx\n", addr);
+
+	if (!addr) {
+		mfc_dbg("cannot get suitable free buffer\n");
+		/* FIXME: is it need?
+		mfc_put_free_buf(addr, size, port);
+		*/
+		kfree(alloc);
+		/*
+		spin_unlock_irqrestore(&lock, flags);
+		*/
+
+		return NULL;
+	}
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+	/* pad the bound size up to page granularity (plus alignment gap) */
+	if (align > PAGE_SIZE) {
+		align_size = ALIGN(addr, align) - addr;
+		align_size += ALIGN(align_size + size, PAGE_SIZE) - size;
+	} else {
+		align_size = ALIGN(align_size + size, PAGE_SIZE) - size;
+	}
+
+	alloc->vcm_s = mfc_vcm_bind(addr, size + align_size);
+	if (IS_ERR(alloc->vcm_s)) {
+		mfc_put_free_buf(addr, size, port);
+		kfree(alloc);
+
+		return NULL;
+		/*
+		return PTR_ERR(alloc->vcm_s);
+		*/
+	}
+
+	/* kernel-side mapping, only when requested by the caller */
+	if (flag & MBT_KERNEL) {
+		alloc->vcm_k = mfc_vcm_map(alloc->vcm_s->res.phys);
+		if (IS_ERR(alloc->vcm_k)) {
+			mfc_vcm_unbind(alloc->vcm_s,
+				alloc->type & MBT_OTHER);
+			mfc_put_free_buf(addr, size, port);
+			kfree(alloc);
+
+			return NULL;
+			/*
+			return PTR_ERR(alloc->vcm_k);
+			*/
+		}
+	}
+
+	/* user-side (UMP) mapping, only when requested by the caller */
+	if (flag & MBT_USER) {
+		ump_vcm.vcm = alloc->vcm_s->res.vcm;
+		ump_vcm.vcm_res = &(alloc->vcm_s->res);
+		ump_vcm.dev_id = VCM_DEV_MFC;
+
+		alloc->ump_handle = mfc_ump_map(alloc->vcm_s->res.phys, (unsigned long)&ump_vcm);
+		if (IS_ERR(alloc->ump_handle)) {
+			mfc_vcm_unmap(alloc->vcm_k);
+			mfc_vcm_unbind(alloc->vcm_s,
+				alloc->type & MBT_OTHER);
+			mfc_put_free_buf(addr, size, port);
+			kfree(alloc);
+
+			return NULL;
+			/*
+			return PTR_ERR(alloc->vcm_k);
+			*/
+		}
+	}
+
+	alloc->vcm_addr = addr;
+	alloc->vcm_size = size + align_size;
+#elif defined(CONFIG_S5P_VMEM)
+	if (align > PAGE_SIZE) {
+		align_size = ALIGN(addr, align) - addr;
+		align_size += ALIGN(align_size + size, PAGE_SIZE) - size;
+	} else {
+		align_size = ALIGN(align_size + size, PAGE_SIZE) - size;
+	}
+
+	alloc->vmem_cookie = s5p_vmem_vmemmap(size + align_size,
+		addr, addr + (size + align_size));
+
+	if (!alloc->vmem_cookie) {
+		mfc_dbg("cannot map free buffer to memory\n");
+		mfc_put_free_buf(addr, size, port);
+		kfree(alloc);
+
+		return NULL;
+	}
+
+	alloc->vmem_addr = addr;
+	alloc->vmem_size = size + align_size;
+#endif
+	/* device-visible address, aligned as requested */
+	alloc->real = ALIGN(addr, align);
+	alloc->size = size;
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+	if (alloc->vcm_k)
+		alloc->addr = (unsigned char *)alloc->vcm_k->start;
+	else
+		alloc->addr = NULL;
+#elif defined(CONFIG_S5P_VMEM)
+	alloc->addr = (unsigned char *)(mfc_mem_addr(port) +
+		mfc_mem_base_ofs(alloc->real));
+#else
+	alloc->addr = (unsigned char *)(mfc_mem_addr(port) +
+		mfc_mem_base_ofs(alloc->real));
+	/*
+	alloc->user = (unsigned char *)(ctx->userbase +
+		mfc_mem_data_ofs(alloc->real, 1));
+	*/
+	/* mmap key handed back to user space */
+	alloc->ofs = mfc_mem_data_ofs(alloc->real, 1);
+#endif
+	alloc->type = flag & 0xFFFF0000;
+	alloc->owner = ctx->id;
+
+	list_add(&alloc->list, &mfc_alloc_head[port]);
+
+	/*
+	spin_unlock_irqrestore(&lock, flags);
+	*/
+
+#ifdef DEBUG_ALLOC_FREE
+	mfc_print_buf();
+#endif
+
+	return alloc;
+}
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+/*
+ * Import an UMP allocation (identified by args->secure_id, typically
+ * created by another driver) into the MFC address space: reserve a
+ * matching region from the port's free list, bind it through the MFC
+ * system MMU, and track it as an allocation owned by ctx.
+ *
+ * Returns 0 on success, (unsigned int)-1 on failure.
+ *
+ * FIX: the original leaked on every error path — the UMP reference
+ * taken with ump_dd_reference_add() was never released, and when the
+ * s_res allocation or vcm_bind() failed the region obtained from
+ * mfc_get_free_buf() was never returned to the free list.
+ */
+unsigned int mfc_vcm_bind_from_others(struct mfc_inst_ctx *ctx,
+			struct mfc_buf_alloc_arg *args, int flag)
+{
+	int ret;
+	unsigned long addr;
+	unsigned int size;
+	unsigned int secure_id = args->secure_id;
+	int port = flag & 0xFFFF;
+
+	struct vcm_res *vcm_res;
+	struct vcm_mmu_res *s_res;
+	struct mfc_alloc_buffer *alloc;
+
+	ump_dd_handle ump_mem;
+
+	/* FIXME: right position? */
+	if (port > (mfc_mem_count() - 1))
+		port = mfc_mem_count() - 1;
+
+	ump_mem = ump_dd_handle_create_from_secure_id(secure_id);
+	ump_dd_reference_add(ump_mem);
+
+	vcm_res = (struct vcm_res *)
+		ump_dd_meminfo_get(secure_id, (void *)VCM_DEV_MFC);
+	if (!vcm_res) {
+		mfc_dbg("%s: Failed to get vcm_res\n", __func__);
+		goto err_ret;
+	}
+
+	size = vcm_res->bound_size;
+
+	alloc = (struct mfc_alloc_buffer *)
+		kzalloc(sizeof(struct mfc_alloc_buffer), GFP_KERNEL);
+	if (!alloc) {
+		mfc_dbg("%s: Failed to get mfc_alloc_buffer\n", __func__);
+		goto err_ret;
+	}
+
+	addr = mfc_get_free_buf(size, ALIGN_2KB, port);
+	if (!addr) {
+		mfc_dbg("cannot get suitable free buffer\n");
+		goto err_ret_alloc;
+	}
+	mfc_dbg("mfc_get_free_buf: 0x%08lx\n", addr);
+
+	s_res = kzalloc(sizeof(struct vcm_mmu_res), GFP_KERNEL);
+	if (!s_res) {
+		mfc_dbg("%s: Failed to get vcm_mmu_res\n", __func__);
+		goto err_ret_buf;
+	}
+
+	s_res->res.start = addr;
+	s_res->res.res_size = size;
+	s_res->res.vcm = ctx->dev->vcm_info.sysmmu_vcm;
+	INIT_LIST_HEAD(&s_res->bound);
+
+	ret = vcm_bind(&s_res->res, vcm_res->phys);
+	if (ret < 0) {
+		mfc_dbg("%s: Failed to vcm_bind\n", __func__);
+		goto err_ret_s_res;
+	}
+
+	/* success: the UMP reference is kept and released at free time */
+	alloc->vcm_s = s_res;
+	alloc->vcm_addr = addr;
+	alloc->ump_handle = ump_mem;
+	alloc->vcm_size = size;
+	alloc->real = addr;
+	alloc->size = size;
+	alloc->type = flag & 0xFFFF0000;
+	alloc->owner = ctx->id;
+
+	list_add(&alloc->list, &mfc_alloc_head[port]);
+
+	mfc_print_buf();
+
+	return 0;
+
+err_ret_s_res:
+	kfree(s_res);
+err_ret_buf:
+	/* give the reserved region back to the free list */
+	if (mfc_put_free_buf(addr, size, port) < 0)
+		mfc_err("failed to add free buffer\n");
+err_ret_alloc:
+	kfree(alloc);
+err_ret:
+	/* drop the reference taken above */
+	ump_dd_reference_release(ump_mem);
+	return -1;
+}
+#endif
+
+/*
+ * ioctl-level wrapper around _mfc_alloc_buf(): allocates per args->size
+ * and args->align, then fills args with the kernel address plus the
+ * configuration-specific user key (UMP secure id, VMEM cookie, or mmap
+ * offset).  Returns MFC_OK or MFC_MEM_ALLOC_FAIL.
+ */
+int
+mfc_alloc_buf(struct mfc_inst_ctx *ctx, struct mfc_buf_alloc_arg *args, int flag)
+{
+	struct mfc_alloc_buffer *alloc;
+
+	alloc = _mfc_alloc_buf(ctx, args->size, args->align, flag);
+
+	if (unlikely(alloc == NULL))
+		return MFC_MEM_ALLOC_FAIL;
+	/*
+	args->phys = (unsigned int)alloc->real;
+	*/
+	args->addr = (unsigned int)alloc->addr;
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+	if (alloc->ump_handle)
+		args->secure_id = mfc_ump_get_id(alloc->ump_handle);
+#elif defined(CONFIG_S5P_VMEM)
+	args->cookie = (unsigned int)alloc->vmem_cookie;
+#else
+	args->offset = alloc->ofs;
+#endif
+	return MFC_OK;
+}
+
+/*
+ * Free the allocation whose device address matches 'real': undo any
+ * configuration-specific mappings, return the region to its port's free
+ * list and drop the tracking node.  Returns 0 when found, -1 otherwise.
+ */
+int _mfc_free_buf(unsigned long real)
+{
+	struct list_head *pos, *nxt;
+	struct mfc_alloc_buffer *alloc;
+	int port;
+	int found = 0;
+	/*
+	unsigned long flags;
+	*/
+
+	mfc_dbg("addr: 0x%08lx\n", real);
+
+	/*
+	spin_lock_irqsave(&lock, flags);
+	*/
+
+	/* search every port; an address belongs to exactly one list */
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_alloc_head[port]) {
+			alloc = list_entry(pos, struct mfc_alloc_buffer, list);
+
+			if (alloc->real == real) {
+				found = 1;
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+				if (alloc->ump_handle)
+					mfc_ump_unmap(alloc->ump_handle);
+
+				if (alloc->vcm_k)
+					mfc_vcm_unmap(alloc->vcm_k);
+
+				if (alloc->vcm_s)
+					mfc_vcm_unbind(alloc->vcm_s,
+						alloc->type & MBT_OTHER);
+
+				if (mfc_put_free_buf(alloc->vcm_addr,
+					alloc->vcm_size, port) < 0) {
+
+					mfc_err("failed to add free buffer\n");
+				} else {
+					list_del(&alloc->list);
+					kfree(alloc);
+				}
+#elif defined(CONFIG_S5P_VMEM)
+				if (alloc->vmem_cookie)
+					s5p_vfree(alloc->vmem_cookie);
+
+				if (mfc_put_free_buf(alloc->vmem_addr,
+					alloc->vmem_size, port) < 0) {
+
+					mfc_err("failed to add free buffer\n");
+				} else {
+					list_del(&alloc->list);
+					kfree(alloc);
+				}
+#else
+				if (mfc_put_free_buf(alloc->real,
+					alloc->size, port) < 0) {
+
+					mfc_err("failed to add free buffer\n");
+				} else {
+					list_del(&alloc->list);
+					kfree(alloc);
+				}
+#endif
+				break;
+			}
+		}
+
+		if (found)
+			break;
+	}
+
+	/*
+	spin_unlock_irqrestore(&lock, flags);
+	*/
+
+#ifdef DEBUG_ALLOC_FREE
+	mfc_print_buf();
+#endif
+
+	if (found)
+		return 0;
+
+	return -1;
+}
+
+/*
+ * ioctl-level free: translate the user-visible key (secure id, cookie,
+ * or mmap offset) owned by ctx into a device address and free it.
+ * Returns MFC_OK or MFC_MEM_INVALID_ADDR_FAIL.
+ */
+int mfc_free_buf(struct mfc_inst_ctx *ctx, unsigned int key)
+{
+	unsigned long real;
+
+	real = mfc_get_buf_real(ctx->id, key);
+	if (unlikely(real == 0))
+		return MFC_MEM_INVALID_ADDR_FAIL;
+
+	if (_mfc_free_buf(real) < 0)
+		return MFC_MEM_INVALID_ADDR_FAIL;
+
+	return MFC_OK;
+}
+
+/*
+ * Free every allocation on any port that matches both the owning
+ * instance id and the MBT_* type.
+ *
+ * NOTE(review): unlike _mfc_free_buf()/mfc_free_buf_inst(), this path
+ * performs no UMP/VCM/VMEM unmapping and returns alloc->real/size (not
+ * vcm_addr/vcm_size) — confirm it is only used in the plain contiguous
+ * memory configuration.
+ */
+void mfc_free_buf_type(int owner, int type)
+{
+	int port;
+	struct list_head *pos, *nxt;
+	struct mfc_alloc_buffer *alloc;
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_alloc_head[port]) {
+			alloc = list_entry(pos, struct mfc_alloc_buffer, list);
+
+			if ((alloc->owner == owner) && (alloc->type == type)) {
+				if (mfc_put_free_buf(alloc->real,
+					alloc->size, port) < 0) {
+
+					mfc_err("failed to add free buffer\n");
+				} else {
+					list_del(&alloc->list);
+					kfree(alloc);
+				}
+			}
+		}
+	}
+}
+
+/* FIXME: add MFC Buffer Type */
+/*
+ * Free every allocation belonging to instance 'owner' on all ports,
+ * undoing the configuration-specific mappings first.  Used when an
+ * instance is torn down.
+ */
+void mfc_free_buf_inst(int owner)
+{
+	struct list_head *pos, *nxt;
+	int port;
+	struct mfc_alloc_buffer *alloc;
+	/*
+	unsigned long flags;
+	*/
+
+	mfc_dbg("owner: %d\n", owner);
+
+	/*
+	spin_lock_irqsave(&lock, flags);
+	*/
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_alloc_head[port]) {
+			alloc = list_entry(pos, struct mfc_alloc_buffer, list);
+
+			if (alloc->owner == owner) {
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+				/* unmap UMP and VCM before releasing the area */
+				if (alloc->ump_handle)
+					mfc_ump_unmap(alloc->ump_handle);
+
+				if (alloc->vcm_k)
+					mfc_vcm_unmap(alloc->vcm_k);
+
+				if (alloc->vcm_s)
+					mfc_vcm_unbind(alloc->vcm_s,
+						alloc->type & MBT_OTHER);
+
+				if (mfc_put_free_buf(alloc->vcm_addr,
+					alloc->vcm_size, port) < 0) {
+
+					mfc_err("failed to add free buffer\n");
+				} else {
+					list_del(&alloc->list);
+					kfree(alloc);
+				}
+#elif defined(CONFIG_S5P_VMEM)
+				if (alloc->vmem_cookie)
+					s5p_vfree(alloc->vmem_cookie);
+
+				if (mfc_put_free_buf(alloc->vmem_addr,
+					alloc->vmem_size, port) < 0) {
+
+					mfc_err("failed to add free buffer\n");
+				} else {
+					list_del(&alloc->list);
+					kfree(alloc);
+				}
+#else
+				if (mfc_put_free_buf(alloc->real,
+					alloc->size, port) < 0) {
+
+					mfc_err("failed to add free buffer\n");
+				} else {
+					list_del(&alloc->list);
+					kfree(alloc);
+				}
+#endif
+			}
+		}
+	}
+
+	/*
+	spin_unlock_irqrestore(&lock, flags);
+	*/
+
+#ifdef DEBUG_ALLOC_FREE
+	mfc_print_buf();
+#endif
+}
+
+/*
+ * Reverse lookup: map an instance's user-visible key (UMP secure id,
+ * VMEM cookie, or mmap offset, depending on configuration) back to the
+ * device address of the matching allocation.  Returns 0 when no
+ * allocation of 'owner' matches.
+ */
+unsigned long mfc_get_buf_real(int owner, unsigned int key)
+{
+	struct list_head *pos, *nxt;
+	int port;
+	struct mfc_alloc_buffer *alloc;
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+	mfc_dbg("owner: %d, secure id: 0x%08x\n", owner, key);
+#elif defined(CONFIG_S5P_VMEM)
+	mfc_dbg("owner: %d, cookie: 0x%08x\n", owner, key);
+#else
+	mfc_dbg("owner: %d, offset: 0x%08x\n", owner, key);
+#endif
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_alloc_head[port]) {
+			alloc = list_entry(pos, struct mfc_alloc_buffer, list);
+
+			if (alloc->owner == owner) {
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+				if (alloc->ump_handle) {
+					if (mfc_ump_get_id(alloc->ump_handle) == key)
+						return alloc->real;
+				}
+#elif defined(CONFIG_S5P_VMEM)
+				if (alloc->vmem_cookie == key)
+					return alloc->real;
+#else
+				if (alloc->ofs == key)
+					return alloc->real;
+#endif
+			}
+		}
+	}
+
+	return 0;
+}
+
+#if 0
+/*
+ * Dead code (compiled out): user-address -> kernel-address lookup
+ * helpers.  They rely on alloc->user, which the active code never sets
+ * (the assignment in _mfc_alloc_buf() is commented out).  Kept for
+ * reference only.
+ */
+unsigned char *mfc_get_buf_addr(int owner, unsigned char *user)
+{
+	struct list_head *pos, *nxt;
+	int port;
+	struct mfc_alloc_buffer *alloc;
+
+	mfc_dbg("owner: %d, user: 0x%08x\n", owner, (unsigned int)user);
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_alloc_head[port]) {
+			alloc = list_entry(pos, struct mfc_alloc_buffer, list);
+
+			if ((alloc->owner == owner)
+				&& (alloc->user == user)){
+
+				return alloc->addr;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+unsigned char *_mfc_get_buf_addr(int owner, unsigned char *user)
+{
+	struct list_head *pos, *nxt;
+	int port;
+	struct mfc_alloc_buffer *alloc;
+
+	mfc_dbg("owner: %d, user: 0x%08x\n", owner, (unsigned int)user);
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_alloc_head[port]) {
+			alloc = list_entry(pos, struct mfc_alloc_buffer, list);
+
+			if ((alloc->owner == owner)
+				&& ((alloc->user <= user) || ((alloc->user + alloc->size) > user))){
+
+				return alloc->addr;
+			}
+		}
+	}
+
+	return NULL;
+}
+#endif
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+/*
+ * Look up the UMP handle of the allocation whose device address is
+ * 'real', across all ports.  Returns NULL when no allocation matches.
+ */
+void *mfc_get_buf_ump_handle(unsigned long real)
+{
+	struct list_head *pos, *nxt;
+	int port;
+	struct mfc_alloc_buffer *alloc;
+
+	mfc_dbg("real: 0x%08lx\n", real);
+
+	for (port = 0; port < mfc_mem_count(); port++) {
+		list_for_each_safe(pos, nxt, &mfc_alloc_head[port]) {
+			alloc = list_entry(pos, struct mfc_alloc_buffer, list);
+
+			if (alloc->real == real)
+				return alloc->ump_handle;
+		}
+	}
+
+	return NULL;
+}
+#endif
+
diff --git a/drivers/media/video/samsung/mfc5x/mfc_buf.h b/drivers/media/video/samsung/mfc5x/mfc_buf.h
new file mode 100644
index 0000000..7fafb94
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_buf.h
@@ -0,0 +1,195 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_buf.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Buffer manager for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_BUF_H_
+#define __MFC_BUF_H_ __FILE__
+
+#include <linux/list.h>
+
+#include "mfc.h"
+#include "mfc_inst.h"
+#include "mfc_interface.h"
+
+/* FIXME */
+#define ALIGN_4B (1 << 2)
+#define ALIGN_2KB (1 << 11)
+#define ALIGN_4KB (1 << 12)
+#define ALIGN_8KB (1 << 13)
+#define ALIGN_64KB (1 << 16)
+#define ALIGN_128KB (1 << 17)
+
+#define ALIGN_W 128 /* Tile, Horizontal, Luma & Chroma */
+#define ALIGN_H 32 /* Tile, Vertical, Luma & Chroma */
+#define ALIGN_W_L 16 /* Linear, Horizontal, Luma & Chroma */
+#define ALIGN_H_L_L 16 /* Linear, Vertical, Luma */
+#define ALIGN_H_L_C 8 /* Linear, Vertical, Chroma */
+
+/* System */ /* Size, Port, Align */
+#define MFC_FW_SYSTEM_SIZE (0x80000) /* 512KB, A, N(4KB for VMEM) */
+
+/* Instance */
+#define MFC_CTX_SIZE_L (0x96000) /* 600KB, N, 2KB, H.264 Decoding only */
+#define MFC_CTX_SIZE (0x2800) /* 10KB, N, 2KB */
+#define MFC_SHM_SIZE (0x400) /* 1KB, N, 4B */
+
+/* Decoding */
+#define MFC_CPB_SIZE (0x400000) /* Max.4MB, A, 2KB */
+#define MFC_DESC_SIZE (0x20000) /* Max.128KB, A, 2KB */
+
+#define MFC_DEC_NBMV_SIZE (0x4000) /* 16KB, A, 2KB */
+#define MFC_DEC_NBIP_SIZE (0x8000) /* 32KB, A, 2KB */
+#define MFC_DEC_NBDCAC_SIZE (0x4000) /* 16KB, A, 2KB */
+#define MFC_DEC_UPNBMV_SIZE (0x11000) /* 68KB, A, 2KB */
+#define MFC_DEC_SAMV_SIZE (0x40000) /* 256KB, A, 2KB */
+#define MFC_DEC_OTLINE_SIZE (0x8000) /* 32KB, A, 2KB */
+#define MFC_DEC_SYNPAR_SIZE (0x11000) /* 68KB, A, 2KB */
+#define MFC_DEC_BITPLANE_SIZE (0x800) /* 2KB, A, 2KB */
+
+/* Encoding */
+#define MFC_STRM_SIZE (0x300000) /* 3MB, A, 2KB (multi. 4KB) */
+
+/* FIXME: variable size */
+#define MFC_ENC_UPMV_SIZE (0x10000) /* Var, A, 2KB */
+#define MFC_ENC_COLFLG_SIZE (0x10000) /* Var, A, 2KB */
+#define MFC_ENC_INTRAMD_SIZE (0x10000) /* Var, A, 2KB */
+#define MFC_ENC_INTRAPRED_SIZE (0x4000) /* 16KB, A, 2KB */
+#define MFC_ENC_NBORINFO_SIZE (0x10000) /* Var, A, 2KB */
+#define MFC_ENC_ACDCCOEF_SIZE (0x10000) /* Var, A, 2KB */
+
+#define MFC_LUMA_ALIGN ALIGN_8KB
+#define MFC_CHROMA_ALIGN ALIGN_8KB
+#define MFC_MV_ALIGN ALIGN_8KB /* H.264 Decoding only */
+
+#define PORT_A 0
+#define PORT_B 1
+
+/* FIXME: MFC Buffer Type add as allocation parameter */
+/*
+#define MBT_ACCESS_MASK (0xFF << 24)
+#define MBT_SYSMMU (0x01 << 24)
+*/
+#define MBT_KERNEL (0x02 << 24)
+#define MBT_USER (0x04 << 24)
+#define MBT_OTHER (0x08 << 24)
+#if 0
+#define MBT_TYPE_MASK (0xFF << 16)
+#define MBT_CTX (MBT_SYSMMU | MBT_KERNEL | (0x01 << 16))/* S, K */
+#define MBT_DESC (MBT_SYSMMU | (0x02 << 16)) /* S */
+#define MBT_CODEC (MBT_SYSMMU | (0x04 << 16)) /* S */
+#define MBT_SHM (MBT_SYSMMU | MBT_KERNEL | (0x08 << 16))/* S, K */
+#define MBT_CPB (MBT_SYSMMU | MBT_USER | (0x10 << 16))/* D: S, [K], U E: */
+#define MBT_DPB (MBT_SYSMMU | MBT_USER | (0x20 << 16))/* D: S, [K], U E: */
+#endif
+#define MBT_CTX (MBT_KERNEL | (0x01 << 16)) /* S, K */
+#define MBT_DESC (0x02 << 16) /* S */
+#define MBT_CODEC (0x04 << 16) /* S */
+#define MBT_SHM (MBT_KERNEL | (0x08 << 16)) /* S, K */
+#if 0
+#define MBT_CPB (MBT_USER | (0x10 << 16)) /* D: S, [K], U E: */
+#define MBT_DPB (MBT_USER | (0x20 << 16)) /* D: S, [K], U E: */
+#endif
+#define MBT_CPB (MBT_KERNEL | MBT_USER | (0x10 << 16)) /* D: S, [K], U E: */
+#define MBT_DPB (MBT_KERNEL | MBT_USER | (0x20 << 16)) /* D: S, [K], U E: */
+
+enum MFC_BUF_ALLOC_SCHEME {
+ MBS_BEST_FIT = 0,
+ MBS_FIRST_FIT = 1,
+};
+
+/* Remove before Release */
+#if 0
+#define CPB_BUF_SIZE (0x400000) /* 3MB : 3x1024x1024 for decoder */
+#define DESC_BUF_SIZE (0x20000) /* 128KB : 128x1024 */
+#define SHARED_BUF_SIZE (0x10000) /* 64KB : 64x1024 */
+#define PRED_BUF_SIZE (0x10000) /* 64KB : 64x1024 */
+#define DEC_CODEC_BUF_SIZE (0x80000) /* 512KB : 512x1024 size per instance */
+#define ENC_CODEC_BUF_SIZE (0x50000) /* 320KB : 512x1024 size per instance */
+
+#define STREAM_BUF_SIZE (0x200000) /* 2MB : 2x1024x1024 for encoder */
+#define MV_BUF_SIZE (0x10000) /* 64KB : 64x1024 for encoder */
+
+#define MFC_CONTEXT_SIZE_L (640 * 1024) /* 600KB -> 640KB for alignment */
+#define VC1DEC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
+#define MPEG2DEC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
+#define H263DEC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
+#define MPEG4DEC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
+#define H264ENC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
+#define MPEG4ENC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
+#define H263ENC_CONTEXT_SIZE (64 * 1024) /* 10KB -> 64KB for alignment */
+
+#define DESC_BUF_SIZE (0x20000) /* 128KB : 128x1024 */
+#define SHARED_MEM_SIZE (0x1000) /* 4KB : 4x1024 size */
+
+#define CPB_BUF_SIZE (0x400000) /* 4MB : 4x1024x1024 for decoder */
+#define STREAM_BUF_SIZE (0x200000) /* 2MB : 2x1024x1024 for encoder */
+#define ENC_UP_INTRA_PRED_SIZE (0x10000) /* 64KB : 64x1024 for encoder */
+#endif
+
+struct mfc_alloc_buffer {
+ struct list_head list;
+ unsigned long real; /* phys. or virt. addr for MFC */
+ unsigned int size; /* allocation size */
+ unsigned char *addr; /* kernel virtual address space */
+ unsigned int type; /* buffer type */
+ int owner; /* instance context id */
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ struct vcm_mmu_res *vcm_s;
+ struct vcm_res *vcm_k;
+ unsigned long vcm_addr;
+ size_t vcm_size;
+ void *ump_handle;
+#elif defined(CONFIG_S5P_VMEM)
+ unsigned int vmem_cookie;
+ unsigned long vmem_addr;
+ size_t vmem_size;
+#else
+ unsigned int ofs; /*
+ * offset phys. or virt. contiguous memory
+ * phys.[bootmem, memblock] virt.[vmalloc]
+ * when user use mmap,
+ * user can access whole of memory by offset.
+ */
+#endif
+};
+
+struct mfc_free_buffer {
+ struct list_head list;
+ unsigned long real; /* phys. or virt. addr for MFC */
+ unsigned int size;
+};
+
+void mfc_print_buf(void);
+
+int mfc_init_buf(void);
+void mfc_final_buf(void);
+void mfc_set_buf_alloc_scheme(enum MFC_BUF_ALLOC_SCHEME scheme);
+void mfc_merge_buf(void);
+struct mfc_alloc_buffer *_mfc_alloc_buf(
+ struct mfc_inst_ctx *ctx, unsigned int size, int align, int flag);
+int mfc_alloc_buf(
+ struct mfc_inst_ctx *ctx, struct mfc_buf_alloc_arg* args, int flag);
+int _mfc_free_buf(unsigned long real);
+int mfc_free_buf(struct mfc_inst_ctx *ctx, unsigned int key);
+void mfc_free_buf_type(int owner, int type);
+void mfc_free_buf_inst(int owner);
+unsigned long mfc_get_buf_real(int owner, unsigned int key);
+/*
+unsigned char *mfc_get_buf_addr(int owner, unsigned char *user);
+unsigned char *_mfc_get_buf_addr(int owner, unsigned char *user);
+*/
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+unsigned int mfc_vcm_bind_from_others(struct mfc_inst_ctx *ctx,
+ struct mfc_buf_alloc_arg *args, int flag);
+void *mfc_get_buf_ump_handle(unsigned long real);
+#endif
+#endif /* __MFC_BUF_H_ */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_cmd.c b/drivers/media/video/samsung/mfc5x/mfc_cmd.c
new file mode 100644
index 0000000..38b4757
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_cmd.c
@@ -0,0 +1,504 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_cmd.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Command interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+
+#include <mach/regs-mfc.h>
+
+#include "mfc_cmd.h"
+#include "mfc_reg.h"
+#include "mfc_log.h"
+#include "mfc_dec.h"
+#include "mfc_enc.h"
+#include "mfc_mem.h"
+#include "mfc_buf.h"
+
+static unsigned int r2h_cmd;
+static struct mfc_cmd_args r2h_args;
+
+#undef MFC_PERF
+
+#ifdef MFC_PERF
+static int framecnt = 0;
+struct timeval tv1, tv2;
+#endif
+
+/*
+ * Interrupt handler for the MFC block.
+ * Latches the RISC2HOST command into the file-scope r2h_cmd and, for
+ * recognized return codes, snapshots the four argument registers into
+ * r2h_args. It then acknowledges the interrupt and wakes the single
+ * waiter sleeping in mfc_wait_sys().
+ */
+irqreturn_t mfc_irq(int irq, void *dev_id)
+{
+ struct mfc_dev *dev = (struct mfc_dev *)dev_id;
+
+ r2h_cmd = read_reg(MFC_RISC2HOST_CMD);
+ mfc_dbg("MFC IRQ: %d\n", r2h_cmd);
+
+ /* Only return codes in [OPEN_CH_RET..CLOSE_CH_RET],
+ * [SEQ_DONE_RET..EDFU_INIT_RET] and ERR_RET carry payload args. */
+ if (((r2h_cmd >= OPEN_CH_RET) && (r2h_cmd <= CLOSE_CH_RET)) ||
+ ((r2h_cmd >= SEQ_DONE_RET) && (r2h_cmd <= EDFU_INIT_RET)) ||
+ ( r2h_cmd == ERR_RET)) {
+ memset(&r2h_args, 0, sizeof(struct mfc_cmd_args));
+
+ r2h_args.arg[0] = read_reg(MFC_RISC2HOST_ARG1);
+ r2h_args.arg[1] = read_reg(MFC_RISC2HOST_ARG2);
+ r2h_args.arg[2] = read_reg(MFC_RISC2HOST_ARG3);
+ r2h_args.arg[3] = read_reg(MFC_RISC2HOST_ARG4);
+
+ if (r2h_cmd == ERR_RET)
+ mfc_dbg("F/W error code: disp: %d, dec: %d",
+ (r2h_args.arg[1] >> 16) & 0xFFFF,
+ (r2h_args.arg[1] & 0xFFFF));
+ } else {
+ mfc_dbg("Unknown R2H return value: %d\n", r2h_cmd);
+#if 0
+ mfc_err("== (0x64: 0x%08x) (0x68: 0x%08x) (0xE4: 0x%08x) (0xE8: 0x%08x)\n", read_reg(0x64), read_reg(0x68),read_reg(0xe4), read_reg(0xe8));
+ mfc_err("== (0xF0: 0x%08x) (0xF4: 0x%08x) (0xF8: 0x%08x) (0xFC: 0x%08x)\n", read_reg(0xf0), read_reg(0xf4), read_reg(0xf8), read_reg(0xfc));
+ mfc_err("== PWR 0x%08x CLK 0x%08x MASK 0x%08x PEND 0x%08x\n", __raw_readl(S5P_PMU_MFC_CONF),__raw_readl(EXYNOS4_CLKGATE_IP_MFC),
+ __raw_readl(S5P_VA_GIC_CPU + 0x1010c), __raw_readl(S5P_VA_GIC_CPU + 0x1020c));
+#endif
+ }
+
+#ifdef MFC_PERF
+ if (framecnt > 0) {
+ do_gettimeofday(&tv2);
+
+ mfc_info("%d, %ld", framecnt,
+ (long)(((tv2.tv_sec * 1000000) + tv2.tv_usec) - ((tv1.tv_sec * 1000000) + tv1.tv_usec)));
+
+ framecnt++;
+ }
+#endif
+
+ /*
+ * FIXME: check is codec command return or error
+ * move to mfc_wait_codec() ?
+ */
+
+ /* Acknowledge: clear return channel id, command and interrupt regs
+ * so the firmware can raise the next event. */
+ write_reg(0xFFFF, MFC_SI_RTN_CHID);
+
+ write_reg(0, MFC_RISC2HOST_CMD);
+ write_reg(0, MFC_RISC_HOST_INT);
+
+ /* FIXME: codec wait_queue processing */
+ dev->irq_sys = 1;
+ wake_up(&dev->wait_sys);
+
+ return IRQ_HANDLED;
+}
+
+/* Disabled skeleton for a per-codec wait path (see FIXMEs in mfc_irq /
+ * mfc_wait_sys); kept for reference, currently always returns true. */
+#if 0
+static bool mfc_wait_codec(struct mfc_inst_ctx *ctx, enum mfc_r2h_ret ret)
+{
+ /*
+ if (wait_event_timeout(dev->wait_codec[0], 0, timeout) == 0) {
+ mfc_err("F/W timeout: 0x%02x\n", ret);
+
+ return false;
+ }
+
+ if (r2h_cmd == ERR_RET)
+ mfc_err("F/W error code: 0x%02x", r2h_args.arg[1] & 0xFFFF);
+
+ return false;
+ }
+
+ if (r2h_cmd != ret) {
+ mfc_err("F/W return (0x%02x) waiting for (0x%02x)\n",
+ r2h_cmd, ret);
+
+ return false;
+ }
+ */
+ return true;
+}
+#endif
+
+/*
+ * Sleep until mfc_irq() signals a firmware return code, then verify it
+ * matches the expected @ret. Returns false on timeout, firmware error
+ * (ERR_RET) or an unexpected return code. Relies on the file-scope
+ * r2h_cmd/r2h_args latched by the interrupt handler.
+ */
+static bool
+mfc_wait_sys(struct mfc_dev *dev, enum mfc_r2h_ret ret, long timeout)
+{
+
+ if (wait_event_timeout(dev->wait_sys, dev->irq_sys, timeout) == 0) {
+ mfc_err("F/W timeout waiting for: %d\n", ret);
+ dev->irq_sys = 0;
+
+ return false;
+ }
+
+ dev->irq_sys = 0;
+
+ if (r2h_cmd == ERR_RET) {
+ mfc_err("F/W error code: disp: %d, dec: %d",
+ (r2h_args.arg[1] >> 16) & 0xFFFF,
+ (r2h_args.arg[1] & 0xFFFF));
+
+ return false;
+ }
+
+#if SUPPORT_SLICE_ENCODING
+ /* Track the slice-encoding state machine: EDFU_INIT_RET while a
+ * frame is expected marks the start of slice output, a subsequent
+ * FRAME_DONE_RET closes it and wakes wait_slice if requested. */
+ if ((ret == FRAME_DONE_RET) && (r2h_cmd == EDFU_INIT_RET)
+ && (dev->slice_encoding_flag == 0)) {
+ mfc_dbg("Slice encoding start : %d\n", r2h_cmd);
+ dev->slice_encoding_flag = 1;
+ dev->slice_sys = 0;
+ } else if ((ret == FRAME_DONE_RET) && (r2h_cmd == FRAME_DONE_RET)
+ && (dev->slice_encoding_flag)) {
+ mfc_dbg("Slice encoding done : %d\n", r2h_cmd);
+ dev->slice_sys = 1;
+ dev->slice_encoding_flag = 0;
+ if (dev->wait_slice_timeout == 1)
+ wake_up(&dev->wait_slice);
+ }
+#endif
+
+ if (r2h_cmd != ret) {
+#if SUPPORT_SLICE_ENCODING
+ /* exceptional case: FRAME_START -> EDFU_INIT_RET */
+ if ((ret == FRAME_DONE_RET) && (r2h_cmd == EDFU_INIT_RET))
+ return true;
+
+ /* exceptional case: CLOSE_CH_RET -> ABORT_RET */
+ if ((ret == CLOSE_CH_RET) && (r2h_cmd == ABORT_RET))
+ return true;
+#endif
+ mfc_err("F/W return (%d) waiting for (%d)\n",
+ r2h_cmd, ret);
+
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Issue a host-to-RISC mailbox command.
+ * Polls (up to H2R_CMD_TIMEOUT ms) for the previous command to be
+ * consumed (register back to H2R_NOP), then writes the four arguments
+ * followed by the command word. The command register is written last
+ * since it is what triggers the firmware. Returns false if the mailbox
+ * never became free.
+ */
+static bool write_h2r_cmd(enum mfc_h2r_cmd cmd, struct mfc_cmd_args *args)
+{
+ enum mfc_h2r_cmd pending_cmd;
+ unsigned long timeo = jiffies;
+
+ timeo += msecs_to_jiffies(H2R_CMD_TIMEOUT);
+
+ /* wait until host to risc command register becomes 'NOP' */
+ do {
+ pending_cmd = read_reg(MFC_HOST2RISC_CMD);
+
+ if (pending_cmd == H2R_NOP)
+ break;
+
+ schedule_timeout_uninterruptible(1);
+ /* FiXME: cpu_relax() */
+ } while (time_before(jiffies, timeo));
+
+ if (pending_cmd != H2R_NOP)
+ return false;
+
+ write_reg(args->arg[0], MFC_HOST2RISC_ARG1);
+ write_reg(args->arg[1], MFC_HOST2RISC_ARG2);
+ write_reg(args->arg[2], MFC_HOST2RISC_ARG3);
+ write_reg(args->arg[3], MFC_HOST2RISC_ARG4);
+
+ write_reg(cmd, MFC_HOST2RISC_CMD);
+
+ return true;
+}
+
+/*
+ * Release the RISC core from reset and wait for the firmware to report
+ * FW_STATUS_RET. Returns MFC_OK or MFC_FW_LOAD_FAIL on timeout/error.
+ */
+int mfc_cmd_fw_start(struct mfc_dev *dev)
+{
+ /* release RISC reset */
+ write_reg(0x3FF, MFC_SW_RESET);
+
+ if (mfc_wait_sys(dev, FW_STATUS_RET,
+ msecs_to_jiffies(H2R_INT_TIMEOUT)) == false) {
+ mfc_err("failed to check F/W\n");
+ return MFC_FW_LOAD_FAIL;
+ }
+
+ return MFC_OK;
+}
+
+/*
+ * Send SYS_INIT with the firmware system-buffer size and wait for
+ * SYS_INIT_RET, then log the firmware version/memory size reported
+ * back. Returns MFC_OK, MFC_CMD_FAIL or MFC_FW_INIT_FAIL.
+ */
+int mfc_cmd_sys_init(struct mfc_dev *dev)
+{
+ struct mfc_cmd_args h2r_args;
+ unsigned int fw_version, fw_memsize;
+
+ memset(&h2r_args, 0, sizeof(struct mfc_cmd_args));
+ h2r_args.arg[0] = MFC_FW_SYSTEM_SIZE;
+
+ if (write_h2r_cmd(SYS_INIT, &h2r_args) == false)
+ return MFC_CMD_FAIL;
+
+ if (mfc_wait_sys(dev, SYS_INIT_RET,
+ msecs_to_jiffies(H2R_INT_TIMEOUT)) == false) {
+ mfc_err("failed to init system\n");
+ return MFC_FW_INIT_FAIL;
+ }
+
+ fw_version = read_reg(MFC_FW_VERSION);
+ fw_memsize = r2h_args.arg[0]; /* size echoed back by firmware */
+
+ mfc_info("MFC F/W version: %02x-%02x-%02x, %dkB\n",
+ (fw_version >> 16) & 0xff,
+ (fw_version >> 8) & 0xff,
+ (fw_version) & 0xff,
+ (fw_memsize) >> 10);
+
+ return MFC_OK;
+}
+
+/*
+ * Put the firmware to sleep (SLEEP command, wait for SLEEP_RET).
+ * Returns MFC_OK, MFC_CMD_FAIL or MFC_SLEEP_FAIL.
+ */
+int mfc_cmd_sys_sleep(struct mfc_dev *dev)
+{
+ struct mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct mfc_cmd_args));
+
+ if (write_h2r_cmd(SLEEP, &h2r_args) == false)
+ return MFC_CMD_FAIL;
+
+ if (mfc_wait_sys(dev, SLEEP_RET,
+ msecs_to_jiffies(H2R_INT_TIMEOUT)) == false) {
+ mfc_err("failed to sleep\n");
+ return MFC_SLEEP_FAIL;
+ }
+
+ return MFC_OK;
+}
+
+/*
+ * Wake the firmware after sleep: issue WAKEUP, release the RISC reset,
+ * then wait for WAKEUP_RET. Returns MFC_OK, MFC_CMD_FAIL or
+ * MFC_WAKEUP_FAIL.
+ */
+int mfc_cmd_sys_wakeup(struct mfc_dev *dev)
+{
+ struct mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct mfc_cmd_args));
+
+ if (write_h2r_cmd(WAKEUP, &h2r_args) == false)
+ return MFC_CMD_FAIL;
+
+ /* release RISC reset */
+ write_reg(0x3FF, MFC_SW_RESET);
+
+ /* NOTE(review): deliberately uses a 20 s timeout instead of
+ * H2R_INT_TIMEOUT (5 s) — presumably wakeup can be slow on this
+ * silicon; confirm before reverting to the shared constant. */
+ if (mfc_wait_sys(dev, WAKEUP_RET,
+ //msecs_to_jiffies(H2R_INT_TIMEOUT)) == false) {
+ msecs_to_jiffies(20000)) == false) {
+ mfc_err("failed to wakeup\n");
+ return MFC_WAKEUP_FAIL;
+ }
+
+ return MFC_OK;
+}
+
+/*
+ * Open a codec channel for an instance context.
+ * Packs codec id, CRC/pixel-cache flags and the context buffer
+ * offset/size into an OPEN_CH command, then records the channel id the
+ * firmware hands back in ctx->cmd_id. Returns that (non-negative) id,
+ * or MFC_CMD_FAIL / MFC_OPEN_FAIL on error.
+ */
+int mfc_cmd_inst_open(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_cmd_args h2r_args;
+ unsigned int crc = 0;
+ unsigned int pixelcache = 0;
+ struct mfc_dec_ctx *dec_ctx;
+ struct mfc_enc_ctx *enc_ctx;
+
+ /* CRC reporting only exists on the decoder side */
+ if (ctx->type == DECODER) {
+ dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+
+ crc = dec_ctx->crc & 0x1;
+ pixelcache = dec_ctx->pixelcache & 0x3;
+ } else {
+ enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+
+ pixelcache = enc_ctx->pixelcache & 0x3;
+ }
+
+ memset(&h2r_args, 0, sizeof(struct mfc_cmd_args));
+ h2r_args.arg[0] = ctx->codecid;
+ h2r_args.arg[1] = crc << 31 | pixelcache;
+ h2r_args.arg[2] = ctx->ctxbufofs;
+ h2r_args.arg[3] = ctx->ctxbufsize;
+
+ if (write_h2r_cmd(OPEN_CH, &h2r_args) == false)
+ return MFC_CMD_FAIL;
+
+ if (mfc_wait_sys(ctx->dev, OPEN_CH_RET,
+ msecs_to_jiffies(H2R_INT_TIMEOUT)) == false) {
+ mfc_err("failed to open instance\n");
+ return MFC_OPEN_FAIL;
+ }
+
+ /* firmware returns the channel id in ARG1 */
+ ctx->cmd_id = r2h_args.arg[0];
+
+ mfc_dbg("inst id: %d, cmd id: %d, codec id: %d",
+ ctx->id, ctx->cmd_id, ctx->codecid);
+
+#ifdef MFC_PERF
+ framecnt = 0;
+#endif
+
+ return ctx->cmd_id;
+}
+
+/*
+ * Close the firmware channel identified by ctx->cmd_id.
+ * With slice encoding enabled, an ABORT_RET answer means the firmware
+ * aborted a pending slice first, so the close is retried once.
+ * Returns MFC_OK, MFC_CMD_FAIL or MFC_CLOSE_FAIL.
+ */
+int mfc_cmd_inst_close(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_cmd_args h2r_args;
+
+ memset(&h2r_args, 0, sizeof(struct mfc_cmd_args));
+ h2r_args.arg[0] = ctx->cmd_id;
+
+ if (write_h2r_cmd(CLOSE_CH, &h2r_args) == false)
+ return MFC_CMD_FAIL;
+
+ if (mfc_wait_sys(ctx->dev, CLOSE_CH_RET,
+ msecs_to_jiffies(H2R_INT_TIMEOUT)) == false) {
+ mfc_err("failed to close instance\n");
+ return MFC_CLOSE_FAIL;
+ }
+#if SUPPORT_SLICE_ENCODING
+ /* retry instance close */
+ if (r2h_cmd == ABORT_RET) {
+ if (write_h2r_cmd(CLOSE_CH, &h2r_args) == false)
+ return MFC_CMD_FAIL;
+
+ if (mfc_wait_sys(ctx->dev, CLOSE_CH_RET,
+ msecs_to_jiffies(H2R_INT_TIMEOUT)) == false) {
+ mfc_err("failed to close instance\n");
+ return MFC_CLOSE_FAIL;
+ }
+ }
+#endif
+
+ return MFC_OK;
+}
+
+/*
+ * Kick sequence-header parsing (SEQ_HEADER codec command) on channel 1
+ * and wait for SEQ_DONE_RET. Error sub-code 175 from the firmware is
+ * treated as a non-compliant stream. Returns MFC_OK or
+ * MFC_DEC_INIT_FAIL.
+ */
+int mfc_cmd_seq_start(struct mfc_inst_ctx *ctx)
+{
+ /* all codec command pass the shared mem addrees */
+ write_reg(ctx->shmofs, MFC_SI_CH1_HOST_WR_ADR);
+
+ /* command in bits [18:16], channel id in the low bits */
+ write_reg((SEQ_HEADER << 16 & 0x70000) | ctx->cmd_id,
+ MFC_SI_CH1_INST_ID);
+
+ /* FIXME: close_instance ? */
+ /* FIXME: mfc_wait_codec */
+ if (mfc_wait_sys(ctx->dev, SEQ_DONE_RET,
+ msecs_to_jiffies(CODEC_INT_TIMEOUT)) == false) {
+ mfc_err("failed to init seq start\n");
+ return MFC_DEC_INIT_FAIL;
+ }
+
+ if ((r2h_args.arg[1] & 0xFFFF) == 175) {
+ mfc_err("Non compliant feature detected\n");
+ return MFC_DEC_INIT_FAIL;
+ }
+
+ return MFC_OK;
+}
+
+/*
+ * Issue the INIT_BUFFERS codec command (DPB/codec buffer registration)
+ * and wait for INIT_BUFFERS_RET. Returns MFC_OK or MFC_DEC_INIT_FAIL.
+ */
+int mfc_cmd_init_buffers(struct mfc_inst_ctx *ctx)
+{
+ /* all codec command pass the shared mem addrees */
+ write_reg(ctx->shmofs, MFC_SI_CH1_HOST_WR_ADR);
+
+ write_reg((INIT_BUFFERS << 16 & 0x70000) | ctx->cmd_id,
+ MFC_SI_CH1_INST_ID);
+
+ /* FIXME: close_instance ? */
+ /* FIXME: mfc_wait_codec */
+ if (mfc_wait_sys(ctx->dev, INIT_BUFFERS_RET,
+ msecs_to_jiffies(CODEC_INT_TIMEOUT)) == false) {
+ mfc_err("failed to init buffers\n");
+ return MFC_DEC_INIT_FAIL;
+ }
+
+#ifdef MFC_PERF
+ framecnt = 1;
+#endif
+
+ return MFC_OK;
+}
+
+/*
+ * Run one frame through the codec.
+ * Decoder side selects the codec command based on stream state:
+ * LAST_SEQ for the final (flush) frame, FRAME_START_REALLOC while a
+ * resolution change is pending, FRAME_START otherwise. Encoders always
+ * use FRAME_START. Waits for FRAME_DONE_RET; returns MFC_OK or
+ * MFC_DEC_EXE_TIME_OUT.
+ */
+int mfc_cmd_frame_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx;
+
+ /* all codec command pass the shared mem addrees */
+ write_reg(ctx->shmofs, MFC_SI_CH1_HOST_WR_ADR);
+
+ if (ctx->type == DECODER) {
+ dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+
+ mfc_dbg("dec_ctx->lastframe: %d", dec_ctx->lastframe);
+
+ if (dec_ctx->lastframe) {
+ write_reg((LAST_SEQ << 16 & 0x70000) | ctx->cmd_id,
+ MFC_SI_CH1_INST_ID);
+ dec_ctx->lastframe = 0;
+ } else if (ctx->resolution_status == RES_SET_CHANGE) {
+ mfc_dbg("FRAME_START_REALLOC\n");
+ write_reg((FRAME_START_REALLOC << 16 & 0x70000) | ctx->cmd_id,
+ MFC_SI_CH1_INST_ID);
+ ctx->resolution_status = RES_WAIT_FRAME_DONE;
+ } else {
+ write_reg((FRAME_START << 16 & 0x70000) | ctx->cmd_id,
+ MFC_SI_CH1_INST_ID);
+ }
+ } else { /* == ENCODER */
+ write_reg((FRAME_START << 16 & 0x70000) | ctx->cmd_id,
+ MFC_SI_CH1_INST_ID);
+ }
+
+#ifdef MFC_PERF
+ do_gettimeofday(&tv1);
+#endif
+
+ /* FIXME: close_instance ? */
+ /* FIXME: mfc_wait_codec */
+ if (mfc_wait_sys(ctx->dev, FRAME_DONE_RET,
+ msecs_to_jiffies(CODEC_INT_TIMEOUT)) == false) {
+ mfc_err("failed to frame start\n");
+ return MFC_DEC_EXE_TIME_OUT;
+ }
+
+ return MFC_OK;
+}
+
+#if SUPPORT_SLICE_ENCODING
+/*
+ * Encode one slice.
+ * The first slice of a frame is started with FRAME_START; subsequent
+ * slices resume via the CONTINUE_ENC mailbox command carrying the
+ * stream address (in 2KB units, hence >> 11). EDFU_INIT_RET means more
+ * slices follow; FRAME_DONE_RET ends the frame. The produced slice
+ * size is returned by the firmware in ARG3.
+ */
+int mfc_cmd_slice_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+ struct mfc_cmd_args h2r_args;
+
+ /* all codec command pass the shared mem addrees */
+ write_reg(ctx->shmofs, MFC_SI_CH1_HOST_WR_ADR);
+
+ if (enc_ctx->slicecount == 0) {
+ write_reg((FRAME_START << 16 & 0x70000) | ctx->cmd_id,
+ MFC_SI_CH1_INST_ID);
+
+ enc_ctx->slicecount = 1;
+ } else {
+ memset(&h2r_args, 0, sizeof(struct mfc_cmd_args));
+ h2r_args.arg[0] = enc_ctx->streamaddr >> 11;
+
+ if (write_h2r_cmd(CONTINUE_ENC, &h2r_args) == false)
+ return MFC_CMD_FAIL;
+ }
+
+#ifdef MFC_PERF
+ do_gettimeofday(&tv1);
+#endif
+
+ if (mfc_wait_sys(ctx->dev, FRAME_DONE_RET,
+ msecs_to_jiffies(CODEC_INT_TIMEOUT)) == false) {
+ mfc_err("failed to slice start\n");
+ return MFC_DEC_EXE_TIME_OUT;
+ }
+
+ if (r2h_cmd == EDFU_INIT_RET)
+ enc_ctx->slicecount++;
+ else /* FRAME_DONE_RET */
+ enc_ctx->slicecount = 0;
+
+ enc_ctx->slicesize = r2h_args.arg[2];
+
+ return MFC_OK;
+}
+#endif
diff --git a/drivers/media/video/samsung/mfc5x/mfc_cmd.h b/drivers/media/video/samsung/mfc5x/mfc_cmd.h
new file mode 100644
index 0000000..ac1f0c9
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_cmd.h
@@ -0,0 +1,90 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_cmd.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Command interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_CMD_H
+#define __MFC_CMD_H __FILE__
+
+#include <linux/interrupt.h>
+
+#include "mfc_dev.h"
+
+#define MAX_H2R_ARG 4
+#define H2R_CMD_TIMEOUT 1000 /* ms */
+#define H2R_INT_TIMEOUT 5000 /* ms */
+#define CODEC_INT_TIMEOUT 1000 /* ms */
+#define SLICE_ENC_TIMEOUT 1000 /* ms */
+
+/* Host -> RISC mailbox commands (written to MFC_HOST2RISC_CMD). */
+enum mfc_h2r_cmd {
+ H2R_NOP = 0,
+ OPEN_CH = 1,
+ CLOSE_CH = 2,
+ SYS_INIT = 3,
+ FLUSH = 4,
+ SLEEP = 5,
+ WAKEUP = 6,
+ CONTINUE_ENC = 7,
+ ABORT_ENC = 8,
+};
+
+/* Per-channel codec commands (bits [18:16] of MFC_SI_CHx_INST_ID). */
+enum mfc_codec_cmd {
+ SEQ_HEADER = 1,
+ FRAME_START = 2,
+ LAST_SEQ = 3,
+ INIT_BUFFERS = 4,
+ FRAME_START_REALLOC = 5,
+ FRAME_BATCH_START = 6,
+};
+
+/* RISC -> host return codes (read from MFC_RISC2HOST_CMD). */
+enum mfc_r2h_ret {
+ R2H_NOP = 0,
+ OPEN_CH_RET = 1,
+ CLOSE_CH_RET = 2,
+
+ SEQ_DONE_RET = 4,
+ FRAME_DONE_RET = 5,
+ SLICE_DONE_RET = 6,
+ ENC_COMPLETE_RET = 7,
+ SYS_INIT_RET = 8,
+ FW_STATUS_RET = 9,
+ SLEEP_RET = 10,
+ WAKEUP_RET = 11,
+ FLUSH_CMD_RET = 12,
+ ABORT_RET = 13,
+ BATCH_ENC_RET = 14,
+ INIT_BUFFERS_RET = 15,
+ EDFU_INIT_RET = 16,
+
+ ERR_RET = 32,
+};
+
+/* Argument payload for one mailbox transaction (ARG1..ARG4). */
+struct mfc_cmd_args {
+ unsigned int arg[MAX_H2R_ARG];
+};
+
+irqreturn_t mfc_irq(int irq, void *dev_id);
+
+int mfc_cmd_fw_start(struct mfc_dev *dev);
+int mfc_cmd_sys_init(struct mfc_dev *dev);
+int mfc_cmd_sys_sleep(struct mfc_dev *dev);
+int mfc_cmd_sys_wakeup(struct mfc_dev *dev);
+
+int mfc_cmd_inst_open(struct mfc_inst_ctx *ctx);
+int mfc_cmd_inst_close(struct mfc_inst_ctx *ctx);
+int mfc_cmd_seq_start(struct mfc_inst_ctx *ctx);
+int mfc_cmd_init_buffers(struct mfc_inst_ctx *ctx);
+int mfc_cmd_frame_start(struct mfc_inst_ctx *ctx);
+#if SUPPORT_SLICE_ENCODING
+int mfc_cmd_slice_start(struct mfc_inst_ctx *ctx);
+#endif
+
+#endif /* __MFC_CMD_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_ctrl.c b/drivers/media/video/samsung/mfc5x/mfc_ctrl.c
new file mode 100644
index 0000000..11d35d3
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_ctrl.c
@@ -0,0 +1,186 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_ctrl.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Control interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <mach/regs-mfc.h>
+
+#include "mfc.h"
+#include "mfc_mem.h"
+#include "mfc_reg.h"
+#include "mfc_log.h"
+#include "mfc_cmd.h"
+#include "mfc_dev.h"
+#include "mfc_errno.h"
+#include "mfc_pm.h"
+
+#define MC_STATUS_TIMEOUT 1000 /* ms */
+
+/*
+ * Software-reset the MFC IP.
+ * Resets the RISC core, then everything but the memory controller, and
+ * polls (up to MC_STATUS_TIMEOUT ms) for both MC channels to go idle
+ * before pulsing the final full reset. Returns false if the MC never
+ * quiesced.
+ */
+static bool mfc_reset(void)
+{
+ unsigned int mc_status;
+ unsigned long timeo = jiffies;
+
+ timeo += msecs_to_jiffies(MC_STATUS_TIMEOUT);
+
+ /* Stop procedure */
+ /* FIXME: F/W can be access invalid address */
+ /* Reset VI */
+ /*
+ write_reg(0x3F7, MFC_SW_RESET);
+ */
+ write_reg(0x3F6, MFC_SW_RESET); /* Reset RISC */
+ write_reg(0x3E2, MFC_SW_RESET); /* All reset except for MC */
+ mdelay(10);
+
+ /* Check MC status: low two bits are the busy flags of both ports */
+ do {
+ mc_status = (read_reg(MFC_MC_STATUS) & 0x3);
+
+ if (mc_status == 0)
+ break;
+
+ schedule_timeout_uninterruptible(1);
+ /* FiXME: cpu_relax() */
+ } while (time_before(jiffies, timeo));
+
+ if (mc_status != 0)
+ return false;
+
+ write_reg(0x0, MFC_SW_RESET);
+ write_reg(0x3FE, MFC_SW_RESET);
+
+ return true;
+}
+
+/*
+ * Program the memory controller DRAM base registers for both channels.
+ * With a single memory port, channel B aliases port 0; otherwise it
+ * points at port 1.
+ */
+static void mfc_init_memctrl(void)
+{
+ /* Channel A, Port 0 */
+ write_reg(mfc_mem_base(0), MFC_MC_DRAMBASE_ADR_A);
+#if MFC_MAX_MEM_PORT_NUM == 1
+ /* Channel B, Port 0 */
+ write_reg(mfc_mem_base(0), MFC_MC_DRAMBASE_ADR_B);
+#else
+ /* Channel B, Port 1 */
+ write_reg(mfc_mem_base(1), MFC_MC_DRAMBASE_ADR_B);
+#endif
+ mfc_dbg("Master A - 0x%08x\n",
+ read_reg(MFC_MC_DRAMBASE_ADR_A));
+ mfc_dbg("Master B - 0x%08x\n",
+ read_reg(MFC_MC_DRAMBASE_ADR_B));
+}
+
+/*
+ * Clear stale channel instance IDs and reset both mailbox command
+ * registers to NOP before (re)starting the firmware.
+ */
+static void mfc_clear_cmds(void)
+{
+ write_reg(0xFFFFFFFF, MFC_SI_CH1_INST_ID);
+ write_reg(0xFFFFFFFF, MFC_SI_CH2_INST_ID);
+
+ /* fix: the NOP constants were written to the swapped registers
+ * (H2R_NOP into RISC2HOST, R2H_NOP into HOST2RISC). Both values
+ * are 0 so behavior is unchanged, but the pairing was misleading:
+ * R2H_NOP belongs to the RISC->host register and H2R_NOP to the
+ * host->RISC register. */
+ write_reg(R2H_NOP, MFC_RISC2HOST_CMD);
+ write_reg(H2R_NOP, MFC_HOST2RISC_CMD);
+}
+
+/*
+ * Copy the firmware image into the MFC's reserved memory (port 0 base,
+ * already 128KB aligned) and clean the cache so the device sees it.
+ * Returns 1 on success, 0 if @data is NULL or @size is zero.
+ */
+int mfc_load_firmware(const unsigned char *data, size_t size)
+{
+ volatile unsigned char *fw;
+
+ if (!data || size == 0)
+ return 0;
+
+ /* MFC F/W area already 128KB aligned */
+ fw = mfc_mem_addr(0);
+
+ memcpy((void *)fw, data, size);
+
+ mfc_mem_cache_clean((void *)fw, size);
+
+ return 1;
+}
+
+/*
+ * Full bring-up sequence: reset the IP, program the memory controller,
+ * clear stale commands, start the firmware and run SYS_INIT. Clocks
+ * are held on only for the duration of the sequence. Returns MFC_OK or
+ * a negative MFC_* error code.
+ */
+int mfc_start(struct mfc_dev *dev)
+{
+ int ret;
+
+ /* FIXME: when MFC start, load firmware again */
+ /*
+ dev->fw.state = mfc_load_firmware(dev->fw.info->data, dev->fw.info->size);
+ */
+
+ mfc_clock_on(dev);
+
+ if (mfc_reset() == false) {
+ mfc_clock_off(dev);
+ return MFC_FAIL;
+ }
+
+ mfc_init_memctrl();
+ mfc_clear_cmds();
+
+ ret = mfc_cmd_fw_start(dev);
+ if (ret < 0) {
+ mfc_clock_off(dev);
+ return ret;
+ }
+
+ ret = mfc_cmd_sys_init(dev);
+
+ mfc_clock_off(dev);
+
+ return ret;
+}
+
+/*
+ * Suspend path: gate the clock around the firmware SLEEP command.
+ * Returns the result of mfc_cmd_sys_sleep().
+ */
+int mfc_sleep(struct mfc_dev *dev)
+{
+ int ret;
+
+ mfc_clock_on(dev);
+
+ /* FIXME: add SFR backup? */
+
+ ret = mfc_cmd_sys_sleep(dev);
+
+ mfc_clock_off(dev);
+
+ /* FIXME: add mfc_power_off()? */
+
+ /* FIXME: ret = 0 */
+ return ret;
+}
+
+/*
+ * Resume path: mirror of mfc_start() but ends with the firmware WAKEUP
+ * command instead of SYS_INIT (firmware state was preserved by SLEEP).
+ * Returns MFC_OK or a negative MFC_* error code.
+ */
+int mfc_wakeup(struct mfc_dev *dev)
+{
+ int ret;
+
+ /* FIXME: add mfc_power_on()? */
+
+ mfc_clock_on(dev);
+
+ if (mfc_reset() == false) {
+ mfc_clock_off(dev);
+ return MFC_FAIL;
+ }
+
+ mfc_init_memctrl();
+ mfc_clear_cmds();
+
+ ret = mfc_cmd_sys_wakeup(dev);
+
+ mfc_clock_off(dev);
+
+ /* FIXME: ret = 0 */
+ return ret;
+}
+
diff --git a/drivers/media/video/samsung/mfc5x/mfc_ctrl.h b/drivers/media/video/samsung/mfc5x/mfc_ctrl.h
new file mode 100644
index 0000000..7822f59
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_ctrl.h
@@ -0,0 +1,22 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_ctrl.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Control interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_CTRL_H
+#define __MFC_CTRL_H __FILE__
+
+int mfc_load_firmware(const unsigned char *data, size_t size);
+int mfc_start(struct mfc_dev *dev);
+int mfc_sleep(struct mfc_dev *dev);
+int mfc_wakeup(struct mfc_dev *dev);
+
+#endif /* __MFC_CTRL_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_dec.c b/drivers/media/video/samsung/mfc5x/mfc_dec.c
new file mode 100644
index 0000000..6e0645d
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_dec.c
@@ -0,0 +1,2416 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_dec.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Decoder interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_BUSFREQ_OPP
+#include <plat/cpu.h>
+#include <mach/busfreq_exynos4.h>
+#define HD_MOVIE_SIZE_MULTIPLY_WIDTH_HEIGHT (1281*721)
+#endif
+
+#if defined(CONFIG_BUSFREQ) || defined(CONFIG_EXYNOS4_CPUFREQ)
+#include <mach/cpufreq.h>
+#endif
+#include <mach/regs-mfc.h>
+
+#include "mfc_dec.h"
+#include "mfc_cmd.h"
+#include "mfc_log.h"
+
+#include "mfc_shm.h"
+#include "mfc_reg.h"
+#include "mfc_mem.h"
+#include "mfc_buf.h"
+
+#undef DUMP_STREAM
+
+#ifdef DUMP_STREAM
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/file.h>
+
+static void mfc_fw_debug(void);
+static void dump_stream(unsigned long address, unsigned int size);
+#endif
+static LIST_HEAD(mfc_decoders);
+
+/* Disabled helpers: linear scan for an MPEG4 0x000001 start-code prefix
+ * and a VCL-presence check stub. Kept for reference only. */
+#if 0
+#define MPEG4_START_CODE_PREFIX_SIZE 3
+#define MPEG4_START_CODE_PREFIX 0x000001
+#define MPEG4_START_CODE_MASK 0x000000FF
+static int find_mpeg4_startcode(unsigned long addr, unsigned int size)
+{
+ unsigned char *data;
+ unsigned int i = 0;
+
+ /* FIXME: optimize cache operation size */
+ mfc_mem_cache_inv((void *)addr, size);
+
+ /* FIXME: optimize matching algorithm */
+ data = (unsigned char *)addr;
+
+ for (i = 0; i < (size - MPEG4_START_CODE_PREFIX_SIZE); i++) {
+ if ((data[i] == 0x00) && (data[i + 1] == 0x00) && (data[i + 2] == 0x01))
+ return i;
+ }
+
+ return -1;
+}
+
+static int check_vcl(unsigned long addr, unsigned int size)
+{
+ return -1;
+}
+#endif
+
+#ifdef DUMP_STREAM
+/* Debug helper: dump firmware version and scratch/debug registers. */
+static void mfc_fw_debug(void)
+{
+ mfc_err("============= MFC FW Debug (Ver: 0x%08x) ================\n",
+ read_reg(0x58));
+ mfc_err("== (0x64: 0x%08x) (0x68: 0x%08x) (0xE4: 0x%08x) \
+ (0xE8: 0x%08x)\n", read_reg(0x64), read_reg(0x68),
+ read_reg(0xe4), read_reg(0xe8));
+ mfc_err("== (0xF0: 0x%08x) (0xF4: 0x%08x) (0xF8: 0x%08x) \
+ (0xFC: 0x%08x)\n", read_reg(0xf0), read_reg(0xf4),
+ read_reg(0xf8), read_reg(0xfc));
+}
+
+/*
+ * Debug helper: hex-dump a stream buffer to the kernel log and save a
+ * copy to /data/mfc_decinit_instream.raw.
+ *
+ * fix: the original wrote the buffer to the file twice — once via
+ * sys_write() and again via vfs_write() on the same descriptor. Write
+ * it exactly once and drop the redundant file/pos plumbing.
+ */
+static void dump_stream(unsigned long address, unsigned int size)
+{
+ int i, j;
+ int fd;
+ unsigned long addr = (unsigned long) phys_to_virt(address);
+ mm_segment_t old_fs;
+ char filename[] = "/data/mfc_decinit_instream.raw";
+
+ printk(KERN_INFO "---- start stream dump ----\n");
+ printk(KERN_INFO "size: 0x%04x\n", size);
+ for (i = 0; i < size; i += 16) {
+ mfc_dbg("0x%04x: ", i);
+
+ if ((size - i) >= 16) {
+ for (j = 0; j < 16; j++)
+ mfc_dbg("0x%02x ",
+ (u8)(*(u8 *)(addr + i + j)));
+ } else {
+ for (j = 0; j < (size - i); j++)
+ mfc_dbg("0x%02x ",
+ (u8)(*(u8 *)(addr + i + j)));
+ }
+ mfc_dbg("\n");
+ }
+ printk(KERN_INFO "---- end stream dump ----\n");
+
+ /* sys_open/sys_write expect user-space pointers; widen the address
+ * limit so kernel buffers are accepted */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ fd = sys_open(filename, O_WRONLY|O_CREAT, 0644);
+ if (fd >= 0) {
+ sys_write(fd, (u8 *)addr, size);
+ sys_close(fd);
+ } else {
+ mfc_err("........Open fail : %d\n", fd);
+ }
+
+ set_fs(old_fs);
+}
+#endif
+
+/*
+ * [1] alloc_ctx_buf() implementations
+ */
+/*
+ * [1] alloc_ctx_buf(): allocate and zero the firmware context buffer
+ * (MFC_CTX_SIZE, 2KB aligned, port A) and record its 2KB-unit offset
+ * and size in the instance context. Returns 0 on success, -1 on
+ * allocation failure.
+ */
+ static int alloc_ctx_buf(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_alloc_buffer *alloc;
+
+ alloc = _mfc_alloc_buf(ctx, MFC_CTX_SIZE, ALIGN_2KB, MBT_CTX | PORT_A);
+ if (alloc == NULL) {
+ mfc_err("failed alloc context buffer\n");
+
+ return -1;
+ }
+
+ /* firmware takes buffer offsets in 2KB units */
+ ctx->ctxbufofs = mfc_mem_base_ofs(alloc->real) >> 11;
+ ctx->ctxbufsize = alloc->size;
+
+ memset((void *)alloc->addr, 0, alloc->size);
+
+ mfc_mem_cache_clean((void *)alloc->addr, alloc->size);
+
+ return 0;
+}
+
+/*
+ * H.264 variant of alloc_ctx_buf(): identical flow but uses the larger
+ * MFC_CTX_SIZE_L context buffer. Returns 0 on success, -1 on failure.
+ */
+static int h264_alloc_ctx_buf(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_alloc_buffer *alloc;
+
+ alloc = _mfc_alloc_buf(ctx, MFC_CTX_SIZE_L, ALIGN_2KB, MBT_CTX | PORT_A);
+ if (alloc == NULL) {
+ mfc_err("failed alloc context buffer\n");
+
+ return -1;
+ }
+
+ ctx->ctxbufofs = mfc_mem_base_ofs(alloc->real) >> 11;
+ ctx->ctxbufsize = alloc->size;
+
+ memset((void *)alloc->addr, 0, alloc->size);
+
+ mfc_mem_cache_clean((void *)alloc->addr, alloc->size);
+
+ return 0;
+}
+
+/*
+ * [2] alloc_desc_buf() implementations
+ */
+/*
+ * [2] alloc_desc_buf(): allocate the descriptor buffer (MFC_DESC_SIZE,
+ * 2KB aligned, port A) and store its 2KB-unit offset and size in the
+ * context. Returns 0 on success, -1 on allocation failure.
+ */
+static int alloc_desc_buf(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_alloc_buffer *alloc;
+
+ /* FIXME: size fixed? */
+ alloc = _mfc_alloc_buf(ctx, MFC_DESC_SIZE, ALIGN_2KB, MBT_DESC | PORT_A);
+ if (alloc == NULL) {
+ mfc_err("failed alloc descriptor buffer\n");
+
+ return -1;
+ }
+
+ ctx->descbufofs = mfc_mem_base_ofs(alloc->real) >> 11;
+ /* FIXME: size fixed? */
+ ctx->descbufsize = MFC_DESC_SIZE;
+
+ return 0;
+}
+
+/*
+ * [3] pre_seq_start() implementations
+ */
+/*
+ * [3] pre_seq_start(): common pre-SEQ_HEADER setup — toggle the slice
+ * interface enable bit (bit 31 of DPB_CONF_CTRL) from the decoder
+ * context. Always returns 0.
+ */
+static int pre_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ unsigned reg;
+
+ /* slice interface */
+ reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL);
+ if (dec_ctx->slice)
+ reg |= (1 << 31);
+ else
+ reg &= ~(1 << 31);
+ write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL);
+
+ return 0;
+}
+
+/*
+ * H.264 pre-SEQ_HEADER setup: common setup plus display-delay enable
+ * (bit 30) / value (bits [29:16]) in DPB_CONF_CTRL and SEI parsing
+ * enable via shared memory. Always returns 0.
+ */
+static int h264_pre_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv;
+ unsigned int reg;
+
+ pre_seq_start(ctx);
+
+ /* display delay */
+ reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL);
+ if (h264->dispdelay_en > 0) {
+ /* enable */
+ reg |= (1 << 30);
+ /* value */
+ reg &= ~(0x3FFF << 16);
+ reg |= ((h264->dispdelay_val & 0x3FFF) << 16);
+ } else {
+ /* disable & value clear */
+ reg &= ~(0x7FFF << 16);
+ }
+ write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL);
+
+ write_shm(ctx, h264->sei_parse, SEI_ENABLE);
+
+ return 0;
+}
+
+/*
+ * MPEG4 pre-SEQ_HEADER setup: common setup plus the post/loop filter
+ * enable bit in MFC_ENC_LF_CTRL (register shared by decode and encode
+ * paths). Always returns 0.
+ */
+static int mpeg4_pre_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv;
+ unsigned int reg;
+
+ pre_seq_start(ctx);
+
+ /* loop filter, this register can be used by both decoders & encoders */
+ reg = read_reg(MFC_ENC_LF_CTRL);
+ if (mpeg4->postfilter)
+ reg |= (1 << 0);
+ else
+ reg &= ~(1 << 0);
+ write_reg(reg, MFC_ENC_LF_CTRL);
+
+ return 0;
+}
+
+/*
+ * FIMV1 pre-SEQ_HEADER setup: common setup plus explicit width/height
+ * registers, since FIMV1 streams do not carry resolution in-band.
+ * Always returns 0.
+ */
+static int fimv1_pre_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ struct mfc_dec_fimv1 *fimv1 = (struct mfc_dec_fimv1 *)dec_ctx->d_priv;
+
+ pre_seq_start(ctx);
+
+ /* set width, height for FIMV1 */
+ write_reg(fimv1->width, MFC_SI_FIMV1_HRESOL);
+ write_reg(fimv1->height, MFC_SI_FIMV1_VRESOL);
+
+ return 0;
+}
+
+/*
+ * [4] post_seq_start() implementations
+ */
+/*
+ * [4] post_seq_start(): common post-SEQ_HEADER readback — resolution,
+ * minimum DPB count (plus requested extra DPBs), and profile/level
+ * from shared memory. Always returns 0.
+ */
+static int post_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ unsigned int shm;
+
+ /* CHKME: case of FIMV1 */
+ ctx->width = read_reg(MFC_SI_HRESOL);
+ ctx->height = read_reg(MFC_SI_VRESOL);
+
+ dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER);
+ dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb;
+
+ /* DISP_PIC_PROFILE packs level in bits [15:8], profile in [4:0] */
+ shm = read_shm(ctx, DISP_PIC_PROFILE);
+ dec_ctx->level = (shm >> 8) & 0xFF;
+ dec_ctx->profile = shm & 0x1F;
+
+ return 0;
+}
+
+/*
+ * H.264 post-SEQ_HEADER readback: same fields as post_seq_start()
+ * (inlined rather than called — see the commented call) plus the four
+ * crop offsets from shared memory. Always returns 0.
+ */
+static int h264_post_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv;
+ unsigned int shm;
+
+ /*
+ post_seq_start(ctx);
+ */
+ ctx->width = read_reg(MFC_SI_HRESOL);
+ ctx->height = read_reg(MFC_SI_VRESOL);
+
+ dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER);
+ dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb;
+
+ mfc_dbg("nummindpb: %d, numextradpb: %d\n", dec_ctx->nummindpb,
+ dec_ctx->numextradpb);
+
+ shm = read_shm(ctx, DISP_PIC_PROFILE);
+ dec_ctx->level = (shm >> 8) & 0xFF;
+ dec_ctx->profile = shm & 0x1F;
+
+ /* FIXME: consider it */
+ /*
+ h264->dispdelay_en > 0
+
+ if (dec_ctx->numtotaldpb < h264->dispdelay_val)
+ dec_ctx->numtotaldpb = h264->dispdelay_val;
+ */
+
+ /* CROP_INFO1: right[31:16]/left[15:0]; CROP_INFO2: bottom/top */
+ h264->crop_r_ofs = (read_shm(ctx, CROP_INFO1) >> 16) & 0xFFFF;
+ h264->crop_l_ofs = read_shm(ctx, CROP_INFO1) & 0xFFFF;
+ h264->crop_b_ofs = (read_shm(ctx, CROP_INFO2) >> 16) & 0xFFFF;
+ h264->crop_t_ofs = read_shm(ctx, CROP_INFO2) & 0xFFFF;
+
+ return 0;
+}
+
+/*
+ * MPEG4 post-SEQ_HEADER readback: common fields plus aspect-ratio
+ * info; code 0xF means an extended PAR whose width/height are packed
+ * in the EXTENDED_PAR shared-memory word. Always returns 0.
+ */
+static int mpeg4_post_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv;
+ unsigned int shm;
+
+ /*
+ post_seq_start(ctx);
+ */
+ ctx->width = read_reg(MFC_SI_HRESOL);
+ ctx->height = read_reg(MFC_SI_VRESOL);
+
+ dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER);
+ dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb;
+
+ shm = read_shm(ctx, DISP_PIC_PROFILE);
+ dec_ctx->level = (shm >> 8) & 0xFF;
+ dec_ctx->profile = shm & 0x1F;
+
+ mpeg4->aspect_ratio = read_shm(ctx, ASPECT_RATIO_INFO) & 0xF;
+ if (mpeg4->aspect_ratio == 0xF) {
+ shm = read_shm(ctx, EXTENDED_PAR);
+ mpeg4->ext_par_width = (shm >> 16) & 0xFFFF;
+ mpeg4->ext_par_height = shm & 0xFFFF;
+ } else {
+ mpeg4->ext_par_width = 0;
+ mpeg4->ext_par_height = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * VC-1 post-SEQ_HEADER readback: only the common fields (resolution,
+ * DPB counts, profile/level). Always returns 0.
+ */
+static int vc1_post_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ unsigned int shm;
+
+ /*
+ post_seq_start(ctx);
+ */
+ ctx->width = read_reg(MFC_SI_HRESOL);
+ ctx->height = read_reg(MFC_SI_VRESOL);
+
+ dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER);
+ dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb;
+
+ shm = read_shm(ctx, DISP_PIC_PROFILE);
+ dec_ctx->level = (shm >> 8) & 0xFF;
+ dec_ctx->profile = shm & 0x1F;
+
+ return 0;
+}
+
+/*
+ * FIMV1 post-SEQ_HEADER readback: common fields plus aspect-ratio /
+ * extended-PAR handling identical to the MPEG4 variant. Always
+ * returns 0.
+ */
+static int fimv1_post_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ struct mfc_dec_fimv1 *fimv1 = (struct mfc_dec_fimv1 *)dec_ctx->d_priv;
+ unsigned int shm;
+
+ /*
+ post_seq_start(ctx);
+ */
+ ctx->width = read_reg(MFC_SI_HRESOL);
+ ctx->height = read_reg(MFC_SI_VRESOL);
+
+ dec_ctx->nummindpb = read_reg(MFC_SI_BUF_NUMBER);
+ dec_ctx->numtotaldpb = dec_ctx->nummindpb + dec_ctx->numextradpb;
+
+ shm = read_shm(ctx, DISP_PIC_PROFILE);
+ dec_ctx->level = (shm >> 8) & 0xFF;
+ dec_ctx->profile = shm & 0x1F;
+
+ fimv1->aspect_ratio = read_shm(ctx, ASPECT_RATIO_INFO) & 0xF;
+ if (fimv1->aspect_ratio == 0xF) {
+ shm = read_shm(ctx, EXTENDED_PAR);
+ fimv1->ext_par_width = (shm >> 16) & 0xFFFF;
+ fimv1->ext_par_height = shm & 0xFFFF;
+ } else {
+ fimv1->ext_par_width = 0;
+ fimv1->ext_par_height = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * [5] set_init_arg() implementations
+ */
+/*
+ * [5] set_init_arg(): copy the parsed sequence results (frame size,
+ * aligned buffer size, total DPB count) into the userspace-facing
+ * decoder init-arg structure. Always returns 0.
+ */
+static int set_init_arg(struct mfc_inst_ctx *ctx, void *arg)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ struct mfc_dec_init_arg *dec_init_arg = (struct mfc_dec_init_arg *)arg;
+
+ dec_init_arg->out_frm_width = ctx->width;
+ dec_init_arg->out_frm_height = ctx->height;
+ dec_init_arg->out_buf_width = ALIGN(ctx->width, ALIGN_W);
+ dec_init_arg->out_buf_height = ALIGN(ctx->height, ALIGN_H);
+
+ dec_init_arg->out_dpb_cnt = dec_ctx->numtotaldpb;
+
+ return 0;
+}
+
+/*
+ * H.264 variant of set_init_arg(): common fields plus the four crop
+ * offsets captured in h264_post_seq_start(). Always returns 0.
+ */
+static int h264_set_init_arg(struct mfc_inst_ctx *ctx, void *arg)
+{
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv;
+ struct mfc_dec_init_arg *dec_init_arg = (struct mfc_dec_init_arg *)arg;
+
+ set_init_arg(ctx, arg);
+
+ dec_init_arg->out_crop_right_offset = h264->crop_r_ofs;
+ dec_init_arg->out_crop_left_offset = h264->crop_l_ofs;
+ dec_init_arg->out_crop_bottom_offset = h264->crop_b_ofs;
+ dec_init_arg->out_crop_top_offset = h264->crop_t_ofs;
+
+ return 0;
+}
+
+/*
+ * MPEG4 variant of set_init_arg(): currently just the common fields;
+ * aspect-ratio export remains commented out. Always returns 0.
+ */
+static int mpeg4_set_init_arg(struct mfc_inst_ctx *ctx, void *arg)
+{
+ /*
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv;
+ struct mfc_dec_init_arg *dec_init_arg = (struct mfc_dec_init_arg *)arg;
+ */
+
+ set_init_arg(ctx, arg);
+
+ /*
+ dec_init_arg->out_aspect_ratio = mpeg4->aspect_ratio;
+ dec_init_arg->out_ext_par_width = mpeg4->ext_par_width;
+ dec_init_arg->out_ext_par_height = mpeg4->ext_par_height;
+ */
+
+ return 0;
+}
+
+/*
+ * [6] set_codec_bufs() implementations
+ */
/*
 * Default no-op: codecs without internal work buffers need nothing
 * allocated at this stage.
 */
static int set_codec_bufs(struct mfc_inst_ctx *ctx)
{
	return 0;
}
+
/*
 * Allocate the H.264 codec-internal work buffers (vertical neighbor
 * motion-vector and intra-prediction buffers) on firmware port A and
 * program their device offsets (>> 11, i.e. 2KB units) into the
 * decoder registers.
 * Returns 0 on success, -1 on allocation failure; on the second
 * failure the already-allocated MBT_CODEC buffer is released.
 */
static int h264_set_codec_bufs(struct mfc_inst_ctx *ctx)
{
	struct mfc_alloc_buffer *alloc;

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_VERT_NB_MV_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBIP_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_VERT_NB_IP_ADR);

	return 0;
}
+
/*
 * Allocate the VC-1 codec-internal work buffers (neighbor DCAC,
 * upper-neighbor MV, SA MV, overlap-transform line and three bitplane
 * buffers) on firmware port A and program their device offsets
 * (>> 11, i.e. 2KB units) into the decoder registers.
 * Returns 0 on success, -1 on allocation failure; on any failure
 * after the first, every MBT_CODEC buffer of this instance is freed.
 */
static int vc1_set_codec_bufs(struct mfc_inst_ctx *ctx)
{
	struct mfc_alloc_buffer *alloc;

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBDCAC_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_NB_DCAC_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_UPNBMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_UP_NB_MV_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_SAMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_SA_MV_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_OTLINE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_OT_LINE_ADR);

	/* the three VC-1 bitplanes are programmed in 3..1 order */
	alloc = _mfc_alloc_buf(ctx, MFC_DEC_BITPLANE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_BITPLANE3_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_BITPLANE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_BITPLANE2_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_BITPLANE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_BITPLANE1_ADR);

	return 0;
}
+
/*
 * Allocate the MPEG-4 codec-internal work buffers (neighbor DCAC,
 * upper-neighbor MV, SA MV, overlap-transform line and syntax-parser
 * buffers) on firmware port A and program their device offsets
 * (>> 11, i.e. 2KB units) into the decoder registers.
 * Returns 0 on success, -1 on allocation failure; on any failure
 * after the first, every MBT_CODEC buffer of this instance is freed.
 */
static int mpeg4_set_codec_bufs(struct mfc_inst_ctx *ctx)
{
	struct mfc_alloc_buffer *alloc;

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBDCAC_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_NB_DCAC_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_UPNBMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_UP_NB_MV_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_SAMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_SA_MV_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_OTLINE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_OT_LINE_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_SYNPAR_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_SP_ADR);

	return 0;
}
+
/*
 * Allocate the H.263 codec-internal work buffers — the MPEG-4 set
 * minus the syntax-parser buffer — on firmware port A and program
 * their device offsets (>> 11, i.e. 2KB units) into the decoder
 * registers.
 * Returns 0 on success, -1 on allocation failure; on any failure
 * after the first, every MBT_CODEC buffer of this instance is freed.
 */
static int h263_set_codec_bufs(struct mfc_inst_ctx *ctx)
{
	struct mfc_alloc_buffer *alloc;

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_NBDCAC_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_NB_DCAC_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_UPNBMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_UP_NB_MV_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_SAMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_SA_MV_ADR);

	alloc = _mfc_alloc_buf(ctx, MFC_DEC_OTLINE_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
	if (alloc == NULL) {
		mfc_free_buf_type(ctx->id, MBT_CODEC);
		mfc_err("failed alloc codec buffer\n");

		return -1;
	}
	write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_OT_LINE_ADR);

	return 0;
}
+
+/*
+ * [7] set_dpbs() implementations
+ */
/*
 * Default DPB (decoded picture buffer) setup.
 *
 * Computes 8KB-aligned luma/chroma frame sizes (with extra guard
 * pixels around the visible frame), allocates 'numtotaldpb' chroma
 * buffers on port A and luma buffers on port B, programs their device
 * offsets (>> 11, i.e. 2KB units) into the per-index address
 * registers, publishes the per-frame sizes through shared memory and
 * finally writes the DPB count into the channel DPB config register.
 *
 * The first DPB pair is cleared to grey (chroma 0x80 / luma 0x00) so
 * streams starting with a P-frame reference defined data.
 *
 * Returns 0 on success, -1 on allocation failure (all MBT_DPB buffers
 * of this instance are released).
 */
static int set_dpbs(struct mfc_inst_ctx *ctx)
{
	struct mfc_alloc_buffer *alloc;
	int i;
	unsigned int reg;
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;

	/* width: 128B align, height: 32B align, size: 8KB align */
	/* add some guard buffers to luma & chroma */
	dec_ctx->lumasize = ALIGN(ctx->width + 24, ALIGN_W) * ALIGN(ctx->height + 16, ALIGN_H);
	dec_ctx->lumasize = ALIGN(dec_ctx->lumasize, ALIGN_8KB);
	dec_ctx->chromasize = ALIGN(ctx->width + 16, ALIGN_W) * ALIGN((ctx->height >> 1) + 4, ALIGN_H);
	dec_ctx->chromasize = ALIGN(dec_ctx->chromasize, ALIGN_8KB);

	for (i = 0; i < dec_ctx->numtotaldpb; i++) {
		/*
		 * allocate chroma buffer
		 */
#ifdef CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN
		alloc = _mfc_alloc_buf(ctx, dec_ctx->chromasize, \
				ALIGN_4KB, MBT_DPB | PORT_A);
#else
		alloc = _mfc_alloc_buf(ctx, dec_ctx->chromasize, ALIGN_2KB, MBT_DPB | PORT_A);
#endif
		if (alloc == NULL) {
			mfc_free_buf_type(ctx->id, MBT_DPB);
			mfc_err("failed alloc chroma buffer\n");

			return -1;
		}

		/* clear first DPB chroma buffer, reference buffer for
		   vectors starting with p-frame (skipped for DRM
		   content, where the CPU must not touch the buffer) */
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
		if ((i == 0) && (!ctx->drm_flag)) {
#else
		if (i == 0) {
#endif
			memset((void *)alloc->addr, 0x80, alloc->size);
			mfc_mem_cache_clean((void *)alloc->addr, alloc->size);
		}

		/*
		 * set chroma buffer address
		 */
		write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_CHROMA_ADR + (4 * i));

		/*
		 * allocate luma buffer
		 */
#ifdef CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN
		alloc = _mfc_alloc_buf(ctx, dec_ctx->lumasize, \
				ALIGN_4KB, MBT_DPB | PORT_B);
#else
		alloc = _mfc_alloc_buf(ctx, dec_ctx->lumasize, ALIGN_2KB, MBT_DPB | PORT_B);
#endif
		if (alloc == NULL) {
			mfc_free_buf_type(ctx->id, MBT_DPB);
			mfc_err("failed alloc luma buffer\n");

			return -1;
		}

		/* clear first DPB luma buffer, reference buffer for
		   vectors starting with p-frame */
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
		if ((i == 0) && (!ctx->drm_flag)) {
#else
		if (i == 0) {
#endif
			memset((void *)alloc->addr, 0x0, alloc->size);
			mfc_mem_cache_clean((void *)alloc->addr, alloc->size);
		}

		/*
		 * set luma buffer address
		 */
		write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_LUMA_ADR + (4 * i));
	}

	/* tell the firmware the per-frame buffer sizes; no MV buffers
	   are used by the non-H.264 codecs */
	write_shm(ctx, dec_ctx->lumasize, ALLOCATED_LUMA_DPB_SIZE);
	write_shm(ctx, dec_ctx->chromasize, ALLOCATED_CHROMA_DPB_SIZE);
	write_shm(ctx, 0, ALLOCATED_MV_SIZE);

	/* set DPB number */
	reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL);
	reg &= ~(0x3FFF);
	reg |= dec_ctx->numtotaldpb;
	write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL);

	return 0;
}
+
/*
 * H.264 DPB setup.
 *
 * Like set_dpbs() but without the guard pixels, and with one extra
 * motion-vector buffer per DPB entry (port B).  Here the LAST DPB
 * pair is the one cleared to grey (chroma 0x80 / luma 0x00) as the
 * default reference for streams starting with a P-frame.
 *
 * Returns 0 on success, -1 on allocation failure (all MBT_DPB buffers
 * of this instance are released).
 */
static int h264_set_dpbs(struct mfc_inst_ctx *ctx)
{
	struct mfc_alloc_buffer *alloc;
	int i;
	unsigned int reg;
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
	struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv;

	/* width: 128B align, height: 32B align, size: 8KB align */
	dec_ctx->lumasize = ALIGN(ctx->width, ALIGN_W) * ALIGN(ctx->height, ALIGN_H);
	dec_ctx->lumasize = ALIGN(dec_ctx->lumasize, ALIGN_8KB);
	dec_ctx->chromasize = ALIGN(ctx->width, ALIGN_W) * ALIGN(ctx->height >> 1, ALIGN_H);
	dec_ctx->chromasize = ALIGN(dec_ctx->chromasize, ALIGN_8KB);

	/* per-frame motion-vector buffer: quarter-height plane */
	h264->mvsize = ALIGN(ctx->width, ALIGN_W) * ALIGN(ctx->height >> 2, ALIGN_H);
	h264->mvsize = ALIGN(h264->mvsize, ALIGN_8KB);

	for (i = 0; i < dec_ctx->numtotaldpb; i++) {
		/*
		 * allocate chroma buffer
		 */
#ifdef CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN
		alloc = _mfc_alloc_buf(ctx, dec_ctx->chromasize, \
				ALIGN_4KB, MBT_DPB | PORT_A);
#else
		alloc = _mfc_alloc_buf(ctx, dec_ctx->chromasize, ALIGN_2KB, MBT_DPB | PORT_A);
#endif


		if (alloc == NULL) {
			mfc_free_buf_type(ctx->id, MBT_DPB);
			mfc_err("failed alloc chroma buffer\n");

			return -1;
		}

		/* clear last DPB chroma buffer, reference buffer for
		   vectors starting with p-frame (skipped for DRM
		   content, where the CPU must not touch the buffer) */
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
		if ((i == (dec_ctx->numtotaldpb - 1)) && (!ctx->drm_flag)) {
#else
		if (i == (dec_ctx->numtotaldpb - 1)) {
#endif
			memset((void *)alloc->addr, 0x80, alloc->size);
			mfc_mem_cache_clean((void *)alloc->addr, alloc->size);
		}

		/*
		 * set chroma buffer address
		 */
		write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_CHROMA_ADR + (4 * i));

		/*
		 * allocate luma buffer
		 */
#ifdef CONFIG_VIDEO_MFC5X_DEC_CHROMA_LUMA_4K_ALIGN
		alloc = _mfc_alloc_buf(ctx, dec_ctx->lumasize, \
				ALIGN_4KB, MBT_DPB | PORT_B);
#else
		alloc = _mfc_alloc_buf(ctx, dec_ctx->lumasize, ALIGN_2KB, MBT_DPB | PORT_B);
#endif
		if (alloc == NULL) {
			mfc_free_buf_type(ctx->id, MBT_DPB);
			mfc_err("failed alloc luma buffer\n");

			return -1;
		}

		/* clear last DPB luma buffer, reference buffer for
		   vectors starting with p-frame */
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
		if ((i == (dec_ctx->numtotaldpb - 1)) && (!ctx->drm_flag)) {
#else
		if (i == (dec_ctx->numtotaldpb - 1)) {
#endif
			memset((void *)alloc->addr, 0x0, alloc->size);
			mfc_mem_cache_clean((void *)alloc->addr, alloc->size);
		}

		/*
		 * set luma buffer address
		 */
		write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_LUMA_ADR + (4 * i));

		/*
		 * allocate mv buffer
		 */
		alloc = _mfc_alloc_buf(ctx, h264->mvsize, ALIGN_2KB, MBT_DPB | PORT_B);
		if (alloc == NULL) {
			mfc_free_buf_type(ctx->id, MBT_DPB);
			mfc_err("failed alloc mv buffer\n");

			return -1;
		}
		/*
		 * set mv buffer address
		 */
		write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_MV_ADR + (4 * i));
	}

	write_shm(ctx, dec_ctx->lumasize, ALLOCATED_LUMA_DPB_SIZE);
	write_shm(ctx, dec_ctx->chromasize, ALLOCATED_CHROMA_DPB_SIZE);

	write_shm(ctx, h264->mvsize, ALLOCATED_MV_SIZE);

	/* set DPB number */
	reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL);
	reg &= ~(0x3FFF);
	reg |= dec_ctx->numtotaldpb;
	write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL);

	return 0;
}
+
+/*
+ * [8] pre_frame_start() implementations
+ */
/* Default pre-frame hook: nothing to do before a decode command. */
static int pre_frame_start(struct mfc_inst_ctx *ctx)
{
	return 0;
}
+
+/*
+ * [9] post_frame_start() implementations
+ */
/* Default post-frame hook: nothing to collect after a decode command. */
static int post_frame_start(struct mfc_inst_ctx *ctx)
{
	return 0;
}
+
/*
 * H.264 post-frame hook: read back the frame-packing arrangement SEI
 * data the firmware parsed into shared memory (used for
 * stereoscopic/3D streams).  When no SEI is present in this frame the
 * cached data is zeroed so stale packing info is never reported to
 * user space via MFC_DEC_GETCONF_FRAME_PACKING.
 */
static int h264_post_frame_start(struct mfc_inst_ctx *ctx)
{
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
	struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv;
	unsigned int shm;

	/* h264->sei_parse */
	h264->fp.available = read_shm(ctx, FRAME_PACK_SEI_AVAIL) & 0x1;

	if (h264->fp.available) {
		h264->fp.arrangement_id = read_shm(ctx, FRAME_PACK_ARRGMENT_ID);

		/* unpack the bit-packed arrangement descriptor */
		shm = read_shm(ctx, FRAME_PACK_DEC_INFO);
		h264->fp.arrangement_cancel_flag = (shm >> 0) & 0x1;
		h264->fp.arrangement_type = (shm >> 1) & 0x7F;
		h264->fp.quincunx_sampling_flag = (shm >> 8) & 0x1;
		h264->fp.content_interpretation_type = (shm >> 9) & 0x3F;
		h264->fp.spatial_flipping_flag = (shm >> 15) & 0x1;
		h264->fp.frame0_flipped_flag = (shm >> 16) & 0x1;
		h264->fp.field_views_flag = (shm >> 17) & 0x1;
		h264->fp.current_frame_is_frame0_flag = (shm >> 18) & 0x1;

		/* 4-bit grid positions for both constituent frames */
		shm = read_shm(ctx, FRAME_PACK_GRID_POS);
		h264->fp.frame0_grid_pos_x = (shm >> 0) & 0xF;
		h264->fp.frame0_grid_pos_y = (shm >> 4) & 0xF;
		h264->fp.frame1_grid_pos_x = (shm >> 8) & 0xF;
		h264->fp.frame1_grid_pos_y = (shm >> 12) & 0xF;
	} else {
		memset((void *)&h264->fp, 0x00, sizeof(struct mfc_frame_packing));
	}

	return 0;
}
+
+/*
+ * [10] multi_frame_start() implementations
+ */
/* Default multi-data-frame hook: no packed-frame handling needed. */
static int multi_data_frame(struct mfc_inst_ctx *ctx)
{
	return 0;
}
+
/*
 * Packed-PB handling hook for MPEG-4: intended to process the second
 * frame of a packed PB pair after an I/P decode.  The body of the
 * check is not implemented yet, so the function is effectively a
 * no-op; it only short-circuits when packed PB is disabled.
 */
static int mpeg4_multi_data_frame(struct mfc_inst_ctx *ctx)
{
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
	struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv;

	if (!mpeg4->packedpb)
		return 0;

	/* FIXME: I_FRAME is valid? */
	if ((dec_ctx->decframetype == DEC_FRM_I) || (dec_ctx->decframetype == DEC_FRM_P)) {

	}

	return 0;
}
+
+/*
+ * [11] set_exe_arg() implementations
+ */
/*
 * Default per-execute argument hook: nothing to fill in for the
 * common case.
 */
static int set_exe_arg(struct mfc_inst_ctx *ctx, void *arg)
{
	/*
	struct mfc_dec_exe_arg *dec_exe_arg = (struct mfc_dec_exe_arg *)arg;
	*/

	return 0;
}
+
+/*
+ * [12] get_codec_cfg() implementations
+ */
+static int get_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
+{
+ /*struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;*/
+ union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg;
+
+ int ret = 0;
+
+ mfc_dbg("type: 0x%08x", type);
+
+ /*
+ MFC_DEC_GETCONF_CRC_DATA = DEC_GET,
+ MFC_DEC_GETCONF_BUF_WIDTH_HEIGHT
+ MFC_DEC_GETCONF_FRAME_TAG,
+ MFC_DEC_GETCONF_PIC_TIME,
+
+ MFC_DEC_GETCONF_ASPECT_RATIO:
+ MFC_DEC_GETCONF_EXTEND_PAR:
+ */
+
+ switch (type) {
+ case MFC_DEC_GETCONF_CRC_DATA:
+ usercfg->basic.values[0] = 0x12;
+ usercfg->basic.values[1] = 0x34;
+ usercfg->basic.values[2] = 0x56;
+ usercfg->basic.values[3] = 0x78;
+
+ break;
+
+ default:
+ mfc_dbg("not common cfg, try to codec specific: 0x%08x\n", type);
+ ret = 1;
+
+ break;
+ }
+
+ return ret;
+}
+
/*
 * H.264-specific get-config: tries the common handler first, then
 * services MFC_DEC_GETCONF_FRAME_PACKING by copying out the SEI
 * frame-packing data cached by h264_post_frame_start() (valid only
 * once the instance has executed at least one frame).
 * Returns 0 on success, MFC_STATE_INVALID on a state error, -2 on an
 * unknown type, or the common handler's result when <= 0.
 */
static int h264_get_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
{
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
	struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv;
	union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg;
	int ret = 0;

	mfc_dbg("type: 0x%08x", type);
	ret = get_codec_cfg(ctx, type, arg);
	if (ret <= 0)
		return ret;

	switch (type) {
	case MFC_DEC_GETCONF_FRAME_PACKING:
		if (ctx->state < INST_STATE_EXE) {
			mfc_dbg("invalid instance state: 0x%08x\n", type);
			return MFC_STATE_INVALID;
		}

		memcpy(&usercfg->frame_packing, &h264->fp, sizeof(struct mfc_frame_packing));

		break;

	default:
		mfc_err("invalid get config type: 0x%08x\n", type);
		ret = -2;

		break;
	}

	return ret;
}
+/*
+ * [13] set_codec_cfg() implementations
+ */
/*
 * Handle set-config requests common to all decoder codecs: last-frame
 * notification and DPB flush (both valid only while executing).
 * Several legacy options are kept below, disabled.
 * Returns 0 when handled, 1 when the caller should try the
 * codec-specific handler, MFC_STATE_INVALID on a state error.
 */
static int set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
{
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
	union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg;
	int ret = 0;

	mfc_dbg("type: 0x%08x", type);

	/*
	MFC_DEC_SETCONF_FRAME_TAG,
	...
	*/

	switch (type) {
	/*
	case MFC_DEC_SETCONF_EXTRA_BUFFER_NUM:
		if (ctx->state >= INST_STATE_INIT)
			return MFC_STATE_INVALID;

		if ((usercfg->basic.values[0] >= 0) && (usercfg->basic.values[0] <= MFC_MAX_EXTRA_DPB)) {
			dec_ctx->numextradpb = usercfg->basic.values[0];
		} else {
			dec_ctx->numextradpb = MFC_MAX_EXTRA_DPB;
			mfc_warn("invalid extra dpb buffer number: %d", usercfg->basic.values[0]);
			mfc_warn("set %d by default", MFC_MAX_EXTRA_DPB);
		}

		break;
	*/
	case MFC_DEC_SETCONF_IS_LAST_FRAME:
		mfc_dbg("ctx->state: 0x%08x", ctx->state);

		if (ctx->state < INST_STATE_EXE) {
			mfc_dbg("invalid instance state: 0x%08x\n", type);
			return MFC_STATE_INVALID;
		}

		if (usercfg->basic.values[0] > 0)
			dec_ctx->lastframe = 1;
		else
			dec_ctx->lastframe = 0;

		break;
	/*
	case MFC_DEC_SETCONF_SLICE_ENABLE:
		if (ctx->state >= INST_STATE_INIT)
			return MFC_STATE_INVALID;

		if (usercfg->basic.values[0] > 0)
			dec_ctx->slice = 1;
		else
			dec_ctx->slice = 0;

		break;
	*/
	/*
	case MFC_DEC_SETCONF_CRC_ENABLE:
		if (ctx->state >= INST_STATE_INIT)
			return MFC_STATE_INVALID;

		if (usercfg->basic.values[0] > 0)
			dec_ctx->crc = 1;
		else
			dec_ctx->crc = 0;

		break;
	*/
	case MFC_DEC_SETCONF_DPB_FLUSH:
		if (ctx->state < INST_STATE_EXE) {
			mfc_dbg("invalid instance state: 0x%08x\n", type);
			return MFC_STATE_INVALID;
		}

		if (usercfg->basic.values[0] > 0) {
			dec_ctx->dpbflush = 1;
		}
		break;

	default:
		mfc_dbg("not common cfg, try to codec specific: 0x%08x\n", type);
		ret = 1;

		break;
	}

	return ret;
}
+
/*
 * H.264-specific set-config: display-delay override and SEI
 * frame-packing parse enable.  Both must be set before sequence init
 * (state < INST_STATE_INIT) because they feed the init stage.  Common
 * options are delegated to set_codec_cfg() first.
 * Returns 0 on success, MFC_STATE_INVALID on a state error, -2 on an
 * unknown type, or the common handler's result when <= 0.
 */
static int h264_set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
{
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
	struct mfc_dec_h264 *h264 = (struct mfc_dec_h264 *)dec_ctx->d_priv;
	union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg;
	int ret;

	mfc_dbg("type: 0x%08x", type);

	ret = set_codec_cfg(ctx, type, arg);
	if (ret <= 0)
		return ret;

	ret = 0;

	switch (type) {
	case MFC_DEC_SETCONF_DISPLAY_DELAY:
		if (ctx->state >= INST_STATE_INIT) {
			mfc_dbg("invalid instance state: 0x%08x\n", type);
			return MFC_STATE_INVALID;
		}

		/* out-of-range requests are clamped to the maximum */
		h264->dispdelay_en = 1;
		if ((usercfg->basic.values[0] >= 0) && (usercfg->basic.values[0] <= MFC_MAX_DISP_DELAY)) {
			h264->dispdelay_val = usercfg->basic.values[0];
		} else {
			h264->dispdelay_val = MFC_MAX_DISP_DELAY;
			mfc_warn("invalid diplay delay count: %d", usercfg->basic.values[0]);
			mfc_warn("set %d by default", MFC_MAX_DISP_DELAY);
		}

		break;

	case MFC_DEC_SETCONF_SEI_PARSE:
		mfc_dbg("ctx->state: 0x%08x", ctx->state);

		if (ctx->state >= INST_STATE_INIT) {
			mfc_dbg("invalid instance state: 0x%08x\n", type);
			return MFC_STATE_INVALID;
		}

		if (usercfg->basic.values[0] > 0)
			h264->sei_parse = 1;
		else
			h264->sei_parse = 0;

		break;

	default:
		mfc_err("invalid set cfg type: 0x%08x\n", type);
		ret = -2;

		break;
	}

	return ret;
}
+
/*
 * MPEG-4-specific set-config: post-filter enable (pre-init only).
 * Common options are delegated to set_codec_cfg() first.
 * Returns 0 on success, MFC_STATE_INVALID on a state error, -2 on an
 * unknown type, or the common handler's result when <= 0.
 */
static int mpeg4_set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
{
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
	struct mfc_dec_mpeg4 *mpeg4 = (struct mfc_dec_mpeg4 *)dec_ctx->d_priv;
	union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg;
	int ret;

	mfc_dbg("type: 0x%08x", type);

	ret = set_codec_cfg(ctx, type, arg);
	if (ret <= 0)
		return ret;

	ret = 0;

	switch (type) {
	case MFC_DEC_SETCONF_POST_ENABLE:
		if (ctx->state >= INST_STATE_INIT)
			return MFC_STATE_INVALID;

		if (usercfg->basic.values[0] > 0)
			mpeg4->postfilter = 1;
		else
			mpeg4->postfilter = 0;

		break;
/* JYSHIN — disabled; NOTE(review): both branches assign 1, the else
   branch presumably meant packedpb = 0 — fix before re-enabling.
	case MFC_DEC_SETCONF_PACKEDPB:
		if (ctx->state < INST_STATE_OPEN)
			return -1;

		if (usercfg->basic.values[0] > 0)
			mpeg4->packedpb = 1;
		else
			mpeg4->packedpb = 1;

		break;
*/
	default:
		mfc_err("invalid set cfg type: 0x%08x\n", type);
		ret = -2;

		break;
	}

	return ret;
}
+
/*
 * FIMV1-specific set-config: the stream carries no frame size, so the
 * width/height must be supplied by user space before sequence init.
 * Common options are delegated to set_codec_cfg() first.
 * Returns 0 on success, MFC_STATE_INVALID on a state error, -2 on an
 * unknown type, or the common handler's result when <= 0.
 */
static int fimv1_set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
{
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
	struct mfc_dec_fimv1 *fimv1 = (struct mfc_dec_fimv1 *)dec_ctx->d_priv;
	union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg;
	int ret;

	mfc_dbg("type: 0x%08x", type);

	ret = set_codec_cfg(ctx, type, arg);
	if (ret <= 0)
		return ret;

	ret = 0;

	switch (type) {
	case MFC_DEC_SETCONF_FIMV1_WIDTH_HEIGHT:
		if (ctx->state >= INST_STATE_INIT)
			return MFC_STATE_INVALID;

		fimv1->width = usercfg->basic.values[0];
		fimv1->height = usercfg->basic.values[1];

		break;
/* JYSHIN — disabled; NOTE(review): "basic.[0]" is a syntax error and
   both branches assign 1 — fix before re-enabling.
	case MFC_DEC_SETCONF_PACKEDPB:
		if (ctx->state < INST_STATE_OPEN)
			return -1;

		if (usercfg->basic.[0] > 0)
			fimv1->packedpb = 1;
		else
			fimv1->packedpb = 1;

		break;
*/
	default:
		mfc_err("invalid set cfg type: 0x%08x\n", type);
		ret = -2;

		break;
	}

	return ret;
}
+
/*
 * Static decoder descriptors: one mfc_dec_info per supported codec,
 * bundling the firmware codec id, the size of the codec-private
 * context and the per-stage operation table consumed by
 * mfc_set_decoder().  NULL entries mean "stage not needed for this
 * codec".  Note that XVID and FIMV2-4 share the MPEG-4 machinery, and
 * MPEG-1/MPEG-2 share firmware id 3.
 */
static struct mfc_dec_info unknown_dec = {
	.name		= "UNKNOWN",
	.codectype	= UNKNOWN_TYPE,
	.codecid	= -1,	/* negative id: mfc_set_decoder() rejects it */
	.d_priv_size	= 0,
	/*
	 * The unknown codec operations will be not call,
	 * unused default operations raise build warning.
	 */
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= pre_seq_start,
		.post_seq_start		= post_seq_start,
		.set_init_arg		= set_init_arg,
		.set_codec_bufs		= set_codec_bufs,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= pre_frame_start,
		.post_frame_start	= post_frame_start,
		.multi_data_frame	= multi_data_frame,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= set_codec_cfg,
	},
};

static struct mfc_dec_info h264_dec = {
	.name		= "H264",
	.codectype	= H264_DEC,
	.codecid	= 0,
	.d_priv_size	= sizeof(struct mfc_dec_h264),
	.c_ops		= {
		.alloc_ctx_buf		= h264_alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= h264_pre_seq_start,
		.post_seq_start		= h264_post_seq_start,
		.set_init_arg		= h264_set_init_arg,
		.set_codec_bufs		= h264_set_codec_bufs,
		.set_dpbs		= h264_set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= h264_post_frame_start,
		.multi_data_frame	= NULL,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= h264_get_codec_cfg,
		.set_codec_cfg		= h264_set_codec_cfg,
	},
};

static struct mfc_dec_info vc1_dec = {
	.name		= "VC1",
	.codectype	= VC1_DEC,
	.codecid	= 1,
	.d_priv_size	= 0,
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= pre_seq_start,
		.post_seq_start		= vc1_post_seq_start,
		.set_init_arg		= set_init_arg,
		.set_codec_bufs		= vc1_set_codec_bufs,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= NULL,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= set_codec_cfg,
	},
};

static struct mfc_dec_info mpeg4_dec = {
	.name		= "MPEG4",
	.codectype	= MPEG4_DEC,
	.codecid	= 2,
	.d_priv_size	= sizeof(struct mfc_dec_mpeg4),
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= mpeg4_pre_seq_start,
		.post_seq_start		= mpeg4_post_seq_start,
		.set_init_arg		= mpeg4_set_init_arg,
		.set_codec_bufs		= mpeg4_set_codec_bufs,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= NULL, /* FIXME: mpeg4_multi_data_frame */
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= mpeg4_set_codec_cfg,
	},
};

/* XVID reuses the MPEG-4 firmware id and operations */
static struct mfc_dec_info xvid_dec = {
	.name		= "XVID",
	.codectype	= XVID_DEC,
	.codecid	= 2,
	.d_priv_size	= sizeof(struct mfc_dec_mpeg4),
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= mpeg4_pre_seq_start,
		.post_seq_start		= mpeg4_post_seq_start,
		.set_init_arg		= mpeg4_set_init_arg,
		.set_codec_bufs		= mpeg4_set_codec_bufs,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= NULL, /* FIXME: mpeg4_multi_data_frame */
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= mpeg4_set_codec_cfg,
	},
};

static struct mfc_dec_info mpeg1_dec = {
	.name		= "MPEG1",
	.codectype	= MPEG1_DEC,
	.codecid	= 3,
	.d_priv_size	= 0,
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= pre_seq_start,
		.post_seq_start		= post_seq_start,
		.set_init_arg		= set_init_arg,
		.set_codec_bufs		= NULL,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= NULL,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= set_codec_cfg,
	},
};

/* MPEG-2 shares firmware id 3 with MPEG-1 */
static struct mfc_dec_info mpeg2_dec = {
	.name		= "MPEG2",
	.codectype	= MPEG2_DEC,
	.codecid	= 3,
	.d_priv_size	= 0,
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= pre_seq_start,
		.post_seq_start		= post_seq_start,
		.set_init_arg		= set_init_arg,
		.set_codec_bufs		= NULL,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= NULL,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= set_codec_cfg,
	},
};

static struct mfc_dec_info h263_dec = {
	.name		= "H263",
	.codectype	= H263_DEC,
	.codecid	= 4,
	.d_priv_size	= 0,
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= pre_seq_start,
		.post_seq_start		= post_seq_start,
		.set_init_arg		= set_init_arg,
		.set_codec_bufs		= h263_set_codec_bufs,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= NULL,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= set_codec_cfg,
	},
};

static struct mfc_dec_info vc1rcv_dec = {
	.name		= "VC1RCV",
	.codectype	= VC1RCV_DEC,
	.codecid	= 5,
	.d_priv_size	= 0,
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= pre_seq_start,
		.post_seq_start		= vc1_post_seq_start,
		.set_init_arg		= set_init_arg,
		.set_codec_bufs		= vc1_set_codec_bufs,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= NULL,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= set_codec_cfg,
	},
};

static struct mfc_dec_info fimv1_dec = {
	.name		= "FIMV1",
	.codectype	= FIMV1_DEC,
	.codecid	= 6,
	.d_priv_size	= sizeof(struct mfc_dec_fimv1),
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= fimv1_pre_seq_start,
		.post_seq_start		= fimv1_post_seq_start,
		.set_init_arg		= set_init_arg,	/* FIMXE */
		.set_codec_bufs		= mpeg4_set_codec_bufs,	/* FIXME */
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= mpeg4_multi_data_frame,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= fimv1_set_codec_cfg,
	},
};

static struct mfc_dec_info fimv2_dec = {
	.name		= "FIMV2",
	.codectype	= FIMV2_DEC,
	.codecid	= 7,
	.d_priv_size	= sizeof(struct mfc_dec_mpeg4),
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= mpeg4_pre_seq_start,
		.post_seq_start		= mpeg4_post_seq_start,
		.set_init_arg		= mpeg4_set_init_arg,
		.set_codec_bufs		= mpeg4_set_codec_bufs,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= mpeg4_multi_data_frame,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= mpeg4_set_codec_cfg,
	},
};

static struct mfc_dec_info fimv3_dec = {
	.name		= "FIMV3",
	.codectype	= FIMV3_DEC,
	.codecid	= 8,
	.d_priv_size	= sizeof(struct mfc_dec_mpeg4),
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= mpeg4_pre_seq_start,
		.post_seq_start		= mpeg4_post_seq_start,
		.set_init_arg		= mpeg4_set_init_arg,
		.set_codec_bufs		= mpeg4_set_codec_bufs,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= mpeg4_multi_data_frame,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= mpeg4_set_codec_cfg,
	},
};

static struct mfc_dec_info fimv4_dec = {
	.name		= "FIMV4",
	.codectype	= FIMV4_DEC,
	.codecid	= 9,
	.d_priv_size	= sizeof(struct mfc_dec_mpeg4),
	.c_ops		= {
		.alloc_ctx_buf		= alloc_ctx_buf,
		.alloc_desc_buf		= alloc_desc_buf,
		.pre_seq_start		= mpeg4_pre_seq_start,
		.post_seq_start		= mpeg4_post_seq_start,
		.set_init_arg		= mpeg4_set_init_arg,
		.set_codec_bufs		= mpeg4_set_codec_bufs,
		.set_dpbs		= set_dpbs,
		.pre_frame_start	= NULL,
		.post_frame_start	= NULL,
		.multi_data_frame	= mpeg4_multi_data_frame,
		.set_exe_arg		= set_exe_arg,
		.get_codec_cfg		= get_codec_cfg,
		.set_codec_cfg		= mpeg4_set_codec_cfg,
	},
};
+
/*
 * Scan the first remainSize bytes of src_mem for an MPEG-4 start-code
 * prefix (0x00 0x00 0x01).
 *
 * Returns the byte offset of the prefix, or -1 when none is found.
 *
 * Fix: remainSize is unsigned, so the original "index < remainSize - 3"
 * bound underflowed to a huge value when remainSize < 3 and read far
 * past the end of the buffer.  Short buffers now bail out explicitly;
 * the search bound is otherwise unchanged.
 */
static int CheckMPEG4StartCode(unsigned char *src_mem, unsigned int remainSize)
{
	unsigned int index;

	/* too short to contain (or meaningfully search for) a 3-byte prefix */
	if (remainSize < 3)
		return -1;

	for (index = 0; index < remainSize - 3; index++) {
		if ((src_mem[index] == 0x00) && (src_mem[index + 1] == 0x00) &&
		    (src_mem[index + 2] == 0x01))
			return index;
	}

	return -1;
}
+
+static int CheckDecStartCode(unsigned char *src_mem,
+ unsigned int nstreamSize,
+ SSBSIP_MFC_CODEC_TYPE nCodecType)
+{
+ unsigned int index = 0;
+ /* Check Start Code within "isearchSize" bytes */
+ unsigned int isearchSize = 20;
+ unsigned int nShift = 0;
+ unsigned char nFlag = 0xFF;
+
+ if (nCodecType == H263_DEC) {
+ nFlag = 0x08;
+ nShift = 4;
+ } else if (nCodecType == MPEG4_DEC) {
+ nFlag = 0x01;
+ nShift = 0;
+ } else if (nCodecType == H264_DEC) {
+ nFlag = 0x01;
+ nShift = 0;
+ } else
+ nFlag = 0xFF;
+
+ /* Last frame detection from user */
+ if (nstreamSize == 0)
+ nFlag = 0xFF;
+
+ if (nFlag == 0xFF)
+ return 0;
+
+ if (nstreamSize > 3) {
+ if (nstreamSize > isearchSize) {
+ for (index = 0; index < isearchSize-3; index++) {
+ if ((src_mem[index] == 0x00) &&
+ (src_mem[index+1] == 0x00) &&
+ ((src_mem[index+2] >> nShift) == nFlag))
+ return index;
+ }
+ } else {
+ for (index = 0; index < nstreamSize - 3; index++) {
+ if ((src_mem[index] == 0x00) &&
+ (src_mem[index+1] == 0x00) &&
+ ((src_mem[index+2] >> nShift) == nFlag))
+ return index;
+ }
+ }
+ }
+
+ return -1;
+}
+
/*
 * Populate the global 'mfc_decoders' registry with every supported
 * decoder descriptor.  The UNKNOWN fallback entry goes in first;
 * mfc_set_decoder() rejects it via its negative codecid.
 */
void mfc_init_decoders(void)
{
	list_add_tail(&unknown_dec.list, &mfc_decoders);

	list_add_tail(&h264_dec.list, &mfc_decoders);
	list_add_tail(&vc1_dec.list, &mfc_decoders);
	list_add_tail(&mpeg4_dec.list, &mfc_decoders);
	list_add_tail(&xvid_dec.list, &mfc_decoders);
	list_add_tail(&mpeg1_dec.list, &mfc_decoders);
	list_add_tail(&mpeg2_dec.list, &mfc_decoders);
	list_add_tail(&h263_dec.list, &mfc_decoders);
	list_add_tail(&vc1rcv_dec.list, &mfc_decoders);
	list_add_tail(&fimv1_dec.list, &mfc_decoders);
	list_add_tail(&fimv2_dec.list, &mfc_decoders);
	list_add_tail(&fimv3_dec.list, &mfc_decoders);
	list_add_tail(&fimv4_dec.list, &mfc_decoders);

	/* FIXME: 19, 20 */
}
+
/*
 * Bind a registered decoder to the instance context.
 *
 * Walks the global 'mfc_decoders' list for an entry whose codectype
 * matches, allocates the generic decoder context (ctx->c_priv) plus
 * the codec-private area (d_priv, size taken from the descriptor) and
 * installs the descriptor's operation table on the instance.
 *
 * Returns the non-negative firmware codec id on success, -ENOMEM on
 * allocation failure, or -1 (left in ctx->codecid) when the codec
 * type is unknown or marked unusable (codecid < 0, e.g. the UNKNOWN
 * fallback entry).
 */
static int mfc_set_decoder(struct mfc_inst_ctx *ctx, SSBSIP_MFC_CODEC_TYPE codectype)
{
	struct list_head *pos;
	struct mfc_dec_info *decoder;
	struct mfc_dec_ctx *dec_ctx;

	ctx->codecid = -1;

	/* find and set codec private */
	list_for_each(pos, &mfc_decoders) {
		decoder = list_entry(pos, struct mfc_dec_info, list);

		if (decoder->codectype == codectype) {
			if (decoder->codecid < 0)
				break;

			/* Allocate Decoder context memory */
			dec_ctx = kzalloc(sizeof(struct mfc_dec_ctx), GFP_KERNEL);
			if (!dec_ctx) {
				mfc_err("failed to allocate codec private\n");
				return -ENOMEM;
			}
			ctx->c_priv = dec_ctx;

			/* Allocate Decoder context private memory */
			dec_ctx->d_priv = kzalloc(decoder->d_priv_size, GFP_KERNEL);
			if (!dec_ctx->d_priv) {
				mfc_err("failed to allocate decoder private\n");
				kfree(dec_ctx);
				ctx->c_priv = NULL;
				return -ENOMEM;
			}

			ctx->codecid = decoder->codecid;
			ctx->type = DECODER;
			ctx->c_ops = (struct codec_operations *)&decoder->c_ops;

			break;
		}
	}

	if (ctx->codecid < 0)
		mfc_err("couldn't find proper decoder codec type: %d\n", codectype);

	return ctx->codecid;
}
+
/*
 * Program the bitstream (CPB) address/size and the descriptor buffer
 * for the next decode command, and store the start-byte offset in
 * shared memory.  For cacheable user buffers all CPU caches (L1 and
 * outer) are flushed first so the MFC reads coherent stream data.
 */
static void mfc_set_stream_info(
	struct mfc_inst_ctx *ctx,
	unsigned int addr,
	unsigned int size,
	unsigned int ofs)
{

	if (ctx->buf_cache_type == CACHE) {
		flush_all_cpu_caches();
		outer_flush_all();
	}

	write_reg(addr, MFC_SI_CH1_ES_ADR);
	write_reg(size, MFC_SI_CH1_ES_SIZE);

	/* FIXME: IOCTL_MFC_GET_IN_BUF size */
	write_reg(MFC_CPB_SIZE, MFC_SI_CH1_CPB_SIZE);

	write_reg(ctx->descbufofs, MFC_SI_CH1_DESC_ADR);
	write_reg(ctx->descbufsize, MFC_SI_CH1_DESC_SIZE);

	/* FIXME: right position */
	write_shm(ctx, ofs, START_BYTE_NUM);
}
+
+int mfc_init_decoding(struct mfc_inst_ctx *ctx, union mfc_args *args)
+{
+ struct mfc_dec_init_arg *init_arg = (struct mfc_dec_init_arg *)args;
+ struct mfc_dec_ctx *dec_ctx = NULL;
+ struct mfc_pre_cfg *precfg;
+ struct list_head *pos, *nxt;
+ int ret;
+ long mem_ofs;
+
+ ret = mfc_set_decoder(ctx, init_arg->in_codec_type);
+ if (ret < 0) {
+ mfc_err("failed to setup decoder codec\n");
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_codec_setup;
+ }
+
+ dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+
+ dec_ctx->streamaddr = init_arg->in_strm_buf;
+ dec_ctx->streamsize = init_arg->in_strm_size;
+
+ mfc_dbg("stream size: %d", init_arg->in_strm_size);
+
+ dec_ctx->crc = init_arg->in_crc;
+ dec_ctx->pixelcache = init_arg->in_pixelcache;
+ dec_ctx->slice = 0;
+ mfc_warn("Slice Mode disabled forcefully\n");
+ dec_ctx->numextradpb = init_arg->in_numextradpb;
+ dec_ctx->dpbflush = 0;
+ dec_ctx->ispackedpb = init_arg->in_packed_PB;
+
+ /*
+ * assign pre configuration values to instance context
+ */
+ list_for_each_safe(pos, nxt, &ctx->presetcfgs) {
+ precfg = list_entry(pos, struct mfc_pre_cfg, list);
+
+ if (ctx->c_ops->set_codec_cfg) {
+ ret = ctx->c_ops->set_codec_cfg(ctx, precfg->type, precfg->values);
+ if (ret < 0)
+ mfc_warn("cannot set preset config type: 0x%08x: %d",
+ precfg->type, ret);
+ }
+ }
+
+ mfc_set_inst_state(ctx, INST_STATE_SETUP);
+
+ /*
+ * allocate context buffer
+ */
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ if ((!ctx->drm_flag) && (ctx->c_ops->alloc_ctx_buf)) {
+#else
+ if (ctx->c_ops->alloc_ctx_buf) {
+#endif
+ if (ctx->c_ops->alloc_ctx_buf(ctx) < 0) {
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_ctx_buf;
+ }
+ }
+
+ /* [crc, pixelcache] */
+ ret = mfc_cmd_inst_open(ctx);
+ if (ret < 0)
+ goto err_inst_open;
+
+ mfc_set_inst_state(ctx, INST_STATE_OPEN);
+
+ if (init_shm(ctx) < 0) {
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_shm_init;
+ }
+
+ /*
+ * allocate descriptor buffer
+ */
+ if (ctx->c_ops->alloc_desc_buf) {
+ if (ctx->c_ops->alloc_desc_buf(ctx) < 0) {
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_desc_buf;
+ }
+ }
+
+ /*
+ * execute pre sequence start operation
+ * [slice]
+ */
+ if (ctx->c_ops->pre_seq_start) {
+ if (ctx->c_ops->pre_seq_start(ctx) < 0) {
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_pre_seq;
+ }
+ }
+
+ /* FIXME: move to pre_seq_start */
+ mem_ofs = mfc_mem_ext_ofs(dec_ctx->streamaddr, dec_ctx->streamsize, PORT_A);
+ if (mem_ofs < 0) {
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_mem_ofs;
+ } else {
+ mfc_set_stream_info(ctx, mem_ofs >> 11, dec_ctx->streamsize, 0);
+ }
+
+ ret = mfc_cmd_seq_start(ctx);
+ if (ret < 0)
+ goto err_seq_start;
+
+ /* [numextradpb] */
+ if (ctx->c_ops->post_seq_start) {
+ if (ctx->c_ops->post_seq_start(ctx) < 0) {
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_post_seq;
+ }
+ }
+
+ if (ctx->height > MAX_VER_SIZE) {
+ if (ctx->height > MAX_HOR_SIZE) {
+ mfc_err("Not support resolution: %dx%d\n",
+ ctx->width, ctx->height);
+ goto err_chk_res;
+ }
+
+ if (ctx->width > MAX_VER_SIZE) {
+ mfc_err("Not support resolution: %dx%d\n",
+ ctx->width, ctx->height);
+ goto err_chk_res;
+ }
+ } else {
+ if (ctx->width > MAX_HOR_SIZE) {
+ mfc_err("Not support resolution: %dx%d\n",
+ ctx->width, ctx->height);
+ goto err_chk_res;
+ }
+ }
+
+ if (ctx->c_ops->set_init_arg) {
+ if (ctx->c_ops->set_init_arg(ctx, (void *)init_arg) < 0) {
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_set_arg;
+ }
+ }
+
+ if (dec_ctx->numtotaldpb < 7)
+ dec_ctx->numtotaldpb = 7;
+
+ mfc_dbg("H: %d, W: %d, DPB_Count: %d", ctx->width, ctx->height,
+ dec_ctx->numtotaldpb);
+
+#if defined(CONFIG_BUSFREQ)
+#if defined(CONFIG_CPU_EXYNOS4210)
+ /* Fix MFC & Bus Frequency for better performance */
+ if (atomic_read(&ctx->dev->busfreq_lock_cnt) == 0) {
+ exynos4_busfreq_lock(DVFS_LOCK_ID_MFC, BUS_L1);
+ mfc_dbg("Bus FREQ locked to L1\n");
+ }
+ atomic_inc(&ctx->dev->busfreq_lock_cnt);
+ ctx->busfreq_flag = true;
+#else
+ /* Lock MFC & Bus FREQ for high resolution */
+ if (ctx->width >= MAX_HOR_RES || ctx->height >= MAX_VER_RES) {
+ if (atomic_read(&ctx->dev->busfreq_lock_cnt) == 0) {
+ exynos4_busfreq_lock(DVFS_LOCK_ID_MFC, BUS_L0);
+ mfc_dbg("Bus FREQ locked to L0\n");
+ }
+
+ atomic_inc(&ctx->dev->busfreq_lock_cnt);
+ ctx->busfreq_flag = true;
+ }
+#endif
+#endif
+
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_EXYNOS4_CPUFREQ)
+ if ((ctx->width >= 1280 && ctx->height >= 720)
+ || (ctx->width >= 720 && ctx->height >= 1280)) {
+ if (atomic_read(&ctx->dev->cpufreq_lock_cnt) == 0) {
+ if (0 == ctx->dev->cpufreq_level) /* 500MHz */
+ exynos_cpufreq_get_level(500000, &ctx->dev->cpufreq_level);
+ exynos_cpufreq_lock(DVFS_LOCK_ID_MFC, ctx->dev->cpufreq_level);
+ mfc_dbg("[%s] CPU Freq Locked 500MHz!\n", __func__);
+ }
+ atomic_inc(&ctx->dev->cpufreq_lock_cnt);
+ ctx->cpufreq_flag = true;
+ }
+#endif
+
+#ifdef CONFIG_BUSFREQ_OPP
+ if (HD_MOVIE_SIZE_MULTIPLY_WIDTH_HEIGHT > (ctx->width * ctx->height)) {
+ if (atomic_read(&ctx->dev->dmcthreshold_lock_cnt) == 0) {
+ mfc_info("Implement set dmc_max_threshold\n");
+ if (soc_is_exynos4212()) {
+ dmc_max_threshold =
+ EXYNOS4212_DMC_MAX_THRESHOLD + 5;
+ } else if (soc_is_exynos4412()) {
+ dmc_max_threshold =
+ EXYNOS4412_DMC_MAX_THRESHOLD + 5;
+ } else {
+ pr_err("Unsupported model.\n");
+ return -EINVAL;
+ }
+ }
+ atomic_inc(&ctx->dev->dmcthreshold_lock_cnt);
+ ctx->dmcthreshold_flag = true;
+ }
+#endif
+ /*
+ * allocate & set codec buffers
+ */
+ if (ctx->c_ops->set_codec_bufs) {
+ if (ctx->c_ops->set_codec_bufs(ctx) < 0) {
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_codec_bufs;
+ }
+ }
+
+ /*
+ * allocate & set DPBs
+ */
+ if (ctx->c_ops->set_dpbs) {
+ if (ctx->c_ops->set_dpbs(ctx) < 0) {
+ ret = MFC_DEC_INIT_FAIL;
+ goto err_dpbs_set;
+ }
+ }
+
+ ret = mfc_cmd_init_buffers(ctx);
+ if (ret < 0)
+ goto err_buf_init;
+
+ mfc_set_inst_state(ctx, INST_STATE_INIT);
+
+ while (!list_empty(&ctx->presetcfgs)) {
+ precfg = list_entry((&ctx->presetcfgs)->next,
+ struct mfc_pre_cfg, list);
+
+ mfc_dbg("remove used preset config [0x%08x]\n",
+ precfg->type);
+
+ list_del(&precfg->list);
+ kfree(precfg);
+ }
+ INIT_LIST_HEAD(&ctx->presetcfgs);
+
+ mfc_print_buf();
+
+ return MFC_OK;
+
+err_buf_init:
+ mfc_free_buf_type(ctx->id, MBT_DPB);
+
+err_dpbs_set:
+ mfc_free_buf_type(ctx->id, MBT_CODEC);
+
+err_codec_bufs:
+#if defined(CONFIG_BUSFREQ)
+ /* Release MFC & Bus Frequency lock for High resolution */
+ if (ctx->busfreq_flag == true) {
+ atomic_dec(&ctx->dev->busfreq_lock_cnt);
+ ctx->busfreq_flag = false;
+
+ if (atomic_read(&ctx->dev->busfreq_lock_cnt) == 0) {
+ exynos4_busfreq_lock_free(DVFS_LOCK_ID_MFC);
+ mfc_dbg("Bus FREQ released\n");
+ }
+ }
+#endif
+
+err_set_arg:
+err_chk_res:
+err_post_seq:
+err_seq_start:
+#ifdef DUMP_STREAM
+ mfc_fw_debug();
+ dump_stream(dec_ctx->streamaddr, dec_ctx->streamsize);
+#endif
+
+err_mem_ofs:
+err_pre_seq:
+ mfc_free_buf_type(ctx->id, MBT_DESC);
+
+err_desc_buf:
+ mfc_free_buf_type(ctx->id, MBT_SHM);
+
+ ctx->shm = NULL;
+ ctx->shmofs = 0;
+
+err_shm_init:
+ mfc_cmd_inst_close(ctx);
+
+ ctx->state = INST_STATE_SETUP;
+
+err_inst_open:
+ mfc_free_buf_type(ctx->id, MBT_CTX);
+
+err_ctx_buf:
+ if (dec_ctx->d_priv)
+ kfree(dec_ctx->d_priv);
+
+ kfree(dec_ctx);
+ ctx->c_priv = NULL;
+
+ ctx->codecid = -1;
+ ctx->type = 0;
+ ctx->c_ops = NULL;
+
+ ctx->state = INST_STATE_CREATE;
+
+err_codec_setup:
+ return ret;
+}
+
+int mfc_change_resolution(struct mfc_inst_ctx *ctx, struct mfc_dec_exe_arg *exe_arg)
+{
+ int ret;
+
+ mfc_free_buf_type(ctx->id, MBT_DPB);
+
+ ret = mfc_cmd_seq_start(ctx);
+ if (ret < 0)
+ return ret;
+
+ /* [numextradpb] */
+ if (ctx->c_ops->post_seq_start) {
+ if (ctx->c_ops->post_seq_start(ctx) < 0)
+ return MFC_DEC_INIT_FAIL;
+ }
+
+ if (ctx->height > MAX_VER_SIZE) {
+ if (ctx->height > MAX_HOR_SIZE) {
+ mfc_err("Not support resolution: %dx%d\n",
+ ctx->width, ctx->height);
+ return MFC_DEC_INIT_FAIL;
+ }
+
+ if (ctx->width > MAX_VER_SIZE) {
+ mfc_err("Not support resolution: %dx%d\n",
+ ctx->width, ctx->height);
+ return MFC_DEC_INIT_FAIL;
+ }
+ } else {
+ if (ctx->width > MAX_HOR_SIZE) {
+ mfc_err("Not support resolution: %dx%d\n",
+ ctx->width, ctx->height);
+ return MFC_DEC_INIT_FAIL;
+ }
+ }
+
+ exe_arg->out_img_width = ctx->width;
+ exe_arg->out_img_height = ctx->height;
+ exe_arg->out_buf_width = ALIGN(ctx->width, ALIGN_W);
+ exe_arg->out_buf_height = ALIGN(ctx->height, ALIGN_H);
+
+ /*
+ * allocate & set DPBs
+ */
+ if (ctx->c_ops->set_dpbs) {
+ if (ctx->c_ops->set_dpbs(ctx) < 0)
+ return MFC_DEC_INIT_FAIL;
+ }
+
+ ret = mfc_cmd_init_buffers(ctx);
+ if (ret < 0)
+ return ret;
+
+ return MFC_OK;
+}
+
+int mfc_check_resolution_change(struct mfc_inst_ctx *ctx, struct mfc_dec_exe_arg *exe_arg)
+{
+ int resol_status;
+
+ if (exe_arg->out_display_status != DISP_S_DECODING)
+ return 0;
+
+ resol_status = (read_reg(MFC_SI_DISPLAY_STATUS) >> DISP_RC_SHIFT) & DISP_RC_MASK;
+
+ if (resol_status == DISP_RC_INC || resol_status == DISP_RC_DEC) {
+ ctx->resolution_status = RES_SET_CHANGE;
+ mfc_dbg("Change Resolution status: %d\n", resol_status);
+ }
+
+ return 0;
+}
+
+static int mfc_decoding_frame(struct mfc_inst_ctx *ctx, struct mfc_dec_exe_arg *exe_arg, int *consumed)
+{
+ int start_ofs = *consumed;
+ int display_luma_addr;
+ int display_chroma_addr;
+ int display_frame_type;
+ int display_frame_tag;
+ unsigned char *stream_vir;
+ int ret;
+ struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
+ unsigned long mem_ofs;
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+ void *ump_handle;
+#endif
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ if (!ctx->drm_flag) {
+#endif
+ /* Check Frame Start code */
+ stream_vir = phys_to_virt(exe_arg->in_strm_buf + start_ofs);
+ ret = CheckDecStartCode(stream_vir, exe_arg->in_strm_size,
+ exe_arg->in_codec_type);
+ if (ret < 0) {
+ mfc_err("Frame Check start Code Failed\n");
+ /* FIXME: Need to define proper error */
+ return MFC_FRM_BUF_SIZE_FAIL;
+ }
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ }
+#endif
+
+ /* Set Frame Tag */
+ write_shm(ctx, dec_ctx->frametag, SET_FRAME_TAG);
+
+ /* FIXME: */
+ write_reg(0xFFFFFFFF, MFC_SI_CH1_RELEASE_BUF);
+ if (dec_ctx->dpbflush) {
+ unsigned int reg;
+ reg = read_reg(MFC_SI_CH1_DPB_CONF_CTRL);
+ reg &= ~(1<<14);
+ reg |= (1<<14);
+ write_reg(reg, MFC_SI_CH1_DPB_CONF_CTRL); /* for DPB Flush*/
+ /*clear dbp flush in context*/
+ dec_ctx->dpbflush = 0;
+ }
+
+ mem_ofs = mfc_mem_ext_ofs(exe_arg->in_strm_buf, exe_arg->in_strm_size,
+ PORT_A);
+ if (mem_ofs < 0)
+ return MFC_DEC_EXE_ERR;
+ else
+ mfc_set_stream_info(ctx, mem_ofs >> 11, exe_arg->in_strm_size,
+ start_ofs);
+
+ /* lastframe: mfc_dec_cfg */
+ ret = mfc_cmd_frame_start(ctx);
+ if (ret < 0)
+ return ret;
+
+ if (ctx->c_ops->post_frame_start) {
+ if (ctx->c_ops->post_frame_start(ctx) < 0)
+ return MFC_DEC_EXE_ERR;
+ }
+
+ /* update display status information */
+ dec_ctx->dispstatus = read_reg(MFC_SI_DISPLAY_STATUS) & DISP_S_MASK;
+
+ /* get decode status, frame type */
+ dec_ctx->decstatus = read_reg(MFC_SI_DECODE_STATUS) & DEC_S_MASK;
+ dec_ctx->decframetype = read_reg(MFC_SI_FRAME_TYPE) & DEC_FRM_MASK;
+
+ if (dec_ctx->dispstatus == DISP_S_DECODING) {
+ display_luma_addr = 0;
+ display_chroma_addr = 0;
+
+ display_frame_type = DISP_FRM_X;
+ display_frame_tag = read_shm(ctx, GET_FRAME_TAG_TOP);
+ } else {
+ display_luma_addr = read_reg(MFC_SI_DISPLAY_Y_ADR);
+ display_chroma_addr = read_reg(MFC_SI_DISPLAY_C_ADR);
+
+ display_frame_type = get_disp_frame_type();
+ display_frame_tag = read_shm(ctx, GET_FRAME_TAG_TOP);
+
+ if (dec_ctx->ispackedpb) {
+ if ((dec_ctx->decframetype == DEC_FRM_P) || (dec_ctx->decframetype == DEC_FRM_I)) {
+ if (display_frame_type == DISP_FRM_N)
+ display_frame_type = dec_ctx->predispframetype;
+ } else {
+ if (dec_ctx->predisplumaaddr != 0) {
+ display_luma_addr = dec_ctx->predisplumaaddr;
+ display_chroma_addr = dec_ctx->predispchromaaddr;
+ display_frame_type = dec_ctx->predispframetype;
+ /* over write frame tag */
+ display_frame_tag = dec_ctx->predispframetag;
+ }
+ }
+
+ /* save the display addr */
+ dec_ctx->predisplumaaddr = read_reg(MFC_SI_DISPLAY_Y_ADR);
+ dec_ctx->predispchromaaddr = read_reg(MFC_SI_DISPLAY_C_ADR);
+
+ /* save the display frame type */
+ if (get_disp_frame_type() != DISP_FRM_N) {
+ dec_ctx->predispframetype = get_disp_frame_type();
+ /* Set Frame Tag */
+ dec_ctx->predispframetag =
+ read_shm(ctx, GET_FRAME_TAG_TOP);
+ }
+
+ mfc_dbg("pre_luma_addr: 0x%08x, pre_chroma_addr:"
+ "0x%08x, pre_disp_frame_type: %d\n",
+ (dec_ctx->predisplumaaddr << 11),
+ (dec_ctx->predispchromaaddr << 11),
+ dec_ctx->predispframetype);
+ }
+ }
+
+ /* handle ImmeidatelyDisplay for Seek, I frame only */
+ if (dec_ctx->immediatelydisplay) {
+ mfc_dbg("Immediately display\n");
+ dec_ctx->dispstatus = dec_ctx->decstatus;
+ /* update frame tag information with current ID */
+ exe_arg->out_frametag_top = dec_ctx->frametag;
+ /* FIXME : need to check this */
+ exe_arg->out_frametag_bottom = 0;
+
+ if (dec_ctx->decstatus == DEC_S_DD) {
+ mfc_dbg("Immediately display status: DEC_S_DD\n");
+ display_luma_addr = read_reg(MFC_SI_DECODE_Y_ADR);
+ display_chroma_addr = read_reg(MFC_SI_DECODE_C_ADR);
+ }
+
+ display_frame_type = dec_ctx->decframetype;
+
+ /* clear Immediately Display in decode context */
+ dec_ctx->immediatelydisplay = 0;
+ } else {
+ /* Get Frame Tag top and bottom */
+ exe_arg->out_frametag_top = display_frame_tag;
+ exe_arg->out_frametag_bottom = read_shm(ctx, GET_FRAME_TAG_BOT);
+ }
+
+ mfc_dbg("decode y: 0x%08x, c: 0x%08x\n",
+ read_reg(MFC_SI_DECODE_Y_ADR) << 11,
+ read_reg(MFC_SI_DECODE_C_ADR) << 11);
+
+ exe_arg->out_display_status = dec_ctx->dispstatus;
+
+ exe_arg->out_display_Y_addr = (display_luma_addr << 11);
+ exe_arg->out_display_C_addr = (display_chroma_addr << 11);
+
+ exe_arg->out_disp_pic_frame_type = display_frame_type;
+
+ exe_arg->out_y_offset = mfc_mem_data_ofs(display_luma_addr << 11, 1);
+ exe_arg->out_c_offset = mfc_mem_data_ofs(display_chroma_addr << 11, 1);
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ exe_arg->out_y_secure_id = 0;
+ exe_arg->out_c_secure_id = 0;
+
+ ump_handle = mfc_get_buf_ump_handle(out_display_Y_addr << 11);
+ if (ump_handle != NULL)
+ exe_arg->out_y_secure_id = mfc_ump_get_id(ump_handle);
+
+ ump_handle = mfc_get_buf_ump_handle(out_display_C_addr << 11);
+ if (ump_handle != NULL)
+ exe_arg->out_c_secure_id = mfc_ump_get_id(ump_handle);
+
+ mfc_dbg("secure IDs Y: 0x%08x, C:0x%08x\n", exe_arg->out_y_secure_id,
+ exe_arg->out_c_secure_id);
+#elif defined(CONFIG_S5P_VMEM)
+ exe_arg->out_y_cookie = s5p_getcookie((void *)(out_display_Y_addr << 11));
+ exe_arg->out_c_cookie = s5p_getcookie((void *)(out_display_C_addr << 11));
+
+ mfc_dbg("cookie Y: 0x%08x, C:0x%08x\n", exe_arg->out_y_cookie,
+ exe_arg->out_c_cookie);
+#endif
+
+ exe_arg->out_pic_time_top = read_shm(ctx, PIC_TIME_TOP);
+ exe_arg->out_pic_time_bottom = read_shm(ctx, PIC_TIME_BOT);
+
+ exe_arg->out_consumed_byte = read_reg(MFC_SI_FRM_COUNT);
+
+ if (ctx->codecid == H264_DEC) {
+ exe_arg->out_crop_right_offset = (read_shm(ctx, CROP_INFO1) >> 16) & 0xFFFF;
+ exe_arg->out_crop_left_offset = read_shm(ctx, CROP_INFO1) & 0xFFFF;
+ exe_arg->out_crop_bottom_offset = (read_shm(ctx, CROP_INFO2) >> 16) & 0xFFFF;
+ exe_arg->out_crop_top_offset = read_shm(ctx, CROP_INFO2) & 0xFFFF;
+
+ mfc_dbg("crop info t: %d, r: %d, b: %d, l: %d\n",
+ exe_arg->out_crop_top_offset,
+ exe_arg->out_crop_right_offset,
+ exe_arg->out_crop_bottom_offset,
+ exe_arg->out_crop_left_offset);
+ }
+/*
+ mfc_dbg("decode frame type: %d\n", dec_ctx->decframetype);
+ mfc_dbg("display frame type: %d\n", exe_arg->out_disp_pic_frame_type);
+ mfc_dbg("display y: 0x%08x, c: 0x%08x\n",
+ exe_arg->out_display_Y_addr, exe_arg->out_display_C_addr);
+ */
+
+ mfc_dbg("decode frame type: %d\n", dec_ctx->decframetype);
+ mfc_dbg("display frame type: %d,%d\n",
+ exe_arg->out_disp_pic_frame_type,
+ exe_arg->out_frametag_top);
+ mfc_dbg("display y: 0x%08x, c: 0x%08x\n",
+ exe_arg->out_display_Y_addr,
+ exe_arg->out_display_C_addr);
+
+ *consumed = read_reg(MFC_SI_FRM_COUNT);
+ mfc_dbg("stream size: %d, consumed: %d\n",
+ exe_arg->in_strm_size, *consumed);
+
+ return MFC_OK;
+}
+
/*
 * Top-level decode entry point for IOCTL execution.
 *
 * Decodes one frame, then handles two follow-ups:
 *  - a hardware-reported resolution change (re-decode or re-init DPBs);
 *  - packed-PB streams, where one buffer may hold a second picture that
 *    is decoded in an immediate second pass.
 *
 * @ctx:  instance context
 * @args: ioctl argument union, interpreted as struct mfc_dec_exe_arg
 *
 * Returns the result of the last mfc_decoding_frame()/
 * mfc_change_resolution() call.
 */
int mfc_exec_decoding(struct mfc_inst_ctx *ctx, union mfc_args *args)
{
	struct mfc_dec_exe_arg *exe_arg;
	int ret;
	int consumed = 0;
	struct mfc_dec_ctx *dec_ctx = (struct mfc_dec_ctx *)ctx->c_priv;
	int sec_try_tag; /* tag store for second try */

	exe_arg = (struct mfc_dec_exe_arg *)args;

	/* set pre-decoding informations */
	dec_ctx->streamaddr = exe_arg->in_strm_buf;
	dec_ctx->streamsize = exe_arg->in_strm_size;
	dec_ctx->frametag = exe_arg->in_frametag;
	dec_ctx->immediatelydisplay = exe_arg->in_immediately_disp;

	mfc_set_inst_state(ctx, INST_STATE_EXE);

	ret = mfc_decoding_frame(ctx, exe_arg, &consumed);
	/* remember the first pass' tag: a packed-PB second pass overwrites it */
	sec_try_tag = exe_arg->out_frametag_top;

	mfc_set_inst_state(ctx, INST_STATE_EXE_DONE);

	if (ret == MFC_OK) {
		/* may arm ctx->resolution_status for the next branch */
		mfc_check_resolution_change(ctx, exe_arg);
		if (ctx->resolution_status == RES_SET_CHANGE) {
			/* re-decode the frame under the new sequence header */
			ret = mfc_decoding_frame(ctx, exe_arg, &consumed);
		} else if ((ctx->resolution_status == RES_WAIT_FRAME_DONE) &&
			(exe_arg->out_display_status == DISP_S_FINISH)) {
			/* all old-resolution frames displayed: rebuild DPBs */
			exe_arg->out_display_status = DISP_S_RES_CHANGE;
			ret = mfc_change_resolution(ctx, exe_arg);
			if (ret != MFC_OK)
				return ret;
			ctx->resolution_status = RES_NO_CHANGE;
		}

		/*
		 * Packed PB: if a P frame left more than a start-code's worth
		 * of data unconsumed, the buffer holds a second picture —
		 * decode it now in a second pass.
		 */
		if ((dec_ctx->ispackedpb) &&
			(dec_ctx->decframetype == DEC_FRM_P) &&
			(exe_arg->in_strm_size - consumed > 4)) {
			unsigned char *stream_vir;
			int offset = 0;

			mfc_dbg("[%s] strmsize : %d consumed : %d\n", __func__,
				exe_arg->in_strm_size, consumed);

			stream_vir = phys_to_virt(exe_arg->in_strm_buf);
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
			if (!ctx->drm_flag)
#endif
				/* invalidate so the CPU sees the H/W-written data */
				mfc_mem_cache_inv((void *)stream_vir,
					exe_arg->in_strm_size);

			offset = CheckMPEG4StartCode(stream_vir+consumed,
					dec_ctx->streamsize - consumed);
			if (offset > 4)
				consumed += offset;

			exe_arg->in_strm_size -= consumed;
			dec_ctx->frametag = exe_arg->in_frametag;
			dec_ctx->immediatelydisplay =
				exe_arg->in_immediately_disp;

			mfc_set_inst_state(ctx, INST_STATE_EXE);

			ret = mfc_decoding_frame(ctx, exe_arg, &consumed);
			/* restore the first pass' frame tag */
			exe_arg->out_frametag_top = sec_try_tag;

			mfc_set_inst_state(ctx, INST_STATE_EXE_DONE);
		}
	}

	/*
	if (ctx->c_ops->set_dpbs) {
		if (ctx->c_ops->set_dpbs(ctx) < 0)
			return MFC_DEC_INIT_FAIL;
	}
	*/

	return ret;
}
diff --git a/drivers/media/video/samsung/mfc5x/mfc_dec.h b/drivers/media/video/samsung/mfc5x/mfc_dec.h
new file mode 100644
index 0000000..f59795b
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_dec.h
@@ -0,0 +1,223 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_dec.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Decoder interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_DEC_H
+#define __MFC_DEC_H __FILE__
+
+#include <linux/list.h>
+
+#include "mfc.h"
+#include "mfc_interface.h"
+#include "mfc_inst.h"
+
/* --- MFC_SI_DISPLAY_STATUS register bit fields --- */
/* cropping information present */
#define DISP_CROP_MASK		0x1
#define DISP_CROP_SHIFT		6

/* resolution change reported by the decoder */
#define DISP_RC_MASK		0x3
#define DISP_RC_SHIFT		4
#define DISP_RC_NO		0
#define DISP_RC_INC		1
#define DISP_RC_DEC		2

/* progressive/interlaced flag (macro name spells "INTERFACE" upstream) */
#define DISP_PI_MASK		0x1
#define DISP_PI_SHIFT		3
#define DISP_PI_PROGRESSIVE	0
#define DISP_PI_INTERFACE	1

/* display status, low 3 bits of MFC_SI_DISPLAY_STATUS */
#define DISP_S_MASK		0x7
enum disp_status {
	DISP_S_DECODING = 0,
	DISP_S_DD = 1,		/* presumably decode-and-display — confirm */
	DISP_S_DISPLAY = 2,
	DISP_S_FINISH = 3,
	DISP_S_RES_CHANGE = 4,	/* not H/W bit */
};

/* --- MFC_SI_DECODE_STATUS register bit fields --- */
/* CRC generated */
#define DEC_CRC_G_MASK		0x1
#define DEC_CRC_G_SHIFT		5

/* number of CRC words */
#define DEC_CRC_N_MASK		0x1
#define DEC_CRC_N_SHIFT		4
#define DEC_CRC_TWO		0
#define DEC_CRC_FOUR		1

/* progressive/interlaced flag (macro name spells "INTERFACE" upstream) */
#define DEC_PI_MASK		0x1
#define DEC_PI_SHIFT		3
#define DEC_PI_PROGRESSIVE	0
#define DEC_PI_INTERFACE	1

/* decode status, low 3 bits of MFC_SI_DECODE_STATUS */
#define DEC_S_MASK		0x7
enum dec_status {
	DEC_S_DECODING = 0,
	DEC_S_DD = 1,		/* presumably decode-and-display — confirm */
	DEC_S_DISPLAY = 2,
	DEC_S_FINISH = 3,
	DEC_S_NO = 4,
};

/* decode frame type in SFR (MFC_SI_FRAME_TYPE) */
#define DEC_FRM_MASK		0x7
enum dec_frame {
	DEC_FRM_N = 0,
	DEC_FRM_I = 1,
	DEC_FRM_P = 2,
	DEC_FRM_B = 3,
	DEC_FRM_OTHER = 4,
};

/* display frame type in SHM (DISP_PIC_FRAME_TYPE) */
#define DISP_IDR_MASK		0x1
#define DISP_IDR_SHIFT		5

#define DISP_FRM_MASK		0x7
#define DISP_FRM_SHIFT		2
enum disp_frame {
	DISP_FRM_X = -1,	/* not H/W bit */
	DISP_FRM_N = 0,
	DISP_FRM_I = 1,
	DISP_FRM_P = 2,
	DISP_FRM_B = 3,
	DISP_FRM_OTHER = 4,
};
/* NOTE: expands in place and requires a local 'ctx' variable at the call site */
#define get_disp_frame_type() ((read_shm(ctx, DISP_PIC_FRAME_TYPE) >> DISP_FRM_SHIFT) & DISP_FRM_MASK)

#define DISP_CODED_MASK		0x3

/* pixel cache usage policy */
enum dec_pc {
	DPC_ONLY_P = 0,
	DPC_ONLY_B = 1,
	DPC_BOTH_P_B = 2,
	DPC_DISABLE = 3,
};

/*
 * Per-instance decoder state, hung off mfc_inst_ctx.c_priv.
 * NOTE(review): the one-letter field tags appear to mean
 *   I = supplied by user input, H = read back from hardware,
 *   C = calculated by the driver
 * — TODO confirm against mfc_dec.c usage.
 */
struct mfc_dec_ctx {
	unsigned int lumasize;		/* C */
	unsigned int chromasize;	/* C */

	/* init */
	unsigned int crc;		/* I */
	enum dec_pc pixelcache;		/* I */
	unsigned int slice;		/* I */

	unsigned int numextradpb;	/* I */
	unsigned int nummindpb;		/* H */
	unsigned int numtotaldpb;	/* C */

	unsigned int level;		/* H */
	unsigned int profile;		/* H */

	/* init | exec */
	unsigned long streamaddr;	/* I */
	unsigned int streamsize;	/* I */
	unsigned int frametag;		/* I */

	/* exec */
	unsigned int consumed;		/* H */
	/* previously displayed picture, kept for packed-PB handling */
	int predisplumaaddr;		/* H */
	int predispchromaaddr;		/* H */
	int predispframetype;		/* H */
	int predispframetag;		/* H */

	enum dec_frame decframetype;	/* H */

	enum disp_status dispstatus;	/* H */
	enum dec_status decstatus;	/* H */

	unsigned int lastframe;		/* I */

	unsigned int dpbflush;		/* I */
	/* etc */
	unsigned int immediatelydisplay;

	/* init | exec */
	unsigned int ispackedpb;	/* I */

	/* codec-specific private data (mfc_dec_h264/mpeg4/fimv1 below) */
	void *d_priv;
};

/* decoder private data */
struct mfc_dec_h264 {
	/* init */
	unsigned int mvsize;		/* C */

	unsigned int dispdelay_en;	/* I */
	unsigned int dispdelay_val;	/* I */

	/* init | exec */
	unsigned int crop_r_ofs;	/* H */
	unsigned int crop_l_ofs;	/* H */
	unsigned int crop_b_ofs;	/* H */
	unsigned int crop_t_ofs;	/* H */

	unsigned int sei_parse;		/* H */
	struct mfc_frame_packing fp;	/* H */
};

struct mfc_dec_mpeg4 {
	/* init */
	unsigned int postfilter;	/* I */

	unsigned int aspect_ratio;	/* H */
	unsigned int ext_par_width;	/* H */
	unsigned int ext_par_height;	/* H */

	/* init | exec */
	unsigned int packedpb;		/* I */
};

struct mfc_dec_fimv1 {
	/* init */
	unsigned int postfilter;	/* I */

	unsigned int aspect_ratio;	/* H */
	unsigned int ext_par_width;	/* H */
	unsigned int ext_par_height;	/* H */

	/* FIMV1 carries no in-stream resolution; user supplies it */
	unsigned int width;		/* I */
	unsigned int height;		/* I */

	/* init | exec */
	unsigned int packedpb;		/* I */
};

int mfc_init_decoding(struct mfc_inst_ctx *ctx, union mfc_args *args);
/*
int mfc_init_decoding(struct mfc_inst_ctx *ctx, struct mfc_dec_init_arg *init_arg);
*/
int mfc_exec_decoding(struct mfc_inst_ctx *ctx, union mfc_args *args);
/*
int mfc_exec_decoding(struct mfc_inst_ctx *ctx, struct mfc_dec_exe_arg *exe_arg);
*/

/*---------------------------------------------------------------------------*/

/* registry entry describing one decoder implementation (see mfc_decoders) */
struct mfc_dec_info {
	struct list_head list;
	const char *name;
	SSBSIP_MFC_CODEC_TYPE codectype;
	int codecid;
	/* size of the codec-specific private struct to allocate */
	unsigned int d_priv_size;

	const struct codec_operations c_ops;
};

void mfc_init_decoders(void);

#endif /* __MFC_DEC_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_dev.c b/drivers/media/video/samsung/mfc5x/mfc_dev.c
new file mode 100644
index 0000000..e3a0b60
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_dev.c
@@ -0,0 +1,1684 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_dev.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Driver interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/firmware.h>
+#include <linux/proc_fs.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/clk.h>
+#endif
+#include <linux/pm_qos_params.h>
+
+#ifdef CONFIG_BUSFREQ_OPP
+#include <mach/busfreq_exynos4.h>
+#endif
+
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+#include <mach/dev.h>
+#endif
+#include <plat/cpu.h>
+
+#if defined(CONFIG_BUSFREQ) || defined(CONFIG_EXYNOS4_CPUFREQ)
+#include <mach/cpufreq.h>
+#endif
+#include <mach/regs-pmu.h>
+
+#include <asm/uaccess.h>
+
+#include "mfc_dev.h"
+#include "mfc_interface.h"
+#include "mfc_reg.h"
+#include "mfc_log.h"
+#include "mfc_ctrl.h"
+#include "mfc_buf.h"
+#include "mfc_inst.h"
+#include "mfc_pm.h"
+#include "mfc_dec.h"
+#include "mfc_enc.h"
+#include "mfc_mem.h"
+#include "mfc_cmd.h"
+
+#ifdef SYSMMU_MFC_ON
+#include <plat/sysmmu.h>
+#endif
+
/* fixed misc-device minor number for the MFC device node */
#define MFC_MINOR	252
/* firmware image fetched via request_firmware() on first open */
#define MFC_FW_NAME	"mfc_fw.bin"

/* single global device instance — presumably set at probe; confirm below */
static struct mfc_dev *mfcdev;
/* /proc directory entry for MFC statistics */
static struct proc_dir_entry *mfc_proc_entry;

#define MFC_PROC_ROOT			"mfc"
#define MFC_PROC_TOTAL_INSTANCE_NUMBER	"total_instance_number"

#ifdef CONFIG_BUSFREQ
/* bus QoS request held while at least one instance is open */
static struct pm_qos_request_list bus_qos_pm_qos_req;
#endif
+
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
#define MFC_DRM_MAGIC_SIZE	0x10
#define MFC_DRM_MAGIC_CHUNK0	0x13cdbf16
#define MFC_DRM_MAGIC_CHUNK1	0x8b803342
#define MFC_DRM_MAGIC_CHUNK2	0x5e87f4f5
#define MFC_DRM_MAGIC_CHUNK3	0x3bd05317

/*
 * Look for the 16-byte DRM magic at offset 0x0 or 0x10 of @addr.
 * Returns the offset where the magic was found (0 or 0x10), or -1.
 */
static int check_magic(unsigned char *addr)
{
	const u32 chunk[4] = {
		MFC_DRM_MAGIC_CHUNK0,
		MFC_DRM_MAGIC_CHUNK1,
		MFC_DRM_MAGIC_CHUNK2,
		MFC_DRM_MAGIC_CHUNK3,
	};
	int ofs;
	int i;

	for (ofs = 0; ofs <= 0x10; ofs += 0x10) {
		for (i = 0; i < 4; i++) {
			if (*(u32 *)(addr + ofs + (i << 2)) != chunk[i])
				break;
		}
		if (i == 4)
			return ofs;
	}

	return -1;
}

/* wipe the consumed DRM magic so it is not detected again */
static inline void clear_magic(unsigned char *addr)
{
	memset(addr, 0, MFC_DRM_MAGIC_SIZE);
}
#endif
+
+static int get_free_inst_id(struct mfc_dev *dev)
+{
+ int slot = 0;
+
+ while (dev->inst_ctx[slot]) {
+ slot++;
+ if (slot >= MFC_MAX_INSTANCE_NUM)
+ return -1;
+ }
+
+ return slot;
+}
+
/*
 * Character-device open: loads/reloads firmware as needed, powers up and
 * initializes the H/W for the first instance, then creates and registers
 * a new instance context in file->private_data.
 *
 * Serialized by mfcdev->lock.  Returns 0 on success or a negative errno.
 */
static int mfc_open(struct inode *inode, struct file *file)
{
	struct mfc_inst_ctx *mfc_ctx;
	int ret;
	enum mfc_ret_code retcode;
	int inst_id;
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
	struct mfc_alloc_buffer *alloc;
#endif

	/* prevent invalid reference */
	file->private_data = NULL;

	mutex_lock(&mfcdev->lock);
#if SUPPORT_SLICE_ENCODING
	mfcdev->frame_working_flag = 1;
	mfcdev->frame_sys = 0;
#endif

	/* first-ever open: fetch the firmware image from userspace helper */
	if (!mfcdev->fw.state) {
		if (mfcdev->fw.requesting) {
			printk(KERN_INFO "MFC F/W request is on-going, try again\n");
			ret = -ENODEV;
			goto err_fw_state;
		}

		printk(KERN_INFO "MFC F/W is not existing, requesting...\n");
		ret = request_firmware(&mfcdev->fw.info, MFC_FW_NAME, mfcdev->device);

		if (ret < 0) {
			printk(KERN_INFO "failed to copy MFC F/W during open\n");
			ret = -ENODEV;
			goto err_fw_state;
		}

		/* 4212/4412 load the F/W once here; 4210 reloads per first open below */
		if (soc_is_exynos4212() || soc_is_exynos4412()) {
			mfcdev->fw.state = mfc_load_firmware(mfcdev->fw.info->data, mfcdev->fw.info->size);
			if (!mfcdev->fw.state) {
				printk(KERN_ERR "failed to load MFC F/W, MFC will not working\n");
				ret = -ENODEV;
				goto err_fw_state;
			} else {
				printk(KERN_INFO "MFC F/W loaded successfully (size: %d)\n", mfcdev->fw.info->size);
			}
		}
	}

	/* first open of an instance: power up and initialize the H/W */
	if (atomic_read(&mfcdev->inst_cnt) == 0) {
		/* reload F/W for first instance again */
		if (soc_is_exynos4210()) {
			mfcdev->fw.state = mfc_load_firmware(mfcdev->fw.info->data, mfcdev->fw.info->size);
			if (!mfcdev->fw.state) {
				printk(KERN_ERR "failed to reload MFC F/W, MFC will not working\n");
				ret = -ENODEV;
				goto err_fw_state;
			} else {
				printk(KERN_INFO "MFC F/W reloaded successfully (size: %d)\n", mfcdev->fw.info->size);
			}
		}

#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
		dev_lock(mfcdev->bus_dev, mfcdev->device, 133133);
#endif
#ifdef CONFIG_BUSFREQ
		pm_qos_add_request(&bus_qos_pm_qos_req, PM_QOS_BUS_QOS, 1);
#endif

		ret = mfc_power_on();
		if (ret < 0) {
			mfc_err("power enable failed\n");
			goto err_pwr_enable;
		}

#ifndef CONFIG_PM_RUNTIME
#ifdef SYSMMU_MFC_ON
		mfc_clock_on(mfcdev);

		s5p_sysmmu_enable(mfcdev->device);

#ifdef CONFIG_VIDEO_MFC_VCM_UMP
		vcm_set_pgtable_base(VCM_DEV_MFC);
#else /* CONFIG_S5P_VMEM or kernel virtual memory allocator */
		s5p_sysmmu_set_tablebase_pgd(mfcdev->device,
			__pa(swapper_pg_dir));

		/*
		 * RMVME: the power-gating work really (on <-> off),
		 * all TBL entry was invalidated already when the power off
		 */
		s5p_sysmmu_tlb_invalidate(mfcdev->device, SYSMMU_MFC_R);
#endif
		mfc_clock_off(mfcdev);
#endif
#endif
		/* MFC hardware initialization */
		retcode = mfc_start(mfcdev);
		if (retcode != MFC_OK) {
			mfc_err("MFC H/W init failed: %d\n", retcode);
			ret = -ENODEV;
			goto err_start_hw;
		}
	}

	if (atomic_read(&mfcdev->inst_cnt) >= MFC_MAX_INSTANCE_NUM) {
		mfc_err("exceed max instance number, too many instance opened already\n");
		ret = -EINVAL;
		goto err_inst_cnt;
	}

	inst_id = get_free_inst_id(mfcdev);
	if (inst_id < 0) {
		mfc_err("failed to get instance ID\n");
		ret = -EINVAL;
		goto err_inst_id;
	}

	mfc_ctx = mfc_create_inst();
	if (!mfc_ctx) {
		mfc_err("failed to create instance context\n");
		ret = -ENOMEM;
		goto err_inst_ctx;
	}

	atomic_inc(&mfcdev->inst_cnt);
	mfcdev->inst_ctx[inst_id] = mfc_ctx;

#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
	/* the DRM userspace signals a protected session via a magic pattern */
	if (check_magic(mfcdev->drm_info.addr) >= 0) {
		mfc_info("DRM instance starting\n");
		clear_magic(mfcdev->drm_info.addr + check_magic(mfcdev->drm_info.addr));
		mfc_ctx->drm_flag = 1;
		mfc_set_buf_alloc_scheme(MBS_FIRST_FIT);
	} else {
		mfc_ctx->drm_flag = 0;
	}
#endif

	mfc_ctx->id = inst_id;
	mfc_ctx->dev = mfcdev;

#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
	/* DRM instances pre-allocate their codec context buffer here */
	if (mfc_ctx->drm_flag) {
		alloc = _mfc_alloc_buf(mfc_ctx, MFC_CTX_SIZE_L, ALIGN_2KB, MBT_CTX | PORT_A);
		if (alloc == NULL) {
			mfc_err("failed to alloc context buffer\n");
			ret = -ENOMEM;
			goto err_drm_ctx;
		}

		mfc_ctx->ctxbufofs = mfc_mem_base_ofs(alloc->real) >> 11;
		mfc_ctx->ctxbufsize = alloc->size;
		memset((void *)alloc->addr, 0, alloc->size);
		mfc_mem_cache_clean((void *)alloc->addr, alloc->size);
	}
#endif

	file->private_data = (struct mfc_inst_ctx *)mfc_ctx;

#if SUPPORT_SLICE_ENCODING
	if (atomic_read(&mfcdev->inst_cnt) == 1) {
		mfcdev->slice_encoding_flag = 0;
		mfcdev->slice_sys = 0;
		mfcdev->wait_slice_timeout = 0;
		mfcdev->wait_frame_timeout = 0;
	}
	mfc_ctx->slice_flag = 0;
	mfcdev->frame_sys = 1;
	mfcdev->frame_working_flag = 0;
	if (mfcdev->wait_frame_timeout == 1)
		wake_up(&mfcdev->wait_frame);
#endif

	mfc_info("MFC instance [%d:%d] opened", mfc_ctx->id,
		atomic_read(&mfcdev->inst_cnt));

	mutex_unlock(&mfcdev->lock);

	return 0;

#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
/*
 * NOTE(review): this path does not undo the atomic_inc(inst_cnt),
 * the inst_ctx[] registration or mfc_create_inst() above — looks like
 * a leak of the instance on DRM context-buffer failure; confirm.
 */
err_drm_ctx:
#endif
err_inst_ctx:
err_inst_id:
err_inst_cnt:
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
#endif
err_start_hw:
	if (atomic_read(&mfcdev->inst_cnt) == 0) {
		if (mfc_power_off() < 0)
			mfc_err("power disable failed\n");
	}

err_pwr_enable:
#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
	dev_unlock(mfcdev->bus_dev, mfcdev->device);
#endif

err_fw_state:
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
#endif
	mutex_unlock(&mfcdev->lock);

	return ret;
}
+
+/*
+ * mfc_release - file release handler: tear down one codec instance.
+ *
+ * Drops any bus/CPU frequency locks the instance took, restores the DMC
+ * threshold, destroys the instance context and powers the IP down when
+ * the last instance goes away. Runs entirely under dev->lock.
+ *
+ * Fix vs. original: the unsupported-SoC branch under CONFIG_BUSFREQ_OPP
+ * returned -EINVAL directly while dev->lock was still held, leaking the
+ * mutex; it now exits through the common unlock path. The stray uses of
+ * the global `mfcdev` were also normalized to `dev` (same object,
+ * consistent with the rest of the function).
+ */
+static int mfc_release(struct inode *inode, struct file *file)
+{
+	struct mfc_inst_ctx *mfc_ctx;
+	struct mfc_dev *dev;
+	int ret;
+
+	mfc_ctx = (struct mfc_inst_ctx *)file->private_data;
+	if (!mfc_ctx)
+		return -EINVAL;
+
+	dev = mfc_ctx->dev;
+
+	mutex_lock(&dev->lock);
+#if SUPPORT_SLICE_ENCODING
+	dev->frame_working_flag = 1;
+	dev->frame_sys = 0;
+	/* wait (unlocked) for an in-flight slice encode to finish */
+	if (dev->slice_encoding_flag == 1) {
+		mutex_unlock(&dev->lock);
+		dev->wait_slice_timeout = 1;
+		if (wait_event_timeout(dev->wait_slice, dev->slice_sys,
+			SLICE_ENC_TIMEOUT) == 0) {
+			mfc_err("Slice encoding done timeout : %d\n",
+				dev->slice_sys);
+			dev->slice_encoding_flag = 0;
+			dev->slice_sys = 0;
+			dev->wait_slice_timeout = 0;
+		}
+		mutex_lock(&dev->lock);
+		dev->slice_sys = 0;
+		dev->wait_slice_timeout = 0;
+	}
+#endif
+
+#if defined(CONFIG_BUSFREQ)
+	/* Release MFC & Bus Frequency lock for High resolution */
+	if (mfc_ctx->busfreq_flag == true) {
+		atomic_dec(&dev->busfreq_lock_cnt);
+		mfc_ctx->busfreq_flag = false;
+		if (atomic_read(&dev->busfreq_lock_cnt) == 0) {
+			/* release Freq lock back to normal */
+			exynos4_busfreq_lock_free(DVFS_LOCK_ID_MFC);
+			mfc_dbg("[%s] Bus Freq lock Released Normal!\n", __func__);
+		}
+	}
+#endif
+
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_EXYNOS4_CPUFREQ)
+	/* Release MFC & CPU Frequency lock for High resolution */
+	if (mfc_ctx->cpufreq_flag == true) {
+		atomic_dec(&dev->cpufreq_lock_cnt);
+		mfc_ctx->cpufreq_flag = false;
+		if (atomic_read(&dev->cpufreq_lock_cnt) == 0) {
+			/* release Freq lock back to normal */
+			exynos_cpufreq_lock_free(DVFS_LOCK_ID_MFC);
+			mfc_dbg("[%s] CPU Freq lock Released Normal!\n", __func__);
+		}
+	}
+#endif
+
+#ifdef CONFIG_BUSFREQ_OPP
+	if (mfc_ctx->dmcthreshold_flag == true) {
+		atomic_dec(&dev->dmcthreshold_lock_cnt);
+		mfc_ctx->dmcthreshold_flag = false;
+		if (atomic_read(&dev->dmcthreshold_lock_cnt) == 0) {
+			mfc_info("[%s] Restore dmc_max_threshold\n", __func__);
+			if (soc_is_exynos4212()) {
+				dmc_max_threshold =
+					EXYNOS4212_DMC_MAX_THRESHOLD;
+			} else if (soc_is_exynos4412()) {
+				dmc_max_threshold =
+					EXYNOS4412_DMC_MAX_THRESHOLD;
+			} else {
+				pr_err("Unsupported model.\n");
+				/* was: return -EINVAL, which leaked dev->lock */
+				ret = -EINVAL;
+				goto err_pwr_disable;
+			}
+		}
+	}
+#endif
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+	if (mfc_ctx->drm_flag) {
+		mfc_set_buf_alloc_scheme(MBS_FIRST_FIT);
+	}
+#endif
+	mfc_info("MFC instance [%d:%d] released\n", mfc_ctx->id,
+			atomic_read(&dev->inst_cnt));
+
+	file->private_data = NULL;
+
+	dev->inst_ctx[mfc_ctx->id] = NULL;
+	atomic_dec(&dev->inst_cnt);
+
+	mfc_destroy_inst(mfc_ctx);
+
+	if (atomic_read(&dev->inst_cnt) == 0) {
+		/* last instance gone: drop device-wide locks and power off */
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+		dev_unlock(dev->bus_dev, dev->device);
+#endif
+#ifdef CONFIG_BUSFREQ
+		pm_qos_remove_request(&bus_qos_pm_qos_req);
+#endif
+#if SUPPORT_SLICE_ENCODING
+		dev->slice_encoding_flag = 0;
+		dev->slice_sys = 0;
+		dev->wait_slice_timeout = 0;
+		dev->wait_frame_timeout = 0;
+#endif
+		ret = mfc_power_off();
+		if (ret < 0) {
+			mfc_err("power disable failed\n");
+			goto err_pwr_disable;
+		}
+	} else {
+#if defined(SYSMMU_MFC_ON) && !defined(CONFIG_VIDEO_MFC_VCM_UMP)
+		mfc_clock_on(dev);
+
+		s5p_sysmmu_tlb_invalidate(dev->device);
+
+		mfc_clock_off(dev);
+#endif
+	}
+
+	ret = 0;
+#if SUPPORT_SLICE_ENCODING
+	dev->frame_sys = 1;
+	dev->frame_working_flag = 0;
+	if (dev->wait_frame_timeout == 1)
+		wake_up(&dev->wait_frame);
+#endif
+
+err_pwr_disable:
+	mutex_unlock(&dev->lock);
+
+	return ret;
+}
+
+/* FIXME: add request firmware ioctl */
+/*
+ * mfc_ioctl - unlocked_ioctl handler dispatching all MFC user commands
+ * (decoder/encoder init and execute, buffer alloc/free, config get/set).
+ *
+ * The user argument is copied in once up front and copied back once at
+ * the end; each command takes/releases dev->lock around its work.
+ *
+ * Fixes vs. original:
+ *  - copy_from_user()/copy_to_user() return the (unsigned) number of
+ *    bytes NOT copied, never a negative value, so the old `< 0` checks
+ *    could never detect a failure; they are now `!= 0`.
+ *  - the copy_from_user() error path jumped to out_ioctl with dev->lock
+ *    still held; it now unlocks first.
+ *  - the five slice/frame wait timeout paths re-acquired dev->lock and
+ *    then `break`ed out of the switch, returning with the mutex held
+ *    (deadlocking the next ioctl); they now break without re-locking,
+ *    matching the lock-free out_ioctl exit path.
+ */
+static long mfc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+
+	struct mfc_inst_ctx *mfc_ctx;
+	int ret, ex_ret;
+	struct mfc_common_args in_param;
+	struct mfc_buf_alloc_arg buf_arg;
+	struct mfc_config_arg *cfg_arg;
+	int port;
+
+	struct mfc_dev *dev;
+	int i;
+
+	mfc_ctx = (struct mfc_inst_ctx *)file->private_data;
+	if (!mfc_ctx)
+		return -EINVAL;
+
+	dev = mfc_ctx->dev;
+
+	mutex_lock(&dev->lock);
+
+	ret = copy_from_user(&in_param, (struct mfc_common_args *)arg,
+			sizeof(struct mfc_common_args));
+	if (ret != 0) {
+		mfc_err("failed to copy parameters\n");
+		ret = -EIO;
+		in_param.ret_code = MFC_INVALID_PARAM_FAIL;
+		mutex_unlock(&dev->lock);
+		goto out_ioctl;
+	}
+
+	mutex_unlock(&dev->lock);
+
+	/* FIXME: add locking */
+
+	mfc_dbg("cmd: 0x%08x\n", cmd);
+
+	switch (cmd) {
+
+	case IOCTL_MFC_DEC_INIT:
+		mutex_lock(&dev->lock);
+#if SUPPORT_SLICE_ENCODING
+		dev->frame_working_flag = 1;
+		dev->frame_sys = 0;
+		if (dev->slice_encoding_flag == 1) {
+			mutex_unlock(&dev->lock);
+			dev->wait_slice_timeout = 1;
+			if (wait_event_timeout(dev->wait_slice, dev->slice_sys,
+				SLICE_ENC_TIMEOUT) == 0) {
+				mfc_err("Slice encoding done timeout : %d\n",
+					dev->slice_sys);
+				dev->slice_encoding_flag = 0;
+				dev->slice_sys = 0;
+				dev->wait_slice_timeout = 0;
+				ret = -EINVAL;
+
+				/* lock already dropped; exit unlocked */
+				break;
+			}
+			mutex_lock(&dev->lock);
+			dev->slice_sys = 0;
+			dev->wait_slice_timeout = 0;
+		}
+#endif
+		if (mfc_chk_inst_state(mfc_ctx, INST_STATE_CREATE) < 0) {
+			mfc_err("IOCTL_MFC_DEC_INIT invalid state: 0x%08x\n",
+				 mfc_ctx->state);
+			in_param.ret_code = MFC_STATE_INVALID;
+			ret = -EINVAL;
+
+			mutex_unlock(&dev->lock);
+			break;
+		}
+
+		mfc_clock_on(mfcdev);
+		in_param.ret_code = mfc_init_decoding(mfc_ctx, &(in_param.args));
+		ret = in_param.ret_code;
+		mfc_clock_off(mfcdev);
+#if SUPPORT_SLICE_ENCODING
+		dev->frame_sys = 1;
+		dev->frame_working_flag = 0;
+		if (dev->wait_frame_timeout == 1)
+			wake_up(&dev->wait_frame);
+#endif
+
+		mutex_unlock(&dev->lock);
+		break;
+
+	case IOCTL_MFC_ENC_INIT:
+		mutex_lock(&dev->lock);
+#if SUPPORT_SLICE_ENCODING
+		dev->frame_working_flag = 1;
+		dev->frame_sys = 0;
+		if (dev->slice_encoding_flag == 1) {
+			mutex_unlock(&dev->lock);
+			dev->wait_slice_timeout = 1;
+			if (wait_event_timeout(dev->wait_slice, dev->slice_sys,
+				SLICE_ENC_TIMEOUT) == 0) {
+				mfc_err("Slice encoding done timeout : %d\n",
+					dev->slice_sys);
+				dev->slice_encoding_flag = 0;
+				dev->slice_sys = 0;
+				dev->wait_slice_timeout = 0;
+				ret = -EINVAL;
+
+				/* lock already dropped; exit unlocked */
+				break;
+			}
+			mutex_lock(&dev->lock);
+			dev->slice_sys = 0;
+			dev->wait_slice_timeout = 0;
+		}
+#endif
+
+		if (mfc_chk_inst_state(mfc_ctx, INST_STATE_CREATE) < 0) {
+			mfc_err("IOCTL_MFC_ENC_INIT invalid state: 0x%08x\n",
+				 mfc_ctx->state);
+			in_param.ret_code = MFC_STATE_INVALID;
+			ret = -EINVAL;
+
+			mutex_unlock(&dev->lock);
+			break;
+		}
+
+		mfc_clock_on(mfcdev);
+		in_param.ret_code = mfc_init_encoding(mfc_ctx, &(in_param.args));
+		ret = in_param.ret_code;
+		mfc_clock_off(mfcdev);
+#if SUPPORT_SLICE_ENCODING
+		dev->frame_sys = 1;
+		dev->frame_working_flag = 0;
+		if (dev->wait_frame_timeout == 1)
+			wake_up(&dev->wait_frame);
+#endif
+
+		mutex_unlock(&dev->lock);
+		break;
+
+	case IOCTL_MFC_DEC_EXE:
+		mutex_lock(&dev->lock);
+#if SUPPORT_SLICE_ENCODING
+		dev->frame_working_flag = 1;
+		dev->frame_sys = 0;
+		if (dev->slice_encoding_flag == 1) {
+			mutex_unlock(&dev->lock);
+			dev->wait_slice_timeout = 1;
+			if (wait_event_timeout(dev->wait_slice, dev->slice_sys,
+				SLICE_ENC_TIMEOUT) == 0) {
+				mfc_err("Slice encoding done timeout : %d\n",
+					dev->slice_sys);
+				dev->slice_encoding_flag = 0;
+				dev->slice_sys = 0;
+				dev->wait_slice_timeout = 0;
+				ret = -EINVAL;
+
+				/* lock already dropped; exit unlocked */
+				break;
+			}
+			mutex_lock(&dev->lock);
+			dev->slice_sys = 0;
+			dev->wait_slice_timeout = 0;
+		}
+#endif
+
+		if (mfc_ctx->state < INST_STATE_INIT) {
+			mfc_err("IOCTL_MFC_DEC_EXE invalid state: 0x%08x\n",
+				 mfc_ctx->state);
+			in_param.ret_code = MFC_STATE_INVALID;
+			ret = -EINVAL;
+
+			mutex_unlock(&dev->lock);
+			break;
+		}
+
+		mfc_clock_on(mfcdev);
+		in_param.ret_code = mfc_exec_decoding(mfc_ctx, &(in_param.args));
+		ret = in_param.ret_code;
+		mfc_clock_off(mfcdev);
+#if SUPPORT_SLICE_ENCODING
+		dev->frame_sys = 1;
+		dev->frame_working_flag = 0;
+		if (dev->wait_frame_timeout == 1)
+			wake_up(&dev->wait_frame);
+#endif
+
+		mutex_unlock(&dev->lock);
+		break;
+
+	case IOCTL_MFC_ENC_EXE:
+		mutex_lock(&dev->lock);
+#if SUPPORT_SLICE_ENCODING
+		if (mfc_ctx->slice_flag == 0) {
+			dev->frame_working_flag = 1;
+			dev->frame_sys = 0;
+		}
+
+		/* frame-mode encode must wait for a running slice encode */
+		if ((dev->slice_encoding_flag == 1)
+		    && (mfc_ctx->slice_flag == 0)) {
+			mutex_unlock(&dev->lock);
+			dev->wait_slice_timeout = 1;
+			if (wait_event_timeout(dev->wait_slice, dev->slice_sys,
+				SLICE_ENC_TIMEOUT) == 0) {
+				mfc_err("Slice encoding done timeout : %d\n",
+					dev->slice_sys);
+				dev->slice_encoding_flag = 0;
+				dev->slice_sys = 0;
+				dev->wait_slice_timeout = 0;
+				ret = -EINVAL;
+
+				/* lock already dropped; exit unlocked */
+				break;
+			}
+			mutex_lock(&dev->lock);
+			dev->slice_sys = 0;
+			dev->wait_slice_timeout = 0;
+		}
+
+		/* slice-mode encode must wait for a running frame operation */
+		if ((dev->frame_working_flag == 1) && (mfc_ctx->slice_flag == 1)
+		    && (dev->slice_encoding_flag == 0)) {
+			mutex_unlock(&dev->lock);
+			dev->wait_frame_timeout = 1;
+			if (wait_event_timeout(dev->wait_frame, dev->frame_sys,
+				SLICE_ENC_TIMEOUT) == 0) {
+				mfc_err("frame working done timeout : %d\n",
+					dev->frame_sys);
+				dev->frame_working_flag = 0;
+				dev->frame_sys = 0;
+				dev->wait_frame_timeout = 0;
+				ret = -EINVAL;
+
+				/* lock already dropped; exit unlocked */
+				break;
+			}
+			mutex_lock(&dev->lock);
+			dev->frame_sys = 0;
+			dev->wait_frame_timeout = 0;
+		}
+#endif
+
+		if (mfc_ctx->state < INST_STATE_INIT) {
+			mfc_err("IOCTL_MFC_DEC_EXE invalid state: 0x%08x\n",
+				 mfc_ctx->state);
+			in_param.ret_code = MFC_STATE_INVALID;
+			ret = -EINVAL;
+
+			mutex_unlock(&dev->lock);
+			break;
+		}
+
+		mfc_clock_on(mfcdev);
+		in_param.ret_code = mfc_exec_encoding(mfc_ctx, &(in_param.args));
+		ret = in_param.ret_code;
+		mfc_clock_off(mfcdev);
+#if SUPPORT_SLICE_ENCODING
+		if (mfc_ctx->slice_flag == 0) {
+			dev->frame_sys = 1;
+			dev->frame_working_flag = 0;
+			if (dev->wait_frame_timeout == 1)
+				wake_up(&dev->wait_frame);
+		}
+#endif
+
+		mutex_unlock(&dev->lock);
+		break;
+
+	case IOCTL_MFC_GET_IN_BUF:
+		mutex_lock(&dev->lock);
+
+		if (in_param.args.mem_alloc.type == ENCODER) {
+			buf_arg.type = ENCODER;
+			port = 1;
+		} else {
+			buf_arg.type = DECODER;
+			port = 0;
+		}
+
+		/* FIXME: consider the size */
+		buf_arg.size = in_param.args.mem_alloc.buff_size;
+		/*
+		buf_arg.mapped = in_param.args.mem_alloc.mapped_addr;
+		*/
+		/* FIXME: encodeing linear: 2KB, tile: 8KB */
+		buf_arg.align = ALIGN_2KB;
+
+		if (buf_arg.type == ENCODER)
+			in_param.ret_code = mfc_alloc_buf(mfc_ctx, &buf_arg, MBT_DPB | port);
+		else
+			in_param.ret_code = mfc_alloc_buf(mfc_ctx, &buf_arg, MBT_CPB | port);
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+		in_param.args.mem_alloc.secure_id = buf_arg.secure_id;
+#elif defined(CONFIG_S5P_VMEM)
+		in_param.args.mem_alloc.cookie = buf_arg.cookie;
+#else
+		in_param.args.mem_alloc.offset = buf_arg.offset;
+#endif
+		ret = in_param.ret_code;
+
+		mutex_unlock(&dev->lock);
+		break;
+
+	case IOCTL_MFC_FREE_BUF:
+		mutex_lock(&dev->lock);
+
+		in_param.ret_code =
+			mfc_free_buf(mfc_ctx, in_param.args.mem_free.key);
+		ret = in_param.ret_code;
+
+		mutex_unlock(&dev->lock);
+		break;
+
+	case IOCTL_MFC_GET_REAL_ADDR:
+		mutex_lock(&dev->lock);
+
+		in_param.args.real_addr.addr =
+			mfc_get_buf_real(mfc_ctx->id, in_param.args.real_addr.key);
+
+		mfc_dbg("real addr: 0x%08x", in_param.args.real_addr.addr);
+
+		if (in_param.args.real_addr.addr)
+			in_param.ret_code = MFC_OK;
+		else
+			in_param.ret_code = MFC_MEM_INVALID_ADDR_FAIL;
+
+		ret = in_param.ret_code;
+
+		mutex_unlock(&dev->lock);
+		break;
+
+	case IOCTL_MFC_GET_MMAP_SIZE:
+		if (mfc_chk_inst_state(mfc_ctx, INST_STATE_CREATE) < 0) {
+			mfc_err("IOCTL_MFC_GET_MMAP_SIZE invalid state: \
+				0x%08x\n", mfc_ctx->state);
+			in_param.ret_code = MFC_STATE_INVALID;
+			ret = -EINVAL;
+
+			break;
+		}
+
+		in_param.ret_code = MFC_OK;
+		/* ret doubles as the total mappable size on success */
+		ret = 0;
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+		for (i = 0; i < MFC_MAX_MEM_CHUNK_NUM; i++)
+			ret += mfc_mem_data_size(i);
+
+		ret += mfc_mem_hole_size();
+#else
+		for (i = 0; i < dev->mem_ports; i++)
+			ret += mfc_mem_data_size(i);
+#endif
+
+		break;
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+	case IOCTL_MFC_SET_IN_BUF:
+		if (in_param.args.mem_alloc.type == ENCODER) {
+			buf_arg.secure_id = in_param.args.mem_alloc.secure_id;
+			buf_arg.align = ALIGN_2KB;
+			port = 1;
+			ret = mfc_vcm_bind_from_others(mfc_ctx, &buf_arg, MBT_OTHER | port);
+		} else {
+			in_param.args.real_addr.addr =
+				mfc_ump_get_virt(in_param.args.real_addr.key);
+
+			mfc_dbg("real addr: 0x%08x", in_param.args.real_addr.addr);
+
+			if (in_param.args.real_addr.addr)
+				in_param.ret_code = MFC_OK;
+			else
+				in_param.ret_code = MFC_MEM_INVALID_ADDR_FAIL;
+
+			ret = in_param.ret_code;
+		}
+
+		break;
+#endif
+
+	case IOCTL_MFC_SET_CONFIG:
+		/* FIXME: mfc_chk_inst_state*/
+		/* RMVME: need locking ? */
+		mutex_lock(&dev->lock);
+
+		/* in_param.ret_code = mfc_set_config(mfc_ctx, &(in_param.args)); */
+
+		cfg_arg = (struct mfc_config_arg *)&in_param.args;
+
+		in_param.ret_code = mfc_set_inst_cfg(mfc_ctx, cfg_arg->type,
+				(void *)&cfg_arg->args);
+		ret = in_param.ret_code;
+
+		mutex_unlock(&dev->lock);
+		break;
+
+	case IOCTL_MFC_GET_CONFIG:
+		/* FIXME: mfc_chk_inst_state */
+		/* RMVME: need locking ? */
+		mutex_lock(&dev->lock);
+
+		cfg_arg = (struct mfc_config_arg *)&in_param.args;
+
+		in_param.ret_code = mfc_get_inst_cfg(mfc_ctx, cfg_arg->type,
+				(void *)&cfg_arg->args);
+		ret = in_param.ret_code;
+
+		mutex_unlock(&dev->lock);
+		break;
+
+	case IOCTL_MFC_SET_BUF_CACHE:
+		/* ret is still 0 from the successful copy_from_user above */
+		mfc_ctx->buf_cache_type = in_param.args.mem_alloc.buf_cache_type;
+		in_param.ret_code = MFC_OK;
+		break;
+
+	default:
+		mfc_err("failed to execute ioctl cmd: 0x%08x\n", cmd);
+
+		in_param.ret_code = MFC_INVALID_PARAM_FAIL;
+		ret = -EINVAL;
+	}
+
+out_ioctl:
+	ex_ret = copy_to_user((struct mfc_common_args *)arg,
+			&in_param,
+			sizeof(struct mfc_common_args));
+	if (ex_ret != 0) {
+		mfc_err("Outparm copy to user error\n");
+		ret = -EIO;
+	}
+
+	mfc_dbg("return = %d\n", ret);
+
+	return ret;
+}
+
+/*
+ * VMA open callback. Currently a stub; the commented-out code suggests a
+ * planned per-mapping reference count (see FIXME below).
+ */
+static void mfc_vm_open(struct vm_area_struct *vma)
+{
+	/* FIXME:
+	struct mfc_inst_ctx *mfc_ctx = (struct mfc_inst_ctx *)vma->vm_private_data;
+
+	mfc_dbg("id: %d\n", mfc_ctx->id);
+	*/
+
+	/* FIXME: atomic_inc(mapped count) */
+}
+
+/*
+ * VMA close callback. Stub counterpart of mfc_vm_open(); would decrement
+ * the planned per-mapping reference count.
+ */
+static void mfc_vm_close(struct vm_area_struct *vma)
+{
+	/* FIXME:
+	struct mfc_inst_ctx *mfc_ctx = (struct mfc_inst_ctx *)vma->vm_private_data;
+
+	mfc_dbg("id: %d\n", mfc_ctx->id);
+	*/
+
+	/* FIXME: atomic_dec(mapped count) */
+}
+
+/*
+ * Page-fault handler for MFC mappings. The real lookup (resolving the
+ * faulting address to a vmalloc page) is commented out, so this always
+ * reports success without filling vmf->page.
+ * NOTE(review): returning 0 without setting vmf->page would normally let
+ * the fault repeat; presumably this path is unused because mfc_mmap()
+ * remaps everything up front — confirm before relying on it.
+ */
+static int mfc_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+
+	/* FIXME:
+	struct mfc_inst_ctx *mfc_ctx = (struct mfc_inst_ctx *)vma->vm_private_data;
+	struct page *pg = NULL;
+
+	mfc_dbg("id: %d, pgoff: 0x%08lx, user: 0x%08lx\n",
+		mfc_ctx->id, vmf->pgoff, (unsigned long)(vmf->virtual_address));
+
+	if (mfc_ctx == NULL)
+		return VM_FAULT_SIGBUS;
+
+	mfc_dbg("addr: 0x%08lx\n",
+		(unsigned long)(_mfc_get_buf_addr(mfc_ctx->id, vmf->virtual_address)));
+
+	pg = vmalloc_to_page(_mfc_get_buf_addr(mfc_ctx->id, vmf->virtual_address));
+
+	if (!pg)
+		return VM_FAULT_SIGBUS;
+
+	vmf->page = pg;
+	*/
+
+	return 0;
+}
+
+/* VMA callbacks installed by mfc_mmap() for UMP/S5P_VMEM mappings. */
+static const struct vm_operations_struct mfc_vm_ops = {
+	.open	= mfc_vm_open,
+	.close	= mfc_vm_close,
+	.fault	= mfc_vm_fault,
+};
+
+/*
+ * mmap() handler: maps the MFC data memory (one or two ports/chunks)
+ * into the calling process.
+ *
+ * Three build-time strategies share this function:
+ *  - SYSMMU + UMP/S5P_VMEM: lazy mapping via mfc_vm_ops (fault handler);
+ *  - SYSMMU + kernel vmalloc allocator: page-by-page remap_pfn_range()
+ *    of vmalloc memory, port 0 then (optionally) port 1;
+ *  - no SYSMMU (CMA/memblock reservation): direct physical
+ *    remap_pfn_range() of each contiguous chunk, skipping the hole
+ *    between chunks under CONTENT_PATH_PROTECTION.
+ * In every branch the requested size is validated against the total
+ * reserved size, and mfc_ctx->userbase records the user base address.
+ */
+static int mfc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long user_size = vma->vm_end - vma->vm_start;
+	unsigned long real_size;
+	struct mfc_inst_ctx *mfc_ctx;
+#if !(defined(CONFIG_VIDEO_MFC_VCM_UMP) || defined(CONFIG_S5P_VMEM))
+	/* mmap support */
+	unsigned long pfn;
+	unsigned long remap_offset, remap_size;
+	struct mfc_dev *dev;
+#ifdef SYSMMU_MFC_ON
+	/* kernel virtual memory allocator */
+	char *ptr;
+	unsigned long start, size;
+#endif
+#endif
+	mfc_ctx = (struct mfc_inst_ctx *)file->private_data;
+	if (!mfc_ctx)
+		return -EINVAL;
+
+#if !(defined(CONFIG_VIDEO_MFC_VCM_UMP) || defined(CONFIG_S5P_VMEM))
+	dev = mfc_ctx->dev;
+#endif
+
+	mfc_dbg("vm_start: 0x%08lx, vm_end: 0x%08lx, size: %ld(%ldMB)\n",
+		 vma->vm_start, vma->vm_end, user_size, (user_size >> 20));
+
+	real_size = (unsigned long)(mfc_mem_data_size(0) + mfc_mem_data_size(1));
+
+	mfc_dbg("port 0 size: %d, port 1 size: %d, total: %ld\n",
+		mfc_mem_data_size(0),
+		mfc_mem_data_size(1),
+		real_size);
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+	/* the hole between protected chunks is part of the mappable span */
+	real_size += mfc_mem_hole_size();
+#endif
+
+	/*
+	 * if memory size required from appl. mmap() is bigger than max data memory
+	 * size allocated in the driver.
+	 */
+	if (user_size > real_size) {
+		mfc_err("user requeste mem(%ld) is bigger than available mem(%ld)\n",
+			user_size, real_size);
+		return -EINVAL;
+	}
+#ifdef SYSMMU_MFC_ON
+#if (defined(CONFIG_VIDEO_MFC_VCM_UMP) || defined(CONFIG_S5P_VMEM))
+	vma->vm_flags |= VM_RESERVED | VM_IO;
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_ops = &mfc_vm_ops;
+	vma->vm_private_data = mfc_ctx;
+
+	mfc_ctx->userbase = vma->vm_start;
+#else	/* not CONFIG_VIDEO_MFC_VCM_UMP && not CONFIG_S5P_VMEM */
+	/* kernel virtual memory allocator */
+	if (dev->mem_ports == 1) {
+		remap_offset = 0;
+		remap_size = user_size;
+
+		vma->vm_flags |= VM_RESERVED | VM_IO;
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+		/*
+		 * Port 0 mapping for stream buf & frame buf (chroma + MV + luma)
+		 */
+		ptr = (char *)mfc_mem_data_base(0);
+		start = remap_offset;
+		size = remap_size;
+		/* vmalloc memory is not physically contiguous: map page by page */
+		while (size > 0) {
+			pfn = vmalloc_to_pfn(ptr);
+			if (remap_pfn_range(vma, vma->vm_start + start, pfn,
+				PAGE_SIZE, vma->vm_page_prot)) {
+
+				mfc_err("failed to remap port 0\n");
+				return -EAGAIN;
+			}
+
+			start += PAGE_SIZE;
+			ptr += PAGE_SIZE;
+			size -= PAGE_SIZE;
+		}
+	} else {
+		remap_offset = 0;
+		remap_size = min((unsigned long)mfc_mem_data_size(0), user_size);
+
+		vma->vm_flags |= VM_RESERVED | VM_IO;
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+		/*
+		 * Port 0 mapping for stream buf & frame buf (chroma + MV)
+		 */
+		ptr = (char *)mfc_mem_data_base(0);
+		start = remap_offset;
+		size = remap_size;
+		while (size > 0) {
+			pfn = vmalloc_to_pfn(ptr);
+			if (remap_pfn_range(vma, vma->vm_start + start, pfn,
+				PAGE_SIZE, vma->vm_page_prot)) {
+
+				mfc_err("failed to remap port 0\n");
+				return -EAGAIN;
+			}
+
+			start += PAGE_SIZE;
+			ptr += PAGE_SIZE;
+			size -= PAGE_SIZE;
+		}
+
+		remap_offset = remap_size;
+		remap_size = min((unsigned long)mfc_mem_data_size(1),
+			user_size - remap_offset);
+
+		vma->vm_flags |= VM_RESERVED | VM_IO;
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+		/*
+		 * Port 1 mapping for frame buf (luma)
+		 */
+		ptr = (void *)mfc_mem_data_base(1);
+		start = remap_offset;
+		size = remap_size;
+		while (size > 0) {
+			pfn = vmalloc_to_pfn(ptr);
+			if (remap_pfn_range(vma, vma->vm_start + start, pfn,
+				PAGE_SIZE, vma->vm_page_prot)) {
+
+				mfc_err("failed to remap port 1\n");
+				return -EAGAIN;
+			}
+
+			start += PAGE_SIZE;
+			ptr += PAGE_SIZE;
+			size -= PAGE_SIZE;
+		}
+	}
+
+	mfc_ctx->userbase = vma->vm_start;
+
+	mfc_dbg("user request mem = %ld, available data mem = %ld\n",
+		  user_size, real_size);
+
+	if ((remap_offset + remap_size) < real_size)
+		mfc_warn("The MFC reserved memory dose not mmap fully [%ld: %ld]\n",
+		  real_size, (remap_offset + remap_size));
+#endif	/* end of CONFIG_VIDEO_MFC_VCM_UMP */
+#else	/* not SYSMMU_MFC_ON */
+	/* early allocator */
+	/* CMA or bootmem(memblock) */
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+	vma->vm_flags |= VM_RESERVED | VM_IO;
+	if (mfc_ctx->buf_cache_type == NO_CACHE)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	mfc_info("MFC buffers are %scacheable\n",
+		mfc_ctx->buf_cache_type ? "" : "non-");
+
+	remap_offset = 0;
+	remap_size = min((unsigned long)mfc_mem_data_size(0), user_size);
+	/*
+	 * Chunk 0 mapping
+	 */
+	/* NOTE(review): remap_size is unsigned, so "<= 0" is really "== 0" */
+	if (remap_size <= 0) {
+		mfc_err("invalid remap size of chunk 0\n");
+		return -EINVAL;
+	}
+
+	pfn = __phys_to_pfn(mfc_mem_data_base(0));
+	if (remap_pfn_range(vma, vma->vm_start + remap_offset, pfn,
+		remap_size, vma->vm_page_prot)) {
+
+		mfc_err("failed to remap chunk 0\n");
+		return -EINVAL;
+	}
+
+	/* skip the hole between the chunk */
+	remap_offset += remap_size;
+	remap_size = min((unsigned long)mfc_mem_hole_size(),
+			user_size - remap_offset);
+
+	remap_offset += remap_size;
+	remap_size = min((unsigned long)mfc_mem_data_size(1),
+			user_size - remap_offset);
+	/*
+	 * Chunk 1 mapping if it's available
+	 */
+	if (remap_size > 0) {
+		pfn = __phys_to_pfn(mfc_mem_data_base(1));
+		if (remap_pfn_range(vma, vma->vm_start + remap_offset, pfn,
+			remap_size, vma->vm_page_prot)) {
+
+			mfc_err("failed to remap chunk 1\n");
+			return -EINVAL;
+		}
+	}
+#else
+	vma->vm_flags |= VM_RESERVED | VM_IO;
+	if (mfc_ctx->buf_cache_type == NO_CACHE)
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	mfc_info("MFC buffers are %scacheable\n",
+		mfc_ctx->buf_cache_type ? "" : "non-");
+
+	if (dev->mem_ports == 1) {
+		remap_offset = 0;
+		remap_size = min((unsigned long)mfc_mem_data_size(0), user_size);
+		/*
+		 * Port 0 mapping for stream buf & frame buf (chroma + MV + luma)
+		 */
+		pfn = __phys_to_pfn(mfc_mem_data_base(0));
+		if (remap_pfn_range(vma, vma->vm_start + remap_offset, pfn,
+			remap_size, vma->vm_page_prot)) {
+
+			mfc_err("failed to remap port 0\n");
+			return -EINVAL;
+		}
+	} else {
+		remap_offset = 0;
+		remap_size = min((unsigned long)mfc_mem_data_size(0), user_size);
+		/*
+		 * Port 0 mapping for stream buf & frame buf (chroma + MV)
+		 */
+		pfn = __phys_to_pfn(mfc_mem_data_base(0));
+		if (remap_pfn_range(vma, vma->vm_start + remap_offset, pfn,
+			remap_size, vma->vm_page_prot)) {
+
+			mfc_err("failed to remap port 0\n");
+			return -EINVAL;
+		}
+
+		remap_offset = remap_size;
+		remap_size = min((unsigned long)mfc_mem_data_size(1),
+			user_size - remap_offset);
+		/*
+		 * Port 1 mapping for frame buf (luma)
+		 */
+		pfn = __phys_to_pfn(mfc_mem_data_base(1));
+		if (remap_pfn_range(vma, vma->vm_start + remap_offset, pfn,
+			remap_size, vma->vm_page_prot)) {
+
+			mfc_err("failed to remap port 1\n");
+			return -EINVAL;
+		}
+	}
+#endif
+	mfc_ctx->userbase = vma->vm_start;
+
+	mfc_dbg("user request mem = %ld, available data mem = %ld\n",
+		  user_size, real_size);
+
+	if ((remap_offset + remap_size) < real_size)
+		mfc_warn("The MFC reserved memory dose not mmap fully [%ld: %ld]\n",
+		  real_size, (remap_offset + remap_size));
+#endif	/* end of SYSMMU_MFC_ON */
+	return 0;
+}
+
+/* Character-device entry points exposed through the misc device below. */
+static const struct file_operations mfc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= mfc_open,
+	.release	= mfc_release,
+	.unlocked_ioctl	= mfc_ioctl,
+	.mmap		= mfc_mmap,
+};
+
+/* Misc device node (/dev entry) through which userspace reaches the MFC. */
+static struct miscdevice mfc_miscdev = {
+	.minor	= MFC_MINOR,
+	.name	= MFC_DEV_NAME,
+	.fops	= &mfc_fops,
+};
+
+/*
+ * Completion callback for request_firmware_nowait(): keep the firmware
+ * reference in mfcdev->fw.info, copy the image into MFC memory via
+ * mfc_load_firmware(), and clear the "requesting" flag in all cases.
+ *
+ * Fixes vs. original: fw->size is a size_t and must be printed with %zu
+ * (the old %d is a format-type mismatch); the request-failure message is
+ * now logged at KERN_ERR instead of KERN_INFO, since the device cannot
+ * work without firmware.
+ */
+static void mfc_firmware_request_complete_handler(const struct firmware *fw,
+						  void *context)
+{
+	if (fw != NULL) {
+		mfcdev->fw.info = fw;
+
+		mfcdev->fw.state = mfc_load_firmware(mfcdev->fw.info->data,
+				mfcdev->fw.info->size);
+		if (mfcdev->fw.state)
+			printk(KERN_INFO "MFC F/W loaded successfully (size: %zu)\n", fw->size);
+		else
+			printk(KERN_ERR "failed to load MFC F/W, MFC will not working\n");
+	} else {
+		printk(KERN_ERR "failed to copy MFC F/W during init\n");
+	}
+
+	mfcdev->fw.requesting = 0;
+}
+
+/*
+ * procfs read callback: report the number of currently open MFC
+ * instances as a decimal line; returns the byte count written.
+ */
+static int proc_read_inst_number(char *buf, char **start,
+				off_t off, int count,
+				int *eof, void *data)
+{
+	return sprintf(buf, "%d\n", atomic_read(&mfcdev->inst_cnt));
+}
+
+/* FIXME: check every exception case (goto) */
+/*
+ * mfc_probe - platform probe: bring up the whole MFC device.
+ *
+ * Order of initialization (each step has a matching unwind label, in
+ * reverse order, at the bottom):
+ *   device struct + procfs entries -> MMIO region + ioremap -> IRQ ->
+ *   PM interface -> memory manager -> async firmware request ->
+ *   (optional VCM activation / busfreq handle) -> buffer manager ->
+ *   misc device registration.
+ * The IRQ is registered but left disabled until an instance is opened.
+ */
+static int __devinit mfc_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+
+	mfcdev = kzalloc(sizeof(struct mfc_dev), GFP_KERNEL);
+	if (unlikely(mfcdev == NULL)) {
+		dev_err(&pdev->dev, "failed to allocate control memory\n");
+		return -ENOMEM;
+	}
+
+	mfc_proc_entry = proc_mkdir(MFC_PROC_ROOT, NULL);
+
+	if (!mfc_proc_entry) {
+		dev_err(&pdev->dev, "unable to create /proc/%s\n",
+				MFC_PROC_ROOT);
+		kfree(mfcdev);
+		return -ENOMEM;
+	}
+
+	if (!create_proc_read_entry(MFC_PROC_TOTAL_INSTANCE_NUMBER, 0,
+				mfc_proc_entry, proc_read_inst_number, NULL)) {
+		dev_err(&pdev->dev, "unable to create /proc/%s/%s\n",
+				MFC_PROC_ROOT, MFC_PROC_TOTAL_INSTANCE_NUMBER);
+		ret = -ENOMEM;
+		goto err_proc;
+	}
+
+	/* init. control structure */
+	sprintf(mfcdev->name, "%s", MFC_DEV_NAME);
+
+	mutex_init(&mfcdev->lock);
+	init_waitqueue_head(&mfcdev->wait_sys);
+	init_waitqueue_head(&mfcdev->wait_codec[0]);
+	init_waitqueue_head(&mfcdev->wait_codec[1]);
+#if SUPPORT_SLICE_ENCODING
+	init_waitqueue_head(&mfcdev->wait_slice);
+	init_waitqueue_head(&mfcdev->wait_frame);
+#endif
+	atomic_set(&mfcdev->inst_cnt, 0);
+#if defined(CONFIG_BUSFREQ)
+	atomic_set(&mfcdev->busfreq_lock_cnt, 0);
+#endif
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_EXYNOS4_CPUFREQ)
+	atomic_set(&mfcdev->cpufreq_lock_cnt, 0);
+	mfcdev->cpufreq_level = 0;
+#endif
+#ifdef CONFIG_BUSFREQ_OPP
+	atomic_set(&mfcdev->dmcthreshold_lock_cnt, 0);
+#endif
+	mfcdev->device = &pdev->dev;
+#if SUPPORT_SLICE_ENCODING
+	mfcdev->slice_encoding_flag = 0;
+	mfcdev->slice_sys = 0;
+	mfcdev->frame_sys = 0;
+	mfcdev->wait_slice_timeout = 0;
+	mfcdev->wait_frame_timeout = 0;
+#endif
+
+	platform_set_drvdata(pdev, mfcdev);
+
+	/* get the memory region */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(res == NULL)) {
+		dev_err(&pdev->dev, "no memory resource specified\n");
+		ret = -ENOENT;
+		goto err_mem_res;
+	}
+
+	mfcdev->reg.rsrc_start = res->start;
+	mfcdev->reg.rsrc_len = resource_size(res);
+
+	/* request mem region for MFC register (0x0000 ~ 0xE008) */
+	res = request_mem_region(mfcdev->reg.rsrc_start,
+			mfcdev->reg.rsrc_len, pdev->name);
+	if (unlikely(res == NULL)) {
+		dev_err(&pdev->dev, "failed to get memory region\n");
+		ret = -ENOENT;
+		goto err_mem_req;
+	}
+
+	/* ioremap for MFC register */
+	mfcdev->reg.base = ioremap(mfcdev->reg.rsrc_start, mfcdev->reg.rsrc_len);
+
+	if (unlikely(!mfcdev->reg.base)) {
+		dev_err(&pdev->dev, "failed to ioremap memory region\n");
+		ret = -EINVAL;
+		goto err_mem_map;
+	}
+
+	init_reg(mfcdev->reg.base);
+
+	mfcdev->irq = platform_get_irq(pdev, 0);
+	if (unlikely(mfcdev->irq < 0)) {
+		dev_err(&pdev->dev, "no irq resource specified\n");
+		ret = -ENOENT;
+		goto err_irq_res;
+	}
+
+	ret = request_irq(mfcdev->irq, mfc_irq, IRQF_DISABLED, mfcdev->name, mfcdev);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to allocate irq (%d)\n", ret);
+		goto err_irq_req;
+	}
+
+	/*
+	 * initialize PM(power, clock) interface
+	 */
+	ret = mfc_init_pm(mfcdev);
+	if (ret < 0) {
+		printk(KERN_ERR "failed to init. MFC PM interface\n");
+		goto err_pm_if;
+	}
+
+	/*
+	 * initialize memory manager
+	 */
+	ret = mfc_init_mem_mgr(mfcdev);
+	if (ret < 0) {
+		printk(KERN_ERR "failed to init. MFC memory manager\n");
+		goto err_mem_mgr;
+	}
+
+	/*
+	 * loading firmware
+	 */
+	/* asynchronous: completion handler clears fw.requesting when done */
+	mfcdev->fw.requesting = 1;
+	ret = request_firmware_nowait(THIS_MODULE,
+				      FW_ACTION_HOTPLUG,
+				      MFC_FW_NAME,
+				      &pdev->dev,
+				      GFP_KERNEL,
+				      pdev,
+				      mfc_firmware_request_complete_handler);
+	if (ret) {
+		mfcdev->fw.requesting = 0;
+		dev_err(&pdev->dev, "could not load firmware (err=%d)\n", ret);
+		goto err_fw_req;
+	}
+
+#if defined(SYSMMU_MFC_ON) && defined(CONFIG_VIDEO_MFC_VCM_UMP)
+	ret = vcm_activate(mfcdev->vcm_info.sysmmu_vcm);
+	if (ret < 0) {
+		mfc_err("failed to activate VCM: %d", ret);
+
+		goto err_act_vcm;
+	}
+#endif
+
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	/* To lock bus frequency in OPP mode */
+	mfcdev->bus_dev = dev_get("exynos-busfreq");
+#endif
+	/*
+	 * initialize buffer manager
+	 */
+	ret = mfc_init_buf();
+	if (ret < 0) {
+		printk(KERN_ERR "failed to init. MFC buffer manager\n");
+		goto err_buf_mgr;
+	}
+
+	/* FIXME: final dec & enc */
+	mfc_init_decoders();
+	mfc_init_encoders();
+
+	ret = misc_register(&mfc_miscdev);
+	if (ret) {
+		mfc_err("MFC can't misc register on minor=%d\n", MFC_MINOR);
+		goto err_misc_reg;
+	}
+
+	/* early silicon revisions need the power domain forced on */
+	if ((soc_is_exynos4212() && (samsung_rev() < EXYNOS4212_REV_1_0)) ||
+		(soc_is_exynos4412() && (samsung_rev() < EXYNOS4412_REV_1_1)))
+		mfc_pd_enable();
+
+	disable_irq(mfcdev->irq);
+
+	mfc_info("MFC(Multi Function Codec - FIMV v5.x) registered successfully\n");
+
+	return 0;
+
+err_misc_reg:
+	mfc_final_buf();
+
+err_buf_mgr:
+#ifdef SYSMMU_MFC_ON
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+	mfc_clock_on(mfcdev);
+
+	vcm_deactivate(mfcdev->vcm_info.sysmmu_vcm);
+
+	mfc_clock_off(mfcdev);
+
+err_act_vcm:
+#endif
+	mfc_clock_on(mfcdev);
+
+	s5p_sysmmu_disable(mfcdev->device);
+
+	mfc_clock_off(mfcdev);
+#endif
+	if (mfcdev->fw.info)
+		release_firmware(mfcdev->fw.info);
+
+err_fw_req:
+	/* FIXME: make kenel dump when probe fail */
+	mfc_clock_on(mfcdev);
+
+	mfc_final_mem_mgr(mfcdev);
+
+	mfc_clock_off(mfcdev);
+
+err_mem_mgr:
+	mfc_final_pm(mfcdev);
+
+err_pm_if:
+	free_irq(mfcdev->irq, mfcdev);
+
+err_irq_req:
+err_irq_res:
+	iounmap(mfcdev->reg.base);
+
+err_mem_map:
+	release_mem_region(mfcdev->reg.rsrc_start, mfcdev->reg.rsrc_len);
+
+err_mem_req:
+err_mem_res:
+	platform_set_drvdata(pdev, NULL);
+	mutex_destroy(&mfcdev->lock);
+	remove_proc_entry(MFC_PROC_TOTAL_INSTANCE_NUMBER, mfc_proc_entry);
+err_proc:
+	remove_proc_entry(MFC_PROC_ROOT, NULL);
+	kfree(mfcdev);
+
+	return ret;
+}
+
+/* FIXME: check mfc_remove funtionalilty */
+/*
+ * mfc_remove - platform remove: tear everything down in reverse probe
+ * order (misc device, buffer manager, SYSMMU/VCM, firmware, memory
+ * manager, PM, IRQ, MMIO, procfs).
+ *
+ * Fix vs. original: the function mixed the global `mfcdev` with the
+ * drvdata-derived `dev` (the same object); it now uses `dev`
+ * consistently, and clears the global so it cannot dangle after kfree().
+ */
+static int __devexit mfc_remove(struct platform_device *pdev)
+{
+	struct mfc_dev *dev = platform_get_drvdata(pdev);
+
+	/* FIXME: close all instance? or check active instance? */
+
+	misc_deregister(&mfc_miscdev);
+
+	mfc_final_buf();
+#ifdef SYSMMU_MFC_ON
+	mfc_clock_on(dev);
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+	vcm_deactivate(dev->vcm_info.sysmmu_vcm);
+#endif
+
+	s5p_sysmmu_disable(dev->device);
+
+	mfc_clock_off(dev);
+#endif
+	if (dev->fw.info)
+		release_firmware(dev->fw.info);
+	mfc_final_mem_mgr(dev);
+	mfc_final_pm(dev);
+	free_irq(dev->irq, dev);
+	iounmap(dev->reg.base);
+	release_mem_region(dev->reg.rsrc_start, dev->reg.rsrc_len);
+	platform_set_drvdata(pdev, NULL);
+	mutex_destroy(&dev->lock);
+	remove_proc_entry(MFC_PROC_TOTAL_INSTANCE_NUMBER, mfc_proc_entry);
+	remove_proc_entry(MFC_PROC_ROOT, NULL);
+	/* dev aliases the global; clear it before freeing */
+	mfcdev = NULL;
+	kfree(dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * System-sleep suspend callback: put the MFC into its sleep state under
+ * dev->lock. A no-op (success) when no instance is open.
+ */
+static int mfc_suspend(struct device *dev)
+{
+	struct mfc_dev *m_dev = platform_get_drvdata(to_platform_device(dev));
+	int ret = MFC_OK;
+
+	/* nothing to save when the codec is idle */
+	if (atomic_read(&m_dev->inst_cnt) != 0) {
+		mutex_lock(&m_dev->lock);
+		ret = mfc_sleep(m_dev);
+		mutex_unlock(&m_dev->lock);
+	}
+
+	return (ret != MFC_OK) ? ret : 0;
+}
+
+/*
+ * System-sleep resume callback: re-enable the system MMU (when built
+ * with SYSMMU_MFC_ON), restore its page table base, and wake the MFC
+ * firmware. A no-op when no instance is open.
+ *
+ * Fix vs. original: mfc_clock_on() was passed the raw `struct device *`
+ * while every other call site (including the paired mfc_clock_off(),
+ * which here used the global `mfcdev`) passes the `struct mfc_dev *`;
+ * both calls now use m_dev. The s5p_sysmmu_* calls keep the raw device
+ * pointer, matching their use in mfc_runtime_resume().
+ */
+static int mfc_resume(struct device *dev)
+{
+	struct mfc_dev *m_dev = platform_get_drvdata(to_platform_device(dev));
+	int ret;
+
+	if (atomic_read(&m_dev->inst_cnt) == 0)
+		return 0;
+
+#ifdef SYSMMU_MFC_ON
+	mfc_clock_on(m_dev);
+
+	s5p_sysmmu_enable(dev);
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+	vcm_set_pgtable_base(VCM_DEV_MFC);
+#else /* CONFIG_S5P_VMEM or kernel virtual memory allocator */
+	s5p_sysmmu_set_tablebase_pgd(dev, __pa(swapper_pg_dir));
+#endif
+
+	mfc_clock_off(m_dev);
+#endif
+
+	mutex_lock(&m_dev->lock);
+
+	/* 4210 needs its power domain re-enabled before wakeup */
+	if (soc_is_exynos4210())
+		mfc_pd_enable();
+
+	ret = mfc_wakeup(m_dev);
+
+	mutex_unlock(&m_dev->lock);
+
+	if (ret != MFC_OK)
+		return ret;
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime-PM suspend: just record that the block is powered down. */
+static int mfc_runtime_suspend(struct device *dev)
+{
+	struct mfc_dev *m_dev = platform_get_drvdata(to_platform_device(dev));
+
+	atomic_set(&m_dev->pm.power, 0);
+
+	return 0;
+}
+
+/* Runtime-PM idle: no gating decision needed; always allow suspend. */
+static int mfc_runtime_idle(struct device *dev)
+{
+	return 0;
+}
+
+/*
+ * Runtime-PM resume: mark the block powered and, on a genuine power-up
+ * transition (previous state was off) with the system MMU built in,
+ * re-enable the MMU and restore its page-table base.
+ */
+static int mfc_runtime_resume(struct device *dev)
+{
+	struct mfc_dev *m_dev = platform_get_drvdata(to_platform_device(dev));
+	int pre_power;
+
+	pre_power = atomic_read(&m_dev->pm.power);
+	atomic_set(&m_dev->pm.power, 1);
+
+#ifdef SYSMMU_MFC_ON
+	/* only reprogram the SYSMMU when the block was actually off */
+	if (pre_power == 0) {
+		mfc_clock_on(dev);
+
+		s5p_sysmmu_enable(dev);
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+		vcm_set_pgtable_base(VCM_DEV_MFC);
+#else /* CONFIG_S5P_VMEM or kernel virtual memory allocator */
+		s5p_sysmmu_set_tablebase_pgd(dev, __pa(swapper_pg_dir));
+#endif
+
+		mfc_clock_off(dev);
+	}
+#endif
+
+	return 0;
+}
+#endif
+
+#else
+#define mfc_suspend NULL
+#define mfc_resume NULL
+#ifdef CONFIG_PM_RUNTIME
+#define mfc_runtime_idle NULL
+#define mfc_runtime_suspend NULL
+#define mfc_runtime_resume NULL
+#endif
+#endif
+
+/* System-sleep and runtime PM callbacks for the MFC platform device. */
+static const struct dev_pm_ops mfc_pm_ops = {
+	.suspend		= mfc_suspend,
+	.resume			= mfc_resume,
+#ifdef CONFIG_PM_RUNTIME
+	.runtime_idle		= mfc_runtime_idle,
+	.runtime_suspend	= mfc_runtime_suspend,
+	.runtime_resume		= mfc_runtime_resume,
+#endif
+};
+
+/* Platform driver glue; matched against the MFC platform device by name. */
+static struct platform_driver mfc_driver = {
+	.probe		= mfc_probe,
+	.remove		= __devexit_p(mfc_remove),
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= MFC_DEV_NAME,
+		.pm	= &mfc_pm_ops,
+	},
+};
+
+/*
+ * Module init: register the platform driver.
+ *
+ * Fix vs. original: propagate the real error code from
+ * platform_driver_register() instead of collapsing every failure to -1
+ * (which the module loader would interpret as -EPERM).
+ */
+static int __init mfc_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&mfc_driver);
+	if (ret != 0)
+		printk(KERN_ERR "FIMV MFC platform device registration failed\n");
+
+	return ret;
+}
+
+/* Module exit: unregister the platform driver and log the teardown. */
+static void __exit mfc_exit(void)
+{
+	platform_driver_unregister(&mfc_driver);
+	mfc_info("FIMV MFC(Multi Function Codec) V5.x exit\n");
+}
+
+module_init(mfc_init);
+module_exit(mfc_exit);
+
+MODULE_AUTHOR("Jeongtae, Park");
+MODULE_AUTHOR("Jaeryul, Oh");
+MODULE_DESCRIPTION("FIMV MFC(Multi Function Codec) V5.x Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/mfc5x/mfc_dev.h b/drivers/media/video/samsung/mfc5x/mfc_dev.h
new file mode 100644
index 0000000..c82c26c
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_dev.h
@@ -0,0 +1,130 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_dev.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Driver interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_DEV_H
+#define __MFC_DEV_H __FILE__
+
+#include <linux/mutex.h>
+#include <linux/firmware.h>
+
+#include "mfc_inst.h"
+
+#define MFC_DEV_NAME "s3c-mfc"
+#define MFC_NAME_LEN 16
+
+/* MMIO window of the MFC IP: physical resource range and ioremapped base */
+struct mfc_reg {
+ resource_size_t rsrc_start;
+ resource_size_t rsrc_len;
+ void __iomem *base;
+};
+
+/* power-management state: power-domain/clock names, clock handle and an
+ * atomic power flag (1 = block powered) used by the runtime-PM callbacks */
+struct mfc_pm {
+ char pd_name[MFC_NAME_LEN];
+ char clk_name[MFC_NAME_LEN];
+ struct clk *clock;
+ atomic_t power;
+#ifdef CONFIG_PM_RUNTIME
+ struct device *device;
+#endif
+};
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+/* VCM (virtual contiguous memory) handle and SysMMU page directory */
+struct mfc_vcm {
+ struct vcm *sysmmu_vcm;
+ unsigned long *sysmmu_pgd;
+};
+#endif
+
+/* one memory pool (per MFC port): device-visible base, size and the kernel
+ * mapping, plus allocator-specific bookkeeping */
+struct mfc_mem {
+ unsigned long base; /* phys. or virt. addr for MFC */
+ size_t size; /* total size */
+ unsigned char *addr; /* kernel virtual address space */
+#if (defined(SYSMMU_MFC_ON) && !defined(CONFIG_VIDEO_MFC_VCM_UMP) && !defined(CONFIG_S5P_VMEM))
+ void *vmalloc_addr; /* not aligned vmalloc alloc. addr */
+#endif
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+ struct vcm_res *vcm_s;
+#endif
+};
+
+/* firmware image state: request_firmware() handle, load state/version and
+ * allocator-specific mappings of the firmware buffer */
+struct mfc_fw {
+ const struct firmware *info;
+ int requesting;
+ int state;
+ int ver;
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ struct vcm_mmu_res *vcm_s;
+ struct vcm_res *vcm_k;
+#elif defined(CONFIG_S5P_VMEM)
+ int vmem_cookie;
+#endif
+};
+
+/* top-level driver state for one MFC device: registers, IRQ, PM, memory
+ * pools, firmware, open instances and the wait queues used by the ISR */
+struct mfc_dev {
+ char name[MFC_NAME_LEN];
+ struct mfc_reg reg;
+ int irq;
+ struct mfc_pm pm;
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+ struct mfc_vcm vcm_info;
+#endif
+ int mem_ports;
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ struct mfc_mem mem_infos[MFC_MAX_MEM_CHUNK_NUM];
+ struct mfc_mem drm_info;
+#else
+ struct mfc_mem mem_infos[MFC_MAX_MEM_PORT_NUM];
+#endif
+
+ atomic_t inst_cnt;
+ struct mfc_inst_ctx *inst_ctx[MFC_MAX_INSTANCE_NUM];
+
+ struct mutex lock;
+ wait_queue_head_t wait_sys;
+ int irq_sys;
+ /* FIXME: remove or use 2 codec channel */
+ wait_queue_head_t wait_codec[2];
+ int irq_codec[2];
+
+ struct mfc_fw fw;
+
+ struct s5p_vcm_mmu *_vcm_mmu;
+
+ struct device *device;
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+ struct device *bus_dev;
+#endif
+#if defined(CONFIG_BUSFREQ)
+ atomic_t busfreq_lock_cnt; /* Bus frequency Lock count */
+#endif
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_EXYNOS4_CPUFREQ)
+ atomic_t cpufreq_lock_cnt; /* CPU frequency Lock count */
+ int cpufreq_level; /* CPU frequency level */
+#endif
+#ifdef CONFIG_BUSFREQ_OPP
+ atomic_t dmcthreshold_lock_cnt; /* dmc max threshold Lock count */
+#endif
+#if SUPPORT_SLICE_ENCODING
+ int slice_encoding_flag;
+ wait_queue_head_t wait_slice;
+ int slice_sys;
+ int wait_slice_timeout;
+ int frame_working_flag;
+ wait_queue_head_t wait_frame;
+ int frame_sys;
+ int wait_frame_timeout;
+#endif
+};
+
+#endif /* __MFC_DEV_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_enc.c b/drivers/media/video/samsung/mfc5x/mfc_enc.c
new file mode 100644
index 0000000..53f24c9
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_enc.c
@@ -0,0 +1,1792 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_enc.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Encoder interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#if defined(CONFIG_BUSFREQ) || defined(CONFIG_EXYNOS4_CPUFREQ)
+#include <mach/cpufreq.h>
+#endif
+#include <mach/regs-mfc.h>
+
+#include "mfc_enc.h"
+#include "mfc_cmd.h"
+#include "mfc_log.h"
+
+#include "mfc_shm.h"
+#include "mfc_reg.h"
+#include "mfc_mem.h"
+#include "mfc_buf.h"
+#include "mfc_interface.h"
+
+static LIST_HEAD(mfc_encoders);
+
+/*
+ * [1] alloc_ctx_buf() implementations
+ */
+/*
+ * Allocate and zero the 2KB-aligned per-instance context buffer on port A
+ * and record its device offset (in 2KB units) and size in the context.
+ * Returns 0 on success, -1 on allocation failure.
+ * (Fixed: stray leading whitespace on the definition line.)
+ */
+static int alloc_ctx_buf(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_alloc_buffer *alloc;
+
+ alloc = _mfc_alloc_buf(ctx, MFC_CTX_SIZE, ALIGN_2KB, MBT_CTX | PORT_A);
+ if (alloc == NULL) {
+ mfc_err("failed alloc context buffer\n");
+
+ return -1;
+ }
+
+ /* hardware takes the context base as a 2KB-granular offset */
+ ctx->ctxbufofs = mfc_mem_base_ofs(alloc->real) >> 11;
+ ctx->ctxbufsize = alloc->size;
+
+ memset((void *)alloc->addr, 0, alloc->size);
+
+ /* flush the CPU cache so the device sees the zeroed contents */
+ mfc_mem_cache_clean((void *)alloc->addr, alloc->size);
+
+ return 0;
+}
+
+/*
+ * [2] get_init_arg() implementations
+ */
+/*
+ * Program the codec-independent encoder init registers from the user's
+ * init arguments: frame size, multi-slice control, cyclic intra refresh,
+ * current-frame memory map (linear/tiled), padding and the rate-control
+ * reaction coefficient. Codec-specific *_get_init_arg() helpers call this
+ * first and then add their own settings. Always returns 0.
+ */
+int get_init_arg(struct mfc_inst_ctx *ctx, void *arg)
+{
+ struct mfc_enc_init_arg *init_arg;
+ struct mfc_enc_ctx *enc_ctx;
+ unsigned int reg;
+
+ init_arg = (struct mfc_enc_init_arg *)arg;
+ enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+ enc_ctx->inputformat = init_arg->cmn.in_frame_map;
+
+ /* Check input stream mode NV12_LINEAR OR NV12_TILE */
+ if (init_arg->cmn.in_frame_map == NV12_TILE)
+ enc_ctx->framemap = 3; /* MFC_ENC_MAP_FOR_CUR 0: Linear mode 3: Tile mode */
+ else
+ enc_ctx->framemap = 0; /* Default is Linear mode */
+#if SUPPORT_SLICE_ENCODING
+ enc_ctx->outputmode = init_arg->cmn.in_output_mode ? 1 : 0;
+#endif
+
+ /* width */
+ write_reg(init_arg->cmn.in_width, MFC_ENC_HSIZE_PX);
+ /* height */
+ write_reg(init_arg->cmn.in_height, MFC_ENC_VSIZE_PX);
+
+ /* FIXME: MFC_B_RECON_*_ADR */
+ write_reg(0, MFC_ENC_B_RECON_WRITE_ON);
+
+ /* multi-slice control 0 / 1 / 3 */
+ /* multi-slice MB number or multi-slice bit size */
+ if (init_arg->cmn.in_ms_mode == 1) {
+ /* mode 1: slice boundary by macroblock count */
+ write_reg((0 << 1) | 0x1, MFC_ENC_MSLICE_CTRL);
+ write_reg(init_arg->cmn.in_ms_arg & 0xFFFF, MFC_ENC_MSLICE_MB);
+ } else if (init_arg->cmn.in_ms_mode == 2) {
+ /* mode 2: slice boundary by bit count, clamped to a 1900-bit minimum */
+ write_reg((1 << 1) | 0x1, MFC_ENC_MSLICE_CTRL);
+ if (init_arg->cmn.in_ms_arg < 1900)
+ init_arg->cmn.in_ms_arg = 1900;
+ write_reg(init_arg->cmn.in_ms_arg, MFC_ENC_MSLICE_BIT);
+ } else {
+ /* multi-slice disabled */
+ write_reg(0, MFC_ENC_MSLICE_CTRL);
+ write_reg(0, MFC_ENC_MSLICE_MB);
+ write_reg(0, MFC_ENC_MSLICE_BIT);
+ }
+#if SUPPORT_SLICE_ENCODING
+ /* slice interface */
+ write_reg((enc_ctx->outputmode) << 31, MFC_ENC_SI_CH1_INPUT_FLUSH);
+#endif
+
+ /* cyclic intra refresh */
+ write_reg(init_arg->cmn.in_mb_refresh & 0xFFFF, MFC_ENC_CIR_CTRL);
+ /* memory structure of the current frame - 0 -> Linear or 3 -> Tile mode */
+ write_reg(enc_ctx->framemap, MFC_ENC_MAP_FOR_CUR);
+
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+ if (init_arg->cmn.in_frame_map == NV21_LINEAR)
+ write_reg(1, MFC_ENC_NV21_SEL);
+ else if (init_arg->cmn.in_frame_map == NV12_LINEAR)
+ write_reg(0, MFC_ENC_NV21_SEL);
+#endif
+
+ /* padding control & value */
+ reg = read_reg(MFC_ENC_PADDING_CTRL);
+ if (init_arg->cmn.in_pad_ctrl_on > 0) {
+ /** enable */
+ /* NOTE(review): (1 << 31) left-shifts into the sign bit of a signed
+ * int (technically UB); (1U << 31) would be strictly correct */
+ reg |= (1 << 31);
+ /** cr value */
+ reg &= ~(0xFF << 16);
+ reg |= ((init_arg->cmn.in_cr_pad_val & 0xFF) << 16);
+ /** cb value */
+ reg &= ~(0xFF << 8);
+ reg |= ((init_arg->cmn.in_cb_pad_val & 0xFF) << 8);
+ /** y value */
+ reg &= ~(0xFF << 0);
+ reg |= ((init_arg->cmn.in_y_pad_val & 0xFF) << 0);
+ } else {
+ /** disable & all value clear */
+ reg = 0;
+ }
+ write_reg(reg, MFC_ENC_PADDING_CTRL);
+
+ /* reaction coefficient */
+ if (init_arg->cmn.in_rc_fr_en > 0) {
+ if (init_arg->cmn.in_rc_rpara != 0)
+ write_reg(init_arg->cmn.in_rc_rpara & 0xFFFF, MFC_ENC_RC_RPARA);
+ } else {
+ write_reg(0, MFC_ENC_RC_RPARA);
+ }
+
+ /* FIXME: update shm parameters? */
+
+ return 0;
+}
+
+/*
+ * H.263-specific encoder init: apply the common settings via
+ * get_init_arg(), then program picture type control, frame-level rate
+ * control, frame/bit rate and the QP bounds (all clamped to 1..31 for
+ * H.263). When rate control is off, the fixed P-frame QP is written to
+ * shared memory. Always returns 0.
+ * (Fixed: stray double semicolon and redundant duplicate initialization
+ * of enc_ctx in the original.)
+ */
+int h263_get_init_arg(struct mfc_inst_ctx *ctx, void *arg)
+{
+ struct mfc_enc_init_arg *init_arg;
+ struct mfc_enc_init_h263_arg *init_h263_arg;
+ unsigned int reg;
+ unsigned int shm;
+ struct mfc_enc_ctx *enc_ctx;
+
+ get_init_arg(ctx, arg);
+
+ init_arg = (struct mfc_enc_init_arg *)arg;
+ init_h263_arg = &init_arg->codec.h263;
+
+ enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+ /* H.263 has no B frames: two DPB entries suffice */
+ enc_ctx->numdpb = 2;
+
+ /* pictype : number of B, IDR period */
+ reg = read_reg(MFC_ENC_PIC_TYPE_CTRL);
+ /** enable - 0 / 1*/
+ reg |= (1 << 18);
+ /** numbframe - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ /** idrperiod - 0 ~ */
+ reg &= ~(0xFFFF << 0);
+ reg |= ((init_arg->cmn.in_gop_num & 0xFFFF) << 0);
+ write_reg(reg, MFC_ENC_PIC_TYPE_CTRL);
+
+ /* rate control config. */
+ reg = read_reg(MFC_ENC_RC_CONFIG);
+ /** frame-level rate control */
+ reg &= ~(0x1 << 9);
+ reg |= ((init_arg->cmn.in_rc_fr_en & 0x1) << 9);
+ /** macroblock-level rate control */
+ reg &= ~(0x1 << 8);
+ /** frame QP, clamped to the H.263 range 1..31 */
+ if (init_arg->cmn.in_vop_quant < 1)
+ init_arg->cmn.in_vop_quant = 1;
+ else if (init_arg->cmn.in_vop_quant > 31)
+ init_arg->cmn.in_vop_quant = 31;
+ reg &= ~(0x3F << 0);
+ reg |= ((init_arg->cmn.in_vop_quant & 0x3F) << 0);
+ write_reg(reg, MFC_ENC_RC_CONFIG);
+
+ /* frame rate and bit rate */
+ if (init_arg->cmn.in_rc_fr_en > 0) {
+ if (init_h263_arg->in_rc_framerate != 0)
+ write_reg(init_h263_arg->in_rc_framerate * 1000,
+ MFC_ENC_RC_FRAME_RATE);
+
+ if (init_arg->cmn.in_rc_bitrate != 0)
+ write_reg(init_arg->cmn.in_rc_bitrate,
+ MFC_ENC_RC_BIT_RATE);
+ } else {
+ write_reg(0, MFC_ENC_RC_FRAME_RATE);
+ write_reg(0, MFC_ENC_RC_BIT_RATE);
+ }
+
+ /* max & min value of QP */
+ reg = read_reg(MFC_ENC_RC_QBOUND);
+ /** max QP */
+ if (init_arg->cmn.in_rc_qbound_max < 1)
+ init_arg->cmn.in_rc_qbound_max = 1;
+ else if (init_arg->cmn.in_rc_qbound_max > 31)
+ init_arg->cmn.in_rc_qbound_max = 31;
+ reg &= ~(0x3F << 8);
+ reg |= ((init_arg->cmn.in_rc_qbound_max & 0x3F) << 8);
+ /** min QP, clamped and kept <= max QP */
+ if (init_arg->cmn.in_rc_qbound_min < 1)
+ init_arg->cmn.in_rc_qbound_min = 1;
+ else if (init_arg->cmn.in_rc_qbound_min > 31)
+ init_arg->cmn.in_rc_qbound_min = 31;
+ if (init_arg->cmn.in_rc_qbound_min > init_arg->cmn.in_rc_qbound_max)
+ init_arg->cmn.in_rc_qbound_min = init_arg->cmn.in_rc_qbound_max;
+ reg &= ~(0x3F << 0);
+ reg |= ((init_arg->cmn.in_rc_qbound_min & 0x3F) << 0);
+ write_reg(reg, MFC_ENC_RC_QBOUND);
+
+ /* with rate control off, use a fixed P-frame QP from shared memory */
+ if (init_arg->cmn.in_rc_fr_en == 0) {
+ shm = read_shm(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF << 0);
+ shm |= ((init_arg->cmn.in_vop_quant_p & 0x3F) << 0);
+ write_shm(ctx, shm, P_B_FRAME_QP);
+ }
+
+ return 0;
+}
+
+/*
+ * MPEG-4-specific encoder init: apply the common settings via
+ * get_init_arg(), then program profile/level, picture type control
+ * (including B-frame count), rate control, frame/bit rate, QP bounds
+ * (clamped to 1..31) and quarter-pel. When rate control is off, fixed
+ * P/B-frame QPs are written to shared memory. Always returns 0.
+ */
+int mpeg4_get_init_arg(struct mfc_inst_ctx *ctx, void *arg)
+{
+ struct mfc_enc_init_arg *init_arg;
+ struct mfc_enc_init_mpeg4_arg *init_mpeg4_arg;
+ unsigned int reg;
+ unsigned int shm;
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+
+ get_init_arg(ctx, arg);
+
+ init_arg = (struct mfc_enc_init_arg *)arg;
+ init_mpeg4_arg = &init_arg->codec.mpeg4;
+
+ /* B frames need two extra DPB entries */
+ if (init_mpeg4_arg->in_bframenum > 0)
+ enc_ctx->numdpb = 4;
+ else
+ enc_ctx->numdpb = 2;
+
+ /* profile & level */
+ reg = read_reg(MFC_ENC_PROFILE);
+ /** level */
+ reg &= ~(0xFF << 8);
+ reg |= ((init_mpeg4_arg->in_level & 0xFF) << 8);
+ /** profile - 0 ~ 2 */
+ reg &= ~(0x3 << 0);
+ reg |= ((init_mpeg4_arg->in_profile & 0x3) << 0);
+ write_reg(reg, MFC_ENC_PROFILE);
+
+ /* pictype : number of B, IDR period */
+ reg = read_reg(MFC_ENC_PIC_TYPE_CTRL);
+ /** enable - 0 / 1*/
+ reg |= (1 << 18);
+ /** numbframe - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ reg |= ((init_mpeg4_arg->in_bframenum & 0x3) << 16);
+ /** idrperiod - 0 ~ */
+ reg &= ~(0xFFFF << 0);
+ reg |= ((init_arg->cmn.in_gop_num & 0xFFFF) << 0);
+ write_reg(reg, MFC_ENC_PIC_TYPE_CTRL);
+
+ /* rate control config. */
+ reg = read_reg(MFC_ENC_RC_CONFIG);
+ /** frame-level rate control */
+ reg &= ~(0x1 << 9);
+ reg |= ((init_arg->cmn.in_rc_fr_en & 0x1) << 9);
+ /** macroblock-level rate control */
+ reg &= ~(0x1 << 8);
+ /** frame QP, clamped to 1..31 */
+ if (init_arg->cmn.in_vop_quant < 1)
+ init_arg->cmn.in_vop_quant = 1;
+ else if (init_arg->cmn.in_vop_quant > 31)
+ init_arg->cmn.in_vop_quant = 31;
+ reg &= ~(0x3F << 0);
+ reg |= ((init_arg->cmn.in_vop_quant & 0x3F) << 0);
+ write_reg(reg, MFC_ENC_RC_CONFIG);
+
+ /* frame rate (derived from VOP time resolution/increment) and bit rate */
+ if (init_arg->cmn.in_rc_fr_en > 0) {
+ if (init_mpeg4_arg->in_VopTimeIncreament > 0)
+ write_reg((init_mpeg4_arg->in_TimeIncreamentRes /
+ init_mpeg4_arg->in_VopTimeIncreament) * 1000,
+ MFC_ENC_RC_FRAME_RATE);
+
+ if (init_arg->cmn.in_rc_bitrate != 0)
+ write_reg(init_arg->cmn.in_rc_bitrate,
+ MFC_ENC_RC_BIT_RATE);
+ } else {
+ write_reg(0, MFC_ENC_RC_FRAME_RATE);
+ write_reg(0, MFC_ENC_RC_BIT_RATE);
+ }
+
+ /* max & min value of QP */
+ reg = read_reg(MFC_ENC_RC_QBOUND);
+ /** max QP */
+ if (init_arg->cmn.in_rc_qbound_max < 1)
+ init_arg->cmn.in_rc_qbound_max = 1;
+ else if (init_arg->cmn.in_rc_qbound_max > 31)
+ init_arg->cmn.in_rc_qbound_max = 31;
+ reg &= ~(0x3F << 8);
+ reg |= ((init_arg->cmn.in_rc_qbound_max & 0x3F) << 8);
+ /** min QP, clamped and kept <= max QP */
+ if (init_arg->cmn.in_rc_qbound_min < 1)
+ init_arg->cmn.in_rc_qbound_min = 1;
+ else if (init_arg->cmn.in_rc_qbound_min > 31)
+ init_arg->cmn.in_rc_qbound_min = 31;
+ if (init_arg->cmn.in_rc_qbound_min > init_arg->cmn.in_rc_qbound_max)
+ init_arg->cmn.in_rc_qbound_min = init_arg->cmn.in_rc_qbound_max;
+ reg &= ~(0x3F << 0);
+ reg |= ((init_arg->cmn.in_rc_qbound_min & 0x3F) << 0);
+ write_reg(reg, MFC_ENC_RC_QBOUND);
+
+ write_reg(init_mpeg4_arg->in_quart_pixel, MFC_ENC_MPEG4_QUART_PXL);
+
+ /* with rate control off, use fixed P/B-frame QPs from shared memory */
+ if (init_arg->cmn.in_rc_fr_en == 0) {
+ shm = read_shm(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF << 0);
+ shm |= ((init_mpeg4_arg->in_vop_quant_b & 0x3F) << 6);
+ shm |= ((init_arg->cmn.in_vop_quant_p & 0x3F) << 0);
+ write_shm(ctx, shm, P_B_FRAME_QP);
+ }
+
+ return 0;
+}
+
+/*
+ * H.264-specific encoder init: apply the common settings via
+ * get_init_arg(), then program interlace (halving the programmed height
+ * for field coding), profile/level, loop-filter offsets, picture type
+ * control, frame- and macroblock-level rate control, QP bounds (clamped
+ * to 1..51 for H.264), adaptive-QP region controls, entropy coding mode
+ * (CAVLC/CABAC), reference count and the 8x8 transform flag. When rate
+ * control is fully off, fixed P/B-frame QPs go to shared memory.
+ * Always returns 0.
+ */
+int h264_get_init_arg(struct mfc_inst_ctx *ctx, void *arg)
+{
+ struct mfc_enc_init_arg *init_arg;
+ struct mfc_enc_init_h264_arg *init_h264_arg;
+ unsigned int reg;
+ unsigned int shm;
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+
+ get_init_arg(ctx, arg);
+
+ init_arg = (struct mfc_enc_init_arg *)arg;
+ init_h264_arg = &init_arg->codec.h264;
+
+ /* B frames or >1 reference frame require four DPB entries */
+ if ((init_h264_arg->in_bframenum > 0) || (init_h264_arg->in_ref_num_p > 1))
+ enc_ctx->numdpb = 4;
+ else
+ enc_ctx->numdpb = 2;
+
+ /* height: field coding uses half the frame height */
+ if (init_h264_arg->in_interlace_mode)
+ write_reg(init_arg->cmn.in_height >> 1, MFC_ENC_VSIZE_PX);
+ else
+ write_reg(init_arg->cmn.in_height, MFC_ENC_VSIZE_PX);
+
+ /* profile & level */
+ reg = read_reg(MFC_ENC_PROFILE);
+ /** level */
+ reg &= ~(0xFF << 8);
+ reg |= ((init_h264_arg->in_level & 0xFF) << 8);
+ /** profile - 0 ~ 2 */
+ reg &= ~(0x3 << 0);
+ reg |= ((init_h264_arg->in_profile & 0x3) << 0);
+ /* set constraint_set0_flag */
+ reg |= (1 << 3);
+ write_reg(reg, MFC_ENC_PROFILE);
+
+ /* interface - 0 / 1 */
+ write_reg(init_h264_arg->in_interlace_mode & 0x1, MFC_ENC_PIC_STRUCT);
+
+ /* loopfilter disable - 0 ~ 2 */
+ write_reg((init_h264_arg->in_deblock_dis & 0x3), MFC_ENC_LF_CTRL);
+
+ /* loopfilter alpha & C0 offset - -6 ~ 6 */
+ write_reg((init_h264_arg->in_deblock_alpha_c0 & 0x1F) * 2, MFC_ENC_ALPHA_OFF);
+
+ /* loopfilter beta offset - -6 ~ 6 */
+ write_reg((init_h264_arg->in_deblock_beta & 0x1F) * 2, MFC_ENC_BETA_OFF);
+
+ /* pictype : number of B, IDR period */
+ reg = read_reg(MFC_ENC_PIC_TYPE_CTRL);
+ /** enable - 0 / 1*/
+ reg |= (1 << 18);
+ /** numbframe - 0 ~ 2 */
+ reg &= ~(0x3 << 16);
+ reg |= ((init_h264_arg->in_bframenum & 0x3) << 16);
+ /** idrperiod - 0 ~ */
+ reg &= ~(0xFFFF << 0);
+ reg |= ((init_arg->cmn.in_gop_num & 0xFFFF) << 0);
+ write_reg(reg, MFC_ENC_PIC_TYPE_CTRL);
+
+ /* rate control config. */
+ reg = read_reg(MFC_ENC_RC_CONFIG);
+ /** frame-level rate control */
+ reg &= ~(0x1 << 9);
+ reg |= ((init_arg->cmn.in_rc_fr_en & 0x1) << 9);
+ /** macroblock-level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= ((init_h264_arg->in_rc_mb_en & 0x1) << 8);
+ /** frame QP, clamped to the H.264 range 1..51 */
+ if (init_arg->cmn.in_vop_quant < 1)
+ init_arg->cmn.in_vop_quant = 1;
+ else if (init_arg->cmn.in_vop_quant > 51)
+ init_arg->cmn.in_vop_quant = 51;
+ reg &= ~(0x3F << 0);
+ reg |= ((init_arg->cmn.in_vop_quant & 0x3F) << 0);
+ write_reg(reg, MFC_ENC_RC_CONFIG);
+
+ /* frame rate and bit rate */
+ if (init_arg->cmn.in_rc_fr_en > 0) {
+ if (init_h264_arg->in_rc_framerate != 0)
+ write_reg(init_h264_arg->in_rc_framerate * 1000,
+ MFC_ENC_RC_FRAME_RATE);
+
+ if (init_arg->cmn.in_rc_bitrate != 0)
+ write_reg(init_arg->cmn.in_rc_bitrate,
+ MFC_ENC_RC_BIT_RATE);
+ } else {
+ write_reg(0, MFC_ENC_RC_FRAME_RATE);
+ write_reg(0, MFC_ENC_RC_BIT_RATE);
+ }
+
+ /* max & min value of QP */
+ reg = read_reg(MFC_ENC_RC_QBOUND);
+ /** max QP */
+ if (init_arg->cmn.in_rc_qbound_max < 1)
+ init_arg->cmn.in_rc_qbound_max = 1;
+ else if (init_arg->cmn.in_rc_qbound_max > 51)
+ init_arg->cmn.in_rc_qbound_max = 51;
+ reg &= ~(0x3F << 8);
+ reg |= ((init_arg->cmn.in_rc_qbound_max & 0x3F) << 8);
+ /** min QP, clamped and kept <= max QP */
+ if (init_arg->cmn.in_rc_qbound_min < 1)
+ init_arg->cmn.in_rc_qbound_min = 1;
+ else if (init_arg->cmn.in_rc_qbound_min > 51)
+ init_arg->cmn.in_rc_qbound_min = 51;
+ if (init_arg->cmn.in_rc_qbound_min > init_arg->cmn.in_rc_qbound_max)
+ init_arg->cmn.in_rc_qbound_min = init_arg->cmn.in_rc_qbound_max;
+ reg &= ~(0x3F << 0);
+ reg |= ((init_arg->cmn.in_rc_qbound_min & 0x3F) << 0);
+ write_reg(reg, MFC_ENC_RC_QBOUND);
+
+ /* macroblock adaptive scaling features */
+ if (init_h264_arg->in_rc_mb_en > 0) {
+ reg = read_reg(MFC_ENC_RC_MB_CTRL);
+ /** dark region */
+ reg &= ~(0x1 << 3);
+ reg |= ((init_h264_arg->in_rc_mb_dark_dis & 0x1) << 3);
+ /** smooth region */
+ reg &= ~(0x1 << 2);
+ reg |= ((init_h264_arg->in_rc_mb_smooth_dis & 0x1) << 2);
+ /** static region */
+ reg &= ~(0x1 << 1);
+ reg |= ((init_h264_arg->in_rc_mb_static_dis & 0x1) << 1);
+ /** high activity region */
+ reg &= ~(0x1 << 0);
+ reg |= ((init_h264_arg->in_rc_mb_activity_dis & 0x1) << 0);
+ write_reg(reg, MFC_ENC_RC_MB_CTRL);
+ }
+
+ /* entropy coding mode 0: CAVLC, 1: CABAC */
+ write_reg(init_h264_arg->in_symbolmode & 0x1, MFC_ENC_H264_ENTRP_MODE);
+
+ /* number of ref. picture */
+ reg = read_reg(MFC_ENC_H264_NUM_OF_REF);
+ /** num of ref. pictures of P */
+ reg &= ~(0x3 << 5);
+ reg |= ((init_h264_arg->in_ref_num_p & 0x3) << 5);
+ write_reg(reg, MFC_ENC_H264_NUM_OF_REF);
+
+ /* 8x8 transform enable */
+ write_reg(init_h264_arg->in_transform8x8_mode & 0x1, MFC_ENC_H264_TRANS_FLAG);
+
+ /* with both rate controls off, use fixed P/B-frame QPs from shared memory */
+ if ((init_arg->cmn.in_rc_fr_en == 0) && (init_h264_arg->in_rc_mb_en == 0)) {
+ shm = read_shm(ctx, P_B_FRAME_QP);
+ shm &= ~(0xFFF << 0);
+ shm |= ((init_h264_arg->in_vop_quant_b & 0x3F) << 6);
+ shm |= ((init_arg->cmn.in_vop_quant_p & 0x3F) << 0);
+ write_shm(ctx, shm, P_B_FRAME_QP);
+ }
+
+ return 0;
+}
+
+/*
+ * [3] pre_seq_start() implementations
+ */
+/*
+ * Common pre-sequence-header setup: program the stream buffer address
+ * (as a 2KB-granular device offset) and size for channel 1.
+ * Always returns 0.
+ */
+static int pre_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+
+ /* Set stream buffer addr */
+ write_reg(mfc_mem_base_ofs(enc_ctx->streamaddr) >> 11, MFC_ENC_SI_CH1_SB_ADR);
+ write_reg(enc_ctx->streamsize, MFC_ENC_SI_CH1_SB_SIZE);
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+ /* NOTE(review): "HW_VERSRION" looks like a misspelled shm offset name;
+ * it is declared elsewhere, so it cannot be renamed here alone */
+ write_shm(ctx, 1, HW_VERSRION);
+#endif
+
+ return 0;
+}
+
+/*
+ * H.264-specific pre-sequence-header setup: run the common stream-buffer
+ * programming, then write the SEI-generation enable flag and, when a
+ * frame-packing change is pending, the arrangement info to shared memory.
+ * Always returns 0.
+ * (A large #if 0 block of legacy firmware-workaround code and a
+ * commented-out declaration were removed; see VCS history if needed.)
+ */
+static int h264_pre_seq_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+ struct mfc_enc_h264 *h264 = (struct mfc_enc_h264 *)enc_ctx->e_priv;
+ unsigned int shm;
+
+ pre_seq_start(ctx);
+
+ write_shm(ctx, h264->sei_gen << 1, SEI_ENABLE);
+
+ if (h264->change & CHG_FRAME_PACKING) {
+ /* change type value to meet standard */
+ shm = (h264->fp.arrangement_type - 3) & 0x3;
+ /* only valid when type is temporal interleaving (5) */
+ shm |= ((h264->fp.current_frame_is_frame0_flag & 0x1) << 2);
+ write_shm(ctx, shm, FRAME_PACK_ENC_INFO);
+
+ h264->change &= ~(CHG_FRAME_PACKING);
+ }
+
+ return 0;
+}
+
+/*
+ * [4] post_seq_start() implementations
+ */
+/*
+ * Hook called after the sequence header has been generated. No work is
+ * required for the common path; kept as a hook for codec ops tables.
+ * Always returns 0.
+ * (Commented-out header-dump debug scaffolding was removed; see VCS
+ * history if needed.)
+ */
+static int post_seq_start(struct mfc_inst_ctx *ctx)
+{
+ return 0;
+}
+
+/*
+ * [5] set_init_arg() implementations
+ */
+/*
+ * Fill the user-visible init results: the generated header size and the
+ * stream buffer handle, in whichever form the configured allocator uses
+ * (UMP secure id, S5P_VMEM cookie, or plain device/user offsets).
+ * Always returns 0.
+ */
+static int set_init_arg(struct mfc_inst_ctx *ctx, void *arg)
+{
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+ struct mfc_enc_init_arg *init_arg = (struct mfc_enc_init_arg *)arg;
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+ void *ump_handle;
+#endif
+
+ init_arg->cmn.out_header_size = read_reg(MFC_ENC_SI_STRM_SIZE);
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ /* hand the stream buffer to userspace as a UMP secure id */
+ init_arg->cmn.out_u_addr.strm_ref_y = 0;
+ ump_handle = mfc_get_buf_ump_handle(enc_ctx->streamaddr);
+
+ mfc_dbg("secure id: 0x%08x", mfc_ump_get_id(ump_handle));
+
+ if (ump_handle != NULL)
+ init_arg->cmn.out_u_addr.strm_ref_y = mfc_ump_get_id(ump_handle);
+ init_arg->cmn.out_u_addr.mv_ref_yc = 0;
+
+#elif defined(CONFIG_S5P_VMEM)
+ /* hand the stream buffer to userspace as an s5p_vmem cookie */
+ mfc_dbg("cookie: 0x%08x", s5p_getcookie((void *)(enc_ctx->streamaddr)));
+
+ init_arg->cmn.out_u_addr.strm_ref_y = s5p_getcookie((void *)(enc_ctx->streamaddr));
+ init_arg->cmn.out_u_addr.mv_ref_yc = 0;
+#else
+ /* plain offsets: user-mapping offset plus the device address */
+ init_arg->cmn.out_u_addr.strm_ref_y = mfc_mem_data_ofs(enc_ctx->streamaddr, 1);
+ init_arg->cmn.out_u_addr.mv_ref_yc = 0;
+ init_arg->cmn.out_p_addr.strm_ref_y = enc_ctx->streamaddr;
+ init_arg->cmn.out_p_addr.mv_ref_yc = 0;
+#endif
+
+ /*
+ init_arg->cmn.out_buf_size.strm_ref_y = 0;
+ init_arg->cmn.out_buf_size.mv_ref_yc = 0;
+
+ init_arg->cmn.out_p_addr.strm_ref_y = 0;
+ init_arg->cmn.out_p_addr.mv_ref_yc = 0;
+ */
+
+ return 0;
+}
+
+/*
+ * [6] set_codec_bufs() implementations
+ */
+/* Default no-op: codecs without extra working buffers use this hook. */
+static int set_codec_bufs(struct mfc_inst_ctx *ctx)
+{
+ return 0;
+}
+
+/*
+ * Allocate the five H.264 encoder working buffers (up-MV, colocated-zero
+ * flags, up intra MD, up intra prediction, neighbor info) and program
+ * their 2KB-granular device offsets into the corresponding registers.
+ * Returns 0 on success, -1 on any allocation failure (buffers already
+ * allocated are reclaimed with the instance context, not here).
+ */
+static int h264_set_codec_bufs(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_alloc_buffer *alloc;
+
+ alloc = _mfc_alloc_buf(ctx, MFC_ENC_UPMV_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
+ if (alloc == NULL) {
+ mfc_err("failed alloc codec buffer\n");
+
+ return -1;
+ }
+ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_UP_MV_ADR);
+
+ alloc = _mfc_alloc_buf(ctx, MFC_ENC_COLFLG_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
+ if (alloc == NULL) {
+ mfc_err("failed alloc codec buffer\n");
+
+ return -1;
+ }
+ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_COLZERO_FLAG_ADR);
+
+ alloc = _mfc_alloc_buf(ctx, MFC_ENC_INTRAMD_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
+ if (alloc == NULL) {
+ mfc_err("failed alloc codec buffer\n");
+
+ return -1;
+ }
+ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_UP_INTRA_MD_ADR);
+
+ /* intra-prediction buffer lives on the second memory port */
+ alloc = _mfc_alloc_buf(ctx, MFC_ENC_INTRAPRED_SIZE, ALIGN_2KB, MBT_CODEC | PORT_B);
+ if (alloc == NULL) {
+ mfc_err("failed alloc codec buffer\n");
+
+ return -1;
+ }
+ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_UP_INTRA_PRED_ADR);
+
+ alloc = _mfc_alloc_buf(ctx, MFC_ENC_NBORINFO_SIZE, ALIGN_2KB, MBT_CODEC | PORT_A);
+ if (alloc == NULL) {
+ mfc_err("failed alloc codec buffer\n");
+
+ return -1;
+ }
+ write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_NBOR_INFO_ADR);
+
+ return 0;
+}
+
+/*
+ * [7] set_dpbs() implementations
+ */
+/* set_dpbs() is not used by the encoder; kept disabled for reference */
+#if 0
+static int set_dpbs(struct mfc_inst_ctx *ctx)
+{
+ return 0;
+}
+#endif
+
+/*
+ * [8] pre_frame_start() implementations
+ */
+/*
+ * Apply runtime parameter changes queued by set_codec_cfg() just before
+ * encoding a frame: forced frame type, frame rate (register + shared
+ * memory + change-notify bit) and bit rate. Flags are cleared afterwards
+ * in post_frame_start(). Always returns 0.
+ */
+static int pre_frame_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+
+ if (enc_ctx->setflag == 1) {
+ if (enc_ctx->FrameTypeCngTag == 1) {
+ mfc_dbg("Encoding Param Setting - Frame Type : %d\n", enc_ctx->forceframe);
+
+ /* force the next frame's type via the frame-insert register */
+ write_reg(enc_ctx->forceframe, MFC_ENC_SI_CH1_FRAME_INS);
+ }
+
+ if (enc_ctx->FrameRateCngTag == 1) {
+ mfc_dbg("Encoding Param Setting - Frame rate : %d\n", enc_ctx->framerate);
+
+ /* rate is programmed in milli-fps; bit 1 of ENC_PARAM_CHANGE
+ * tells the firmware a frame-rate change is pending */
+ write_shm(ctx, 1000 * enc_ctx->framerate, NEW_RC_FRAME_RATE);
+ write_shm(ctx, ((1 << 31)|(enc_ctx->framerate << 16)|(1 & 0xFFFF)), VOP_TIMING);
+ write_reg(1000 * enc_ctx->framerate, MFC_ENC_RC_FRAME_RATE);
+ write_shm(ctx, (0x1 << 1), ENC_PARAM_CHANGE);
+ }
+
+ if (enc_ctx->BitRateCngTag == 1) {
+ mfc_dbg("Encoding Param Setting - Bit rate : %d\n", enc_ctx->bitrate);
+
+ /* bit 2 of ENC_PARAM_CHANGE flags a bit-rate change */
+ write_shm(ctx, enc_ctx->bitrate, NEW_RC_BIT_RATE);
+ write_reg(enc_ctx->bitrate, MFC_ENC_RC_BIT_RATE);
+ write_shm(ctx, (0x1 << 2), ENC_PARAM_CHANGE);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * H.264 per-frame setup: run the common parameter-change handling, then
+ * apply a pending frame-packing SEI change (same encoding as in
+ * h264_pre_seq_start, for changes arriving after the header was made).
+ * Always returns 0.
+ */
+static int h264_pre_frame_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+ struct mfc_enc_h264 *h264 = (struct mfc_enc_h264 *)enc_ctx->e_priv;
+ unsigned int shm;
+
+ pre_frame_start(ctx);
+
+ if (h264->change & CHG_FRAME_PACKING) {
+ /* change type value to meet standard */
+ shm = (h264->fp.arrangement_type - 3) & 0x3;
+ /* only valid when type is temporal interleaving (5) */
+ shm |= ((h264->fp.current_frame_is_frame0_flag & 0x1) << 2);
+ write_shm(ctx, shm, FRAME_PACK_ENC_INFO);
+
+ h264->change &= ~(CHG_FRAME_PACKING);
+ }
+
+ return 0;
+}
+/*
+ * [9] post_frame_start() implementations
+ */
+/*
+ * After a frame has been encoded, clear the one-shot parameter-change
+ * flags and reset the change-notify shm word and frame-insert register
+ * so the changes do not re-apply on the next frame. Always returns 0.
+ */
+static int post_frame_start(struct mfc_inst_ctx *ctx)
+{
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+
+ if (enc_ctx->setflag == 1) {
+ enc_ctx->setflag = 0;
+
+ enc_ctx->FrameTypeCngTag = 0;
+ enc_ctx->FrameRateCngTag = 0;
+ enc_ctx->BitRateCngTag = 0;
+
+ write_shm(ctx, 0, ENC_PARAM_CHANGE); /*RC_BIT_RATE_CHANGE = 4*/
+ write_reg(0, MFC_ENC_SI_CH1_FRAME_INS);
+ }
+
+ return 0;
+}
+
+/*
+ * [10] multi_frame_start() implementations
+ */
+/* Default no-op hook for multi-data-frame handling; encoder unused. */
+static int multi_data_frame(struct mfc_inst_ctx *ctx)
+{
+ return 0;
+}
+
+/*
+ * [11] set_exe_arg() implementations
+ */
+/* Default no-op: nothing to copy back after execution for this path. */
+static int set_exe_arg(struct mfc_inst_ctx *ctx, void *arg)
+{
+ return 0;
+}
+
+/*
+ * [12] get_codec_cfg() implementations
+ */
+/*
+ * Handle codec-independent GET-config requests. No common types are
+ * implemented yet, so every request falls through to the default case.
+ * Returns 0 when handled here, 1 to let the codec-specific handler try.
+ */
+static int get_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
+{
+ /*struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;*/
+ int ret = 0;
+
+ mfc_dbg("type: 0x%08x", type);
+
+ /*
+ MFC_ENC_GETCONF_FRAME_TAG = ENC_GET,
+ ...
+ */
+
+ switch (type) {
+
+ default:
+ mfc_dbg("not common cfg, try to codec specific: 0x%08x\n", type);
+ ret = 1;
+
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * [13] set_codec_cfg() implementations
+ */
+/*
+ * Handle codec-independent SET-config requests: validate the instance
+ * state for the given config type, stash the new value in the encoder
+ * context and raise the matching change tag so pre_frame_start() applies
+ * it on the next frame.
+ * Returns 0 when handled, 1 to defer to the codec-specific handler, or
+ * MFC_STATE_INVALID when the instance is in the wrong state.
+ * (Fixed: "biger" typos and copy-pasted wrong labels in two messages.)
+ */
+static int set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
+{
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+ union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg;
+ int ret = 0;
+
+ mfc_dbg("type: 0x%08x", type);
+ /*
+ MFC_ENC_SETCONF_FRAME_TYPE = ENC_SET,
+ MFC_ENC_SETCONF_CHANGE_FRAME_RATE,
+ MFC_ENC_SETCONF_CHANGE_BIT_RATE,
+ MFC_ENC_SETCONF_FRAME_TAG,
+ MFC_ENC_SETCONF_ALLOW_FRAME_SKIP,
+ MFC_ENC_SETCONF_VUI_INFO,
+ MFC_ENC_SETCONF_I_PERIOD,
+ MFC_ENC_SETCONF_HIER_P,
+ ...
+ */
+
+ switch (type) {
+ case MFC_ENC_SETCONF_FRAME_TYPE:
+ mfc_dbg("MFC_ENC_SETCONF_FRAME_TYPE : %d\n", ctx->state);
+
+ if (ctx->state < INST_STATE_INIT) {
+ mfc_err("MFC_ENC_SETCONF_FRAME_TYPE : state is invalid\n");
+ return MFC_STATE_INVALID;
+ }
+
+ if ((usercfg->basic.values[0] >= DONT_CARE) && (usercfg->basic.values[0] <= NOT_CODED)) {
+ mfc_dbg("Frame Type : %d\n", usercfg->basic.values[0]);
+ enc_ctx->forceframe = usercfg->basic.values[0];
+ enc_ctx->FrameTypeCngTag = 1;
+ enc_ctx->setflag = 1;
+ } else {
+ mfc_warn("FRAME_TYPE should be between 0 and 2\n");
+ }
+
+ break;
+
+ case MFC_ENC_SETCONF_CHANGE_FRAME_RATE:
+ mfc_dbg("MFC_ENC_SETCONF_CHANGE_FRAME_RATE : %d\n", ctx->state);
+
+ if (ctx->state < INST_STATE_INIT) {
+ mfc_err("MFC_ENC_SETCONF_CHANGE_FRAME_RATE : state is invalid\n");
+ return MFC_STATE_INVALID;
+ }
+
+ if (usercfg->basic.values[0] > 0) {
+ mfc_dbg("Frame rate : %d\n", usercfg->basic.values[0]);
+ enc_ctx->framerate = usercfg->basic.values[0];
+ enc_ctx->FrameRateCngTag = 1;
+ enc_ctx->setflag = 1;
+ } else {
+ mfc_warn("MFCSetConfig, FRAME_RATE should be bigger than 0\n");
+ }
+
+ break;
+
+ case MFC_ENC_SETCONF_CHANGE_BIT_RATE:
+ mfc_dbg("MFC_ENC_SETCONF_CHANGE_BIT_RATE : %d\n", ctx->state);
+
+ if (ctx->state < INST_STATE_INIT) {
+ mfc_err("MFC_ENC_SETCONF_CHANGE_BIT_RATE : state is invalid\n");
+ return MFC_STATE_INVALID;
+ }
+
+ if (usercfg->basic.values[0] > 0) {
+ mfc_dbg("Bit rate : %d\n", usercfg->basic.values[0]);
+ enc_ctx->bitrate = usercfg->basic.values[0];
+ enc_ctx->BitRateCngTag = 1;
+ enc_ctx->setflag = 1;
+ } else {
+ mfc_warn("MFCSetConfig, BIT_RATE should be bigger than 0\n");
+ }
+
+ break;
+
+ case MFC_ENC_SETCONF_ALLOW_FRAME_SKIP:
+ mfc_dbg("MFC_ENC_SETCONF_ALLOW_FRAME_SKIP : %d\n", ctx->state);
+
+ if ((ctx->state < INST_STATE_CREATE) || (ctx->state > INST_STATE_EXE)) {
+ mfc_err("MFC_ENC_SETCONF_ALLOW_FRAME_SKIP : state is invalid\n");
+ return MFC_STATE_INVALID;
+ }
+
+ if (usercfg->basic.values[0] > 0) {
+ mfc_dbg("Allow_frame_skip enable : %d\n", usercfg->basic.values[0]);
+ enc_ctx->frame_skip_enable = usercfg->basic.values[0];
+ /* mode 2 carries an explicit skip limit in values[1] */
+ if (enc_ctx->frame_skip_enable == 2)
+ enc_ctx->frameskip = usercfg->basic.values[1];
+ enc_ctx->FrameSkipCngTag = 1;
+ enc_ctx->setflag = 1;
+ }
+
+ break;
+
+ case MFC_ENC_SETCONF_VUI_INFO:
+ mfc_dbg("MFC_ENC_SETCONF_VUI_INFO : %d\n", ctx->state);
+
+ if ((ctx->state < INST_STATE_CREATE) || (ctx->state > INST_STATE_EXE)) {
+ mfc_err("MFC_ENC_SETCONF_VUI_INFO_SET : state is invalid\n");
+ return MFC_STATE_INVALID;
+ }
+
+ if (usercfg->basic.values[0] > 0) {
+ mfc_dbg("VUI_info enable : %d\n", usercfg->basic.values[1]);
+ enc_ctx->vuiinfoval = usercfg->basic.values[0];
+ /* 255 = Extended_SAR: values[1] carries the explicit SAR */
+ if (enc_ctx->vuiinfoval == 255)
+ enc_ctx->vuiextendsar = usercfg->basic.values[1];
+ enc_ctx->vui_info_enable = 1;
+ enc_ctx->VUIInfoCngTag = 1;
+ enc_ctx->setflag = 1;
+ }
+
+ break;
+
+ case MFC_ENC_SETCONF_I_PERIOD:
+ mfc_dbg("MFC_ENC_SETCONF_I_PERIOD : %d\n", ctx->state);
+
+ if ((ctx->state < INST_STATE_CREATE) || (ctx->state > INST_STATE_EXE)) {
+ mfc_err("MFC_ENC_SETCONF_I_PERIOD_CHANGE : state is invalid\n");
+ return MFC_STATE_INVALID;
+ }
+
+ if (usercfg->basic.values[0]) {
+ mfc_dbg("I_PERIOD value : %d\n", usercfg->basic.values[0]);
+ enc_ctx->iperiodval = usercfg->basic.values[0];
+ enc_ctx->IPeriodCngTag = 1;
+ enc_ctx->setflag = 1;
+ }
+
+ break;
+
+ case MFC_ENC_SETCONF_HIER_P:
+ mfc_dbg("MFC_ENC_SETCONF_HIER_P : %d\n", ctx->state);
+
+ if ((ctx->state < INST_STATE_CREATE) || (ctx->state > INST_STATE_EXE)) {
+ mfc_err("MFC_ENC_SETCONF_HIER_P_SET : state is invalid\n");
+ return MFC_STATE_INVALID;
+ }
+
+ if (usercfg->basic.values[0]) {
+ mfc_dbg("HIER_P enable : %d\n", usercfg->basic.values[0]);
+ enc_ctx->hier_p_enable = usercfg->basic.values[0];
+ enc_ctx->HierPCngTag = 1;
+ enc_ctx->setflag = 1;
+ }
+
+ break;
+
+ default:
+ mfc_dbg("not common cfg, try to codec specific: 0x%08x\n", type);
+ ret = 1;
+
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * H.264-specific SET-config: first give the common handler a chance;
+ * when it defers (returns 1), handle SEI generation and frame-packing
+ * arrangement settings with their own instance-state constraints.
+ * Returns 0 on success, a negative/state error code on failure, or -2
+ * for an unknown type.
+ */
+static int h264_set_codec_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
+{
+ struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+ struct mfc_enc_h264 *h264 = (struct mfc_enc_h264 *)enc_ctx->e_priv;
+ union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg;
+ int ret;
+
+ mfc_dbg("type: 0x%08x", type);
+ mfc_dbg("ctx->state: 0x%08x", ctx->state);
+
+ /* common handler returns <= 0 when it fully handled (or rejected) it */
+ ret = set_codec_cfg(ctx, type, arg);
+ if (ret <= 0)
+ return ret;
+
+ ret = 0;
+
+ switch (type) {
+ case MFC_ENC_SETCONF_SEI_GEN:
+ mfc_dbg("ctx->state: 0x%08x", ctx->state);
+
+ /* SEI generation may only be chosen before init */
+ if (ctx->state >= INST_STATE_INIT) {
+ mfc_dbg("invalid instance state: 0x%08x\n", type);
+ return MFC_STATE_INVALID;
+ }
+
+ if (usercfg->basic.values[0] > 0)
+ h264->sei_gen = 1;
+ else
+ h264->sei_gen = 0;
+
+ break;
+
+ case MFC_ENC_SETCONF_FRAME_PACKING:
+ if (ctx->state >= INST_STATE_EXE) {
+ mfc_dbg("invalid instance state: 0x%08x\n", type);
+ return MFC_STATE_INVALID;
+ }
+
+ /* arrangement_type must be 3..5 per the frame-packing SEI spec */
+ if ((usercfg->basic.values[0] < 3) || (usercfg->basic.values[0] > 5)) {
+ mfc_err("invalid param: FRAME_PACKING: %d\n",
+ usercfg->basic.values[0]);
+ /* NOTE(review): returns a GET_CONF failure code from a set
+ * path - looks like a copy-paste; confirm against the enum */
+ return MFC_ENC_GET_CONF_FAIL;
+ }
+
+ h264->fp.arrangement_type = usercfg->basic.values[0] & 0x7F;
+ h264->fp.current_frame_is_frame0_flag = usercfg->basic.values[1] & 0x1;
+
+ h264->change |= CHG_FRAME_PACKING;
+
+ break;
+ default:
+ mfc_dbg("invalid set cfg type: 0x%08x\n", type);
+ ret = -2;
+
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Placeholder encoder descriptor for unrecognized codec types.
+ * codecid is -1, so mfc_set_encoder() rejects it before any op is invoked.
+ */
+static struct mfc_enc_info unknown_enc = {
+	.name = "UNKNOWN",
+	.codectype = UNKNOWN_TYPE,
+	.codecid = -1,
+	.e_priv_size = 0,
+	/*
+	 * The unknown codec operations will be not call,
+	 * unused default operations raise build warning.
+	 */
+	.c_ops = {
+		.alloc_ctx_buf = alloc_ctx_buf,
+		.alloc_desc_buf = NULL,
+		.get_init_arg = get_init_arg,
+		.pre_seq_start = pre_seq_start,
+		.post_seq_start = post_seq_start,
+		.set_init_arg = set_init_arg,
+		.set_codec_bufs = set_codec_bufs,
+		.set_dpbs = NULL,
+		.get_exe_arg = NULL,
+		.pre_frame_start = pre_frame_start,
+		.post_frame_start = post_frame_start,
+		.multi_data_frame = multi_data_frame,
+		.set_exe_arg = set_exe_arg,
+		.get_codec_cfg = get_codec_cfg,
+		.set_codec_cfg = set_codec_cfg,
+	},
+};
+
+/* H.264 encoder descriptor: uses H.264-specific init/seq/frame/cfg handlers. */
+static struct mfc_enc_info h264_enc = {
+	.name = "H264",
+	.codectype = H264_ENC,
+	.codecid = 16,
+	.e_priv_size = sizeof(struct mfc_enc_h264),
+	.c_ops = {
+		.alloc_ctx_buf = alloc_ctx_buf,
+		.alloc_desc_buf = NULL,
+		.get_init_arg = h264_get_init_arg,
+		.pre_seq_start = h264_pre_seq_start,
+		.post_seq_start = post_seq_start,
+		.set_init_arg = set_init_arg,
+		.set_codec_bufs = h264_set_codec_bufs,
+		.set_dpbs = NULL,
+		.get_exe_arg = NULL,
+		.pre_frame_start = h264_pre_frame_start,
+		.post_frame_start = post_frame_start,
+		.multi_data_frame = multi_data_frame,
+		.set_exe_arg = set_exe_arg,
+		.get_codec_cfg = get_codec_cfg,
+		.set_codec_cfg = h264_set_codec_cfg,
+	},
+};
+
+/*
+ * MPEG-4 encoder descriptor.
+ * NOTE(review): reuses h264_set_codec_bufs - presumably MPEG-4 shares the
+ * H.264 codec buffer layout on this firmware; confirm against the MFC spec.
+ */
+static struct mfc_enc_info mpeg4_enc = {
+	.name = "MPEG4",
+	.codectype = MPEG4_ENC,
+	.codecid = 17,
+	.e_priv_size = 0,
+	.c_ops = {
+		.alloc_ctx_buf = alloc_ctx_buf,
+		.alloc_desc_buf = NULL,
+		.get_init_arg = mpeg4_get_init_arg,
+		.pre_seq_start = pre_seq_start,
+		.post_seq_start = post_seq_start,
+		.set_init_arg = set_init_arg,
+		.set_codec_bufs = h264_set_codec_bufs,
+		.set_dpbs = NULL,
+		.get_exe_arg = NULL,
+		.pre_frame_start = pre_frame_start,
+		.post_frame_start = post_frame_start,
+		.multi_data_frame = multi_data_frame,
+		.set_exe_arg = set_exe_arg,
+		.get_codec_cfg = get_codec_cfg,
+		.set_codec_cfg = set_codec_cfg,
+	},
+};
+
+/*
+ * H.263 encoder descriptor.
+ * NOTE(review): also reuses h264_set_codec_bufs - same assumption as MPEG-4.
+ */
+static struct mfc_enc_info h263_enc = {
+	.name = "H263",
+	.codectype = H263_ENC,
+	.codecid = 18,
+	.e_priv_size = 0,
+	.c_ops = {
+		.alloc_ctx_buf = alloc_ctx_buf,
+		.alloc_desc_buf = NULL,
+		.get_init_arg = h263_get_init_arg,
+		.pre_seq_start = pre_seq_start,
+		.post_seq_start = post_seq_start,
+		.set_init_arg = set_init_arg,
+		.set_codec_bufs = h264_set_codec_bufs,
+		.set_dpbs = NULL,
+		.get_exe_arg = NULL,
+		.pre_frame_start = pre_frame_start,
+		.post_frame_start = post_frame_start,
+		.multi_data_frame = multi_data_frame,
+		.set_exe_arg = set_exe_arg,
+		.get_codec_cfg = get_codec_cfg,
+		.set_codec_cfg = set_codec_cfg,
+	},
+};
+
+/*
+ * Register all known encoder descriptors on the global mfc_encoders list.
+ * Called once at driver init; mfc_set_encoder() searches this list.
+ */
+void mfc_init_encoders(void)
+{
+	list_add_tail(&unknown_enc.list, &mfc_encoders);
+
+	list_add_tail(&h264_enc.list, &mfc_encoders);
+	list_add_tail(&mpeg4_enc.list, &mfc_encoders);
+	list_add_tail(&h263_enc.list, &mfc_encoders);
+}
+
+/*
+ * Bind an instance context to the encoder matching 'codectype'.
+ * Allocates the encoder context (ctx->c_priv) and its codec-private area.
+ * Returns the non-negative codec id on success, -1 if no encoder matched,
+ * or -ENOMEM on allocation failure.
+ */
+static int mfc_set_encoder(struct mfc_inst_ctx *ctx, SSBSIP_MFC_CODEC_TYPE codectype)
+{
+	struct list_head *pos;
+	struct mfc_enc_info *encoder;
+	struct mfc_enc_ctx *enc_ctx;
+
+	ctx->codecid = -1;
+
+	/* find and set codec private */
+	list_for_each(pos, &mfc_encoders) {
+		encoder = list_entry(pos, struct mfc_enc_info, list);
+
+		if (encoder->codectype == codectype) {
+			/* matched the UNKNOWN placeholder: reject */
+			if (encoder->codecid < 0)
+				break;
+
+			/* Allocate Encoder Context memory */
+			enc_ctx = kzalloc(sizeof(struct mfc_enc_ctx), GFP_KERNEL);
+			if (!enc_ctx) {
+				mfc_err("failed to allocate codec private\n");
+				return -ENOMEM;
+			}
+			ctx->c_priv = enc_ctx;
+
+			/*
+			 * Allocate Encoder context private memory.
+			 * e_priv_size may be 0 (MPEG-4/H.263); kzalloc(0) returns
+			 * ZERO_SIZE_PTR, which is non-NULL, so this still succeeds.
+			 */
+			enc_ctx->e_priv = kzalloc(encoder->e_priv_size, GFP_KERNEL);
+			if (!enc_ctx->e_priv) {
+				mfc_err("failed to allocate encoder private\n");
+				kfree(enc_ctx);
+				ctx->c_priv = NULL;
+				return -ENOMEM;
+			}
+
+			ctx->codecid = encoder->codecid;
+			ctx->type = ENCODER;
+			ctx->c_ops = (struct codec_operations *)&encoder->c_ops;
+
+			break;
+		}
+	}
+
+	if (ctx->codecid < 0)
+		mfc_err("couldn't find proper encoder codec type: %d\n", codectype);
+
+	return ctx->codecid;
+}
+
+/*
+ * Allocate the encoder stream buffer and the luma/chroma reference frame
+ * buffers, and program their addresses into the MFC registers.
+ * Luma refs Y0/Y1 go on port A, Y2/Y3 (when numdpb == 4) and all chroma
+ * refs on port B. Returns 0 on success, -1 on any allocation failure
+ * (buffers already allocated are left for the caller to free via
+ * mfc_free_buf_inst()).
+ */
+int set_strm_ref_buf(struct mfc_inst_ctx *ctx)
+{
+	struct mfc_alloc_buffer *alloc;
+	int i;
+	/*
+	unsigned int reg;
+	*/
+	struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+
+	/* width: 128B align, height: 32B align, size: 8KB align */
+	enc_ctx->lumasize = ALIGN(ctx->width, ALIGN_W) * ALIGN(ctx->height, ALIGN_H);
+	enc_ctx->lumasize = ALIGN(enc_ctx->lumasize, ALIGN_8KB);
+	enc_ctx->chromasize = ALIGN(ctx->width + 16, ALIGN_W) * ALIGN((ctx->height >> 1) + 4, ALIGN_H);
+	enc_ctx->chromasize = ALIGN(enc_ctx->chromasize, ALIGN_8KB);
+
+	/*
+	 * allocate stream buffer
+	 */
+	alloc = _mfc_alloc_buf(ctx, MFC_STRM_SIZE, ALIGN_2KB, MBT_CPB | PORT_A);
+	if (alloc == NULL) {
+		mfc_err("failed alloc stream buffer\n");
+
+		return -1;
+	}
+
+	enc_ctx->streamaddr = alloc->real;
+	enc_ctx->streamsize = MFC_STRM_SIZE;
+
+	/* FIXME: temp. */
+	enc_ctx->kstrmaddr = alloc->addr;
+
+	for (i = 0; i < 2; i++) {
+		/*
+		 * allocate Y0, Y1 ref buffer
+		 */
+		alloc = _mfc_alloc_buf(ctx, enc_ctx->lumasize, ALIGN_2KB, MBT_DPB | PORT_A);
+		if (alloc == NULL) {
+			mfc_err("failed alloc luma ref buffer\n");
+
+			return -1;
+		}
+		/*
+		 * set luma ref buffer address
+		 * (hardware takes bus offsets in 2KB units, hence >> 11)
+		 */
+		write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_ENC_REF0_LUMA_ADR + (4 * i));
+	}
+
+	if (enc_ctx->numdpb == 4) {
+		for (i = 0; i < 2; i++) {
+			/*
+			 * allocate Y2, Y3 ref buffer
+			 */
+			alloc = _mfc_alloc_buf(ctx, enc_ctx->lumasize, ALIGN_2KB, MBT_DPB | PORT_B);
+			if (alloc == NULL) {
+				mfc_err("failed alloc luma ref buffer\n");
+
+				return -1;
+			}
+			/*
+			 * set luma ref buffer address
+			 */
+			write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_ENC_REF2_LUMA_ADR + (4 * i));
+		}
+	}
+
+	/*
+	 * allocate C0 ~ C3 ref buffer
+	 */
+	for (i = 0; i < enc_ctx->numdpb; i++) {
+		alloc = _mfc_alloc_buf(ctx, enc_ctx->chromasize, ALIGN_2KB, MBT_DPB | PORT_B);
+		if (alloc == NULL) {
+			mfc_err("failed alloc chroma ref buffer\n");
+
+			return -1;
+		}
+		/*
+		 * set chroma ref buffer address
+		 */
+		write_reg(mfc_mem_base_ofs(alloc->real) >> 11, MFC_ENC_REF0_CHROMA_ADR + (4 * i));
+	}
+
+	return 0;
+}
+
+/*
+ * Initialize an encoding instance: bind the codec, validate the resolution,
+ * apply queued preset configs, open the hardware channel, allocate codec /
+ * stream / reference buffers, push pending parameter changes into shared
+ * memory, and run the sequence-start command.
+ * Returns MFC_OK on success or a negative MFC error code; on failure the
+ * instance is rolled back to INST_STATE_CREATE and its buffers are freed.
+ */
+int mfc_init_encoding(struct mfc_inst_ctx *ctx, union mfc_args *args)
+{
+	struct mfc_enc_init_arg *init_arg = (struct mfc_enc_init_arg *)args;
+	struct mfc_enc_ctx *enc_ctx = NULL;
+	struct mfc_pre_cfg *precfg;
+	struct list_head *pos, *nxt;
+	int ret;
+	unsigned char *in_vir;
+
+	ret = mfc_set_encoder(ctx, init_arg->cmn.in_codec_type);
+	if (ret < 0) {
+		mfc_err("failed to setup encoder codec\n");
+		ret = MFC_ENC_INIT_FAIL;
+		goto err_handling;
+	}
+
+	ctx->width = init_arg->cmn.in_width;
+	ctx->height = init_arg->cmn.in_height;
+
+	/*
+	 * Resolution check: portrait content (height > MAX_VER_SIZE) swaps
+	 * the limits. 'ret' must be set before each goto - at this point it
+	 * still holds the non-negative codec id from mfc_set_encoder(), and
+	 * returning that would look like success to the caller.
+	 */
+	if (ctx->height > MAX_VER_SIZE) {
+		if (ctx->height > MAX_HOR_SIZE) {
+			mfc_err("Not support resolution: %dx%d\n",
+				ctx->width, ctx->height);
+			ret = MFC_ENC_INIT_FAIL;
+			goto err_handling;
+		}
+
+		if (ctx->width > MAX_VER_SIZE) {
+			mfc_err("Not support resolution: %dx%d\n",
+				ctx->width, ctx->height);
+			ret = MFC_ENC_INIT_FAIL;
+			goto err_handling;
+		}
+	} else {
+		if (ctx->width > MAX_HOR_SIZE) {
+			mfc_err("Not support resolution: %dx%d\n",
+				ctx->width, ctx->height);
+			ret = MFC_ENC_INIT_FAIL;
+			goto err_handling;
+		}
+	}
+
+	enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+
+	enc_ctx->pixelcache = init_arg->cmn.in_pixelcache;
+
+	/*
+	 * assign pre configuration values to instance context
+	 */
+	list_for_each_safe(pos, nxt, &ctx->presetcfgs) {
+		precfg = list_entry(pos, struct mfc_pre_cfg, list);
+
+		if (ctx->c_ops->set_codec_cfg) {
+			ret = ctx->c_ops->set_codec_cfg(ctx, precfg->type, precfg->values);
+			if (ret < 0)
+				mfc_warn("cannot set preset config type: 0x%08x: %d",
+					precfg->type, ret);
+		}
+
+		list_del(&precfg->list);
+		kfree(precfg);
+	}
+	INIT_LIST_HEAD(&ctx->presetcfgs);
+
+	mfc_set_inst_state(ctx, INST_STATE_SETUP);
+
+	/*
+	 * allocate context buffer
+	 */
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+	if ((ctx->c_ops->alloc_ctx_buf) && (!ctx->drm_flag)) {
+#else
+	if (ctx->c_ops->alloc_ctx_buf) {
+#endif
+		if (ctx->c_ops->alloc_ctx_buf(ctx) < 0) {
+			mfc_err("Context buffer allocation Failed");
+			ret = MFC_ENC_INIT_FAIL;
+			goto err_handling;
+		}
+	}
+
+	/* [pixelcache] */
+	ret = mfc_cmd_inst_open(ctx);
+	if (ret < 0) {
+		mfc_err("Open Instance Failed");
+		goto err_handling;
+	}
+
+	mfc_set_inst_state(ctx, INST_STATE_OPEN);
+
+	if (init_shm(ctx) < 0) {
+		mfc_err("Shared Memory Initialization Failed");
+		ret = MFC_ENC_INIT_FAIL;
+		goto err_handling;
+	}
+
+#if SUPPORT_SLICE_ENCODING
+	if (init_arg->cmn.in_output_mode == 1)
+		ctx->slice_flag = 1;
+#endif
+	/*
+	 * get init. arguments
+	 */
+	if (ctx->c_ops->get_init_arg) {
+		if (ctx->c_ops->get_init_arg(ctx, (void *)init_arg) < 0) {
+			mfc_err("Get Initial Arguments Failed");
+			ret = MFC_ENC_INIT_FAIL;
+			goto err_handling;
+		}
+	}
+
+	/*
+	 * allocate & set codec buffers
+	 */
+	if (ctx->c_ops->set_codec_bufs) {
+		if (ctx->c_ops->set_codec_bufs(ctx) < 0) {
+			mfc_err("Set Codec Buffers Failed");
+			ret = MFC_ENC_INIT_FAIL;
+			goto err_handling;
+		}
+	}
+
+	set_strm_ref_buf(ctx);
+
+	/*
+	 * execute pre sequence start operation
+	 * (was a duplicated nested "if (ctx->c_ops->pre_seq_start)")
+	 */
+	if (ctx->c_ops->pre_seq_start) {
+		if (ctx->c_ops->pre_seq_start(ctx) < 0) {
+			mfc_err("Pre-Sequence Start Failed");
+			ret = MFC_ENC_INIT_FAIL;
+			goto err_handling;
+		}
+	}
+
+	/* push any queued runtime parameter changes into shared memory */
+	if (enc_ctx->setflag == 1) {
+		if (enc_ctx->FrameSkipCngTag == 1) {
+			mfc_dbg("Encoding Param Setting - Allow_frame_skip enable : %d - number : %d \n",
+				enc_ctx->frame_skip_enable, enc_ctx->frameskip);
+
+			if (enc_ctx->frame_skip_enable == 2)
+				write_shm(ctx,
+					((enc_ctx->frame_skip_enable << 1) | (enc_ctx->frameskip << 16) | read_shm(ctx, EXT_ENC_CONTROL)),
+					EXT_ENC_CONTROL);
+			else
+				write_shm(ctx, ((enc_ctx->frame_skip_enable << 1)|read_shm(ctx, EXT_ENC_CONTROL)), EXT_ENC_CONTROL);
+		}
+
+		if (enc_ctx->VUIInfoCngTag == 1) {
+			mfc_dbg("Encoding Param Setting - VUI_info enable : %d\n", enc_ctx->vui_info_enable);
+
+			write_shm(ctx, enc_ctx->vuiinfoval, ASPECT_RATIO_IDC);
+			write_shm(ctx, enc_ctx->vuiextendsar, EXTENDED_SAR);
+			write_shm(ctx, ((enc_ctx->vui_info_enable << 15)|read_shm(ctx, EXT_ENC_CONTROL)), EXT_ENC_CONTROL); /*ASPECT_RATIO_VUI_ENABLE = 1<<15*/
+		}
+
+		if (enc_ctx->IPeriodCngTag == 1) {
+			mfc_dbg("Encoding Param Setting - I_PERIOD : %d\n", enc_ctx->iperiodval);
+			write_shm(ctx, enc_ctx->iperiodval, NEW_I_PERIOD);
+			write_shm(ctx, ((1<<16)|enc_ctx->iperiodval), H264_I_PERIOD);
+			write_reg(enc_ctx->iperiodval, MFC_ENC_PIC_TYPE_CTRL);
+			write_shm(ctx, (0x1 << 0), ENC_PARAM_CHANGE);
+		}
+
+		if (enc_ctx->HierPCngTag == 1) {
+			mfc_dbg("Encoding Param Setting - HIER_P enable : %d\n", enc_ctx->hier_p_enable);
+
+			write_shm(ctx,
+				((enc_ctx->hier_p_enable << 4) |
+				read_shm(ctx, EXT_ENC_CONTROL)),
+				EXT_ENC_CONTROL);
+			/*HIERARCHICAL_P_ENABLE = 1<<4*/
+		}
+	}
+
+	ret = mfc_cmd_seq_start(ctx);
+	if (ret < 0) {
+		mfc_err("Sequence Start Failed");
+		goto err_handling;
+	}
+
+	if (ctx->c_ops->post_seq_start) {
+		if (ctx->c_ops->post_seq_start(ctx) < 0) {
+			mfc_err("Post Sequence Start Failed");
+			ret = MFC_ENC_INIT_FAIL;
+			goto err_handling;
+		}
+	}
+
+	if (ctx->c_ops->set_init_arg) {
+		if (ctx->c_ops->set_init_arg(ctx, (void *)init_arg) < 0) {
+			mfc_err("Setting Initialized Arguments Failed");
+			ret = MFC_ENC_INIT_FAIL;
+			goto err_handling;
+		}
+	}
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+	if ((ctx->buf_cache_type == CACHE) && (!ctx->drm_flag)) {
+#else
+	if (ctx->buf_cache_type == CACHE) {
+#endif
+		in_vir = phys_to_virt(enc_ctx->streamaddr);
+		mfc_mem_cache_inv(in_vir, init_arg->cmn.out_header_size);
+		mfc_dbg("cache invalidate\n");
+	}
+#if defined(CONFIG_BUSFREQ)
+	/* Fix MFC & Bus Frequency for High resolution for better performance */
+	if (ctx->width >= MAX_HOR_RES || ctx->height >= MAX_VER_RES) {
+		if (atomic_read(&ctx->dev->busfreq_lock_cnt) == 0) {
+			/* For fixed MFC & Bus Freq to 200 & 400 MHz for 1080p Contents */
+			exynos4_busfreq_lock(DVFS_LOCK_ID_MFC, BUS_L0);
+			mfc_dbg("[%s] Bus Freq Locked L0\n", __func__);
+		}
+
+		atomic_inc(&ctx->dev->busfreq_lock_cnt);
+		ctx->busfreq_flag = true;
+	}
+#endif
+
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_EXYNOS4_CPUFREQ)
+	if ((ctx->width >= 320 && ctx->height >= 240)
+		|| (ctx->width >= 240 && ctx->height >= 320)) {
+		if (atomic_read(&ctx->dev->cpufreq_lock_cnt) == 0) {
+			if (0 == ctx->dev->cpufreq_level) /* 500MHz */
+				exynos_cpufreq_get_level(500000,
+						&ctx->dev->cpufreq_level);
+			exynos_cpufreq_lock(DVFS_LOCK_ID_MFC,
+					ctx->dev->cpufreq_level);
+			mfc_dbg("[%s] CPU Freq Locked 500MHz!\n", __func__);
+		}
+		atomic_inc(&ctx->dev->cpufreq_lock_cnt);
+		ctx->cpufreq_flag = true;
+	}
+#endif
+
+	/*
+	 * allocate & set DPBs
+	 */
+	/*
+	if (ctx->c_ops->set_dpbs) {
+		if (ctx->c_ops->set_dpbs(ctx) < 0)
+			return MFC_ENC_INIT_FAIL;
+	}
+	*/
+
+	/*
+	ret = mfc_cmd_init_buffers(ctx);
+	if (ret < 0)
+		return ret;
+	*/
+
+	mfc_set_inst_state(ctx, INST_STATE_INIT);
+
+	/* clear the one-shot parameter-change tags now that they are applied */
+	if (enc_ctx->setflag == 1) {
+		enc_ctx->setflag = 0;
+		enc_ctx->FrameSkipCngTag = 0;
+		enc_ctx->VUIInfoCngTag = 0;
+		enc_ctx->HierPCngTag = 0;
+
+		if (enc_ctx->IPeriodCngTag == 1) {
+			write_shm(ctx, 0, ENC_PARAM_CHANGE);
+			enc_ctx->IPeriodCngTag = 0;
+		}
+	}
+
+	mfc_print_buf();
+
+	return MFC_OK;
+
+err_handling:
+	/* roll back: close the channel, free buffers and private contexts */
+	if (ctx->state > INST_STATE_CREATE) {
+		mfc_cmd_inst_close(ctx);
+		ctx->state = INST_STATE_CREATE;
+	}
+
+	mfc_free_buf_inst(ctx->id);
+
+	if (enc_ctx) {
+		kfree(enc_ctx->e_priv);
+		enc_ctx->e_priv = NULL;
+	}
+
+	if (ctx->c_priv) {
+		kfree(ctx->c_priv);
+		ctx->c_priv = NULL;
+	}
+
+	return ret;
+}
+
+/*
+ * Program the hardware for one frame (or slice) encode and collect results.
+ * Writes the frame tag, stream buffer, input format and current Y/C frame
+ * addresses into registers/shared memory, flushes CPU caches for cached
+ * buffers, then issues the frame-start (or slice-start) command and reads
+ * back frame type, encoded size and the encoded Y/C addresses into exe_arg.
+ * Returns MFC_OK or a negative error from the command layer.
+ */
+static int mfc_encoding_frame(struct mfc_inst_ctx *ctx, struct mfc_enc_exe_arg *exe_arg)
+{
+	int ret;
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+	void *ump_handle;
+#endif
+	struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+
+	/* Set Frame Tag */
+	write_shm(ctx, exe_arg->in_frametag, SET_FRAME_TAG);
+
+	/* Set stream buffer addr */
+	enc_ctx->streamaddr = mfc_mem_base_ofs(exe_arg->in_strm_st);
+	enc_ctx->streamsize = exe_arg->in_strm_end - exe_arg->in_strm_st;
+
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+	if (enc_ctx->inputformat == NV21_LINEAR)
+		write_reg(1, MFC_ENC_NV21_SEL);
+	else if (enc_ctx->inputformat == NV12_LINEAR)
+		write_reg(0, MFC_ENC_NV21_SEL);
+#endif
+#if SUPPORT_SLICE_ENCODING
+	write_reg((enc_ctx->outputmode) << 31, MFC_ENC_SI_CH1_INPUT_FLUSH);
+#endif
+
+	/* hardware takes 2KB-unit offsets, hence >> 11 */
+	write_reg(enc_ctx->streamaddr >> 11, MFC_ENC_SI_CH1_SB_ADR);
+	write_reg(enc_ctx->streamsize, MFC_ENC_SI_CH1_SB_SIZE);
+
+	#if 0
+	/* force I frame or Not-coded frame */
+	if (mfc_ctx->forceSetFrameType == I_FRAME)
+		write_reg(1, MFC_ENC_SI_CH1_FRAME_INS);
+	else if (mfc_ctx->forceSetFrameType == NOT_CODED)
+		write_reg(1 << 1, MFC_ENC_SI_CH1_FRAME_INS);
+
+	if (mfc_ctx->dynamic_framerate != 0) {
+		write_shm((1 << 1), mfc_ctx->shared_mem_vir_addr + 0x2c);
+		/* MFC fw 2010/04/09 */
+		write_shm(mfc_ctx->dynamic_framerate*SCALE_NUM,
+			  mfc_ctx->shared_mem_vir_addr + 0x94);
+		if (mfc_ctx->MfcCodecType == MPEG4_ENC) {
+			time_increment_res = mfc_ctx->dynamic_framerate *
+						MPEG4_TIME_RES;
+			write_shm((1 << 31) |
+				  (time_increment_res << 16) |
+				  (MPEG4_TIME_RES),
+				  mfc_ctx->shared_mem_vir_addr + 0x30);
+		}
+	}
+
+	if (mfc_ctx->dynamic_bitrate != 0) {
+		write_shm((1 << 2), mfc_ctx->shared_mem_vir_addr + 0x2c);
+		write_shm(mfc_ctx->dynamic_bitrate,
+			  mfc_ctx->shared_mem_vir_addr + 0x90);
+	}
+	#endif
+
+	/* Set current frame buffer addr */
+#if (MFC_MAX_MEM_PORT_NUM == 2)
+	write_reg((exe_arg->in_Y_addr - mfc_mem_base(1)) >> 11, MFC_ENC_SI_CH1_CUR_Y_ADR);
+	write_reg((exe_arg->in_CbCr_addr - mfc_mem_base(1)) >> 11, MFC_ENC_SI_CH1_CUR_C_ADR);
+#else
+	write_reg((exe_arg->in_Y_addr - mfc_mem_base(0)) >> 11, MFC_ENC_SI_CH1_CUR_Y_ADR);
+	write_reg((exe_arg->in_CbCr_addr - mfc_mem_base(0)) >> 11, MFC_ENC_SI_CH1_CUR_C_ADR);
+#endif
+
+	#if 0
+	write_reg(1, MFC_ENC_STR_BF_U_EMPTY);
+	write_reg(1, MFC_ENC_STR_BF_L_EMPTY);
+
+	/* buf reset command if stream buffer is frame mode */
+	write_reg(0x1 << 1, MFC_ENC_SF_BUF_CTRL);
+	#endif
+
+	/* ensure input pixels are visible to the hardware before kicking off */
+	if (ctx->buf_cache_type == CACHE) {
+		flush_all_cpu_caches();
+		outer_flush_all();
+	}
+
+#if SUPPORT_SLICE_ENCODING
+	if (enc_ctx->outputmode == 0) { /* frame */
+#endif
+		ret = mfc_cmd_frame_start(ctx);
+		if (ret < 0)
+			return ret;
+
+		exe_arg->out_frame_type = read_reg(MFC_ENC_SI_SLICE_TYPE);
+		exe_arg->out_encoded_size = read_reg(MFC_ENC_SI_STRM_SIZE);
+
+		/* FIXME: port must be checked */
+		exe_arg->out_Y_addr = mfc_mem_addr_ofs(read_reg(MFC_ENCODED_Y_ADDR) << 11, 1);
+		exe_arg->out_CbCr_addr = mfc_mem_addr_ofs(read_reg(MFC_ENCODED_C_ADDR) << 11, 1);
+#if SUPPORT_SLICE_ENCODING
+	} else {		/* slice */
+		ret = mfc_cmd_slice_start(ctx);
+		if (ret < 0)
+			return ret;
+
+		/* slicecount != 0: more slices pending, no frame output yet */
+		if (enc_ctx->slicecount) {
+			exe_arg->out_frame_type = -1;
+			exe_arg->out_encoded_size = enc_ctx->slicesize;
+
+			exe_arg->out_Y_addr = 0;
+			exe_arg->out_CbCr_addr = 0;
+		} else {
+			exe_arg->out_frame_type = read_reg(MFC_ENC_SI_SLICE_TYPE);
+			exe_arg->out_encoded_size = enc_ctx->slicesize;
+
+			/* FIXME: port must be checked */
+			exe_arg->out_Y_addr = mfc_mem_addr_ofs(read_reg(MFC_ENCODED_Y_ADDR) << 11, 1);
+			exe_arg->out_CbCr_addr = mfc_mem_addr_ofs(read_reg(MFC_ENCODED_C_ADDR) << 11, 1);
+		}
+	}
+
+	mfc_dbg("frame type: %d, encoded size: %d, slice size: %d, stream size: %d\n",
+		exe_arg->out_frame_type, exe_arg->out_encoded_size,
+		enc_ctx->slicesize, read_reg(MFC_ENC_SI_STRM_SIZE));
+#endif
+
+	/* Get Frame Tag top and bottom */
+	exe_arg->out_frametag_top = read_shm(ctx, GET_FRAME_TAG_TOP);
+	exe_arg->out_frametag_bottom = read_shm(ctx, GET_FRAME_TAG_BOT);
+
+	/* MFC fw 9/30 */
+	/*
+	enc_arg->out_Y_addr =
+	    cur_frm_base + (read_reg(MFC_ENCODED_Y_ADDR) << 11);
+	enc_arg->out_CbCr_addr =
+	    cur_frm_base + (read_reg(MFC_ENCODED_C_ADDR) << 11);
+	*/
+
+	/* FIXME: cookie may be invalide */
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+	exe_arg->out_y_secure_id = 0;
+	exe_arg->out_c_secure_id = 0;
+
+	ump_handle = mfc_get_buf_ump_handle(read_reg(MFC_ENCODED_Y_ADDR) << 11);
+	if (ump_handle != NULL)
+		exe_arg->out_y_secure_id = mfc_ump_get_id(ump_handle);
+
+	ump_handle = mfc_get_buf_ump_handle(read_reg(MFC_ENCODED_C_ADDR) << 11);
+	if (ump_handle != NULL)
+		exe_arg->out_c_secure_id = mfc_ump_get_id(ump_handle);
+
+	mfc_dbg("secure IDs Y: 0x%08x, C:0x%08x\n", exe_arg->out_y_secure_id,
+		exe_arg->out_c_secure_id);
+#elif defined(CONFIG_S5P_VMEM)
+	exe_arg->out_y_cookie = s5p_getcookie((void *)(read_reg(MFC_ENCODED_Y_ADDR) << 11));
+	exe_arg->out_c_cookie = s5p_getcookie((void *)(read_reg(MFC_ENCODED_C_ADDR) << 11));
+
+	mfc_dbg("cookie Y: 0x%08x, C:0x%08x\n", exe_arg->out_y_cookie,
+		exe_arg->out_c_cookie);
+#endif
+
+	#if 0
+	write_reg(0, MFC_ENC_SI_CH1_FRAME_INS);
+	mfc_ctx->forceSetFrameType = 0;
+
+	write_shm(0, mfc_ctx->shared_mem_vir_addr + 0x2c);
+	mfc_ctx->dynamic_framerate = 0;
+	mfc_ctx->dynamic_bitrate = 0;
+	#endif
+
+	mfc_dbg
+	    ("- frame type(%d) encoded frame size(%d) encoded Y_addr(0x%08x) / C_addr(0x%08x)\r\n",
+	     exe_arg->out_frame_type, exe_arg->out_encoded_size,
+	     exe_arg->out_Y_addr, exe_arg->out_CbCr_addr);
+
+	return MFC_OK;
+}
+
+/*
+ * Execute one encode operation: run the codec's pre-frame hook, encode the
+ * frame via mfc_encoding_frame(), then the post-frame hook, moving the
+ * instance through EXE -> EXE_DONE.
+ * NOTE(review): the hook-failure paths return MFC_ENC_INIT_FAIL even though
+ * this is the execution stage - MFC_ENC_EXE_ERR would be more accurate, but
+ * changing it could affect callers; confirm before altering.
+ */
+int mfc_exec_encoding(struct mfc_inst_ctx *ctx, union mfc_args *args)
+{
+	struct mfc_enc_exe_arg *exe_arg;
+	int ret;
+	/*
+	struct mfc_enc_ctx *enc_ctx = (struct mfc_enc_ctx *)ctx->c_priv;
+	*/
+
+	exe_arg = (struct mfc_enc_exe_arg *)args;
+
+	mfc_set_inst_state(ctx, INST_STATE_EXE);
+
+	if (ctx->c_ops->pre_frame_start) {
+		if (ctx->c_ops->pre_frame_start(ctx) < 0)
+			return MFC_ENC_INIT_FAIL;
+	}
+
+	ret = mfc_encoding_frame(ctx, exe_arg);
+
+	if (ctx->c_ops->post_frame_start) {
+		if (ctx->c_ops->post_frame_start(ctx) < 0)
+			return MFC_ENC_INIT_FAIL;
+	}
+
+	mfc_set_inst_state(ctx, INST_STATE_EXE_DONE);
+
+	return ret;
+}
+
diff --git a/drivers/media/video/samsung/mfc5x/mfc_enc.h b/drivers/media/video/samsung/mfc5x/mfc_enc.h
new file mode 100644
index 0000000..4bca251
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_enc.h
@@ -0,0 +1,115 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_enc.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Encoder interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_ENC_H
+#define __MFC_ENC_H __FILE__
+
+#include <linux/list.h>
+
+#include "mfc.h"
+#include "mfc_interface.h"
+#include "mfc_inst.h"
+
+enum enc_pc {
+ EPC_ENABLE = 0,
+ EPC_DISABLE = 3,
+};
+
+/*
+ * Per-instance encoder state, stored in mfc_inst_ctx.c_priv.
+ * The single-letter tags appear to mark where a field is produced/consumed
+ * (presumably C = common/computed, K = kernel-side, H = H.264) - TODO confirm.
+ */
+struct mfc_enc_ctx {
+	unsigned int lumasize;		/* C */
+	unsigned int chromasize;	/* C */
+
+	unsigned long streamaddr;	/* K */
+	unsigned int streamsize;	/* K */
+
+	/* FIXME: temp. */
+	unsigned char *kstrmaddr;
+
+	/* init */
+	enum enc_pc pixelcache;
+	unsigned int numdpb;
+
+	/* init | exec */
+	unsigned int framemap;
+	unsigned int inputformat;
+#if SUPPORT_SLICE_ENCODING
+	unsigned int outputmode;
+#endif
+
+	/* exec */
+	unsigned int interlace;
+	unsigned int forceframe;
+	unsigned int frameskip;
+	unsigned int framerate;
+	unsigned int bitrate;
+	unsigned int iperiodval;
+	unsigned int vuiinfoval;
+	unsigned int vuiextendsar;
+
+	unsigned int frame_skip_enable;
+	unsigned int vui_info_enable;
+	unsigned int hier_p_enable;
+#if SUPPORT_SLICE_ENCODING
+	unsigned int slicecount;
+	unsigned int slicesize;
+#endif
+	/* change flag: one-shot tags consumed by mfc_init_encoding() */
+	unsigned int setflag;
+	unsigned int FrameTypeCngTag;
+	unsigned int FrameRateCngTag;
+	unsigned int BitRateCngTag;
+	unsigned int FrameSkipCngTag;
+	unsigned int VUIInfoCngTag;
+	unsigned int IPeriodCngTag;
+	unsigned int HierPCngTag;
+
+	/* codec-private area (e.g. struct mfc_enc_h264), sized by e_priv_size */
+	void *e_priv;
+};
+
+/* bits for mfc_enc_h264.change, set by h264_set_codec_cfg() */
+#define CHG_FRAME_PACKING	0x00000001
+#define CHG_I_PERIOD		0x00000002
+/* H.264-only encoder state, kept in mfc_enc_ctx.e_priv */
+struct mfc_enc_h264 {
+	unsigned int change;
+	unsigned int vui_enable;
+	unsigned int hier_p_enable;
+
+	unsigned int i_period;
+
+	unsigned int sei_gen;		/* H */
+	struct mfc_frame_packing fp;	/* H */
+};
+
+int mfc_init_encoding(struct mfc_inst_ctx *ctx, union mfc_args *args);
+/*
+int mfc_init_encoding(struct mfc_inst_ctx *ctx, struct mfc_dec_init_arg *init_arg);
+*/
+int mfc_exec_encoding(struct mfc_inst_ctx *ctx, union mfc_args *args);
+/*
+int mfc_exec_encoding(struct mfc_inst_ctx *ctx, struct mfc_dec_exe_arg *exe_arg);
+*/
+
+/*---------------------------------------------------------------------------*/
+
+/*
+ * Static descriptor for one supported encoder codec; instances are linked
+ * on the global mfc_encoders list by mfc_init_encoders().
+ */
+struct mfc_enc_info {
+	struct list_head list;
+	const char *name;
+	SSBSIP_MFC_CODEC_TYPE codectype;
+	int codecid;		/* -1 marks the UNKNOWN placeholder */
+	unsigned int e_priv_size;	/* size of the codec-private e_priv area */
+
+	const struct codec_operations c_ops;
+};
+
+void mfc_init_encoders(void);
+
+#endif /* __MFC_ENC_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_errno.h b/drivers/media/video/samsung/mfc5x/mfc_errno.h
new file mode 100644
index 0000000..55754d6
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_errno.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Global header for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Alternatively, Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MFC_ERRNO_H
+#define __MFC_ERRNO_H __FILE__
+
+/*
+ * Driver-wide return codes. MFC_OK (1) is the only success value; all
+ * failures are negative except the two noted below.
+ */
+enum mfc_ret_code {
+	MFC_OK = 1,
+	MFC_FAIL = -1000,
+	MFC_OPEN_FAIL = -1001,
+	MFC_CLOSE_FAIL = -1002,
+
+	MFC_DEC_INIT_FAIL = -2000,
+	MFC_DEC_EXE_TIME_OUT = -2001,
+	MFC_DEC_EXE_ERR = -2002,
+	/*
+	 * NOTE(review): the next two are positive, unlike every other failure
+	 * code - likely a missing '-'. They are part of the user-visible ABI,
+	 * so confirm no userspace depends on the values before fixing.
+	 */
+	MFC_DEC_GET_INBUF_FAIL = 2003,
+	MFC_DEC_SET_INBUF_FAIL = 2004,
+	MFC_DEC_GET_OUTBUF_FAIL = -2005,
+	MFC_DEC_GET_CONF_FAIL = -2006,
+	MFC_DEC_SET_CONF_FAIL = -2007,
+
+	MFC_ENC_INIT_FAIL = -3000,
+	MFC_ENC_EXE_TIME_OUT = -3001,
+	MFC_ENC_EXE_ERR = -3002,
+	MFC_ENC_GET_INBUF_FAIL = -3003,
+	MFC_ENC_SET_INBUF_FAIL = -3004,
+	MFC_ENC_GET_OUTBUF_FAIL = -3005,
+	MFC_ENC_SET_OUTBUF_FAIL = -3006,
+	MFC_ENC_GET_CONF_FAIL = -3007,
+	MFC_ENC_SET_CONF_FAIL = -3008,
+
+	MFC_STATE_INVALID = -4000,
+	MFC_DEC_HEADER_FAIL = -4001,
+	MFC_DEC_INIT_BUF_FAIL = -4002,
+	MFC_ENC_HEADER_FAIL = -5000,
+	MFC_ENC_PARAM_FAIL = -5001,
+	MFC_FRM_BUF_SIZE_FAIL = -6000,
+	MFC_FW_LOAD_FAIL = -6001,
+	MFC_FW_INIT_FAIL = -6002,
+	MFC_INST_NUM_EXCEEDED_FAIL = -6003,
+	MFC_MEM_ALLOC_FAIL = -6004,
+	MFC_MEM_INVALID_ADDR_FAIL = -6005,
+	MFC_MEM_MAPPING_FAIL = -6006,
+	MFC_GET_CONF_FAIL = -6007,
+	MFC_SET_CONF_FAIL = -6008,
+	MFC_INVALID_PARAM_FAIL = -6009,
+	MFC_API_FAIL = -9000,
+
+	MFC_CMD_FAIL = -1003,
+	MFC_SLEEP_FAIL = -1010,
+	MFC_WAKEUP_FAIL = -1020,
+
+	/* NOTE(review): CLK_ON and CLK_OFF share -1030 - confirm intentional */
+	MFC_CLK_ON_FAIL = -1030,
+	MFC_CLK_OFF_FAIL = -1030,
+	MFC_PWR_ON_FAIL = -1040,
+	MFC_PWR_OFF_FAIL = -1041,
+} ;
+
+#endif /* __MFC_ERRNO_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_inst.c b/drivers/media/video/samsung/mfc5x/mfc_inst.c
new file mode 100644
index 0000000..518fbfc
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_inst.c
@@ -0,0 +1,258 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_inst.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Instance manager for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include "mfc_inst.h"
+#include "mfc_log.h"
+#include "mfc_buf.h"
+#include "mfc_cmd.h"
+#include "mfc_pm.h"
+#include "mfc_dec.h"
+#include "mfc_enc.h"
+
+#ifdef SYSMMU_MFC_ON
+#include <linux/interrupt.h>
+#endif
+
+/*
+ * The semantics of mfc_create_inst() and mfc_destroy_inst() should be
+ * symmetric, but the MFC channel open operation is executed during the
+ * init sequence (for both decoding and encoding).
+ * create - just allocate context memory and initialize state
+ *
+ * destroy - execute channel close operation
+ *	free allocated buffer for instance
+ *	free allocated context memory
+ */
+
+/*
+ * Allocate and initialize a new instance context in INST_STATE_CREATE.
+ * No hardware interaction happens here; the channel is opened later during
+ * the init sequence. Returns NULL on allocation failure.
+ */
+struct mfc_inst_ctx *mfc_create_inst(void)
+{
+	struct mfc_inst_ctx *ctx;
+
+	ctx = kzalloc(sizeof(struct mfc_inst_ctx), GFP_KERNEL);
+	if (!ctx) {
+		mfc_err("failed to create instance\n");
+		return NULL;
+	}
+
+	/* FIXME: set default values */
+	ctx->state = INST_STATE_CREATE;
+
+	ctx->codecid = -1;
+	ctx->resolution_status = RES_NO_CHANGE;
+#ifdef CONFIG_BUSFREQ
+	ctx->busfreq_flag = false;
+#endif
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_EXYNOS4_CPUFREQ)
+	ctx->cpufreq_flag = false;
+#endif
+#ifdef CONFIG_BUSFREQ_OPP
+	ctx->dmcthreshold_flag = false;
+#endif
+#ifdef SYSMMU_MFC_ON
+	/*
+	ctx->pgd = __pa(current->mm->pgd);
+	*/
+	/* use the kernel page directory rather than the caller's mm */
+	ctx->pgd = __pa(swapper_pg_dir);
+#endif
+
+	INIT_LIST_HEAD(&ctx->presetcfgs);
+
+	return ctx;
+}
+
+/*
+ * Tear down an instance context: drop unused preset configs (or the
+ * codec-private contexts once past SETUP), close the hardware channel if it
+ * was opened, free the instance's buffers, and free the context itself.
+ * Safe to call with ctx == NULL.
+ */
+void mfc_destroy_inst(struct mfc_inst_ctx *ctx)
+{
+	struct mfc_dec_ctx *dec_ctx;
+	struct mfc_enc_ctx *enc_ctx;
+	struct mfc_pre_cfg *precfg;
+
+	if (ctx) {
+		if (ctx->state < INST_STATE_SETUP) {
+			/* codec never bound: only queued preset configs exist */
+			while (!list_empty(&ctx->presetcfgs)) {
+				precfg = list_entry((&ctx->presetcfgs)->next,
+						struct mfc_pre_cfg, list);
+
+				mfc_dbg("remove unused preset config [0x%08x]\n",
+					precfg->type);
+
+				list_del(&precfg->list);
+				kfree(precfg);
+			}
+		} else {
+			/*
+			 * free (decoder/encoder & context) private memory
+			 * kfree(NULL) is a no-op, so no guards are needed
+			 */
+			if (ctx->type == DECODER) {
+				dec_ctx = ctx->c_priv;
+				kfree(dec_ctx->d_priv);
+				kfree(dec_ctx);
+			} else if (ctx->type == ENCODER) {
+				enc_ctx = ctx->c_priv;
+				kfree(enc_ctx->e_priv);
+				kfree(enc_ctx);
+			}
+		}
+
+		if (ctx->state >= INST_STATE_OPEN) {
+			mfc_clock_on(ctx->dev);
+			mfc_cmd_inst_close(ctx);
+			mfc_clock_off(ctx->dev);
+		}
+
+		mfc_free_buf_inst(ctx->id);
+
+		/* free instance context memory */
+		kfree(ctx);
+	}
+}
+
+/*
+ * Advance the instance state machine. States may only move forward, with
+ * one explicit exception: EXE_DONE may transition back to EXE so another
+ * frame can be encoded/decoded. Returns 0 on success, -1 on an attempted
+ * backward transition.
+ */
+int mfc_set_inst_state(struct mfc_inst_ctx *ctx, enum instance_state state)
+{
+	mfc_dbg("state: 0x%08x", state);
+
+	/* only allow EXE_DONE to EXE transition */
+	if (ctx->state == INST_STATE_EXE_DONE && state == INST_STATE_EXE)
+		ctx->state = state;
+
+	/* after the special case above, ctx->state == state, so this passes */
+	if (ctx->state > state) {
+		mfc_err("failed to change state of instance [0x%08x:0x%08x]\n",
+			ctx->state, state);
+		return -1;
+	}
+
+	ctx->state = state;
+
+	return 0;
+}
+
+/* Check the instance is exactly in 'state': 0 if so, -1 otherwise. */
+int mfc_chk_inst_state(struct mfc_inst_ctx *ctx, enum instance_state state)
+{
+	return (ctx->state == state) ? 0 : -1;
+}
+
+/*
+ * Apply a configuration request to an instance.
+ * Before the codec is bound (state <= CREATE) the request is queued as a
+ * mfc_pre_cfg on ctx->presetcfgs and replayed by mfc_init_encoding()/
+ * decoding init. Afterwards it is dispatched to the bound codec's
+ * set_codec_cfg op. Returns MFC_OK, MFC_SET_CONF_FAIL or MFC_FAIL.
+ */
+int mfc_set_inst_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
+{
+	int ret = MFC_OK;
+	struct mfc_pre_cfg *precfg;
+	union _mfc_config_arg *usercfg = (union _mfc_config_arg *)arg;
+	struct list_head *pos, *nxt;
+
+	mfc_dbg("type: 0x%08x, ctx->type: 0x%08x", type, ctx->type);
+
+	/* pre-configuration supports only basic type */
+	if (ctx->state <= INST_STATE_CREATE) {
+		precfg = (struct mfc_pre_cfg *)
+			kzalloc(sizeof(struct mfc_pre_cfg), GFP_KERNEL);
+
+		if (unlikely(precfg == NULL)) {
+			mfc_err("no more kernel memory");
+
+			return MFC_SET_CONF_FAIL;
+		}
+
+		precfg->type = type;
+		memcpy(precfg->values, usercfg->basic.values, sizeof(precfg->values));
+
+		mfc_dbg("precfg new entry");
+		mfc_dbg("type: 0x%08x", precfg->type);
+		mfc_dbg("values: %d %d %d %d", precfg->values[0],
+			precfg->values[1], precfg->values[2], precfg->values[3]);
+
+		list_add_tail(&precfg->list, &ctx->presetcfgs);
+
+		/* debug-only dump of the queued entries */
+		mfc_dbg("precfg entries...");
+		precfg = NULL;
+
+		list_for_each_safe(pos, nxt, &ctx->presetcfgs) {
+			precfg = list_entry(pos, struct mfc_pre_cfg, list);
+
+			mfc_dbg("type: 0x%08x", precfg->type);
+			mfc_dbg("values: %d %d %d %d", precfg->values[0],
+				precfg->values[1], precfg->values[2], precfg->values[3]);
+		}
+
+		return MFC_OK;
+	}
+
+	switch (type) {
+	case MFC_DEC_SETCONF_POST_ENABLE:
+	case MFC_DEC_SETCONF_EXTRA_BUFFER_NUM:
+	case MFC_DEC_SETCONF_DISPLAY_DELAY:
+	case MFC_DEC_SETCONF_IS_LAST_FRAME:
+	case MFC_DEC_SETCONF_SLICE_ENABLE:
+	case MFC_DEC_SETCONF_CRC_ENABLE:
+	case MFC_DEC_SETCONF_FIMV1_WIDTH_HEIGHT:
+	case MFC_DEC_SETCONF_FRAME_TAG:
+	case MFC_DEC_SETCONF_IMMEDIATELY_DISPLAY:
+	case MFC_DEC_SETCONF_DPB_FLUSH:
+	case MFC_DEC_SETCONF_SEI_PARSE:
+	case MFC_DEC_SETCONF_PIXEL_CACHE:
+	case MFC_ENC_SETCONF_FRAME_TYPE:
+	case MFC_ENC_SETCONF_CHANGE_FRAME_RATE:
+	case MFC_ENC_SETCONF_CHANGE_BIT_RATE:
+	case MFC_ENC_SETCONF_FRAME_TAG:
+	case MFC_ENC_SETCONF_ALLOW_FRAME_SKIP:
+	case MFC_ENC_SETCONF_VUI_INFO:
+	case MFC_ENC_SETCONF_I_PERIOD:
+	case MFC_ENC_SETCONF_HIER_P:
+	case MFC_ENC_SETCONF_SEI_GEN:
+	case MFC_ENC_SETCONF_FRAME_PACKING:
+		if (ctx->c_ops->set_codec_cfg) {
+			if ((ctx->c_ops->set_codec_cfg(ctx, type, arg)) < 0)
+				return MFC_SET_CONF_FAIL;
+		}
+		break;
+
+	default:
+		mfc_err("invalid set config type: 0x%08x\n", type);
+		return MFC_FAIL;
+	}
+
+	return ret;
+}
+
+/*
+ * Read a configuration value from an instance by dispatching to the bound
+ * codec's get_codec_cfg op. Returns MFC_OK, MFC_GET_CONF_FAIL (op failed),
+ * or MFC_FAIL (unknown type).
+ */
+int mfc_get_inst_cfg(struct mfc_inst_ctx *ctx, int type, void *arg)
+{
+	int ret = MFC_OK;
+
+	mfc_dbg("type: 0x%08x, ctx->type: 0x%08x", type, ctx->type);
+
+	switch (type) {
+	case MFC_DEC_GETCONF_CRC_DATA:
+	case MFC_DEC_GETCONF_BUF_WIDTH_HEIGHT:
+	case MFC_DEC_GETCONF_CROP_INFO:
+	case MFC_DEC_GETCONF_FRAME_TAG:
+	case MFC_DEC_GETCONF_WIDTH_HEIGHT:
+	case MFC_DEC_GETCONF_FRAME_PACKING:
+	case MFC_ENC_GETCONF_FRAME_TAG:
+		if (ctx->c_ops->get_codec_cfg) {
+			if ((ctx->c_ops->get_codec_cfg(ctx, type, arg)) < 0)
+				return MFC_GET_CONF_FAIL;
+		}
+		break;
+
+	default:
+		mfc_err("invalid get config type: 0x%08x\n", type);
+		return MFC_FAIL;
+	}
+
+	return ret;
+}
diff --git a/drivers/media/video/samsung/mfc5x/mfc_inst.h b/drivers/media/video/samsung/mfc5x/mfc_inst.h
new file mode 100644
index 0000000..e297c55
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_inst.h
@@ -0,0 +1,182 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_inst.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Instance manager file for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_INST_H
+#define __MFC_INST_H __FILE__
+
+#include <linux/list.h>
+
+#include "mfc.h"
+#include "mfc_interface.h"
+
+
+/* FIXME: instance state should be more specific */
+enum instance_state {
+ INST_STATE_NULL = 0,
+
+ /* open */
+ INST_STATE_CREATE = 0x0001,
+
+ /* ioctl - *_INIT */
+ INST_STATE_SETUP = 0x0004,
+
+ /* ioctl - *_INIT */
+ INST_STATE_OPEN = 0x0010,
+ INST_STATE_INIT,
+
+ /* ioctl - *_EXE */
+ INST_STATE_EXE = 0x0020,
+ INST_STATE_EXE_DONE,
+};
+
+struct mfc_inst_ctx;
+
+struct codec_operations {
+ /* initialization routines */
+ int (*alloc_ctx_buf) (struct mfc_inst_ctx *ctx);
+ int (*alloc_desc_buf) (struct mfc_inst_ctx *ctx);
+ int (*get_init_arg) (struct mfc_inst_ctx *ctx, void *arg);
+ int (*pre_seq_start) (struct mfc_inst_ctx *ctx);
+ int (*post_seq_start) (struct mfc_inst_ctx *ctx);
+ int (*set_init_arg) (struct mfc_inst_ctx *ctx, void *arg);
+ int (*set_codec_bufs) (struct mfc_inst_ctx *ctx);
+ int (*set_dpbs) (struct mfc_inst_ctx *ctx); /* decoder */
+ /* execution routines */
+ int (*get_exe_arg) (struct mfc_inst_ctx *ctx, void *arg);
+ int (*pre_frame_start) (struct mfc_inst_ctx *ctx);
+ int (*post_frame_start) (struct mfc_inst_ctx *ctx);
+ int (*multi_data_frame) (struct mfc_inst_ctx *ctx);
+ int (*set_exe_arg) (struct mfc_inst_ctx *ctx, void *arg);
+ /* configuration routines */
+ int (*get_codec_cfg) (struct mfc_inst_ctx *ctx, int type, void *arg);
+ int (*set_codec_cfg) (struct mfc_inst_ctx *ctx, int type, void *arg);
+};
+
+struct mfc_pre_cfg {
+ struct list_head list;
+ unsigned int type;
+ unsigned int values[4];
+};
+
+struct mfc_dec_cfg {
+ unsigned int crc;
+ unsigned int pixelcache;
+ unsigned int slice;
+ unsigned int numextradpb;
+
+ unsigned int postfilter; /* MPEG4 */
+ unsigned int dispdelay_en; /* H.264 */
+ unsigned int dispdelay_val; /* H.264 */
+ unsigned int width; /* FIMV1 */
+ unsigned int height; /* FIMV1 */
+};
+
+struct mfc_enc_cfg {
+ /*
+ type:
+ init
+ runtime
+ init + runtime
+ */
+
+ /* init */
+ unsigned int pixelcache;
+
+ unsigned int frameskip;
+ unsigned int frammode;
+ unsigned int hier_p;
+
+ /* runtime ? */
+ #if 0
+ unsigned int frametype;
+ unsigned int framerate;
+ unsigned int bitrate;
+ unsigned int vui; /* H.264 */
+ unsigned int hec; /* MPEG4 */
+ unsigned int seqhdrctrl;
+
+ unsigned int i_period;
+ #endif
+};
+
+enum mfc_resolution_status {
+ RES_INCREASED = 1,
+ RES_DECERASED = 2,
+};
+
+enum mfc_resolution_change_status {
+ RES_NO_CHANGE = 0,
+ RES_SET_CHANGE = 1,
+ RES_SET_REALLOC = 2,
+ RES_WAIT_FRAME_DONE = 3,
+};
+
+struct mfc_inst_ctx {
+ int id; /* assigned by driver */
+ int cmd_id; /* assigned by F/W */
+ int codecid;
+ unsigned int type;
+ enum instance_state state;
+ unsigned int width;
+ unsigned int height;
+ volatile unsigned char *shm;
+ unsigned int shmofs;
+ unsigned int ctxbufofs;
+ unsigned int ctxbufsize;
+ unsigned int descbufofs; /* FIXME: move to decoder context */
+ unsigned int descbufsize; /* FIXME: move to decoder context */
+ unsigned long userbase;
+ SSBIP_MFC_BUFFER_TYPE buf_cache_type;
+
+ int resolution_status;
+ /*
+ struct mfc_dec_cfg deccfg;
+ struct mfc_enc_cfg enccfg;
+ */
+ struct list_head presetcfgs;
+
+ void *c_priv;
+ struct codec_operations *c_ops;
+ struct mfc_dev *dev;
+#ifdef SYSMMU_MFC_ON
+ unsigned long pgd;
+#endif
+#if defined(CONFIG_BUSFREQ)
+ int busfreq_flag; /* context bus frequency flag */
+#endif
+
+#if defined(CONFIG_CPU_EXYNOS4210) && defined(CONFIG_EXYNOS4_CPUFREQ)
+ int cpufreq_flag; /* context CPU frequency flag*/
+#endif
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ int drm_flag;
+#endif
+
+#ifdef CONFIG_BUSFREQ_OPP
+ int dmcthreshold_flag; /* context dmc max threshold flag */
+#endif
+
+#if SUPPORT_SLICE_ENCODING
+ int slice_flag;
+#endif
+};
+
+struct mfc_inst_ctx *mfc_create_inst(void);
+void mfc_destroy_inst(struct mfc_inst_ctx *ctx);
+int mfc_set_inst_state(struct mfc_inst_ctx *ctx, enum instance_state state);
+int mfc_chk_inst_state(struct mfc_inst_ctx *ctx, enum instance_state state);
+int mfc_set_inst_cfg(struct mfc_inst_ctx *ctx, int type, void *arg);
+int mfc_get_inst_cfg(struct mfc_inst_ctx *ctx, int type, void *arg);
+
+#endif /* __MFC_INST_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_interface.h b/drivers/media/video/samsung/mfc5x/mfc_interface.h
new file mode 100644
index 0000000..61116e5
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_interface.h
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Global header for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Alternatively, Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MFC_INTERFACE_H
+#define __MFC_INTERFACE_H __FILE__
+
+#include "mfc_errno.h"
+#include "SsbSipMfcApi.h"
+
+#define IOCTL_MFC_DEC_INIT (0x00800001)
+#define IOCTL_MFC_ENC_INIT (0x00800002)
+#define IOCTL_MFC_DEC_EXE (0x00800003)
+#define IOCTL_MFC_ENC_EXE (0x00800004)
+
+#define IOCTL_MFC_GET_IN_BUF (0x00800010)
+#define IOCTL_MFC_FREE_BUF (0x00800011)
+#define IOCTL_MFC_GET_REAL_ADDR (0x00800012)
+#define IOCTL_MFC_GET_MMAP_SIZE (0x00800014)
+#define IOCTL_MFC_SET_IN_BUF (0x00800018)
+
+#define IOCTL_MFC_SET_CONFIG (0x00800101)
+#define IOCTL_MFC_GET_CONFIG (0x00800102)
+
+#define IOCTL_MFC_SET_BUF_CACHE (0x00800201)
+
+/* MFC H/W supports maximum 32 extra DPB. */
+#define MFC_MAX_EXTRA_DPB 5
+#define MFC_MAX_DISP_DELAY 0xF
+
+#define MFC_LIB_VER_MAJOR 1
+#define MFC_LIB_VER_MINOR 00
+
+#define BUF_L_UNIT (1024)
+#define Align(x, alignbyte) (((x)+(alignbyte)-1)/(alignbyte)*(alignbyte))
+
+
+enum inst_type {
+ DECODER = 0x1,
+ ENCODER = 0x2,
+};
+
+typedef enum {
+ MFC_UNPACKED_PB = 0,
+ MFC_PACKED_PB = 1
+} mfc_packed_mode;
+
+
+typedef enum
+{
+ MFC_USE_NONE = 0x00,
+ MFC_USE_YUV_BUFF = 0x01,
+ MFC_USE_STRM_BUFF = 0x10
+} s3c_mfc_interbuff_status;
+
+#ifndef FPS
+typedef struct
+{
+ int luma0; /* per frame (or top field)*/
+ int chroma0; /* per frame (or top field)*/
+ int luma1; /* per frame (or bottom field)*/
+ int chroma1; /* per frame (or bottom field)*/
+} SSBSIP_MFC_CRC_DATA;
+#endif
+
+struct mfc_strm_ref_buf_arg {
+ unsigned int strm_ref_y;
+ unsigned int mv_ref_yc;
+};
+
+struct mfc_frame_buf_arg {
+ unsigned int luma;
+ unsigned int chroma;
+};
+
+
+struct mfc_enc_init_common_arg {
+ SSBSIP_MFC_CODEC_TYPE in_codec_type; /* [IN] codec type */
+
+ int in_width; /* [IN] width of YUV420 frame to be encoded */
+ int in_height; /* [IN] height of YUV420 frame to be encoded */
+
+ int in_gop_num; /* [IN] GOP Number (interval of I-frame) */
+ int in_vop_quant; /* [IN] VOP quant */
+ int in_vop_quant_p; /* [IN] VOP quant for P frame */
+
+ /* [IN] RC enable */
+ /* [IN] RC enable (0:disable, 1:frame level RC) */
+ int in_rc_fr_en;
+ int in_rc_bitrate; /* [IN] RC parameter (bitrate in kbps) */
+
+ int in_rc_qbound_min; /* [IN] RC parameter (Q bound Min) */
+ int in_rc_qbound_max; /* [IN] RC parameter (Q bound Max) */
+ int in_rc_rpara; /* [IN] RC parameter (Reaction Coefficient) */
+
+ /* [IN] Multi-slice mode (0:single, 1:multiple) */
+ int in_ms_mode;
+ /* [IN] Multi-slice size (in num. of mb or byte) */
+ int in_ms_arg;
+
+ int in_mb_refresh; /* [IN] Macroblock refresh */
+
+ /* [IN] Enable (1) / Disable (0) padding with the specified values */
+ int in_pad_ctrl_on;
+
+ /* [IN] pad value if pad_ctrl_on is Enable */
+ int in_y_pad_val;
+ int in_cb_pad_val;
+ int in_cr_pad_val;
+
+ /* linear or tiled */
+ int in_frame_map;
+
+ unsigned int in_pixelcache;
+ unsigned int in_mapped_addr;
+
+ struct mfc_strm_ref_buf_arg out_u_addr;
+ struct mfc_strm_ref_buf_arg out_p_addr;
+ struct mfc_strm_ref_buf_arg out_buf_size;
+ unsigned int out_header_size;
+
+#if SUPPORT_SLICE_ENCODING
+ unsigned int in_output_mode;
+#endif
+};
+
+struct mfc_enc_init_h263_arg {
+ int in_rc_framerate; /* [IN] RC parameter (framerate) */
+};
+
+struct mfc_enc_init_mpeg4_arg {
+ int in_profile; /* [IN] profile */
+ int in_level; /* [IN] level */
+
+ int in_vop_quant_b; /* [IN] VOP quant for B frame */
+
+ /* [IN] B frame number */
+ int in_bframenum;
+
+ /* [IN] Quarter-pel MC enable (1:enabled, 0:disabled) */
+ int in_quart_pixel;
+
+ int in_TimeIncreamentRes; /* [IN] VOP time resolution */
+ int in_VopTimeIncreament; /* [IN] Frame delta */
+};
+
+struct mfc_enc_init_h264_arg {
+ int in_profile; /* [IN] profile */
+ int in_level; /* [IN] level */
+
+ int in_vop_quant_b; /* [IN] VOP quant for B frame */
+
+ /* [IN] B frame number */
+ int in_bframenum;
+
+ /* [IN] interlace mode(0:progressive, 1:interlace) */
+ int in_interlace_mode;
+
+ /* [IN] reference number */
+ int in_reference_num;
+ /* [IN] reference number of P frame */
+ int in_ref_num_p;
+
+ int in_rc_framerate; /* [IN] RC parameter (framerate) */
+ int in_rc_mb_en; /* [IN] RC enable (0:disable, 1:MB level RC) */
+ /* [IN] MB level rate control dark region adaptive feature */
+ int in_rc_mb_dark_dis; /* (0:enable, 1:disable) */
+ /* [IN] MB level rate control smooth region adaptive feature */
+ int in_rc_mb_smooth_dis; /* (0:enable, 1:disable) */
+ /* [IN] MB level rate control static region adaptive feature */
+ int in_rc_mb_static_dis; /* (0:enable, 1:disable) */
+ /* [IN] MB level rate control activity region adaptive feature */
+ int in_rc_mb_activity_dis; /* (0:enable, 1:disable) */
+
+ /* [IN] disable deblocking filter idc */
+ int in_deblock_dis; /* (0: enable,1: disable, 2:Disable at slice boundary) */
+ /* [IN] slice alpha c0 offset of deblocking filter */
+ int in_deblock_alpha_c0;
+ /* [IN] slice beta offset of deblocking filter */
+ int in_deblock_beta;
+
+ /* [IN] ( 0 : CAVLC, 1 : CABAC ) */
+ int in_symbolmode;
+ /* [IN] (0: only 4x4 transform, 1: allow using 8x8 transform) */
+ int in_transform8x8_mode;
+
+ /* [IN] Inter weighted parameter for mode decision */
+ int in_md_interweight_pps;
+ /* [IN] Intra weighted parameter for mode decision */
+ int in_md_intraweight_pps;
+};
+
+struct mfc_enc_init_arg {
+ struct mfc_enc_init_common_arg cmn;
+ union {
+ struct mfc_enc_init_h264_arg h264;
+ struct mfc_enc_init_mpeg4_arg mpeg4;
+ struct mfc_enc_init_h263_arg h263;
+ } codec;
+};
+
+struct mfc_enc_exe_arg {
+ SSBSIP_MFC_CODEC_TYPE in_codec_type; /* [IN] codec type */
+ unsigned int in_Y_addr; /*[IN]In-buffer addr of Y component */
+ unsigned int in_CbCr_addr;/*[IN]In-buffer addr of CbCr component */
+ unsigned int in_Y_addr_vir; /*[IN]In-buffer addr of Y component */
+ unsigned int in_CbCr_addr_vir;/*[IN]In-buffer addr of CbCr component */
+ unsigned int in_strm_st; /*[IN]Out-buffer start addr of encoded strm*/
+ unsigned int in_strm_end; /*[IN]Out-buffer end addr of encoded strm */
+ unsigned int in_frametag; /* [IN] unique frame ID */
+
+ unsigned int out_frame_type; /* [OUT] frame type */
+ int out_encoded_size; /* [OUT] Length of Encoded video stream */
+ unsigned int out_Y_addr; /*[OUT]Out-buffer addr of encoded Y component */
+ unsigned int out_CbCr_addr; /*[OUT]Out-buffer addr of encoded CbCr component */
+ unsigned int out_frametag_top; /* [OUT] unique frame ID of an output frame or top field */
+ unsigned int out_frametag_bottom;/* [OUT] unique frame ID of bottom field */
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ unsigned int out_y_secure_id;
+ unsigned int out_c_secure_id;
+#elif defined(CONFIG_S5P_VMEM)
+ unsigned int out_y_cookie;
+ unsigned int out_c_cookie;
+#endif
+};
+
+struct mfc_dec_init_arg {
+ SSBSIP_MFC_CODEC_TYPE in_codec_type; /* [IN] codec type */
+ int in_strm_buf; /* [IN] address of stream buffer */
+ int in_strm_size; /* [IN] filled size in stream buffer */
+ int in_packed_PB; /* [IN] Is packed PB frame or not, 1: packedPB 0: unpacked */
+
+ unsigned int in_crc; /* [IN] */
+ unsigned int in_pixelcache; /* [IN] */
+ unsigned int in_slice; /* [IN] */
+ unsigned int in_numextradpb; /* [IN] */
+
+ unsigned int in_mapped_addr;
+
+ int out_frm_width; /* [OUT] width of YUV420 frame */
+ int out_frm_height; /* [OUT] height of YUV420 frame */
+ int out_buf_width; /* [OUT] width of YUV420 frame */
+ int out_buf_height; /* [OUT] height of YUV420 frame */
+
+	int out_dpb_cnt;		/* [OUT] the number of buffers which is necessary during decoding. */
+
+ int out_crop_right_offset; /* [OUT] crop information for h264 */
+ int out_crop_left_offset;
+ int out_crop_bottom_offset;
+ int out_crop_top_offset;
+};
+
+struct mfc_dec_exe_arg {
+ SSBSIP_MFC_CODEC_TYPE in_codec_type; /* [IN] codec type */
+ int in_strm_buf; /* [IN] the physical address of STRM_BUF */
+ /* [IN] Size of video stream filled in STRM_BUF */
+ int in_strm_size;
+ /* [IN] the address of dpb FRAME_BUF */
+ struct mfc_frame_buf_arg in_frm_buf;
+ /* [IN] size of dpb FRAME_BUF */
+ struct mfc_frame_buf_arg in_frm_size;
+ /* [IN] Unique frame ID eg. application specific timestamp */
+ unsigned int in_frametag;
+	/* [IN] immediate Display for seek, thumbnail and one frame */
+ int in_immediately_disp;
+ /* [OUT] the physical address of display buf */
+ int out_display_Y_addr;
+ /* [OUT] the physical address of display buf */
+ int out_display_C_addr;
+ int out_display_status;
+ /* [OUT] unique frame ID of an output frame or top field */
+ unsigned int out_frametag_top;
+ /* [OUT] unique frame ID of bottom field */
+ unsigned int out_frametag_bottom;
+ int out_pic_time_top;
+ int out_pic_time_bottom;
+ int out_consumed_byte;
+
+ int out_crop_right_offset;
+ int out_crop_left_offset;
+ int out_crop_bottom_offset;
+ int out_crop_top_offset;
+
+	/* in new driver, each buffer offset must be returned to the user */
+ int out_y_offset;
+ int out_c_offset;
+
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ unsigned int out_y_secure_id;
+ unsigned int out_c_secure_id;
+#elif defined(CONFIG_S5P_VMEM)
+ unsigned int out_y_cookie;
+ unsigned int out_c_cookie;
+#endif
+ int out_img_width; /* [OUT] width of YUV420 frame */
+ int out_img_height; /* [OUT] height of YUV420 frame */
+ int out_buf_width; /* [OUT] width of YUV420 frame */
+ int out_buf_height; /* [OUT] height of YUV420 frame */
+
+ int out_disp_pic_frame_type; /* [OUT] display picture frame type information */
+};
+
+struct mfc_basic_config {
+ int values[4];
+};
+
+struct mfc_frame_packing {
+ int available;
+ unsigned int arrangement_id;
+ int arrangement_cancel_flag;
+ unsigned char arrangement_type;
+ int quincunx_sampling_flag;
+ unsigned char content_interpretation_type;
+ int spatial_flipping_flag;
+ int frame0_flipped_flag;
+ int field_views_flag;
+ int current_frame_is_frame0_flag;
+ unsigned char frame0_grid_pos_x;
+ unsigned char frame0_grid_pos_y;
+ unsigned char frame1_grid_pos_x;
+ unsigned char frame1_grid_pos_y;
+};
+
+union _mfc_config_arg {
+ struct mfc_basic_config basic;
+ struct mfc_frame_packing frame_packing;
+};
+
+struct mfc_config_arg {
+ int type;
+ union _mfc_config_arg args;
+};
+
+struct mfc_get_real_addr_arg {
+ unsigned int key;
+ unsigned int addr;
+};
+
+struct mfc_buf_alloc_arg {
+ enum inst_type type;
+ int size;
+ /*
+ unsigned int mapped;
+ */
+ unsigned int align;
+
+ unsigned int addr;
+ /*
+ unsigned int phys;
+ */
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+	/* FIXME: invalid secure id == -1 */
+ unsigned int secure_id;
+#elif defined(CONFIG_S5P_VMEM)
+ unsigned int cookie;
+#else
+ unsigned int offset;
+#endif
+};
+
+struct mfc_buf_free_arg {
+ unsigned int addr;
+};
+
+
+/* RMVME */
+struct mfc_mem_alloc_arg {
+ enum inst_type type;
+ int buff_size;
+ SSBIP_MFC_BUFFER_TYPE buf_cache_type;
+ unsigned int mapped_addr;
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ unsigned int secure_id;
+#elif defined(CONFIG_S5P_VMEM)
+ unsigned int cookie;
+#else
+ unsigned int offset;
+#endif
+};
+
+struct mfc_mem_free_arg {
+ unsigned int key;
+};
+/* RMVME */
+
+union mfc_args {
+ /*
+ struct mfc_enc_init_arg enc_init;
+
+ struct mfc_enc_init_mpeg4_arg enc_init_mpeg4;
+ struct mfc_enc_init_mpeg4_arg enc_init_h263;
+ struct mfc_enc_init_h264_arg enc_init_h264;
+ */
+ struct mfc_enc_init_arg enc_init;
+ struct mfc_enc_exe_arg enc_exe;
+
+ struct mfc_dec_init_arg dec_init;
+ struct mfc_dec_exe_arg dec_exe;
+
+ struct mfc_config_arg config;
+
+ struct mfc_buf_alloc_arg buf_alloc;
+ struct mfc_buf_free_arg buf_free;
+ struct mfc_get_real_addr_arg real_addr;
+
+ /* RMVME */
+ struct mfc_mem_alloc_arg mem_alloc;
+ struct mfc_mem_free_arg mem_free;
+ /* RMVME */
+};
+
+struct mfc_common_args {
+ enum mfc_ret_code ret_code; /* [OUT] error code */
+ union mfc_args args;
+};
+
+struct mfc_enc_vui_info {
+ int aspect_ratio_idc;
+};
+
+struct mfc_dec_fimv1_info {
+ int width;
+ int height;
+};
+
+struct mfc_enc_hier_p_qp {
+ int t0_frame_qp;
+ int t2_frame_qp;
+ int t3_frame_qp;
+};
+
+struct mfc_enc_set_config {
+ int enable;
+ int number;
+};
+
+typedef struct
+{
+ int magic;
+ int hMFC;
+ int hVMEM;
+ int width;
+ int height;
+ int sizeStrmBuf;
+ struct mfc_frame_buf_arg sizeFrmBuf;
+ int displayStatus;
+ int inter_buff_status;
+ unsigned int virFreeStrmAddr;
+ unsigned int phyStrmBuf;
+ unsigned int virStrmBuf;
+ unsigned int virMvRefYC;
+ struct mfc_frame_buf_arg phyFrmBuf;
+ struct mfc_frame_buf_arg virFrmBuf;
+ unsigned int mapped_addr;
+ unsigned int mapped_size;
+ struct mfc_common_args MfcArg;
+ SSBSIP_MFC_CODEC_TYPE codecType;
+ SSBSIP_MFC_DEC_OUTPUT_INFO decOutInfo;
+ unsigned int inframetag;
+ unsigned int outframetagtop;
+ unsigned int outframetagbottom;
+ unsigned int immediatelydisp;
+ unsigned int encodedHeaderSize;
+ int encodedDataSize;
+ unsigned int encodedframeType;
+ struct mfc_frame_buf_arg encodedphyFrmBuf;
+
+ unsigned int dec_crc;
+ unsigned int dec_pixelcache;
+ unsigned int dec_slice;
+ unsigned int dec_numextradpb;
+ unsigned int dec_packedPB_detect;
+
+ int input_cookie;
+ int input_secure_id;
+ int input_size;
+
+ unsigned int encode_cnt;
+ int enc_frame_map;
+} _MFCLIB;
+
+#define ENC_PROFILE_LEVEL(profile, level) ((profile) | ((level) << 8))
+#define ENC_RC_QBOUND(min_qp, max_qp) ((min_qp) | ((max_qp) << 8))
+
+#endif /* __MFC_INTERFACE_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_log.h b/drivers/media/video/samsung/mfc5x/mfc_log.h
new file mode 100644
index 0000000..872cdeb
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_log.h
@@ -0,0 +1,59 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_log.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Logging interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_LOG_H
+#define __MFC_LOG_H __FILE__
+
+/* debug macros */
+#define MFC_DEBUG(fmt, ...) \
+ do { \
+ printk(KERN_DEBUG \
+ "%s-> " fmt, __func__, ##__VA_ARGS__); \
+ } while(0)
+
+#define MFC_ERROR(fmt, ...) \
+ do { \
+ printk(KERN_INFO \
+ "%s-> " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define MFC_NOTICE(fmt, ...) \
+ do { \
+ printk(KERN_NOTICE \
+ fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define MFC_INFO(fmt, ...) \
+ do { \
+ printk(KERN_INFO \
+ fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define MFC_WARN(fmt, ...) \
+ do { \
+ printk(KERN_WARNING \
+ fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#ifdef CONFIG_VIDEO_MFC5X_DEBUG
+#define mfc_dbg(fmt, ...) MFC_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define mfc_dbg(fmt, ...)
+#endif
+
+#define mfc_err(fmt, ...) MFC_ERROR(fmt, ##__VA_ARGS__)
+#define mfc_notice(fmt, ...) MFC_NOTICE(fmt, ##__VA_ARGS__)
+#define mfc_info(fmt, ...) MFC_INFO(fmt, ##__VA_ARGS__)
+#define mfc_warn(fmt, ...) MFC_WARN(fmt, ##__VA_ARGS__)
+
+#endif /* __MFC_LOG_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_mem.c b/drivers/media/video/samsung/mfc5x/mfc_mem.c
new file mode 100644
index 0000000..bdf7148
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_mem.c
@@ -0,0 +1,944 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_mem.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Memory manager for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_ARCH_EXYNOS4
+#include <mach/media.h>
+#endif
+#include <plat/media.h>
+
+#ifndef CONFIG_S5P_VMEM
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#endif
+
+#ifdef CONFIG_S5P_MEM_CMA
+#include <linux/cma.h>
+#endif
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+#include <plat/s5p-vcm.h>
+
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_interface_vcm.h"
+#endif
+
+#include "mfc_mem.h"
+#include "mfc_buf.h"
+#include "mfc_log.h"
+#include "mfc_pm.h"
+
+static int mem_ports = -1;
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+static struct mfc_mem mem_infos[MFC_MAX_MEM_CHUNK_NUM];
+#else
+static struct mfc_mem mem_infos[MFC_MAX_MEM_PORT_NUM];
+#endif
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+static struct mfc_vcm vcm_info;
+#endif
+
+static int mfc_mem_addr_port(unsigned long addr)
+{
+ int i;
+ int port = -1;
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ for (i = 0; i < MFC_MAX_MEM_CHUNK_NUM; i++) {
+#else
+ for (i = 0; i < mem_ports; i++) {
+#endif
+ if ((addr >= mem_infos[i].base)
+ && (addr < (mem_infos[i].base + mem_infos[i].size))) {
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ port = 0;
+#else
+ port = i;
+#endif
+ break;
+ }
+ }
+
+ return port;
+}
+
+int mfc_mem_count(void)
+{
+ return mem_ports;
+}
+
+unsigned long mfc_mem_base(int port)
+{
+ if ((port < 0) || (port >= mem_ports))
+ return 0;
+
+ return mem_infos[port].base;
+}
+
+unsigned char *mfc_mem_addr(int port)
+{
+ if ((port < 0) || (port >= mem_ports))
+ return 0;
+
+ return mem_infos[port].addr;
+}
+
+unsigned long mfc_mem_data_base(int port)
+{
+ unsigned long addr;
+
+#ifndef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ if ((port < 0) || (port >= mem_ports))
+ return 0;
+#endif
+ if (port == 0)
+ addr = mem_infos[port].base + MFC_FW_SYSTEM_SIZE;
+ else
+ addr = mem_infos[port].base;
+
+ return addr;
+}
+
+unsigned int mfc_mem_data_size(int port)
+{
+ unsigned int size;
+
+#ifndef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ if ((port < 0) || (port >= mem_ports))
+ return 0;
+#endif
+ if (port == 0)
+ size = mem_infos[port].size - MFC_FW_SYSTEM_SIZE;
+ else
+ size = mem_infos[port].size;
+
+ return size;
+}
+
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+unsigned int mfc_mem_hole_size(void)
+{
+ if (mfc_mem_data_size(1))
+ return mfc_mem_data_base(1) -
+ (mfc_mem_data_base(0) + mfc_mem_data_size(0));
+ else
+ return 0;
+}
+#endif
+
+unsigned long mfc_mem_data_ofs(unsigned long addr, int contig)
+{
+ unsigned int offset;
+ int i;
+ int port;
+
+ port = mfc_mem_addr_port(addr);
+ if (port < 0)
+ return 0;
+
+ offset = addr - mfc_mem_data_base(port);
+
+ if (contig) {
+ for (i = 0; i < port; i++)
+ offset += mfc_mem_data_size(i);
+ }
+
+ return offset;
+}
+
+unsigned long mfc_mem_base_ofs(unsigned long addr)
+{
+ int port;
+
+ port = mfc_mem_addr_port(addr);
+ if (port < 0)
+ return 0;
+
+ return addr - mem_infos[port].base;
+}
+
+unsigned long mfc_mem_addr_ofs(unsigned long ofs, int from)
+{
+ if (from >= mem_ports)
+ from = mem_ports - 1;
+
+ return mem_infos[from].base + ofs;
+}
+
+long mfc_mem_ext_ofs(unsigned long addr, unsigned int size, int from)
+{
+ int port;
+ long ofs;
+
+ if (from >= mem_ports)
+ from = mem_ports - 1;
+
+ port = mfc_mem_addr_port(addr);
+ if (port < 0) {
+ mfc_dbg("given address is out of MFC: "
+ "0x%08lx\n", addr);
+ port = from;
+ } else if (port != from) {
+ mfc_warn("given address is in the port#%d [%d]",
+ port, from);
+ port = from;
+ }
+
+ ofs = addr - mem_infos[port].base;
+
+ if ((ofs < 0) || (ofs >= MAX_MEM_OFFSET)) {
+ mfc_err("given address cannot access by MFC: "
+ "0x%08lx\n", addr);
+ ofs = -MAX_MEM_OFFSET;
+ } else if ((ofs + size) > MAX_MEM_OFFSET) {
+ mfc_warn("some part of given address cannot access: "
+ "0x%08lx\n", addr);
+ }
+
+ return ofs;
+}
+
+#ifdef SYSMMU_MFC_ON
+#ifdef CONFIG_S5P_VMEM
+void mfc_mem_cache_clean(const void *start_addr, unsigned long size)
+{
+ s5p_vmem_dmac_map_area(start_addr, size, DMA_TO_DEVICE);
+}
+
+void mfc_mem_cache_inv(const void *start_addr, unsigned long size)
+{
+ s5p_vmem_dmac_map_area(start_addr, size, DMA_FROM_DEVICE);
+}
+#else /* CONFIG_VIDEO_MFC_VCM_UMP or kernel virtual memory allocator */
+void mfc_mem_cache_clean(const void *start_addr, unsigned long size)
+{
+ unsigned long paddr;
+ void *cur_addr, *end_addr;
+
+ dmac_map_area(start_addr, size, DMA_TO_DEVICE);
+
+ cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
+ end_addr = cur_addr + PAGE_ALIGN(size);
+
+ while (cur_addr < end_addr) {
+ paddr = page_to_pfn(vmalloc_to_page(cur_addr));
+ paddr <<= PAGE_SHIFT;
+ if (paddr)
+ outer_clean_range(paddr, paddr + PAGE_SIZE);
+ cur_addr += PAGE_SIZE;
+ }
+
+ /* FIXME: L2 operation optimization */
+ /*
+ unsigned long start, end, unitsize;
+ unsigned long cur_addr, remain;
+
+ dmac_map_area(start_addr, size, DMA_TO_DEVICE);
+
+ cur_addr = (unsigned long)start_addr;
+ remain = size;
+
+ start = page_to_pfn(vmalloc_to_page(cur_addr));
+ start <<= PAGE_SHIFT;
+ if (start & PAGE_MASK) {
+ unitsize = min((start | PAGE_MASK) - start + 1, remain);
+ end = start + unitsize;
+ outer_clean_range(start, end);
+ remain -= unitsize;
+ cur_addr += unitsize;
+ }
+
+ while (remain >= PAGE_SIZE) {
+ start = page_to_pfn(vmalloc_to_page(cur_addr));
+ start <<= PAGE_SHIFT;
+ end = start + PAGE_SIZE;
+ outer_clean_range(start, end);
+ remain -= PAGE_SIZE;
+ cur_addr += PAGE_SIZE;
+ }
+
+ if (remain) {
+ start = page_to_pfn(vmalloc_to_page(cur_addr));
+ start <<= PAGE_SHIFT;
+ end = start + remain;
+ outer_clean_range(start, end);
+ }
+ */
+
+}
+
+void mfc_mem_cache_inv(const void *start_addr, unsigned long size)
+{
+ unsigned long paddr;
+ void *cur_addr, *end_addr;
+
+ cur_addr = (void *)((unsigned long)start_addr & PAGE_MASK);
+ end_addr = cur_addr + PAGE_ALIGN(size);
+
+ while (cur_addr < end_addr) {
+ paddr = page_to_pfn(vmalloc_to_page(cur_addr));
+ paddr <<= PAGE_SHIFT;
+ if (paddr)
+ outer_inv_range(paddr, paddr + PAGE_SIZE);
+ cur_addr += PAGE_SIZE;
+ }
+
+ dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
+
+ /* FIXME: L2 operation optimization */
+ /*
+ unsigned long start, end, unitsize;
+ unsigned long cur_addr, remain;
+
+ cur_addr = (unsigned long)start_addr;
+ remain = size;
+
+ start = page_to_pfn(vmalloc_to_page(cur_addr));
+ start <<= PAGE_SHIFT;
+ if (start & PAGE_MASK) {
+ unitsize = min((start | PAGE_MASK) - start + 1, remain);
+ end = start + unitsize;
+ outer_inv_range(start, end);
+ remain -= unitsize;
+ cur_addr += unitsize;
+ }
+
+ while (remain >= PAGE_SIZE) {
+ start = page_to_pfn(vmalloc_to_page(cur_addr));
+ start <<= PAGE_SHIFT;
+ end = start + PAGE_SIZE;
+ outer_inv_range(start, end);
+ remain -= PAGE_SIZE;
+ cur_addr += PAGE_SIZE;
+ }
+
+ if (remain) {
+ start = page_to_pfn(vmalloc_to_page(cur_addr));
+ start <<= PAGE_SHIFT;
+ end = start + remain;
+ outer_inv_range(start, end);
+ }
+
+ dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
+ */
+}
+#endif /* end of CONFIG_S5P_VMEM */
+#else /* not SYSMMU_MFC_ON */
+ /* early allocator */
+ /* CMA or bootmem(memblock) */
+void mfc_mem_cache_clean(const void *start_addr, unsigned long size)
+{
+ unsigned long paddr;
+
+ dmac_map_area(start_addr, size, DMA_TO_DEVICE);
+ /*
+	 * virtual & physical addresses mapped directly, so we can convert
+ * the address just using offset
+ */
+ paddr = __pa((unsigned long)start_addr);
+ outer_clean_range(paddr, paddr + size);
+
+ /* OPT#1: kernel provide below function */
+ /*
+ dma_map_single(NULL, (void *)start_addr, size, DMA_TO_DEVICE);
+ */
+}
+
+void mfc_mem_cache_inv(const void *start_addr, unsigned long size)
+{
+ unsigned long paddr;
+
+ paddr = __pa((unsigned long)start_addr);
+ outer_inv_range(paddr, paddr + size);
+ dmac_unmap_area(start_addr, size, DMA_FROM_DEVICE);
+
+ /* OPT#1: kernel provide below function */
+ /*
+ dma_unmap_single(NULL, (void *)start_addr, size, DMA_FROM_DEVICE);
+ */
+}
+#endif /* end of SYSMMU_MFC_ON */
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+static void mfc_tlb_invalidate(enum vcm_dev_id id)
+{
+ if (mfc_power_chk()) {
+ /*mfc_clock_on();*/
+
+ s5p_sysmmu_tlb_invalidate(NULL);
+
+ /*mfc_clock_off();*/
+ }
+}
+
+static void mfc_set_pagetable(enum vcm_dev_id id, unsigned long base)
+{
+ if (mfc_power_chk()) {
+ /*mfc_clock_on();*/
+
+ s5p_sysmmu_set_tablebase_pgd(NULL, base);
+
+ /*mfc_clock_off();*/
+ }
+}
+
+const static struct s5p_vcm_driver mfc_vcm_driver = {
+ .tlb_invalidator = &mfc_tlb_invalidate,
+ .pgd_base_specifier = &mfc_set_pagetable,
+ .phys_alloc = NULL,
+ .phys_free = NULL,
+};
+#endif
+
+#define MAX_ALLOCATION 3
+int mfc_init_mem_mgr(struct mfc_dev *dev)
+{
+ /*
+ * Set up the memory regions (dev->mem_infos[]) that back MFC firmware
+ * and codec buffers. The backing allocator is selected at build time:
+ * - SYSMMU_MFC_ON + CONFIG_VIDEO_MFC_VCM_UMP: VCM device-virtual space
+ * - SYSMMU_MFC_ON + CONFIG_S5P_VMEM: fixed virtual base (MFC_FREEBASE)
+ * - SYSMMU_MFC_ON only: plain vmalloc()
+ * - otherwise: physically contiguous memory from CMA or bootmem
+ * Returns 0 on success, a negative errno on failure.
+ */
+ int i;
+#if !defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ dma_addr_t base[MAX_ALLOCATION];
+#else
+ /* FIXME: for support user-side allocation. it's temporary solution */
+ struct vcm_res *hole;
+#endif
+#ifndef SYSMMU_MFC_ON
+ size_t size;
+#endif
+#ifdef CONFIG_S5P_MEM_CMA
+ struct cma_info cma_infos[2];
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ size_t bound_size;
+ size_t available_size;
+ size_t hole_size;
+#else
+ int cma_index = 0;
+#endif
+#else
+ /* NOTE(review): align_margin is declared only when !CONFIG_S5P_MEM_CMA,
+ * yet it is used in the SYSMMU paths below; SYSMMU_MFC_ON together with
+ * CONFIG_S5P_MEM_CMA would not compile - presumably the configs are
+ * mutually exclusive. Confirm. */
+ unsigned int align_margin;
+#endif
+
+ dev->mem_ports = MFC_MAX_MEM_PORT_NUM;
+ memset(dev->mem_infos, 0, sizeof(dev->mem_infos));
+
+#ifdef SYSMMU_MFC_ON
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ /* one unified VCM covering 256MB of device-virtual space per port */
+ dev->vcm_info.sysmmu_vcm = vcm_create_unified(
+ SZ_256M * dev->mem_ports,
+ VCM_DEV_MFC,
+ &mfc_vcm_driver);
+
+ /* keep a file-scope copy used by the mfc_vcm_* helpers */
+ memcpy(&vcm_info, &dev->vcm_info, sizeof(struct mfc_vcm));
+
+ dev->mem_infos[0].vcm_s = vcm_reserve(dev->vcm_info.sysmmu_vcm,
+ MFC_MEMSIZE_PORT_A, 0);
+
+ if (IS_ERR(dev->mem_infos[0].vcm_s))
+ return PTR_ERR(dev->mem_infos[0].vcm_s);
+
+ dev->mem_infos[0].base = ALIGN(dev->mem_infos[0].vcm_s->start,
+ ALIGN_128KB);
+ align_margin = dev->mem_infos[0].base - dev->mem_infos[0].vcm_s->start;
+ /* FIXME: for offset operation. it's temporary solution */
+ /*
+ dev->mem_infos[0].size = MFC_MEMSIZE_PORT_A - align_margin;
+ */
+ dev->mem_infos[0].size = SZ_256M - align_margin;
+ dev->mem_infos[0].addr = NULL;
+
+ /* FIXME: for support user-side allocation. it's temporary solution */
+ if (MFC_MEMSIZE_PORT_A < SZ_256M)
+ hole = vcm_reserve(dev->vcm_info.sysmmu_vcm,
+ SZ_256M - MFC_MEMSIZE_PORT_A, 0);
+
+ if (dev->mem_ports == 2) {
+ dev->mem_infos[1].vcm_s = vcm_reserve(dev->vcm_info.sysmmu_vcm,
+ MFC_MEMSIZE_PORT_B, 0);
+
+ if (IS_ERR(dev->mem_infos[1].vcm_s)) {
+ vcm_unreserve(dev->mem_infos[0].vcm_s);
+ return PTR_ERR(dev->mem_infos[1].vcm_s);
+ }
+
+ dev->mem_infos[1].base = ALIGN(dev->mem_infos[1].vcm_s->start,
+ ALIGN_128KB);
+ align_margin = dev->mem_infos[1].base - dev->mem_infos[1].vcm_s->start;
+ dev->mem_infos[1].size = MFC_MEMSIZE_PORT_B - align_margin;
+ dev->mem_infos[1].addr = NULL;
+ }
+
+ /* FIXME: for support user-side allocation. it's temporary solution */
+ /* NOTE(review): 'hole' is assigned only when MFC_MEMSIZE_PORT_A < SZ_256M;
+ * otherwise this unreserves an uninitialized pointer - needs a guard. */
+ vcm_unreserve(hole);
+
+ dev->fw.vcm_s = mfc_vcm_bind(dev->mem_infos[0].base, MFC_FW_SYSTEM_SIZE);
+ if (IS_ERR(dev->fw.vcm_s))
+ return PTR_ERR(dev->fw.vcm_s);
+
+ dev->fw.vcm_k = mfc_vcm_map(dev->fw.vcm_s->res.phys);
+ if (IS_ERR(dev->fw.vcm_k)) {
+ mfc_vcm_unbind(dev->fw.vcm_s, 0);
+ return PTR_ERR(dev->fw.vcm_k);
+ }
+
+ /* FIXME: it's very tricky! MUST BE FIX */
+ dev->mem_infos[0].addr = (unsigned char *)dev->fw.vcm_k->start;
+#elif defined(CONFIG_S5P_VMEM)
+ /* fixed virtual window starting at MFC_FREEBASE */
+ base[0] = MFC_FREEBASE;
+
+ dev->mem_infos[0].base = ALIGN(base[0], ALIGN_128KB);
+ align_margin = dev->mem_infos[0].base - base[0];
+ dev->mem_infos[0].size = MFC_MEMSIZE_PORT_A - align_margin;
+ dev->mem_infos[0].addr = (unsigned char *)dev->mem_infos[0].base;
+
+ if (dev->mem_ports == 2) {
+ base[1] = dev->mem_infos[0].base + dev->mem_infos[0].size;
+ dev->mem_infos[1].base = ALIGN(base[1], ALIGN_128KB);
+ align_margin = dev->mem_infos[1].base - base[1];
+ dev->mem_infos[1].size = MFC_MEMSIZE_PORT_B - align_margin;
+ dev->mem_infos[1].addr = (unsigned char *)dev->mem_infos[1].base;
+ }
+
+ /* map the firmware/system area at the start of port A */
+ dev->fw.vmem_cookie = s5p_vmem_vmemmap(MFC_FW_SYSTEM_SIZE,
+ dev->mem_infos[0].base,
+ dev->mem_infos[0].base + MFC_FW_SYSTEM_SIZE);
+
+ if (!dev->fw.vmem_cookie)
+ return -ENOMEM;
+#else /* not CONFIG_VIDEO_MFC_VCM_UMP && not CONFIG_S5P_VMEM */
+ /* kernel virtual memory allocator */
+
+ dev->mem_infos[0].vmalloc_addr = vmalloc(MFC_MEMSIZE_PORT_A);
+ if (dev->mem_infos[0].vmalloc_addr == NULL)
+ return -ENOMEM;
+
+ base[0] = (unsigned long)dev->mem_infos[0].vmalloc_addr;
+ dev->mem_infos[0].base = ALIGN(base[0], ALIGN_128KB);
+ align_margin = dev->mem_infos[0].base - base[0];
+ dev->mem_infos[0].size = MFC_MEMSIZE_PORT_A - align_margin;
+ dev->mem_infos[0].addr = (unsigned char *)dev->mem_infos[0].base;
+
+ if (dev->mem_ports == 2) {
+ dev->mem_infos[1].vmalloc_addr = vmalloc(MFC_MEMSIZE_PORT_B);
+ if (dev->mem_infos[1].vmalloc_addr == NULL) {
+ vfree(dev->mem_infos[0].vmalloc_addr);
+ return -ENOMEM;
+ }
+
+ base[1] = (unsigned long)dev->mem_infos[1].vmalloc_addr;
+ dev->mem_infos[1].base = ALIGN(base[1], ALIGN_128KB);
+ align_margin = dev->mem_infos[1].base - base[1];
+ dev->mem_infos[1].size = MFC_MEMSIZE_PORT_B - align_margin;
+ dev->mem_infos[1].addr = (unsigned char *)dev->mem_infos[1].base;
+ }
+#endif /* end of CONFIG_VIDEO_MFC_VCM_UMP */
+#else /* not SYSMMU_MFC_ON */
+ /* early allocator */
+#if defined(CONFIG_S5P_MEM_CMA)
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ /* DRM layout: 'mfc-secure' (A) below 'mfc-normal' (B); part of B is
+ * carved out as the DRM shared area (MFC_MEMSIZE_DRM). */
+ if (cma_info(&cma_infos[0], dev->device, "A")) {
+ mfc_info("failed to get CMA info of 'mfc-secure'\n");
+ return -ENOMEM;
+ }
+
+ if (cma_info(&cma_infos[1], dev->device, "B")) {
+ mfc_info("failed to get CMA info of 'mfc-normal'\n");
+ return -ENOMEM;
+ }
+
+ if (cma_infos[0].lower_bound > cma_infos[1].lower_bound) {
+ mfc_info("'mfc-secure' region must be lower than 'mfc-normal' region\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * available = secure + normal
+ * bound = secure + hole + normal
+ * hole = bound - available
+ */
+ available_size = cma_infos[0].free_size + cma_infos[1].free_size;
+ bound_size = cma_infos[1].upper_bound - cma_infos[0].lower_bound;
+ hole_size = bound_size - available_size;
+ mfc_dbg("avail: 0x%08x, bound: 0x%08x offset: 0x%08x, hole: 0x%08x\n",
+ available_size, bound_size, MAX_MEM_OFFSET, hole_size);
+
+ /* re-assign actually available size */
+ if (bound_size > MAX_MEM_OFFSET) {
+ if (cma_infos[0].free_size > MAX_MEM_OFFSET)
+ /* it will be return error */
+ available_size = MAX_MEM_OFFSET;
+ else if ((cma_infos[0].free_size + hole_size) >= MAX_MEM_OFFSET)
+ /* it will be return error */
+ available_size = cma_infos[0].free_size;
+ else
+ available_size -= (bound_size - MAX_MEM_OFFSET);
+ }
+ mfc_dbg("avail: 0x%08x\n", available_size);
+
+ size = cma_infos[0].free_size;
+ if (size > available_size) {
+ mfc_info("'mfc-secure' region is too large (%d:%d)",
+ size >> 10,
+ MAX_MEM_OFFSET >> 10);
+ return -ENOMEM;
+ }
+
+ base[0] = cma_alloc(dev->device, "A", size, ALIGN_128KB);
+ if (IS_ERR_VALUE(base[0])) {
+ mfc_err("failed to get rsv. memory from CMA on mfc-secure");
+ return -ENOMEM;
+ }
+
+ dev->mem_infos[0].base = base[0];
+ dev->mem_infos[0].size = size;
+ dev->mem_infos[0].addr = cma_get_virt(base[0], size, 0);
+
+ available_size -= dev->mem_infos[0].size;
+ mfc_dbg("avail: 0x%08x\n", available_size);
+
+ size = MFC_MEMSIZE_DRM;
+ if (size > available_size) {
+ mfc_info("failed to allocate DRM shared area (%d:%d)\n",
+ size >> 10, available_size >> 10);
+ /* NOTE(review): base[0] ('mfc-secure') is not cma_free()d on
+ * this error path - leak; confirm and fix in a follow-up. */
+ return -ENOMEM;
+ }
+
+ base[1] = cma_alloc(dev->device, "B", size, 0);
+ if (IS_ERR_VALUE(base[1])) {
+ mfc_err("failed to get rsv. memory from CMA for DRM on mfc-normal");
+ cma_free(base[0]);
+ return -ENOMEM;
+ }
+
+ dev->drm_info.base = base[1];
+ dev->drm_info.size = size;
+ dev->drm_info.addr = cma_get_virt(base[1], size, 0);
+
+ available_size -= dev->drm_info.size;
+ mfc_dbg("avail: 0x%08x\n", available_size);
+
+ /* if nothing remains, port B (mem_infos[1]) stays zeroed */
+ if (available_size > 0) {
+ size = cma_infos[1].free_size - MFC_MEMSIZE_DRM;
+ if (size > available_size) {
+ mfc_warn("<Warning> large hole between reserved memory, "
+ "'mfc-normal' size will be shrink (%d:%d)\n",
+ size >> 10,
+ available_size >> 10);
+ size = available_size;
+ }
+
+ /* NOTE(review): assumes MAX_ALLOCATION >= 3 - TODO confirm */
+ base[2] = cma_alloc(dev->device, "B", size, ALIGN_128KB);
+ if (IS_ERR_VALUE(base[2])) {
+ mfc_err("failed to get rsv. memory from CMA on mfc-normal");
+ cma_free(base[1]);
+ cma_free(base[0]);
+ return -ENOMEM;
+ }
+
+ dev->mem_infos[1].base = base[2];
+ dev->mem_infos[1].size = size;
+ dev->mem_infos[1].addr = cma_get_virt(base[2], size, 0);
+ }
+#else
+ if (dev->mem_ports == 1) {
+ /* single port: one combined "AB" region */
+ if (cma_info(&cma_infos[0], dev->device, "AB")) {
+ mfc_info("failed to get CMA info of 'mfc'\n");
+ return -ENOMEM;
+ }
+
+ size = cma_infos[0].free_size;
+ if (size > MAX_MEM_OFFSET) {
+ mfc_warn("<Warning> too large 'mfc' reserved memory, "
+ "size will be shrink (%d:%d)\n",
+ size >> 10,
+ MAX_MEM_OFFSET >> 10);
+ size = MAX_MEM_OFFSET;
+ }
+
+ base[0] = cma_alloc(dev->device, "AB", size, ALIGN_128KB);
+ if (IS_ERR_VALUE(base[0])) {
+ mfc_err("failed to get rsv. memory from CMA");
+ return -ENOMEM;
+ }
+
+ dev->mem_infos[0].base = base[0];
+ dev->mem_infos[0].size = size;
+ dev->mem_infos[0].addr = cma_get_virt(base[0], size, 0);
+ } else if (dev->mem_ports == 2) {
+ /* two ports: regions "A"/"B", lower-addressed one becomes port 0 */
+ if (cma_info(&cma_infos[0], dev->device, "A")) {
+ mfc_info("failed to get CMA info of 'mfc0'\n");
+ return -ENOMEM;
+ }
+
+ if (cma_info(&cma_infos[1], dev->device, "B")) {
+ mfc_info("failed to get CMA info of 'mfc1'\n");
+ return -ENOMEM;
+ }
+
+ if (cma_infos[0].lower_bound > cma_infos[1].lower_bound)
+ cma_index = 1;
+
+ size = cma_infos[cma_index].free_size;
+ if (size > MAX_MEM_OFFSET) {
+ mfc_warn("<Warning> too large 'mfc%d' reserved memory, "
+ "size will be shrink (%d:%d)\n",
+ cma_index, size >> 10,
+ MAX_MEM_OFFSET >> 10);
+ size = MAX_MEM_OFFSET;
+ }
+
+ base[0] = cma_alloc(dev->device, cma_index ? "B" : "A", size, ALIGN_128KB);
+ if (IS_ERR_VALUE(base[0])) {
+ mfc_err("failed to get rsv. memory from CMA on port #0");
+ return -ENOMEM;
+ }
+
+ dev->mem_infos[0].base = base[0];
+ dev->mem_infos[0].size = size;
+ dev->mem_infos[0].addr = cma_get_virt(base[0], size, 0);
+
+ /* swap CMA index */
+ cma_index = !cma_index;
+
+ size = cma_infos[cma_index].free_size;
+ if (size > MAX_MEM_OFFSET) {
+ mfc_warn("<Warning> too large 'mfc%d' reserved memory, "
+ "size will be shrink (%d:%d)\n",
+ cma_index, size >> 10,
+ MAX_MEM_OFFSET >> 10);
+ size = MAX_MEM_OFFSET;
+ }
+
+ base[1] = cma_alloc(dev->device, cma_index ? "B" : "A", size, ALIGN_128KB);
+ if (IS_ERR_VALUE(base[1])) {
+ mfc_err("failed to get rsv. memory from CMA on port #1");
+ cma_free(base[0]);
+ return -ENOMEM;
+ }
+
+ dev->mem_infos[1].base = base[1];
+ dev->mem_infos[1].size = size;
+ dev->mem_infos[1].addr = cma_get_virt(base[1], size, 0);
+ } else {
+ mfc_err("failed to get reserved memory from CMA");
+ return -EPERM;
+ }
+#endif
+#elif defined(CONFIG_S5P_MEM_BOOTMEM)
+ /* legacy bootmem reservations: one bank per port */
+ for (i = 0; i < dev->mem_ports; i++) {
+#ifdef CONFIG_ARCH_EXYNOS4
+ base[i] = s5p_get_media_memory_bank(S5P_MDEV_MFC, i);
+#else
+ base[i] = s3c_get_media_memory_bank(S3C_MDEV_MFC, i);
+#endif
+ if (base[i] == 0) {
+ mfc_err("failed to get rsv. memory from bootmem on port #%d", i);
+ return -EPERM;
+ }
+
+#ifdef CONFIG_ARCH_EXYNOS4
+ size = s5p_get_media_memsize_bank(S5P_MDEV_MFC, i);
+#else
+ size = s3c_get_media_memsize_bank(S3C_MDEV_MFC, i);
+#endif
+ if (size == 0) {
+ mfc_err("failed to get rsv. size from bootmem on port #%d", i);
+ return -EPERM;
+ }
+
+ dev->mem_infos[i].base = ALIGN(base[i], ALIGN_128KB);
+ align_margin = dev->mem_infos[i].base - base[i];
+ dev->mem_infos[i].size = size - align_margin;
+ /* kernel direct mapped memory address */
+ dev->mem_infos[i].addr = phys_to_virt(dev->mem_infos[i].base);
+ }
+#else
+ mfc_err("failed to find valid memory allocator for MFC");
+ return -EPERM;
+#endif /* end of CONFIG_S5P_MEM_CMA */
+#endif /* end of SYSMMU_MFC_ON */
+
+ /* mirror into the file-scope copies (presumably read by the mfc_mem_*
+ * accessors declared in mfc_mem.h - confirm) */
+ mem_ports = dev->mem_ports;
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ for (i = 0; i < MFC_MAX_MEM_CHUNK_NUM; i++)
+ memcpy(&mem_infos[i], &dev->mem_infos[i], sizeof(struct mfc_mem));
+#else
+ for (i = 0; i < mem_ports; i++)
+ memcpy(&mem_infos[i], &dev->mem_infos[i], sizeof(struct mfc_mem));
+#endif
+ return 0;
+}
+
+void mfc_final_mem_mgr(struct mfc_dev *dev)
+{
+ /*
+ * Tear down whatever mfc_init_mem_mgr() created for the SYSMMU-backed
+ * configurations. The CMA/bootmem reservations (!SYSMMU_MFC_ON) are
+ * permanent carve-outs and are intentionally not released here.
+ */
+#ifdef SYSMMU_MFC_ON
+#if defined(CONFIG_VIDEO_MFC_VCM_UMP)
+ vcm_unreserve(dev->mem_infos[0].vcm_s);
+ if (dev->mem_ports == 2)
+ vcm_unreserve(dev->mem_infos[1].vcm_s);
+
+ vcm_destroy(dev->vcm_info.sysmmu_vcm);
+#elif defined(CONFIG_S5P_VMEM)
+ s5p_vfree(dev->fw.vmem_cookie);
+#else
+ vfree(dev->mem_infos[0].vmalloc_addr);
+ if (dev->mem_ports == 2)
+ vfree(dev->mem_infos[1].vmalloc_addr);
+#endif /* CONFIG_VIDEO_MFC_VCM_UMP */
+#else
+ /* no action */
+#endif /* SYSMMU_MFC_ON */
+}
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+void mfc_vcm_dump_res(struct vcm_res *res)
+{
+ /* Debug helper: dump the device-virtual start/size and the physical
+ * binding of a VCM reservation via mfc_dbg(). */
+ mfc_dbg("vcm_res -\n");
+ mfc_dbg("\tstart: 0x%08x, res_size : 0x%08x\n", (unsigned int)res->start, (unsigned int)res->res_size);
+ mfc_dbg("\tphys : 0x%08x, bound_size: 0x%08x\n", (unsigned int)res->phys, (unsigned int)res->bound_size);
+}
+
+struct vcm_mmu_res *mfc_vcm_bind(unsigned long addr, unsigned int size)
+{
+ /*
+ * Allocate 'size' bytes of physical memory through the VCM layer and
+ * bind it at device-virtual address 'addr'.
+ * Returns the binding wrapper, or ERR_PTR() on failure.
+ * Fixes: the original leaked s_res when vcm_alloc() failed, and leaked
+ * both s_res and phys when vcm_bind() failed.
+ */
+ struct vcm_mmu_res *s_res;
+ struct vcm_phys *phys;
+ int ret;
+
+ int i;
+
+ s_res = kzalloc(sizeof(struct vcm_mmu_res), GFP_KERNEL);
+ if (unlikely(s_res == NULL)) {
+ mfc_err("no more kernel memory");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ s_res->res.start = addr;
+ s_res->res.res_size = size;
+ s_res->res.vcm = vcm_info.sysmmu_vcm;
+ INIT_LIST_HEAD(&s_res->bound);
+
+ phys = vcm_alloc(vcm_info.sysmmu_vcm, size, 0);
+ if (IS_ERR(phys)) {
+ /* fix: free the wrapper instead of leaking it */
+ kfree(s_res);
+ return ERR_PTR(PTR_ERR(phys));
+ }
+
+ mfc_dbg("phys->size: 0x%08x\n", phys->size);
+ for (i = 0; i < phys->count; i++)
+ mfc_dbg("start 0x%08x, size: 0x%08x\n",
+ (unsigned int)phys->parts[i].start,
+ (unsigned int)phys->parts[i].size);
+
+ ret = vcm_bind(&s_res->res, phys);
+ if (ret < 0) {
+ /* fix: release the physical chunk and the wrapper on failure */
+ vcm_free(phys);
+ kfree(s_res);
+ return ERR_PTR(ret);
+ }
+
+ mfc_vcm_dump_res(&s_res->res);
+
+ return s_res;
+}
+
+void mfc_vcm_unbind(struct vcm_mmu_res *s_res, int flag)
+{
+ /*
+ * Undo mfc_vcm_bind(): detach the physical chunk from the reservation
+ * and release the wrapper.
+ * flag == 0: the chunk was allocated by MFC and is freed here;
+ * flag == 1: the chunk belongs to another IP, only the binding is
+ * dropped.
+ */
+ struct vcm_phys *phys = vcm_unbind(&s_res->res);
+
+ if (!flag)
+ vcm_free(phys);
+
+ kfree(s_res);
+}
+
+struct vcm_res *mfc_vcm_map(struct vcm_phys *phys)
+{
+ /*
+ * Map a physical chunk into the kernel-side VCM (vcm_vmm).
+ * Returns the mapping, or the ERR_PTR propagated from vcm_map().
+ */
+ struct vcm_res *res;
+
+ res = vcm_map(vcm_vmm, phys, 0);
+
+ /* fix: vcm_map() can return an ERR_PTR; the original dumped (and thus
+ * dereferenced) it unconditionally before the caller's IS_ERR check */
+ if (!IS_ERR_OR_NULL(res))
+ mfc_vcm_dump_res(res);
+
+ return res;
+}
+
+void mfc_vcm_unmap(struct vcm_res *res)
+{
+ /* Undo mfc_vcm_map(): remove the kernel-side mapping of 'res'. */
+ vcm_unmap(res);
+}
+
+void *mfc_ump_map(struct vcm_phys *phys, unsigned long vcminfo)
+{
+ /*
+ * Export the physical chunks of 'phys' as a UMP handle and attach the
+ * VCM information 'vcminfo' to it.
+ * Returns the handle cast to void *, or ERR_PTR(-ENOMEM) on failure.
+ */
+ struct vcm_phys_part *part = phys->parts;
+ int num_blocks = phys->count;
+ ump_dd_physical_block *blocks;
+ ump_dd_handle handle;
+ int i;
+
+ /* temporary table describing each physical chunk for UMP */
+ blocks = vmalloc(sizeof(ump_dd_physical_block) * num_blocks);
+ /* fix: vmalloc() can fail; the original dereferenced blocks unchecked */
+ if (blocks == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < num_blocks; i++) {
+ blocks[i].addr = part->start;
+ blocks[i].size = part->size;
+ ++part;
+
+ mfc_dbg("\tblock 0x%08lx, size: 0x%08lx\n", blocks[i].addr, blocks[i].size);
+ }
+
+ handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
+ /*
+ ump_dd_reference_add(handle);
+ */
+
+ vfree(blocks);
+
+ if (handle == UMP_DD_HANDLE_INVALID)
+ return ERR_PTR(-ENOMEM);
+
+ /* NOTE(review): the handle reference is not released on this failure -
+ * possible leak; confirm against the UMP API */
+ if (ump_dd_meminfo_set(handle, (void *)vcminfo) != UMP_DD_SUCCESS)
+ return ERR_PTR(-ENOMEM);
+
+ return (void *)handle;
+}
+
+void mfc_ump_unmap(void *handle)
+{
+ /* Drop one reference on the UMP handle (presumably destroyed by UMP on
+ * the last release - confirm against the UMP API). */
+ ump_dd_reference_release(handle);
+}
+
+unsigned int mfc_ump_get_id(void *handle)
+{
+ /* Return the UMP secure ID that identifies this handle to userspace. */
+ return ump_dd_secure_id_get(handle);
+}
+
+unsigned long mfc_ump_get_virt(unsigned int secure_id)
+{
+ /*
+ * Look up the VCM reservation registered for a UMP secure ID and
+ * return its device-virtual start address, or 0 when no reservation
+ * is registered for that ID.
+ */
+ struct vcm_res *res;
+
+ res = (struct vcm_res *)ump_dd_meminfo_get(secure_id, (void*)VCM_DEV_MFC);
+ if (!res) {
+ mfc_err("failed to get device virtual, id: %d",
+ (unsigned int)secure_id);
+ return 0;
+ }
+
+ return res->start;
+}
+#endif /* CONFIG_VIDEO_MFC_VCM_UMP */
+
diff --git a/drivers/media/video/samsung/mfc5x/mfc_mem.h b/drivers/media/video/samsung/mfc5x/mfc_mem.h
new file mode 100644
index 0000000..bc1bc6d
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_mem.h
@@ -0,0 +1,76 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_mem.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Memory manager for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_MEM_H_
+#define __MFC_MEM_H_ __FILE__
+
+#include "mfc.h"
+#include "mfc_dev.h"
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+#include <plat/s5p-vcm.h>
+#endif
+
+#ifdef CONFIG_S5P_VMEM
+#include <linux/dma-mapping.h>
+
+extern unsigned int s5p_vmem_vmemmap(size_t size, unsigned long va_start,
+ unsigned long va_end);
+extern void s5p_vfree(unsigned int cookie);
+extern unsigned int s5p_getcookie(void *addr);
+extern void *s5p_getaddress(unsigned int cookie);
+extern void s5p_vmem_dmac_map_area(const void *start_addr,
+ unsigned long size, int dir);
+#endif
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+/* NOTE(review): forward declaration is redundant - struct vcm_res must be
+ * complete here (it is embedded by value below, via <plat/s5p-vcm.h>) */
+struct vcm_res;
+
+/* A VCM reservation plus its binding list, as handed out by mfc_vcm_bind(). */
+struct vcm_mmu_res {
+ struct vcm_res res; /* the underlying device-virtual reservation */
+ struct list_head bound; /* list head initialized by mfc_vcm_bind() */
+};
+#endif
+
+int mfc_mem_count(void);
+unsigned long mfc_mem_base(int port);
+unsigned char *mfc_mem_addr(int port);
+unsigned long mfc_mem_data_base(int port);
+unsigned int mfc_mem_data_size(int port);
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+unsigned int mfc_mem_hole_size(void);
+#endif
+unsigned long mfc_mem_data_ofs(unsigned long addr, int contig);
+unsigned long mfc_mem_base_ofs(unsigned long addr);
+unsigned long mfc_mem_addr_ofs(unsigned long ofs, int from);
+long mfc_mem_ext_ofs(unsigned long addr, unsigned int size, int from);
+
+void mfc_mem_cache_clean(const void *start_addr, unsigned long size);
+void mfc_mem_cache_inv(const void *start_addr, unsigned long size);
+
+int mfc_init_mem_mgr(struct mfc_dev *dev);
+void mfc_final_mem_mgr(struct mfc_dev *dev);
+
+#ifdef CONFIG_VIDEO_MFC_VCM_UMP
+void mfc_vcm_dump_res(struct vcm_res *res);
+/* fix: the definition in mfc_mem.c takes 'unsigned long addr'; the previous
+ * 'unsigned int' prototype here was a conflicting declaration */
+struct vcm_mmu_res *mfc_vcm_bind(unsigned long addr, unsigned int size);
+void mfc_vcm_unbind(struct vcm_mmu_res *s_res, int flag);
+struct vcm_res *mfc_vcm_map(struct vcm_phys *phys);
+void mfc_vcm_unmap(struct vcm_res *res);
+void *mfc_ump_map(struct vcm_phys *phys, unsigned long vcminfo);
+void mfc_ump_unmap(void *handle);
+unsigned int mfc_ump_get_id(void *handle);
+unsigned long mfc_ump_get_virt(unsigned int secure_id);
+#endif
+
+#endif /* __MFC_MEM_H_ */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_pm.c b/drivers/media/video/samsung/mfc5x/mfc_pm.c
new file mode 100644
index 0000000..6f40437
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_pm.c
@@ -0,0 +1,198 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_pm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Power management module for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#ifdef CONFIG_PM_RUNTIME
+#include <linux/pm_runtime.h>
+#endif
+#include <linux/interrupt.h>
+
+#include <plat/clock.h>
+#include <plat/s5p-mfc.h>
+#include <plat/cpu.h>
+
+#include <mach/regs-pmu.h>
+
+#include <asm/io.h>
+
+#include "mfc_dev.h"
+#include "mfc_log.h"
+
+#define MFC_PARENT_CLK_NAME "mout_mfc0"
+#define MFC_CLKNAME "sclk_mfc"
+#define MFC_GATE_CLK_NAME "mfc"
+
+#undef CLK_DEBUG
+
+static struct mfc_pm *pm;
+
+#ifdef CLK_DEBUG
+atomic_t clk_ref;
+#endif
+
+int mfc_init_pm(struct mfc_dev *mfcdev)
+{
+ /*
+ * Set up the MFC clock tree (parent mux -> source clock -> gate clock)
+ * and enable runtime PM for the device.
+ * Returns 0 on success or a negative errno; on failure every clock
+ * reference taken so far is released via the fall-through labels.
+ */
+ struct clk *parent, *sclk;
+ int ret = 0;
+
+ pm = &mfcdev->pm;
+
+ parent = clk_get(mfcdev->device, MFC_PARENT_CLK_NAME);
+ if (IS_ERR(parent)) {
+ printk(KERN_ERR "failed to get parent clock\n");
+ ret = -ENOENT;
+ goto err_gp_clk;
+ }
+
+ sclk = clk_get(mfcdev->device, MFC_CLKNAME);
+ if (IS_ERR(sclk)) {
+ printk(KERN_ERR "failed to get source clock\n");
+ ret = -ENOENT;
+ goto err_gs_clk;
+ }
+
+ ret = clk_set_parent(sclk, parent);
+ if (ret) {
+ printk(KERN_ERR "unable to set parent %s of clock %s\n",
+ parent->name, sclk->name);
+ goto err_sp_clk;
+ }
+
+ /* FIXME: clock name & rate have to move to machine code */
+ ret = clk_set_rate(sclk, mfc_clk_rate);
+ if (ret) {
+ /* fix: report the rate actually requested, not hard-coded 200MHz */
+ printk(KERN_ERR "%s rate change failed: %lu\n",
+ sclk->name, (unsigned long)mfc_clk_rate);
+ goto err_ss_clk;
+ }
+
+ /* clock for gating */
+ pm->clock = clk_get(mfcdev->device, MFC_GATE_CLK_NAME);
+ if (IS_ERR(pm->clock)) {
+ printk(KERN_ERR "failed to get clock-gating control\n");
+ ret = -ENOENT;
+ goto err_gg_clk;
+ }
+
+ atomic_set(&pm->power, 0);
+
+#ifdef CONFIG_PM_RUNTIME
+ pm->device = mfcdev->device;
+ pm_runtime_enable(pm->device);
+#endif
+
+#ifdef CLK_DEBUG
+ atomic_set(&clk_ref, 0);
+#endif
+
+ /* NOTE(review): parent/sclk references are never clk_put() on the
+ * success path - confirm whether the references must be held */
+ return 0;
+
+err_gg_clk:
+err_ss_clk:
+err_sp_clk:
+ clk_put(sclk);
+err_gs_clk:
+ clk_put(parent);
+err_gp_clk:
+ return ret;
+}
+
+void mfc_final_pm(struct mfc_dev *mfcdev)
+{
+ /* Disable runtime PM set up by mfc_init_pm(). Note this acts on the
+ * file-scope 'pm'; the mfcdev argument is currently unused. */
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_disable(pm->device);
+#endif
+}
+
+int mfc_clock_on(struct mfc_dev *mfcdev)
+{
+ /*
+ * Ungate the MFC clock, then enable the MFC interrupt.
+ * Returns 0 on success or the clk_enable() error.
+ * Fix: the original enabled the IRQ even when clk_enable() failed,
+ * leaving an unbalanced enable_irq() relative to mfc_clock_off().
+ */
+ int ret;
+#ifdef CLK_DEBUG
+ atomic_inc(&clk_ref);
+ mfc_dbg("+ %d", atomic_read(&clk_ref));
+#endif
+
+ ret = clk_enable(pm->clock);
+ if (ret < 0)
+ return ret;
+
+ enable_irq(mfcdev->irq);
+
+ return 0;
+}
+
+void mfc_clock_off(struct mfc_dev *mfcdev)
+{
+ /* Counterpart of mfc_clock_on(): mask the MFC IRQ first, then gate
+ * the clock. */
+#ifdef CLK_DEBUG
+ atomic_dec(&clk_ref);
+ mfc_dbg("- %d", atomic_read(&clk_ref));
+#endif
+ disable_irq(mfcdev->irq);
+ clk_disable(pm->clock);
+}
+
+int mfc_power_on(void)
+{
+ /*
+ * Power up the MFC block. With runtime PM, early silicon revisions
+ * (Exynos4212 < rev1.0, Exynos4412 < rev1.1) skip the runtime-PM call
+ * entirely - presumably a hardware workaround; confirm against the
+ * SoC errata. Without runtime PM only the software power flag is set.
+ */
+#ifdef CONFIG_PM_RUNTIME
+ if ((soc_is_exynos4212() && (samsung_rev() < EXYNOS4212_REV_1_0)) ||
+ (soc_is_exynos4412() && (samsung_rev() < EXYNOS4412_REV_1_1)))
+ return 0;
+ else
+ return pm_runtime_get_sync(pm->device);
+#else
+ atomic_set(&pm->power, 1);
+
+ return 0;
+#endif
+}
+
+int mfc_power_off(void)
+{
+ /* Counterpart of mfc_power_on(): same early-revision skip applies;
+ * without runtime PM only the software power flag is cleared. */
+#ifdef CONFIG_PM_RUNTIME
+ if ((soc_is_exynos4212() && (samsung_rev() < EXYNOS4212_REV_1_0)) ||
+ (soc_is_exynos4412() && (samsung_rev() < EXYNOS4412_REV_1_1)))
+ return 0;
+ else
+ return pm_runtime_put_sync(pm->device);
+#else
+ atomic_set(&pm->power, 0);
+
+ return 0;
+#endif
+}
+
+bool mfc_power_chk(void)
+{
+ /* Report whether the software power flag (pm->power) is set.
+ * NOTE: only meaningful in the !CONFIG_PM_RUNTIME build, where
+ * mfc_power_on()/mfc_power_off() maintain the flag. */
+ int powered = atomic_read(&pm->power);
+
+ mfc_dbg("%s", powered ? "on" : "off");
+
+ return powered != 0;
+}
+
+void mfc_pd_enable(void)
+{
+ /*
+ * Turn on the MFC power domain: write the local power-enable bits to
+ * the configuration register and poll the status register (offset +4)
+ * until they latch. 10 polls x 100us = the "max 1ms" bound below; on
+ * timeout only an error is logged.
+ */
+ u32 timeout;
+
+ __raw_writel(S5P_INT_LOCAL_PWR_EN, S5P_PMU_MFC_CONF);
+
+ /* Wait max 1ms */
+ timeout = 10;
+ while ((__raw_readl(S5P_PMU_MFC_CONF + 0x4) & S5P_INT_LOCAL_PWR_EN)
+ != S5P_INT_LOCAL_PWR_EN) {
+ if (timeout == 0) {
+ printk(KERN_ERR "Power domain MFC enable failed.\n");
+ break;
+ }
+
+ timeout--;
+
+ udelay(100);
+ }
+}
+
diff --git a/drivers/media/video/samsung/mfc5x/mfc_pm.h b/drivers/media/video/samsung/mfc5x/mfc_pm.h
new file mode 100644
index 0000000..ac370ca
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_pm.h
@@ -0,0 +1,29 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_pm.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Power management module for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_PM_H
+#define __MFC_PM_H __FILE__
+
+int mfc_init_pm(struct mfc_dev *mfcdev);
+void mfc_final_pm(struct mfc_dev *mfcdev);
+
+int mfc_clock_on(struct mfc_dev *mfcdev);
+void mfc_clock_off(struct mfc_dev *mfcdev);
+int mfc_power_on(void);
+int mfc_power_off(void);
+/* fix: mfc_power_chk() is defined unconditionally in mfc_pm.c; hiding the
+ * prototype behind CONFIG_CPU_EXYNOS4210 left other configurations with an
+ * implicit declaration at call sites */
+bool mfc_power_chk(void);
+void mfc_pd_enable(void);
+
+#endif /* __MFC_PM_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_reg.c b/drivers/media/video/samsung/mfc5x/mfc_reg.c
new file mode 100644
index 0000000..91375f1
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_reg.c
@@ -0,0 +1,32 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_regs.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+
+static void __iomem *regs;
+
+void init_reg(void __iomem *base)
+{
+ /* Record the ioremapped MFC register base used by read_reg()/write_reg(). */
+ regs = base;
+}
+
+void write_reg(unsigned int data, unsigned int offset)
+{
+ /* Raw (non-barriered) write of 'data' to the register at 'offset'. */
+ __raw_writel(data, regs + offset);
+}
+
+unsigned int read_reg(unsigned int offset)
+{
+ /* Raw (non-barriered) read of the register at 'offset'. */
+ return __raw_readl(regs + offset);
+}
+
diff --git a/drivers/media/video/samsung/mfc5x/mfc_reg.h b/drivers/media/video/samsung/mfc5x/mfc_reg.h
new file mode 100644
index 0000000..11d9ab0
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_reg.h
@@ -0,0 +1,21 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_regs.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_REGS_H
+#define __MFC_REGS_H __FILE__
+
+void init_reg(void __iomem *base);
+void write_reg(unsigned int data, unsigned int offset);
+unsigned int read_reg(unsigned int offset);
+
+#endif /* __MFC_REGS_H */
diff --git a/drivers/media/video/samsung/mfc5x/mfc_shm.c b/drivers/media/video/samsung/mfc5x/mfc_shm.c
new file mode 100644
index 0000000..58638f1
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_shm.c
@@ -0,0 +1,88 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_shm.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Shared memory interface file for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+
+#include "mfc_inst.h"
+#include "mfc_mem.h"
+#include "mfc_buf.h"
+#include "mfc_log.h"
+
+int init_shm(struct mfc_inst_ctx *ctx)
+{
+ /*
+ * Allocate and zero the per-instance shared memory block (MFC_SHM_SIZE)
+ * used to exchange parameters with the firmware; record its CPU address
+ * in ctx->shm and its device offset in ctx->shmofs.
+ * Returns 0 on success, -1 on failure (NOTE(review): callers expecting
+ * a negative errno such as -ENOMEM should be checked).
+ */
+#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
+ struct mfc_dev *dev = ctx->dev;
+ struct mfc_alloc_buffer *alloc;
+
+ if (ctx->drm_flag) {
+ /* DRM instances use a fixed per-context slot inside the
+ * pre-reserved DRM region instead of a fresh allocation */
+ ctx->shm = (unsigned char *)(dev->drm_info.addr + MFC_SHM_OFS_DRM
+ + MFC_SHM_SIZE*ctx->id);
+ ctx->shmofs = mfc_mem_ext_ofs(dev->drm_info.base + MFC_SHM_OFS_DRM
+ + MFC_SHM_SIZE*ctx->id, MFC_SHM_SIZE, PORT_A);
+
+ /* assumes ctx->shmofs is a signed type - TODO confirm */
+ if (ctx->shmofs >= 0) {
+ memset((void *)ctx->shm, 0, MFC_SHM_SIZE);
+ mfc_mem_cache_clean((void *)ctx->shm, MFC_SHM_SIZE);
+
+ return 0;
+ }
+ } else {
+ alloc = _mfc_alloc_buf(ctx, MFC_SHM_SIZE, ALIGN_4B, MBT_SHM | PORT_A);
+
+ if (alloc != NULL) {
+ ctx->shm = alloc->addr;
+ ctx->shmofs = mfc_mem_base_ofs(alloc->real);
+
+ memset((void *)ctx->shm, 0, MFC_SHM_SIZE);
+ mfc_mem_cache_clean((void *)ctx->shm, MFC_SHM_SIZE);
+
+ return 0;
+ }
+ }
+#else
+ /* non-DRM build: identical to the else-branch above */
+ struct mfc_alloc_buffer *alloc;
+
+ alloc = _mfc_alloc_buf(ctx, MFC_SHM_SIZE, ALIGN_4B, MBT_SHM | PORT_A);
+
+ if (alloc != NULL) {
+ ctx->shm = alloc->addr;
+ ctx->shmofs = mfc_mem_base_ofs(alloc->real);
+
+ memset((void *)ctx->shm, 0, MFC_SHM_SIZE);
+ mfc_mem_cache_clean((void *)ctx->shm, MFC_SHM_SIZE);
+
+ return 0;
+ }
+#endif
+ mfc_err("failed alloc shared memory buffer\n");
+
+ ctx->shm = NULL;
+ ctx->shmofs = 0;
+
+ return -1;
+}
+
+void write_shm(struct mfc_inst_ctx *ctx, unsigned int data, unsigned int offset)
+{
+ /* Write one 32-bit word into the instance's shared memory, then clean
+ * the cache so the MFC firmware observes the update. */
+ writel(data, (ctx->shm + offset));
+
+ mfc_mem_cache_clean((void *)((unsigned int)(ctx->shm) + offset), 4);
+}
+
+unsigned int read_shm(struct mfc_inst_ctx *ctx, unsigned int offset)
+{
+ /* Invalidate the cache first so a value written by the MFC firmware
+ * is read from memory, not from a stale cache line. */
+ mfc_mem_cache_inv((void *)((unsigned int)(ctx->shm) + offset), 4);
+
+ return readl(ctx->shm + offset);
+}
+
diff --git a/drivers/media/video/samsung/mfc5x/mfc_shm.h b/drivers/media/video/samsung/mfc5x/mfc_shm.h
new file mode 100644
index 0000000..e5d7ba7
--- /dev/null
+++ b/drivers/media/video/samsung/mfc5x/mfc_shm.h
@@ -0,0 +1,82 @@
+/*
+ * linux/drivers/media/video/samsung/mfc5x/mfc_shm.h
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Shared memory interface for Samsung MFC (Multi Function Codec - FIMV) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFC_SHM_H
+#define __MFC_SHM_H __FILE__
+
+enum MFC_SHM_OFS
+{
+ /* Byte offsets into the per-instance shared memory area; the trailing
+ * letter apparently marks usage: D = decoder, E = encoder, C = common
+ * (confirm against the MFC firmware interface spec).
+ * NOTE(review): misspelled identifiers (EXTENEDED_*, HW_VERSRION) are
+ * kept as-is - renaming would break references elsewhere. */
+ EXTENEDED_DECODE_STATUS = 0x0000, /* D */
+ SET_FRAME_TAG = 0x0004, /* D */
+ GET_FRAME_TAG_TOP = 0x0008, /* D */
+ GET_FRAME_TAG_BOT = 0x000C, /* D */
+ PIC_TIME_TOP = 0x0010, /* D */
+ PIC_TIME_BOT = 0x0014, /* D */
+ START_BYTE_NUM = 0x0018, /* D */
+ CROP_INFO1 = 0x0020, /* D, H.264 */
+ CROP_INFO2 = 0x0024, /* D, H.264 */
+ EXT_ENC_CONTROL = 0x0028, /* E */
+ ENC_PARAM_CHANGE = 0x002C, /* E */
+ VOP_TIMING = 0x0030, /* E, MPEG4 */
+ HEC_PERIOD = 0x0034, /* E, MPEG4 */
+ METADATA_ENABLE = 0x0038, /* C */
+ METADATA_STATUS = 0x003C, /* C */
+ METADATA_DISPLAY_INDEX = 0x0040, /* C */
+ EXT_METADATA_START_ADDR = 0x0044, /* C */
+ PUT_EXTRADATA = 0x0048, /* C */
+ EXTRADATA_ADDR = 0x004C, /* C */
+ ALLOCATED_LUMA_DPB_SIZE = 0x0064, /* D */
+ ALLOCATED_CHROMA_DPB_SIZE = 0x0068, /* D */
+ ALLOCATED_MV_SIZE = 0x006C, /* D */
+ P_B_FRAME_QP = 0x0070, /* E */
+ ASPECT_RATIO_IDC = 0x0074, /* E, H.264, depend on ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+ EXTENDED_SAR = 0x0078, /* E, H.264, depend on ASPECT_RATIO_VUI_ENABLE in EXT_ENC_CONTROL */
+ DISP_PIC_PROFILE = 0x007C, /* D */
+ FLUSH_CMD_TYPE = 0x0080, /* C */
+ FLUSH_CMD_INBUF1 = 0x0084, /* C */
+ FLUSH_CMD_INBUF2 = 0x0088, /* C */
+ FLUSH_CMD_OUTBUF = 0x008C, /* E */
+ NEW_RC_BIT_RATE = 0x0090, /* E, format as RC_BIT_RATE(0xC5A8) depend on RC_BIT_RATE_CHANGE in ENC_PARAM_CHANGE */
+ NEW_RC_FRAME_RATE = 0x0094, /* E, format as RC_FRAME_RATE(0xD0D0) depend on RC_FRAME_RATE_CHANGE in ENC_PARAM_CHANGE */
+ NEW_I_PERIOD = 0x0098, /* E, format as I_FRM_CTRL(0xC504) depend on I_PERIOD_CHANGE in ENC_PARAM_CHANGE */
+ H264_I_PERIOD = 0x009C, /* E, H.264, open GOP */
+ RC_CONTROL_CONFIG = 0x00A0, /* E */
+ BATCH_INPUT_ADDR = 0x00A4, /* E */
+ BATCH_OUTPUT_ADDR = 0x00A8, /* E */
+ BATCH_OUTPUT_SIZE = 0x00AC, /* E */
+ MIN_LUMA_DPB_SIZE = 0x00B0, /* D */
+ DEVICE_FORMAT_ID = 0x00B4, /* C */
+ H264_POC_TYPE = 0x00B8, /* D */
+ MIN_CHROMA_DPB_SIZE = 0x00BC, /* D */
+ DISP_PIC_FRAME_TYPE = 0x00C0, /* D */
+ FREE_LUMA_DPB = 0x00C4, /* D, VC1 MPEG4 */
+ ASPECT_RATIO_INFO = 0x00C8, /* D, MPEG4 */
+ EXTENDED_PAR = 0x00CC, /* D, MPEG4 */
+ DBG_HISTORY_INPUT0 = 0x00D0, /* C */
+ DBG_HISTORY_INPUT1 = 0x00D4, /* C */
+ DBG_HISTORY_OUTPUT = 0x00D8, /* C */
+ HIERARCHICAL_P_QP = 0x00E0, /* E, H.264 */
+ HW_VERSRION = 0x010C, /* C */
+ SEI_ENABLE = 0x0168, /* C, H.264 */
+ FRAME_PACK_SEI_AVAIL = 0x016C, /* D, H.264 */
+ FRAME_PACK_ARRGMENT_ID = 0x0170, /* D, H.264 */
+ FRAME_PACK_DEC_INFO = 0x0174, /* D, H.264 */
+ FRAME_PACK_GRID_POS = 0x0178, /* D, H.264 */
+ FRAME_PACK_ENC_INFO = 0x017C, /* E, H.264 */
+};
+
+int init_shm(struct mfc_inst_ctx *ctx);
+void write_shm(struct mfc_inst_ctx *ctx, unsigned int data, unsigned int offset);
+unsigned int read_shm(struct mfc_inst_ctx *ctx, unsigned int offset);
+
+#endif /* __MFC_SHM_H */
diff --git a/drivers/media/video/samsung/tsi/Kconfig b/drivers/media/video/samsung/tsi/Kconfig
new file mode 100644
index 0000000..6de7906
--- /dev/null
+++ b/drivers/media/video/samsung/tsi/Kconfig
@@ -0,0 +1,19 @@
+#
+# Configuration for the Samsung Transport Stream Interface (TSI)
+#
+
+config VIDEO_TSI
+ bool "Samsung Transport Stream Interface"
+ depends on VIDEO_SAMSUNG && (CPU_EXYNOS4210 || CPU_S5PC100 || CPU_S5PC110 || CPU_S5PV210 || CPU_S5PV310)
+ default n
+ ---help---
+ This is a Transport Stream Interface driver for Samsung SoCs
+ (S5PC100/S5PC110/S5PV210/EXYNOS4210).
+
+config VIDEO_TSI_DEBUG
+ bool "print TSI debug message"
+ depends on VIDEO_TSI
+ default n
+config TSI_LIST_DEBUG
+ bool "print TSI list debug message"
+ depends on VIDEO_TSI
+ default n
diff --git a/drivers/media/video/samsung/tsi/Makefile b/drivers/media/video/samsung/tsi/Makefile
new file mode 100644
index 0000000..9bbac3e
--- /dev/null
+++ b/drivers/media/video/samsung/tsi/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the tsi device drivers.
+#
+obj-$(CONFIG_VIDEO_TSI) += s3c-tsi.o
diff --git a/drivers/media/video/samsung/tsi/s3c-tsi.c b/drivers/media/video/samsung/tsi/s3c-tsi.c
new file mode 100644
index 0000000..50a1ae0
--- /dev/null
+++ b/drivers/media/video/samsung/tsi/s3c-tsi.c
@@ -0,0 +1,959 @@
+/* linux/drivers/media/video/samsung/s3c-tsi.c
+ *
+ * Driver file for Samsung Transport Stream Interface
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsungsemi.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <mach/irqs.h>
+#include <mach/gpio.h>
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+#include <mach/map.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-tsi.h>
+#else
+#include <plat/map.h>
+#include <plat/regs-clock.h>
+#include <plat/regs-tsi.h>
+#endif
+#include <plat/gpio-cfg.h>
+
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#endif
+
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+/* Size of the coherent DMA area holding received TS data */
+#define TSI_BUF_SIZE (128*1024)
+#define TSI_PKT_CNT 16
+#else
+#define TSI_BUF_SIZE (256*1024)
+#endif
+
+/* TS clock glitch filter on/off (programmed into S3C_TS_CON) */
+enum filter_mode {
+ OFF,
+ ON
+};
+
+/* Hardware PID filtering: pass all packets through, or filter by PID */
+enum pid_filter_mode {
+ BYPASS = 0,
+ FILTERING
+};
+
+/* Bit order of the incoming TS data bytes */
+enum data_byte_order {
+ MSB2LSB = 0,
+ LSB2MSB
+};
+/* One DMA buffer chunk. A chunk sits on exactly one of the
+ * free/partial/full lists at any time (see tsi_dev below). */
+typedef struct {
+ struct list_head list;
+ dma_addr_t addr; /* DMA (bus) address of the chunk */
+ void *buf; /* kernel virtual address of the chunk */
+ u32 len; /* chunk length in bytes */
+} tsi_pkt;
+
+
+/* Static configuration written into S3C_TS_CON by s3c_tsi_setup() */
+typedef struct {
+ enum filter_mode flt_mode;
+ enum pid_filter_mode pid_flt_mode;
+ enum data_byte_order byte_order;
+ u16 burst_len; /* DMA burst length field */
+ u8 sync_detect; /* sync detect mode, written to S3C_TS_SYNC */
+ u8 byte_swap; /* set in probe() but never written to hardware */
+ u16 pad_pattern;
+ u16 num_packet; /* TS packets accumulated per interrupt */
+} s3c_tsi_conf;
+
+
+/* Per-device state; a single instance (tsi_priv) is allocated in probe() */
+typedef struct {
+ spinlock_t tsi_lock; /* protects the three packet lists */
+ struct clk *tsi_clk;
+ struct resource *tsi_mem;
+/* struct resource *tsi_irq; */
+ void __iomem *tsi_base; /* ioremapped register base */
+ int tsi_irq;
+ int running; /* non-zero while capture is active */
+#if defined(CONFIG_PM) && defined(CONFIG_TARGET_LOCALE_NTT)
+ int last_running_state; /* saved across suspend/resume */
+#endif
+ int new_pkt; /* a filled buffer is waiting on full_list */
+ dma_addr_t tsi_buf_phy;
+ void *tsi_buf_virt;
+ u32 tsi_buf_size;
+ s3c_tsi_conf *tsi_conf;
+ struct list_head free_list; /* empty buffers ready for DMA */
+ struct list_head full_list; /* filled buffers waiting for read() */
+ struct list_head partial_list; /* buffers currently owned by hardware */
+ wait_queue_head_t read_wq; /* readers sleep here until new_pkt */
+} tsi_dev;
+
+/* Single driver-wide instance, allocated in s3c_tsi_probe() */
+tsi_dev *tsi_priv;
+
+/* Platform device cached by probe() so open()/ioctl() can reach drvdata */
+static struct platform_device *s3c_tsi_dev;
+
+/* #define DRIVER_LOGIC_CHK */
+#ifdef DRIVER_LOGIC_CHK
+/* Timer used to fake RX interrupts when exercising the list logic */
+static struct timer_list tsi_timer;
+#endif
+
+
+/* debug macro */
+/* Unconditional printk helpers; the lower-case tsi_* wrappers below
+ * compile away unless the matching CONFIG_*_DEBUG option is enabled. */
+#define TSI_DEBUG(fmt, ...) \
+ do { \
+ printk( \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+#define TSI_WARN(fmt, ...) \
+ do { \
+ printk(KERN_WARNING \
+ fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define TSI_ERROR(fmt, ...) \
+ do { \
+ printk(KERN_ERR \
+ "%s: " fmt, __func__, ##__VA_ARGS__); \
+ } while (0)
+
+
+/*#define CONFIG_VIDEO_TSI_DEBUG */
+#ifdef CONFIG_VIDEO_TSI_DEBUG
+#define tsi_dbg(fmt, ...) TSI_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define tsi_dbg(fmt, ...)
+#endif
+
+#define tsi_warn(fmt, ...) TSI_WARN(fmt, ##__VA_ARGS__)
+#define tsi_err(fmt, ...) TSI_ERROR(fmt, ##__VA_ARGS__)
+
+/* NOTE(review): tsi_list_dbg prints unconditionally; its call sites are
+ * all guarded by CONFIG_TSI_LIST_DEBUG instead. */
+#define tsi_list_dbg(fmt, ...) TSI_DEBUG(fmt, ##__VA_ARGS__)
+
+
+#ifdef CONFIG_TSI_LIST_DEBUG
+/*
+ * Dump every tsi_pkt node on @head: index, node address, DMA address,
+ * virtual address and length. Debug aid only (CONFIG_TSI_LIST_DEBUG).
+ */
+void list_debug(struct list_head *head)
+{
+	int i;
+	tsi_pkt *pkt;
+
+	i = 1;
+	list_for_each_entry(pkt, head, list) {
+		/* Fix: the original swapped the conversions -- it printed
+		 * the node pointer with %x and the u32 DMA address with %p.
+		 * Use %p for pointers and %x for the dma_addr_t. */
+		tsi_list_dbg(" node %d node_addr %p physical add %x virt add %p size %d\n",
+			i, pkt, pkt->addr, pkt->buf, pkt->len);
+		i++;
+	}
+}
+#endif
+
+/*This should be done in platform*/
+/* Route the TSI pads (CLK, DTEN, DATA and -- on non-NTT builds -- SYNC)
+ * to GPIO special function 4 with pull-ups disabled.
+ * NOTE(review): CLK/DTEN use EXYNOS4210_GPE0 while the non-NTT DATA/SYNC
+ * use S5PV310_GPE0 -- presumably aliases of the same bank; confirm
+ * against mach/gpio.h. */
+void s3c_tsi_set_gpio(void)
+{
+ /* CLK */
+ s3c_gpio_cfgpin(EXYNOS4210_GPE0(0), S3C_GPIO_SFN(4));
+ s3c_gpio_setpull(EXYNOS4210_GPE0(0), S3C_GPIO_PULL_NONE);
+
+ /* DTEN */
+ s3c_gpio_cfgpin(EXYNOS4210_GPE0(2), S3C_GPIO_SFN(4));
+ s3c_gpio_setpull(EXYNOS4210_GPE0(2), S3C_GPIO_PULL_NONE);
+
+#if defined(CONFIG_TARGET_LOCALE_NTT)
+ printk(" %s : system_rev %d\n", __func__, system_rev);
+
+ /* NTT boards route the DATA pad only from board revision 11 on */
+ if (system_rev >= 11) {
+ /* DATA */
+ s3c_gpio_cfgpin(EXYNOS4210_GPE0(3), S3C_GPIO_SFN(4));
+ s3c_gpio_setpull(EXYNOS4210_GPE0(3), S3C_GPIO_PULL_NONE);
+ }
+#else
+ /* DATA */
+ s3c_gpio_cfgpin(S5PV310_GPE0(3), S3C_GPIO_SFN(4));
+ s3c_gpio_setpull(S5PV310_GPE0(3), S3C_GPIO_PULL_NONE);
+#endif
+
+#if !defined(CONFIG_TARGET_LOCALE_NTT)
+ /* SYNC */
+ s3c_gpio_cfgpin(S5PV310_GPE0(1), S3C_GPIO_SFN(4));
+ s3c_gpio_setpull(S5PV310_GPE0(1), S3C_GPIO_PULL_NONE);
+#endif
+}
+
+
+/* Assert the software-reset bit in the TS control register. */
+void s3c_tsi_reset(tsi_dev *tsi)
+{
+	u32 ctrl = readl(tsi->tsi_base + S3C_TS_CON);
+
+	ctrl |= S3C_TSI_SWRESET;
+	writel(ctrl, tsi->tsi_base + S3C_TS_CON);
+}
+
+/* Program the TS clock time-out counter register. */
+void s3c_tsi_set_timeout(u32 count, tsi_dev *tsi)
+{
+	void __iomem *reg = tsi->tsi_base + S3C_TS_CNT;
+
+	writel(count, reg);
+}
+
+/*
+ * Return the first packet on @head, or NULL if the list is empty.
+ * The entry is NOT removed: callers move it to another list themselves,
+ * taking tsi_lock again. The lock here only guards the emptiness check
+ * and the head read.
+ */
+tsi_pkt *tsi_get_pkt(tsi_dev *tsi, struct list_head *head)
+{
+ unsigned long flags;
+ tsi_pkt *pkt;
+ spin_lock_irqsave(&tsi->tsi_lock, flags);
+
+ if (list_empty(head)) {
+ tsi_err("TSI %p list is null\n", head);
+ spin_unlock_irqrestore(&tsi->tsi_lock, flags);
+ return NULL;
+ }
+ pkt = list_first_entry(head, tsi_pkt, list);
+ spin_unlock_irqrestore(&tsi->tsi_lock, flags);
+
+ return pkt;
+}
+
+/* Write the next DMA destination address into the given register.
+ * NOTE(review): @reg is a u32 holding an __iomem address -- only valid
+ * on 32-bit targets. */
+void s3c_tsi_set_dest_addr(dma_addr_t addr, u32 reg)
+{
+ writel(addr, reg);
+}
+
+/* Select the sync-detect mode (register passed as a u32, as above). */
+void s3c_tsi_set_sync_mode(u8 mode, u32 reg)
+{
+ writel(mode, reg);
+}
+
+/* Gate the TS clock on (non-zero @enable) or off via the CLKCON register. */
+void s3c_tsi_set_clock(u8 enable, u32 reg)
+{
+ u32 val = 0;
+ if (enable)
+ val |= 0x1;
+ writel(val, reg);
+}
+
+/* Unmask every TSI interrupt source. */
+void tsi_enable_interrupts(tsi_dev *tsi)
+{
+ u32 mask;
+ /* Enable all the interrupts... */
+ mask = 0xFF;
+ writel(mask, (tsi->tsi_base + S3C_TS_INTMASK));
+}
+
+/* Mask every TSI interrupt source. */
+void tsi_disable_interrupts(tsi_dev *tsi)
+{
+ writel(0, (tsi->tsi_base + S3C_TS_INTMASK));
+}
+
+/*
+ * Prime the engine with two buffers from the free list, point the DMA
+ * base at the first one, start the TS clock and unmask interrupts.
+ * Returns 0 on success, -1 if no free buffer is available.
+ * NOTE(review): if the SECOND buffer is unavailable the TS clock has
+ * already been started and is left running -- confirm intended.
+ */
+static int s3c_tsi_start(tsi_dev *tsi)
+{
+ unsigned long flags;
+ u32 pkt_size;
+ tsi_pkt *pkt1;
+ pkt1 = tsi_get_pkt(tsi, &tsi->free_list);
+ if (pkt1 == NULL) {
+ tsi_err("Failed to start TSI--No buffers avaialble\n");
+ return -1;
+ }
+ pkt_size = pkt1->len;
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+ /* when set the TS BUF SIZE to the S3C_TS_SIZE,
+ if you want get a 10-block TS from TSIF,
+ you should set the value of S3C_TS_SIZE as 47*10(not 188*10)
+ This register get a value of word-multiple values.
+ So, pkt_size which is counted to BYTES must be divided by 4
+ (2 bit shift lefted)
+ Commented by sjinu, 2009_03_18
+ */
+ writel(pkt_size>>2, (tsi->tsi_base+S3C_TS_SIZE));
+#else
+ writel(pkt_size, (tsi->tsi_base+S3C_TS_SIZE));
+#endif
+ s3c_tsi_set_dest_addr(pkt1->addr, (u32)(tsi->tsi_base+S3C_TS_BASE));
+
+ spin_lock_irqsave(&tsi->tsi_lock, flags);
+ list_move_tail(&pkt1->list, &tsi->partial_list);
+ spin_unlock_irqrestore(&tsi->tsi_lock, flags);
+ /* start the clock */
+ s3c_tsi_set_clock(TSI_CLK_START, (u32)(tsi->tsi_base+S3C_TS_CLKCON));
+ /* set the next buffer immediatly */
+ pkt1 = tsi_get_pkt(tsi, &tsi->free_list);
+ if (pkt1 == NULL) {
+ tsi_err("Failed to start TSI--No buffers avaialble\n");
+ return -1;
+ }
+ s3c_tsi_set_dest_addr(pkt1->addr, (u32)(tsi->tsi_base+S3C_TS_BASE));
+ spin_lock_irqsave(&tsi->tsi_lock, flags);
+ list_move_tail(&pkt1->list, &tsi->partial_list);
+ spin_unlock_irqrestore(&tsi->tsi_lock, flags);
+ tsi_enable_interrupts(tsi);
+
+#ifdef CONFIG_TSI_LIST_DEBUG1
+ tsi_list_dbg("Debugging Partial list\n");
+ list_debug(&tsi->partial_list);
+ tsi_list_dbg("Debugging free list\n");
+ list_debug(&tsi->free_list);
+#endif
+ return 0;
+}
+
+/*
+ * Stop reception: mask interrupts, gate the TS clock and return every
+ * queued buffer (full and partial) to the free list, all under tsi_lock.
+ * Always returns 0.
+ * NOTE(review): clears new_pkt through the global tsi_priv rather than
+ * @tsi -- equivalent only while there is a single device instance.
+ */
+static int s3c_tsi_stop(tsi_dev *tsi)
+{
+ unsigned long flags;
+ tsi_pkt *pkt;
+ struct list_head *full = &tsi->full_list;
+ struct list_head *partial = &tsi->partial_list;
+
+ spin_lock_irqsave(&tsi->tsi_lock, flags);
+ #ifdef DRIVER_LOGIC_CHK
+ del_timer(&tsi_timer);
+ #endif
+
+ tsi_disable_interrupts(tsi);
+ s3c_tsi_set_clock(TSI_CLK_STOP, (u32)(tsi->tsi_base+S3C_TS_CLKCON));
+ /* move all the packets from partial and full list to free list */
+ while (!list_empty(full)) {
+ pkt = list_entry(full->next, tsi_pkt, list);
+ list_move_tail(&pkt->list, &tsi->free_list);
+ }
+
+ while (!list_empty(partial)) {
+ pkt = list_entry(partial->next, tsi_pkt, list);
+ list_move_tail(&pkt->list, &tsi->free_list);
+ }
+ tsi->running = 0;
+ tsi_priv->new_pkt = 0;
+ spin_unlock_irqrestore(&tsi->tsi_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Program the controller from the cached s3c_tsi_conf: soft-reset the
+ * block, set the clock time-out counter, rewrite the filter/byte-order/
+ * burst/interrupt fields of S3C_TS_CON, and set the sync-detect mode.
+ */
+void s3c_tsi_setup(tsi_dev *tsi)
+{
+ u32 tscon;
+ s3c_tsi_conf *conf = tsi->tsi_conf;
+ s3c_tsi_reset(tsi);
+ s3c_tsi_set_timeout(TS_TIMEOUT_CNT_MAX, tsi);
+
+ tscon = readl((tsi->tsi_base+S3C_TS_CON));
+
+ /* clear every field we are about to program */
+ tscon &= ~(S3C_TSI_SWRESET_MASK|S3C_TSI_CLKFILTER_MASK|
+ S3C_TSI_BURST_LEN_MASK | S3C_TSI_INT_FIFO_FULL_INT_ENA_MASK |
+ S3C_TSI_SYNC_MISMATCH_INT_MASK | S3C_TSI_PSUF_INT_MASK|
+ S3C_TSI_PSOF_INT_MASK | S3C_TSI_TS_CLK_TIME_OUT_INT_MASK |
+ S3C_TSI_TS_ERROR_MASK | S3C_TSI_PID_FILTER_MASK |
+ S3C_TSI_ERROR_ACTIVE_MASK | S3C_TSI_DATA_BYTE_ORDER_MASK |
+ S3C_TSI_TS_VALID_ACTIVE_MASK | S3C_TSI_SYNC_ACTIVE_MASK |
+ S3C_TSI_CLK_INVERT_MASK);
+
+ tscon |= (conf->flt_mode << S3C_TSI_CLKFILTER_SHIFT);
+ tscon |= (conf->pid_flt_mode << S3C_TSI_PID_FILTER_SHIFT);
+ tscon |= (conf->byte_order << S3C_TSI_DATA_BYTE_ORDER_SHIFT);
+ tscon |= (conf->burst_len << S3C_TSI_BURST_LEN_SHIFT);
+ tscon |= (conf->pad_pattern << S3C_TSI_PAD_PATTERN_SHIFT);
+
+ tscon |= (S3C_TSI_OUT_BUF_FULL_INT_ENA | S3C_TSI_INT_FIFO_FULL_INT_ENA);
+ tscon |= (S3C_TSI_SYNC_MISMATCH_INT_SKIP | S3C_TSI_PSUF_INT_SKIP |
+ S3C_TSI_PSOF_INT_SKIP);
+ tscon |= (S3C_TSI_TS_CLK_TIME_OUT_INT);
+ /* These values are bd dependent? */
+ tscon |= (S3C_TSI_TS_VALID_ACTIVE_HIGH | S3C_TSI_CLK_INVERT_HIGH);
+ writel(tscon, (tsi->tsi_base+S3C_TS_CON));
+ s3c_tsi_set_sync_mode(conf->sync_detect, (u32)(tsi->tsi_base+S3C_TS_SYNC));
+}
+
+/*
+ * Buffer-full handling (called from the ISR): move the buffer the
+ * hardware just filled from partial_list to full_list, hand the next
+ * free buffer to the hardware, flag new_pkt and wake any sleeping
+ * reader. If either list runs dry the channel is stopped.
+ */
+void s3c_tsi_rx_int(tsi_dev *tsi)
+{
+ tsi_pkt *pkt;
+ /* deque the pcket from partial list to full list
+ incase the free list is empty, stop the tsi.. */
+
+ pkt = tsi_get_pkt(tsi, &tsi->partial_list);
+
+ /* this situation should not come.. stop_tsi */
+ if (pkt == NULL) {
+ tsi_err("TSI..Receive interrupt without buffer\n");
+ s3c_tsi_stop(tsi);
+ return;
+ }
+
+ tsi_dbg("moving %p node %x phy %p virt to full list\n",
+ pkt, pkt->addr, pkt->buf);
+
+ list_move_tail(&pkt->list, &tsi->full_list);
+
+ pkt = tsi_get_pkt(tsi, &tsi->free_list);
+ if (pkt == NULL) {
+ /* this situation should not come.. stop_tsi */
+ tsi_err("TSI..No more free bufs..stopping channel\n");
+ s3c_tsi_stop(tsi);
+ return;
+ }
+ list_move_tail(&pkt->list, &tsi->partial_list);
+
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+ /* namkh, request from Abraham
+ If there arise a buffer-full interrupt,
+ a new ts buffer address should be set.
+
+ Commented by sjinu, 2009_03_18 */
+ s3c_tsi_set_dest_addr(pkt->addr, (u32)(tsi->tsi_base+S3C_TS_BASE));
+#endif
+
+#ifdef CONFIG_TSI_LIST_DEBUG
+ tsi_list_dbg("Debugging Full list\n");
+ list_debug(&tsi->full_list);
+ tsi_list_dbg("Debugging Partial list\n");
+ list_debug(&tsi->partial_list);
+#endif
+ tsi->new_pkt = 1;
+ wake_up(&tsi->read_wq);
+}
+
+
+/* TSI interrupt handler: acknowledge all pending bits, then service a
+ * buffer-full event via s3c_tsi_rx_int(). Other sources are ack'd only. */
+static irqreturn_t s3c_tsi_irq(int irq, void *dev_id)
+{
+	tsi_dev *tsi = platform_get_drvdata((struct platform_device *)dev_id);
+	u32 pending = readl(tsi->tsi_base + S3C_TS_INT);
+
+	tsi_dbg("INTPND is %x\n", pending);
+	writel(pending, tsi->tsi_base + S3C_TS_INT);
+
+	if (pending & S3C_TSI_OUT_BUF_FULL)
+		s3c_tsi_rx_int(tsi);
+
+	return IRQ_HANDLED;
+}
+
+/* Last-close hook: stop the channel if it is still running; buffers are
+ * returned to the free list by s3c_tsi_stop(). */
+static int s3c_tsi_release(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+ tsi_dev *tsi = file->private_data;
+ tsi_dbg("TSI_RELEASE\n");
+ if (tsi->running) {
+ tsi_dbg("TSI_RELEASE stopping\n");
+ tsi->running = 0;
+ ret = s3c_tsi_stop(tsi);
+ tsi_dbg("TSI_RELEASE LIST cleaned\n");
+ }
+
+#ifdef CONFIG_TSI_LIST_DEBUG
+ tsi_list_dbg("Debugging Full list\n");
+ list_debug(&tsi->full_list);
+ tsi_list_dbg("Debugging Partial list\n");
+ list_debug(&tsi->partial_list);
+#endif
+
+ return ret;
+}
+
+/* mmap hook: reports success but maps nothing, so the returned VMA is
+ * unusable. NOTE(review): looks like a stub -- confirm user space only
+ * uses read(). */
+int s3c_tsi_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+/* poll/select hook: readable whenever the ISR has flagged a filled
+ * buffer via new_pkt. */
+static unsigned int s3c_tsi_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ tsi_dev *tsi = file->private_data;
+
+ poll_wait(file, &tsi->read_wq, wait);
+
+ if (tsi->new_pkt)
+ mask |= (POLLIN | POLLRDNORM);
+
+ return mask;
+}
+#endif
+
+/*
+ * Copy filled TS buffers to user space.
+ *
+ * Sleeps interruptibly until the ISR queues a packet on full_list and
+ * sets new_pkt. Returns the number of bytes copied or a negative errno.
+ * @pos is ignored; the device behaves as a pure stream.
+ *
+ * Fix: both branches used to dereference the result of tsi_get_pkt()
+ * without a NULL check -- the lists can be emptied (s3c_tsi_stop())
+ * between wake-up and the fetch, oopsing in that window.
+ */
+static ssize_t s3c_tsi_read(struct file *file, char *buf, size_t count, loff_t *pos)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 len = 0, pkt_size = 0;
+	tsi_pkt *pkt;
+	tsi_dev *tsi = file->private_data;
+	struct list_head *full = &tsi->full_list;
+
+#ifdef CONFIG_TSI_LIST_DEBUG
+	tsi_list_dbg("Debugging Full list\n");
+	tsi_dbg("count is %d\n", count);
+	list_debug(&tsi->full_list);
+#endif
+
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+	ret = wait_event_interruptible(tsi->read_wq, tsi->new_pkt);
+	if (ret < 0) {
+		tsi_dbg("woken up from signal..returning\n");
+		return ret;
+	}
+	pkt = tsi_get_pkt(tsi, full);
+	/* channel may have been stopped while we slept */
+	if (pkt == NULL)
+		return -EIO;
+
+	pkt_size = pkt->len; /* pkt_size should be multiple of 188 bytes. */
+
+	tsi_dbg("pkt_size is %d\n", pkt_size);
+	if (pkt_size > count)
+		pkt_size = count;
+
+	if (copy_to_user((buf+len), pkt->buf, pkt_size)) {
+		tsi_dbg("copy user fail\n");
+		ret = -EFAULT;
+		return ret;
+	}
+
+	len += pkt_size;
+	count -= pkt_size;
+	tsi_dbg("len is%d count %d pkt_size %d\n", len, count, pkt_size);
+	ret = len;
+	spin_lock_irqsave(&tsi->tsi_lock, flags);
+	list_move(&pkt->list, &tsi->free_list);
+	spin_unlock_irqrestore(&tsi->tsi_lock, flags);
+
+	if (list_empty(full))
+		tsi->new_pkt = 0;
+#else
+	while (count > 0) {
+		/* deque packet from full list */
+		pkt = tsi_get_pkt(tsi, full);
+		if (pkt == NULL) {
+			ret = wait_event_interruptible(tsi->read_wq, tsi->new_pkt);
+
+			if (ret < 0) {
+				tsi_dbg("woken up from signal..returning\n");
+				return ret;
+			}
+			tsi_dbg("woken up proprt\n");
+			pkt = tsi_get_pkt(tsi, full);
+			/* still empty: channel stopped while we slept --
+			 * return whatever was copied so far */
+			if (pkt == NULL)
+				break;
+		}
+		pkt_size = pkt->len * 4;
+		if (pkt_size > count)
+			pkt_size = count;
+
+		if (copy_to_user((buf+len), pkt->buf, pkt_size)) {
+			tsi_dbg("copy user fail\n");
+			ret = -EFAULT;
+			break;
+		}
+
+		len += pkt_size;
+		count -= pkt_size;
+		tsi_dbg("len is%d count %d pkt_size %d\n", len, count, pkt_size);
+		ret = len;
+		spin_lock_irqsave(&tsi->tsi_lock, flags);
+		list_move(&pkt->list, &tsi->free_list);
+		spin_unlock_irqrestore(&tsi->tsi_lock, flags);
+
+		if (list_empty(full))
+			tsi->new_pkt = 0;
+	}
+#endif
+
+#ifdef CONFIG_TSI_LIST_DEBUG1
+	tsi_list_dbg("Debugging Free list\n");
+	list_debug(&tsi->free_list);
+#endif
+	return ret;
+}
+
+/* Private ioctl command codes shared with user space */
+#define TSI_TRIGGER 0xAABB
+#define TSI_STOP 0xAACC
+
+/*
+ * Only two commands are supported: TSI_TRIGGER starts streaming
+ * (-EBUSY if already running) and TSI_STOP halts it. Unknown commands
+ * fall through and return 0 rather than -ENOTTY.
+ */
+static long s3c_tsi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret = 0;
+ tsi_dev *tsi = platform_get_drvdata(s3c_tsi_dev);
+ /* currently only two ioctl for tigger and stop are provided.. */
+ tsi_dbg("TSI cmd is %x\n", cmd);
+ switch (cmd) {
+ case TSI_TRIGGER:
+ if (tsi->running)
+ return -EBUSY;
+ tsi->running = 1;
+ ret = s3c_tsi_start(tsi);
+ #ifdef DRIVER_LOGIC_CHK
+ tsi_timer.expires = jiffies + HZ/10;
+ add_timer(&tsi_timer);
+ #endif
+ break;
+ case TSI_STOP:
+ tsi->running = 0;
+ ret = s3c_tsi_stop(tsi);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/* Open hook: reprogram the controller and stash the device state for
+ * the other file operations. NOTE(review): no exclusive-open guard --
+ * concurrent opens share one state. */
+static int s3c_tsi_open(struct inode *inode, struct file *file)
+{
+ tsi_dev *s3c_tsi = platform_get_drvdata(s3c_tsi_dev);
+ tsi_dbg(" %s\n", __func__);
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+ /* Fix the TSI data problem (Don't generated waking up sleep state)
+ clk_enable(s3c_tsi->tsi_clk);
+ */
+ s3c_tsi_setup(s3c_tsi);
+#endif
+ file->private_data = s3c_tsi;
+ return 0;
+}
+
+/* File operations for the /dev/s3c-tsi misc device.
+ * Converted from the obsolete GCC "label:" initializer extension to
+ * standard C99 designated initializers. */
+static struct file_operations tsi_fops = {
+	.owner		= THIS_MODULE,
+	.open		= s3c_tsi_open,
+	.release	= s3c_tsi_release,
+	.unlocked_ioctl	= s3c_tsi_ioctl,
+	.read		= s3c_tsi_read,
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+	.poll		= s3c_tsi_poll,
+#endif
+	.mmap		= s3c_tsi_mmap,
+};
+
+
+/* Misc device node (dynamic minor) exposing the TSI stream.
+ * Converted from the obsolete GCC "label:" initializer extension to
+ * standard C99 designated initializers. */
+static struct miscdevice s3c_tsi_miscdev = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "s3c-tsi",
+	.fops	= &tsi_fops,
+};
+
+/*
+ * Carve the coherent DMA area into fixed-size chunks and queue one
+ * tsi_pkt node per chunk on @head. Returns 0 if at least one node was
+ * queued, -ENOMEM if the very first allocation fails.
+ * NOTE(review): the non-S5PV210 path advances by i*4*buf_size although
+ * each chunk is only buf_size long -- looks like a stride bug that
+ * over-runs the DMA area; confirm against the original SoC code.
+ */
+static int tsi_setup_bufs(tsi_dev *dev, struct list_head *head)
+{
+ tsi_pkt *pkt;
+ u32 tsi_virt, tsi_size, buf_size;
+ u16 num_buf;
+ dma_addr_t tsi_phy;
+ int i;
+
+ tsi_phy = dev->tsi_buf_phy;
+ tsi_virt = (u32) dev->tsi_buf_virt;
+ tsi_size = dev->tsi_buf_size;
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+ /* TSI generates interrupt after filling this many bytes */
+ buf_size = dev->tsi_conf->num_packet * TS_PKT_SIZE*TSI_PKT_CNT;
+#else
+ /* TSI generates interrupt after filling this many bytes */
+ buf_size = dev->tsi_conf->num_packet * TS_PKT_SIZE;
+#endif
+ num_buf = (tsi_size / buf_size);
+
+ for (i = 0; i < num_buf; i++) {
+ pkt = kmalloc(sizeof(tsi_pkt), GFP_KERNEL);
+ if (!pkt)
+ return list_empty(head) ? -ENOMEM : 0 ;
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+ /* Address should be byte-aligned
+ Commented by sjinu, 2009_03_18 */
+ pkt->addr = ((u32)tsi_phy + i*buf_size);
+ pkt->buf = (void *)(u8 *)((u32)tsi_virt + i*buf_size);
+#else
+ pkt->addr = (tsi_phy + i*4*buf_size);
+ pkt->buf = (void *)(tsi_virt + i*4*buf_size);
+#endif
+ pkt->len = buf_size;
+ list_add_tail(&pkt->list, head);
+ }
+
+ tsi_dbg("total nodes calulated %d buf_size %d\n", num_buf, buf_size);
+#ifdef CONFIG_TSI_LIST_DEBUG1
+ list_debug(head);
+#endif
+
+return 0;
+
+}
+
+#ifdef DRIVER_LOGIC_CHK
+/* Remaining fake-interrupt firings for the test harness */
+int timer_count = 100;
+
+/* Test harness (DRIVER_LOGIC_CHK only): fakes a buffer-full interrupt
+ * and re-arms the timer until timer_count runs out. */
+void tsi_timer_function(u32 dev)
+{
+ tsi_dev *tsi = (tsi_dev *)(dev);
+ s3c_tsi_rx_int(tsi);
+ tsi_timer.expires = jiffies + HZ/100;
+ timer_count--;
+ if (timer_count > 0)
+ add_timer(&tsi_timer);
+}
+#endif
+
+/*
+ * Probe: allocate driver state and default configuration, claim the
+ * clock / memory region / IRQ, map the registers, allocate the coherent
+ * DMA area, build the free-buffer list and register the misc device.
+ *
+ * Fixes vs. the original:
+ *  - platform_get_irq() returns a negative errno on failure; it was
+ *    tested against 0 and every real error slipped through.
+ *  - clk_get() returns ERR_PTR(), not NULL, on failure.
+ *  - every early error path now releases what was acquired (the
+ *    original leaked tsi_priv/conf/clock/DMA memory on most paths).
+ *  - dropped the pointless `static` on the locals and the cast on the
+ *    irq handler.
+ */
+static int s3c_tsi_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int size;
+	int ret;
+	s3c_tsi_conf *conf;
+	dma_addr_t map_dma;
+	struct device *dev = &pdev->dev;
+
+	tsi_dbg(" %s\n", __func__);
+	tsi_priv = kmalloc(sizeof(tsi_dev), GFP_KERNEL);
+	if (tsi_priv == NULL) {
+		printk(KERN_ERR "NO Memory for tsi allocation\n");
+		return -ENOMEM;
+	}
+	conf = kmalloc(sizeof(s3c_tsi_conf), GFP_KERNEL);
+	if (conf == NULL) {
+		printk(KERN_ERR "NO Memory for tsi conf allocation\n");
+		kfree(tsi_priv);
+		return -ENOMEM;
+	}
+	/* Initialise the dafault conf parameters..
+	 * this should be obtained from the platform data and ioctl
+	 * move this to platform later */
+
+	conf->flt_mode = OFF;
+	conf->pid_flt_mode = BYPASS;
+	conf->byte_order = MSB2LSB;
+#if defined(CONFIG_TARGET_LOCALE_NTT)
+	conf->sync_detect = S3C_TSI_SYNC_DET_MODE_TS_SYNC_BYTE;
+#else
+	conf->sync_detect = S3C_TSI_SYNC_DET_MODE_TS_SYNC8;
+#endif
+
+#if defined(CONFIG_CPU_S5PV210) || defined(CONFIG_TARGET_LOCALE_NTT)
+	/*
+	 to avoid making interrupt during getting the TS from TS buffer,
+	 we use the burst-length as 8 beat.
+	 This burst-length may be changed next time.
+	 Commented by sjinu, 2009_03_18
+	*/
+	conf->burst_len = 2;
+#else
+	conf->burst_len = 0;
+#endif
+	conf->byte_swap = 1; /* little endian */
+	conf->pad_pattern = 0; /* this might vary from bd to bd */
+	conf->num_packet = TS_NUM_PKT; /* this might vary from bd to bd */
+
+	tsi_priv->tsi_conf = conf;
+	tsi_priv->tsi_buf_size = TSI_BUF_SIZE;
+
+	tsi_priv->tsi_clk = clk_get(NULL, "tsi");
+	/* clk_get() reports failure with ERR_PTR(), not NULL */
+	if (IS_ERR(tsi_priv->tsi_clk)) {
+		printk(KERN_ERR "Failed to get TSI clock\n");
+		ret = -ENOENT;
+		goto err_conf;
+	}
+	clk_enable(tsi_priv->tsi_clk);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		tsi_err("failed to get memory region resouce\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	size = (res->end - res->start) + 1;
+	tsi_priv->tsi_mem = request_mem_region(res->start, size, pdev->name);
+	if (tsi_priv->tsi_mem == NULL) {
+		tsi_err("failed to get memory region\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	/* platform_get_irq() returns a negative errno on failure */
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		tsi_err("failed to get irq resource\n");
+		goto err_res;
+	}
+
+	tsi_priv->tsi_irq = ret;
+	ret = request_irq(tsi_priv->tsi_irq, s3c_tsi_irq, 0, pdev->name, pdev);
+	if (ret != 0) {
+		tsi_err("failed to install irq (%d)\n", ret);
+		goto err_res;
+	}
+
+	tsi_priv->tsi_base = ioremap(tsi_priv->tsi_mem->start, size);
+	if (tsi_priv->tsi_base == NULL) {
+		tsi_err("failed to ioremap() region\n");
+		ret = -EINVAL;
+		goto err_irq;
+	}
+
+	INIT_LIST_HEAD(&tsi_priv->free_list);
+	INIT_LIST_HEAD(&tsi_priv->full_list);
+	INIT_LIST_HEAD(&tsi_priv->partial_list);
+	spin_lock_init(&tsi_priv->tsi_lock);
+	init_waitqueue_head(&tsi_priv->read_wq);
+	tsi_priv->new_pkt = 0;
+	tsi_priv->running = 0;
+#if defined(CONFIG_PM) && defined(CONFIG_TARGET_LOCALE_NTT)
+	tsi_priv->last_running_state = tsi_priv->running;
+#endif
+
+	/* get the dma coherent mem */
+	tsi_priv->tsi_buf_virt = dma_alloc_coherent(dev, tsi_priv->tsi_buf_size, &map_dma, GFP_KERNEL);
+	if (tsi_priv->tsi_buf_virt == NULL) {
+		tsi_err("Failed to claim TSI memory\n");
+		ret = -ENOMEM;
+		goto err_map;
+	}
+
+	tsi_dbg("TSI dev dma mem phy %x virt %p\n", map_dma, tsi_priv->tsi_buf_virt);
+
+	tsi_priv->tsi_buf_phy = map_dma;
+
+	ret = tsi_setup_bufs(tsi_priv, &tsi_priv->free_list);
+	if (ret) {
+		tsi_err("TSI failed to setup pkt list");
+		goto err_dma;
+	}
+
+	platform_set_drvdata(pdev, tsi_priv);
+	s3c_tsi_set_gpio();
+	s3c_tsi_setup(tsi_priv);
+	s3c_tsi_dev = pdev;
+	ret = misc_register(&s3c_tsi_miscdev);
+	if (ret) {
+		tsi_err("Unable to register the s3c-tsi driver\n");
+		goto err_pkts;
+	}
+
+#ifdef DRIVER_LOGIC_CHK
+	init_timer(&tsi_timer);
+	tsi_timer.function = tsi_timer_function;
+	tsi_timer.data = (unsigned long) tsi_priv;
+/*
+	s3c_tsi_start(tsi_priv);
+	s3c_tsi_rx_int(tsi_priv);
+*/
+#endif
+
+	return 0;
+
+err_pkts:
+	/* release the packet nodes queued on the free list
+	 * (tsi_free_packets() is defined below this function) */
+	while (!list_empty(&tsi_priv->free_list)) {
+		tsi_pkt *pkt = list_entry(tsi_priv->free_list.next,
+				tsi_pkt, list);
+		list_del(&pkt->list);
+		kfree(pkt);
+	}
+err_dma:
+	dma_free_coherent(dev, tsi_priv->tsi_buf_size,
+			tsi_priv->tsi_buf_virt, tsi_priv->tsi_buf_phy);
+err_map:
+	iounmap(tsi_priv->tsi_base);
+err_irq:
+	free_irq(tsi_priv->tsi_irq, pdev);
+err_res:
+	release_resource(tsi_priv->tsi_mem);
+err_clk:
+	clk_disable(tsi_priv->tsi_clk);
+	clk_put(tsi_priv->tsi_clk);
+err_conf:
+	kfree(conf);
+	kfree(tsi_priv);
+
+	return ret;
+}
+
+/* Release every tsi_pkt node still queued on the free list. Callers
+ * must have moved all packets there (s3c_tsi_stop()) beforehand. */
+static void tsi_free_packets(tsi_dev *tsi)
+{
+	tsi_pkt *pkt, *tmp;
+
+	list_for_each_entry_safe(pkt, tmp, &tsi->free_list, list) {
+		list_del(&pkt->list);
+		kfree(pkt);
+	}
+}
+
+/*
+ * Remove: stop the channel and undo everything probe() acquired.
+ * Fix: the original left the misc device registered, the registers
+ * mapped, the memory region claimed, the clock enabled and the conf
+ * structure allocated.
+ */
+static int s3c_tsi_remove(struct platform_device *dev)
+{
+	tsi_dev *tsi = platform_get_drvdata((struct platform_device *)dev);
+
+	if (tsi->running)
+		s3c_tsi_stop(tsi);
+
+	misc_deregister(&s3c_tsi_miscdev);
+	/* free allocated memory and nodes */
+	tsi_free_packets(tsi);
+	free_irq(tsi->tsi_irq, dev);
+	dma_free_coherent(&dev->dev, tsi->tsi_buf_size, tsi->tsi_buf_virt, tsi->tsi_buf_phy);
+	iounmap(tsi->tsi_base);
+	release_resource(tsi->tsi_mem);
+	clk_disable(tsi->tsi_clk);
+	clk_put(tsi->tsi_clk);
+	kfree(tsi->tsi_conf);
+	kfree(tsi);
+	return 0;
+}
+
+
+#if defined(CONFIG_PM) && defined(CONFIG_TARGET_LOCALE_NTT)
+/* Suspend (NTT build only): remember whether streaming was active, stop
+ * the channel and gate the clock. */
+static int s3c_tsi_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ tsi_dev *tsi = platform_get_drvdata(s3c_tsi_dev);
+
+ tsi->last_running_state = tsi->running;
+ if (tsi_priv->last_running_state)
+ s3c_tsi_stop(tsi_priv);
+
+ clk_disable(tsi_priv->tsi_clk);
+
+ return 0;
+}
+
+/* Resume: ungate the clock, re-init pads and registers, and restart
+ * streaming if it was active at suspend time. */
+static int s3c_tsi_resume(struct platform_device *pdev)
+{
+ tsi_dev *tsi = platform_get_drvdata(s3c_tsi_dev);
+
+ clk_enable(tsi_priv->tsi_clk);
+ s3c_tsi_set_gpio();
+ s3c_tsi_setup(tsi_priv);
+
+ if (tsi->last_running_state) {
+ tsi->running = 1;
+ s3c_tsi_start(tsi);
+ s3c_tsi_rx_int(tsi);
+ }
+ return 0;
+}
+
+#endif
+
+/* Platform driver glue; PM hooks exist only on the NTT build. */
+static struct platform_driver s3c_tsi_driver = {
+	.probe = s3c_tsi_probe,
+	.remove = s3c_tsi_remove,
+	.shutdown = NULL,
+#if defined(CONFIG_PM) && defined(CONFIG_TARGET_LOCALE_NTT)
+	.suspend = s3c_tsi_suspend,
+	.resume = s3c_tsi_resume,
+#else
+	.suspend = NULL,
+	.resume = NULL,
+#endif
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "s3c-tsi",
+	},
+};
+
+
+
+const char banner[] __initdata = "TSI Driver Version 1.0\n";
+
+/* Module entry: print the banner and register the platform driver. */
+static int __init s3c_tsi_init(void)
+{
+	/* Fix: pass the banner through a "%s" format so printk() never
+	 * interprets it as a format string (format-string hygiene). */
+	printk(KERN_INFO "%s", banner);
+	tsi_dbg(" %s\n", __func__);
+	return platform_driver_register(&s3c_tsi_driver);
+}
+
+/* Module exit: unregister the platform driver. */
+static void __exit s3c_tsi_exit(void)
+{
+	platform_driver_unregister(&s3c_tsi_driver);
+}
+
+
+
+module_init(s3c_tsi_init);
+module_exit(s3c_tsi_exit);
+
+MODULE_AUTHOR("Samsung");
+MODULE_DESCRIPTION("S3C TSI Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/tvout/Kconfig b/drivers/media/video/samsung/tvout/Kconfig
new file mode 100644
index 0000000..176efeb
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/Kconfig
@@ -0,0 +1,174 @@
+#
+# Configuration for TV driver
+#
+
+config VIDEO_TVOUT
+ bool "Samsung TVOUT Driver"
+ depends on VIDEO_SAMSUNG
+ default y
+ ---help---
+ This is a TVOUT driver for Samsung S5P platform
+
+choice
+depends on VIDEO_TVOUT
+prompt "Select default audio channel"
+default VIDEO_TVOUT_2CH_AUDIO
+config VIDEO_TVOUT_2CH_AUDIO
+ bool "2ch audio mode"
+ depends on VIDEO_TVOUT
+ ---help---
+ TV out supports 2 channel audio
+
+config VIDEO_TVOUT_5_1CH_AUDIO
+ bool "5.1ch audio mode"
+ depends on VIDEO_TVOUT
+ ---help---
+ TV out supports 5.1 channel audio
+endchoice
+
+config HDMI_CEC
+ bool "HDMI CEC driver support."
+ depends on VIDEO_TVOUT && ARCH_EXYNOS4
+ default n
+ ---help---
+ This is a HDMI CEC driver for Samsung S5P platform.
+ Check dev node (major 10, minor 242)
+
+config HDMI_EARJACK_MUTE
+ bool "HDMI Earjack support"
+ depends on VIDEO_TVOUT
+ default n
+ ---help---
+ Say y here if you intend to provide sysfs interface for audio
+ framework to control HDMI audio. Android audio framework will
+ refer to the following node:
+ "sys/class/hdmi_audio/hdmi_audio/hdmi_audio_set_ext"
+
+config HDMI_HPD
+ bool "HDMI HPD driver support."
+ depends on VIDEO_TVOUT && ARCH_EXYNOS4
+ default y
+ ---help---
+ This is a HDMI HPD driver for Samsung S5P platform.
+ Check dev node (major 10, minor 243)
+
+config HDMI_CONTROLLED_BY_EXT_IC
+ bool "External HDMI related IC support"
+ depends on HDMI_HPD
+ default n
+ ---help---
+ Say y here if the H/W has external IC to control HDMI hpd and
+ it needs to be controlled by HPD interrupt.
+ For example, the H/W has HDMI level shifter then it should be turned
+ on when HPD interrupt comes.
+
+config HDMI_SWITCH_HPD
+ bool "HDMI HPD switch uevent driver support"
+ depends on HDMI_HPD
+ default y
+ ---help---
+	  Say y here if you intend to use switch uevent instead of
+	  customized kobject uevent. Android framework will refer to
+ the following device node to get HPD event:
+ "/sys/devices/virtual/switch/hdmi/state"
+
+config HDMI_14A_3D
+ bool "HDMI 14A driver support."
+ depends on VIDEO_TVOUT && ARCH_EXYNOS4 && CPU_EXYNOS4212
+ default y
+ ---help---
+ This is a HDMI 1.4A 3D driver for Samsung S5P platform.
+
+config HDMI_PHY_32N
+ bool "HDMI PHY 32N driver support."
+ depends on VIDEO_TVOUT && HDMI_14A_3D && ARCH_EXYNOS4
+ default y
+ ---help---
+ This is a HDMI PHY version for Samsung S5P platform.
+
+config ANALOG_TVENC
+ bool "Analog driver support."
+ depends on VIDEO_TVOUT && ARCH_EXYNOS4 && CPU_EXYNOS4210
+ default n
+ ---help---
+	  This is an analog TVENC driver for the Samsung S5P platform.
+
+config VPLL_USE_FOR_TVENC
+ bool "VPLL use for TVENC."
+ depends on VIDEO_TVOUT && ANALOG_TVENC && ARCH_EXYNOS4
+ default n
+ ---help---
+ This is a VPLL use of TVENC for Samsung S5P platform.
+
+config TV_FB
+ bool "TVOUT frame buffer driver support."
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+
+ depends on VIDEO_TVOUT && FB && ARCH_EXYNOS4
+ default y
+ ---help---
+
+config USER_ALLOC_TVOUT
+ bool "Support pre allocated frame buffer memory."
+ depends on VIDEO_TVOUT && TV_FB
+ default y
+ ---help---
+ TV Driver doesn't allocate memory for frame buffer.
+ So, before enabling TV out, the frame buffer should be allocated.
+
+config TV_FB_NUM
+ int "Index of TVOUT frame buffer"
+ depends on VIDEO_TVOUT && TV_FB && !USER_ALLOC_TVOUT
+ default 5
+ ---help---
+
+config LSI_HDMI_AUDIO_CH_EVENT
+ bool "Support uevent of multi-channel audio info for hdmi"
+ default n
+ ---help---
+ receive audio channel info from platform using ioctl.
+ #define AUDIO_CH_SET_STATE _IOR('H', 101, unsigned int)
+ and send this to platform using uevent
+ audio_ch_switch.name = "hdmi_audio_ch";
+
+config TV_DEBUG
+ bool "TVOUT driver debug message"
+ depends on VIDEO_TVOUT
+ default n
+
+config VP_DEBUG
+ bool "Video Processor debug message"
+ depends on TV_DEBUG
+ default n
+
+config MIXER_DEBUG
+ bool "Mixer debug message"
+ depends on TV_DEBUG
+ default n
+
+config HDMI_DEBUG
+ bool "HDMI debug message"
+ depends on TV_DEBUG
+ default n
+
+config SDO_DEBUG
+ bool "SDO(Composite) debug message"
+ depends on TV_DEBUG
+ default n
+
+config HDCP_DEBUG
+ bool "HDCP debug message"
+ depends on TV_DEBUG
+ default n
+
+config CEC_DEBUG
+ bool "CEC debug message"
+ depends on TV_DEBUG
+ default n
+
+config HPD_DEBUG
+ bool "HPD debug message"
+ depends on TV_DEBUG
+ default n
diff --git a/drivers/media/video/samsung/tvout/Makefile b/drivers/media/video/samsung/tvout/Makefile
new file mode 100644
index 0000000..81d358f
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/Makefile
@@ -0,0 +1,27 @@
+# linux/drivers/media/video/samsung/tvout/Makefile
+#
+# Copyright (c) 2009 Samsung Electronics
+# http://www.samsung.com/
+#
+# Makefile for Samsung TVOUT driver
+
+ifeq ($(CONFIG_VIDEO_TVOUT_DEBUG), y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+ifeq ($(CONFIG_VIDEO_UMP),y)
+ EXTRA_CFLAGS += -Idrivers/media/video/samsung/ump/include
+endif
+
+obj-$(CONFIG_VIDEO_TVOUT) := s5p_tvout.o \
+ s5p_tvout_v4l2.o \
+ s5p_tvout_fb.o \
+ s5p_tvout_common_lib.o \
+ s5p_mixer_ctrl.o \
+ s5p_vp_ctrl.o \
+ s5p_tvif_ctrl.o
+
+obj-$(CONFIG_HDMI_HPD) += s5p_tvout_hpd.o
+
+obj-$(CONFIG_HDMI_CEC) += s5p_tvout_cec.o
+
+obj-$(CONFIG_VIDEO_TVOUT) += hw_if/
diff --git a/drivers/media/video/samsung/tvout/hw_if/Makefile b/drivers/media/video/samsung/tvout/hw_if/Makefile
new file mode 100644
index 0000000..d416a7d
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/hw_if/Makefile
@@ -0,0 +1,19 @@
+# linux/drivers/media/video/samsung/tvout/hw_if/Makefile
+#
+# Copyright (c) 2009 Samsung Electronics
+# http://www.samsung.com/
+#
+# Makefile for Samsung TVOUT driver
+
+ifeq ($(CONFIG_VIDEO_TVOUT_DEBUG), y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+obj-$(CONFIG_VIDEO_TVOUT) += mixer.o \
+ vp.o \
+ hdmi.o \
+ hdcp.o
+
+obj-$(CONFIG_ANALOG_TVENC) += sdo.o
+
+
+obj-$(CONFIG_HDMI_CEC) += cec.o
diff --git a/drivers/media/video/samsung/tvout/hw_if/cec.c b/drivers/media/video/samsung/tvout/hw_if/cec.c
new file mode 100644
index 0000000..5554459
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/hw_if/cec.c
@@ -0,0 +1,262 @@
+/* linux/drivers/media/video/samsung/tvout/hw_if/cec.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * cec ftn file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <mach/regs-clock.h>
+#include <mach/regs-clock.h>
+#include <mach/regs-cec.h>
+
+#include "../s5p_tvout_common_lib.h"
+#include "hw_if.h"
+
+#undef tvout_dbg
+
+#ifdef CONFIG_CEC_DEBUG
+#define tvout_dbg(fmt, ...) \
+ printk(KERN_INFO "\t\t[CEC] %s(): " fmt, \
+ __func__, ##__VA_ARGS__)
+#else
+#define tvout_dbg(fmt, ...)
+#endif
+
+
+#define S5P_HDMI_FIN 24000000
+#define CEC_DIV_RATIO 320000
+
+#define CEC_MESSAGE_BROADCAST_MASK 0x0F
+#define CEC_MESSAGE_BROADCAST 0x0F
+#define CEC_FILTER_THRESHOLD 0x15
+
+static struct resource *cec_mem;
+void __iomem *cec_base;
+
+struct cec_rx_struct cec_rx_struct;
+struct cec_tx_struct cec_tx_struct;
+
+
+void s5p_cec_set_divider(void)
+{
+ u32 div_ratio, reg, div_val;
+
+ div_ratio = S5P_HDMI_FIN / CEC_DIV_RATIO - 1;
+
+ reg = readl(S5P_HDMI_PHY_CONTROL);
+ reg = (reg & ~(0x3FF << 16)) | (div_ratio << 16);
+
+ writel(reg, S5P_HDMI_PHY_CONTROL);
+
+ div_val = CEC_DIV_RATIO * 0.00005 - 1;
+
+ writeb(0x0, cec_base + S5P_CES_DIVISOR_3);
+ writeb(0x0, cec_base + S5P_CES_DIVISOR_2);
+ writeb(0x0, cec_base + S5P_CES_DIVISOR_1);
+ writeb(div_val, cec_base + S5P_CES_DIVISOR_0);
+}
+
+void s5p_cec_enable_rx(void)
+{
+ u8 reg;
+
+ reg = readb(cec_base + S5P_CES_RX_CTRL);
+ reg |= S5P_CES_RX_CTRL_ENABLE;
+ writeb(reg, cec_base + S5P_CES_RX_CTRL);
+}
+
+void s5p_cec_mask_rx_interrupts(void)
+{
+ u8 reg;
+
+ reg = readb(cec_base + S5P_CES_IRQ_MASK);
+ reg |= S5P_CES_IRQ_RX_DONE;
+ reg |= S5P_CES_IRQ_RX_ERROR;
+ writeb(reg, cec_base + S5P_CES_IRQ_MASK);
+}
+
+void s5p_cec_unmask_rx_interrupts(void)
+{
+ u8 reg;
+
+ reg = readb(cec_base + S5P_CES_IRQ_MASK);
+ reg &= ~S5P_CES_IRQ_RX_DONE;
+ reg &= ~S5P_CES_IRQ_RX_ERROR;
+ writeb(reg, cec_base + S5P_CES_IRQ_MASK);
+}
+
+void s5p_cec_mask_tx_interrupts(void)
+{
+ u8 reg;
+ reg = readb(cec_base + S5P_CES_IRQ_MASK);
+ reg |= S5P_CES_IRQ_TX_DONE;
+ reg |= S5P_CES_IRQ_TX_ERROR;
+ writeb(reg, cec_base + S5P_CES_IRQ_MASK);
+
+}
+
+void s5p_cec_unmask_tx_interrupts(void)
+{
+ u8 reg;
+
+ reg = readb(cec_base + S5P_CES_IRQ_MASK);
+ reg &= ~S5P_CES_IRQ_TX_DONE;
+ reg &= ~S5P_CES_IRQ_TX_ERROR;
+ writeb(reg, cec_base + S5P_CES_IRQ_MASK);
+}
+
+void s5p_cec_reset(void)
+{
+ writeb(S5P_CES_RX_CTRL_RESET, cec_base + S5P_CES_RX_CTRL);
+ writeb(S5P_CES_TX_CTRL_RESET, cec_base + S5P_CES_TX_CTRL);
+}
+
+void s5p_cec_tx_reset(void)
+{
+ writeb(S5P_CES_TX_CTRL_RESET, cec_base + S5P_CES_TX_CTRL);
+}
+
+void s5p_cec_rx_reset(void)
+{
+ writeb(S5P_CES_RX_CTRL_RESET, cec_base + S5P_CES_RX_CTRL);
+}
+
+void s5p_cec_threshold(void)
+{
+ writeb(CEC_FILTER_THRESHOLD, cec_base + S5P_CES_RX_FILTER_TH);
+ writeb(0, cec_base + S5P_CES_RX_FILTER_CTRL);
+}
+
+void s5p_cec_set_tx_state(enum cec_state state)
+{
+ atomic_set(&cec_tx_struct.state, state);
+}
+
+void s5p_cec_set_rx_state(enum cec_state state)
+{
+ atomic_set(&cec_rx_struct.state, state);
+}
+
+void s5p_cec_copy_packet(char *data, size_t count)
+{
+ int i = 0;
+ u8 reg;
+
+ while (i < count) {
+ writeb(data[i], cec_base + (S5P_CES_TX_BUFF0 + (i * 4)));
+ i++;
+ }
+
+ writeb(count, cec_base + S5P_CES_TX_BYTES);
+ s5p_cec_set_tx_state(STATE_TX);
+ reg = readb(cec_base + S5P_CES_TX_CTRL);
+ reg |= S5P_CES_TX_CTRL_START;
+
+ if ((data[0] & CEC_MESSAGE_BROADCAST_MASK) == CEC_MESSAGE_BROADCAST)
+ reg |= S5P_CES_TX_CTRL_BCAST;
+ else
+ reg &= ~S5P_CES_TX_CTRL_BCAST;
+
+ reg |= 0x50;
+ writeb(reg, cec_base + S5P_CES_TX_CTRL);
+}
+
+void s5p_cec_set_addr(u32 addr)
+{
+ writeb(addr & 0x0F, cec_base + S5P_CES_LOGIC_ADDR);
+}
+
+u32 s5p_cec_get_status(void)
+{
+ u32 status = 0;
+
+ status = readb(cec_base + S5P_CES_STATUS_0);
+ status |= readb(cec_base + S5P_CES_STATUS_1) << 8;
+ status |= readb(cec_base + S5P_CES_STATUS_2) << 16;
+ status |= readb(cec_base + S5P_CES_STATUS_3) << 24;
+
+ tvout_dbg("status = 0x%x!\n", status);
+
+ return status;
+}
+
+void s5p_clr_pending_tx(void)
+{
+ writeb(S5P_CES_IRQ_TX_DONE | S5P_CES_IRQ_TX_ERROR,
+ cec_base + S5P_CES_IRQ_CLEAR);
+}
+
+void s5p_clr_pending_rx(void)
+{
+ writeb(S5P_CES_IRQ_RX_DONE | S5P_CES_IRQ_RX_ERROR,
+ cec_base + S5P_CES_IRQ_CLEAR);
+}
+
+void s5p_cec_get_rx_buf(u32 size, u8 *buffer)
+{
+ u32 i = 0;
+
+ while (i < size) {
+ buffer[i] = readb(cec_base + S5P_CES_RX_BUFF0 + (i * 4));
+ i++;
+ }
+}
+
+int __init s5p_cec_mem_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ size_t size;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (res == NULL) {
+ dev_err(&pdev->dev,
+ "failed to get memory region resource for cec\n");
+ return -ENOENT;
+ }
+
+ size = (res->end - res->start) + 1;
+ cec_mem = request_mem_region(res->start, size, pdev->name);
+
+ if (cec_mem == NULL) {
+ dev_err(&pdev->dev,
+ "failed to get memory region for cec\n");
+ return -ENOENT;
+ }
+
+ cec_base = ioremap(res->start, size);
+
+ if (cec_base == NULL) {
+ dev_err(&pdev->dev,
+ "failed to ioremap address region for cec\n");
+ return -ENOENT;
+ }
+
+ return ret;
+}
+
+int __init s5p_cec_mem_release(struct platform_device *pdev)
+{
+ iounmap(cec_base);
+
+ if (cec_mem != NULL) {
+ if (release_resource(cec_mem))
+ dev_err(&pdev->dev,
+ "Can't remove tvout drv !!\n");
+
+ kfree(cec_mem);
+
+ cec_mem = NULL;
+ }
+
+ return 0;
+}
diff --git a/drivers/media/video/samsung/tvout/hw_if/hdcp.c b/drivers/media/video/samsung/tvout/hw_if/hdcp.c
new file mode 100644
index 0000000..569de28
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/hw_if/hdcp.c
@@ -0,0 +1,1123 @@
+/* linux/drivers/media/video/samsung/tvout/hw_if/hdcp.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * HDCP function for Samsung TVOUT driver
+ *
+ * This program is free software. you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <mach/regs-hdmi.h>
+
+#include "hw_if.h"
+#include "../s5p_tvout_common_lib.h"
+
+#undef tvout_dbg
+
+#ifdef CONFIG_TVOUT_DEBUG
+#define tvout_dbg(fmt, ...) \
+do { \
+ if (unlikely(tvout_dbg_flag & (1 << DBG_FLAG_HDCP))) { \
+ printk(KERN_INFO "\t\t[HDCP] %s(): " fmt, \
+ __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
+#else
+#define tvout_dbg(fmt, ...)
+#endif
+
+#define AN_SZ 8
+#define AKSV_SZ 5
+#define BKSV_SZ 5
+#define MAX_KEY_SZ 16
+
+#define BKSV_RETRY_CNT 14
+#define BKSV_DELAY 100
+
+#define DDC_RETRY_CNT 400000
+#define DDC_DELAY 25
+
+#define KEY_LOAD_RETRY_CNT 1000
+#define ENCRYPT_CHECK_CNT 10
+
+#define KSV_FIFO_RETRY_CNT 50
+#define KSV_FIFO_CHK_DELAY 100 /* ms */
+#define KSV_LIST_RETRY_CNT 10000
+#define SHA_1_RETRY_CNT 4
+
+#define BCAPS_SIZE 1
+#define BSTATUS_SIZE 2
+#define SHA_1_HASH_SIZE 20
+#define HDCP_MAX_DEVS 128
+#define HDCP_KSV_SIZE 5
+
+#define HDCP_Bksv 0x00
+#define HDCP_Ri 0x08
+#define HDCP_Aksv 0x10
+#define HDCP_Ainfo 0x15
+#define HDCP_An 0x18
+#define HDCP_SHA1 0x20
+#define HDCP_Bcaps 0x40
+#define HDCP_BStatus 0x41
+#define HDCP_KSVFIFO 0x43
+
+#define KSV_FIFO_READY (0x1 << 5)
+
+#define MAX_CASCADE_EXCEEDED_ERROR (-2)
+#define MAX_DEVS_EXCEEDED_ERROR (-3)
+#define REPEATER_ILLEGAL_DEVICE_ERROR (-4)
+#define REPEATER_TIMEOUT_ERROR (-5)
+
+#define MAX_CASCADE_EXCEEDED (0x1 << 3)
+#define MAX_DEVS_EXCEEDED (0x1 << 7)
+
+
+#define DDC_BUF_SIZE 32
+
+enum hdcp_event {
+ HDCP_EVENT_STOP = 1 << 0,
+ HDCP_EVENT_START = 1 << 1,
+ HDCP_EVENT_READ_BKSV_START = 1 << 2,
+ HDCP_EVENT_WRITE_AKSV_START = 1 << 4,
+ HDCP_EVENT_CHECK_RI_START = 1 << 8,
+ HDCP_EVENT_SECOND_AUTH_START = 1 << 16
+};
+
+enum hdcp_state {
+ NOT_AUTHENTICATED,
+ RECEIVER_READ_READY,
+ BCAPS_READ_DONE,
+ BKSV_READ_DONE,
+ AN_WRITE_DONE,
+ AKSV_WRITE_DONE,
+ FIRST_AUTHENTICATION_DONE,
+ SECOND_AUTHENTICATION_RDY,
+ SECOND_AUTHENTICATION_DONE,
+};
+
+struct s5p_hdcp_info {
+ u8 is_repeater;
+ u32 hdcp_enable;
+
+ spinlock_t reset_lock;
+
+ enum hdcp_event event;
+ enum hdcp_state auth_status;
+
+ struct work_struct work;
+};
+
+struct i2c_client *ddc_port;
+
+static bool sw_reset;
+extern bool s5p_hdmi_ctrl_status(void);
+
+static struct s5p_hdcp_info hdcp_info = {
+ .is_repeater = false,
+ .hdcp_enable = false,
+ .event = HDCP_EVENT_STOP,
+ .auth_status = NOT_AUTHENTICATED,
+};
+
+static struct workqueue_struct *hdcp_wq;
+
+/* start: external functions for HDMI */
+extern void __iomem *hdmi_base;
+
+
+/* end: external functions for HDMI */
+
+/* ddc i2c */
+static int s5p_ddc_read(u8 reg, int bytes, u8 *dest)
+{
+ struct i2c_client *i2c = ddc_port;
+ u8 addr = reg;
+ int ret, cnt = 0;
+
+ struct i2c_msg msg[] = {
+ [0] = {
+ .addr = i2c->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr
+ },
+ [1] = {
+ .addr = i2c->addr,
+ .flags = I2C_M_RD,
+ .len = bytes,
+ .buf = dest
+ }
+ };
+
+ do {
+ if (s5p_hdmi_ctrl_status() == false ||
+ !s5p_hdmi_reg_get_hpd_status() ||
+ on_stop_process)
+ goto ddc_read_err;
+
+ ret = i2c_transfer(i2c->adapter, msg, 2);
+
+ if (ret < 0 || ret != 2)
+ tvout_dbg("ddc : can't read data, retry %d\n", cnt);
+ else
+ break;
+
+ if (hdcp_info.auth_status == FIRST_AUTHENTICATION_DONE
+ || hdcp_info.auth_status == SECOND_AUTHENTICATION_DONE)
+ goto ddc_read_err;
+
+ msleep(DDC_DELAY);
+ cnt++;
+ } while (cnt < DDC_RETRY_CNT);
+
+ if (cnt == DDC_RETRY_CNT)
+ goto ddc_read_err;
+
+ tvout_dbg("ddc : read data ok\n");
+
+ return 0;
+ddc_read_err:
+ tvout_err("ddc : can't read data, timeout\n");
+ return -1;
+}
+
+static int s5p_ddc_write(u8 reg, int bytes, u8 *src)
+{
+ struct i2c_client *i2c = ddc_port;
+ u8 msg[bytes + 1];
+ int ret, cnt = 0;
+
+ msg[0] = reg;
+ memcpy(&msg[1], src, bytes);
+
+ do {
+ if (s5p_hdmi_ctrl_status() == false ||
+ !s5p_hdmi_reg_get_hpd_status() ||
+ on_stop_process)
+ goto ddc_write_err;
+
+ ret = i2c_master_send(i2c, msg, bytes + 1);
+
+ if (ret < 0 || ret < bytes + 1)
+ tvout_dbg("ddc : can't write data, retry %d\n", cnt);
+ else
+ break;
+
+ msleep(DDC_DELAY);
+ cnt++;
+ } while (cnt < DDC_RETRY_CNT);
+
+ if (cnt == DDC_RETRY_CNT)
+ goto ddc_write_err;
+
+ tvout_dbg("ddc : write data ok\n");
+ return 0;
+ddc_write_err:
+ tvout_err("ddc : can't write data, timeout\n");
+ return -1;
+}
+
+static ssize_t sysfs_hdcp_ddc_i2c_num_show(struct class *class,
+ struct class_attribute *attr, char *buf)
+{
+ int size;
+ int ddc_i2c_num = ddc_port->adapter->nr;
+
+ pr_info("%s() ddc_i2c_num : %d\n",
+ __func__, ddc_i2c_num);
+ size = sprintf(buf, "DDC %d\n", ddc_i2c_num);
+
+ return size;
+}
+
+static CLASS_ATTR(ddc_i2c_num, 0664 , sysfs_hdcp_ddc_i2c_num_show, NULL);
+
+static int __devinit s5p_ddc_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ int ret = 0;
+ struct class *sec_hdcp;
+
+ ddc_port = client;
+
+ sec_hdcp = class_create(THIS_MODULE, "hdcp");
+ if (IS_ERR(sec_hdcp)) {
+ pr_err("Failed to create class(sec_hdcp)!\n");
+ ret = -ENOMEM;
+ goto err_exit1;
+ }
+
+ ret = class_create_file(sec_hdcp, &class_attr_ddc_i2c_num);
+ if (ret) {
+ pr_err("Failed to create device file in sysfs entries!\n");
+ ret = -ENOMEM;
+ goto err_exit2;
+ }
+
+ dev_info(&client->adapter->dev, "attached s5p_ddc "
+ "into i2c adapter successfully\n");
+ return ret;
+
+err_exit2:
+ class_destroy(sec_hdcp);
+
+err_exit1:
+ return ret;
+}
+
+static int s5p_ddc_remove(struct i2c_client *client)
+{
+ dev_info(&client->adapter->dev, "detached s5p_ddc "
+ "from i2c adapter successfully\n");
+
+ return 0;
+}
+
+static int s5p_ddc_suspend(struct i2c_client *cl, pm_message_t mesg)
+{
+ return 0;
+};
+
+static int s5p_ddc_resume(struct i2c_client *cl)
+{
+ return 0;
+};
+
+static struct i2c_device_id ddc_idtable[] = {
+ {"s5p_ddc", 0},
+};
+MODULE_DEVICE_TABLE(i2c, ddc_idtable);
+
+static struct i2c_driver ddc_driver = {
+ .driver = {
+ .name = "s5p_ddc",
+ .owner = THIS_MODULE,
+ },
+ .id_table = ddc_idtable,
+ .probe = s5p_ddc_probe,
+ .remove = __devexit_p(s5p_ddc_remove),
+ .suspend = s5p_ddc_suspend,
+ .resume = s5p_ddc_resume,
+};
+
+static int __init s5p_ddc_init(void)
+{
+ return i2c_add_driver(&ddc_driver);
+}
+
+static void __exit s5p_ddc_exit(void)
+{
+ i2c_del_driver(&ddc_driver);
+}
+
+
+module_init(s5p_ddc_init);
+module_exit(s5p_ddc_exit);
+
+/* hdcp */
+static int s5p_hdcp_encryption(bool on)
+{
+ u8 reg;
+ if (on)
+ reg = S5P_HDMI_HDCP_ENC_ENABLE;
+ else
+ reg = S5P_HDMI_HDCP_ENC_DISABLE;
+
+ writeb(reg, hdmi_base + S5P_HDMI_ENC_EN);
+ s5p_hdmi_reg_mute(!on);
+
+ return 0;
+}
+
+static int s5p_hdcp_write_key(int sz, int reg, int type)
+{
+ u8 buff[MAX_KEY_SZ] = {0,};
+ int cnt = 0, zero = 0;
+
+ hdmi_read_l(buff, hdmi_base, reg, sz);
+
+ for (cnt = 0; cnt < sz; cnt++)
+ if (buff[cnt] == 0)
+ zero++;
+
+ if (zero == sz) {
+ tvout_dbg("%s : null\n", type == HDCP_An ? "an" : "aksv");
+ goto write_key_err;
+ }
+
+ if (s5p_ddc_write(type, sz, buff) < 0)
+ goto write_key_err;
+
+#ifdef CONFIG_HDCP_DEBUG
+ {
+ u16 i = 0;
+
+ for (i = 1; i < sz + 1; i++)
+ tvout_dbg("%s[%d] : 0x%02x\n",
+ type == HDCP_An ? "an" : "aksv", i, buff[i]);
+ }
+#endif
+
+ return 0;
+write_key_err:
+ tvout_err("write %s : failed\n", type == HDCP_An ? "an" : "aksv");
+ return -1;
+}
+
+static int s5p_hdcp_read_bcaps(void)
+{
+ u8 bcaps = 0;
+
+ if (s5p_ddc_read(HDCP_Bcaps, BCAPS_SIZE, &bcaps) < 0)
+ goto bcaps_read_err;
+
+ if (s5p_hdmi_ctrl_status() == false || !s5p_hdmi_reg_get_hpd_status() || on_stop_process)
+ goto bcaps_read_err;
+
+ writeb(bcaps, hdmi_base + S5P_HDMI_HDCP_BCAPS);
+
+ if (bcaps & S5P_HDMI_HDCP_BCAPS_REPEATER)
+ hdcp_info.is_repeater = 1;
+ else
+ hdcp_info.is_repeater = 0;
+
+ tvout_dbg("device : %s\n", hdcp_info.is_repeater ? "REPEAT" : "SINK");
+ tvout_dbg("[i2c] bcaps : 0x%02x\n", bcaps);
+ tvout_dbg("[sfr] bcaps : 0x%02x\n",
+ readb(hdmi_base + S5P_HDMI_HDCP_BCAPS));
+
+ return 0;
+
+bcaps_read_err:
+ tvout_err("can't read bcaps : timeout\n");
+ return -1;
+}
+
+static int s5p_hdcp_read_bksv(void)
+{
+ u8 bksv[BKSV_SZ] = {0, };
+ int i = 0, j = 0;
+ u32 one = 0, zero = 0, res = 0;
+ u32 cnt = 0;
+
+ do {
+ if (s5p_ddc_read(HDCP_Bksv, BKSV_SZ, bksv) < 0)
+ goto bksv_read_err;
+
+#ifdef CONFIG_HDCP_DEBUG
+ for (i = 0; i < BKSV_SZ; i++)
+ tvout_dbg("i2c read : bksv[%d]: 0x%x\n", i, bksv[i]);
+#endif
+
+ for (i = 0; i < BKSV_SZ; i++) {
+
+ for (j = 0; j < 8; j++) {
+ res = bksv[i] & (0x1 << j);
+
+ if (res == 0)
+ zero++;
+ else
+ one++;
+ }
+
+ }
+
+ if (s5p_hdmi_ctrl_status() == false ||
+ !s5p_hdmi_reg_get_hpd_status() || on_stop_process)
+ goto bksv_read_err;
+
+ if ((zero == 20) && (one == 20)) {
+ hdmi_write_l(bksv, hdmi_base,
+ S5P_HDMI_HDCP_BKSV_0_0, BKSV_SZ);
+ break;
+ }
+ tvout_dbg("invalid bksv, retry : %d\n", cnt);
+
+ msleep(BKSV_DELAY);
+ cnt++;
+ } while (cnt < BKSV_RETRY_CNT);
+
+ if (cnt == BKSV_RETRY_CNT)
+ goto bksv_read_err;
+
+ tvout_dbg("bksv read OK, retry : %d\n", cnt);
+ return 0;
+
+bksv_read_err:
+ tvout_err("can't read bksv : timeout\n");
+ return -1;
+}
+
+static int s5p_hdcp_read_ri(void)
+{
+ static unsigned long int cnt;
+ u8 ri[2] = {0, 0};
+ u8 rj[2] = {0, 0};
+
+ cnt++;
+ ri[0] = readb(hdmi_base + S5P_HDMI_HDCP_Ri_0);
+ ri[1] = readb(hdmi_base + S5P_HDMI_HDCP_Ri_1);
+
+ if (s5p_ddc_read(HDCP_Ri, 2, rj) < 0)
+ goto compare_err;
+
+ tvout_dbg("Rx(ddc) -> rj[0]: 0x%02x, rj[1]: 0x%02x\n",
+ rj[0], rj[1]);
+ tvout_dbg("Tx(register) -> ri[0]: 0x%02x, ri[1]: 0x%02x\n",
+ ri[0], ri[1]);
+
+ if ((ri[0] == rj[0]) && (ri[1] == rj[1]) && (ri[0] | ri[1]))
+ writeb(S5P_HDMI_HDCP_Ri_MATCH_RESULT_Y,
+ hdmi_base + S5P_HDMI_HDCP_CHECK_RESULT);
+ else {
+ writeb(S5P_HDMI_HDCP_Ri_MATCH_RESULT_N,
+ hdmi_base + S5P_HDMI_HDCP_CHECK_RESULT);
+ goto compare_err;
+ }
+
+ ri[0] = 0;
+ ri[1] = 0;
+ rj[0] = 0;
+ rj[1] = 0;
+
+ tvout_dbg("ri, ri' : matched\n");
+
+ return 0;
+compare_err:
+ hdcp_info.event = HDCP_EVENT_STOP;
+ hdcp_info.auth_status = NOT_AUTHENTICATED;
+ tvout_err("read ri : failed - missmatch "
+ "Rx(ddc) rj[0]:0x%02x, rj[1]:0x%02x "
+ "Tx(register) ri[0]:0x%02x, ri[1]:0x%02x "
+ "cnt = %lu\n",
+ rj[0], rj[1], ri[0], ri[1], cnt);
+ msleep(10);
+ return -1;
+}
+
+static void s5p_hdcp_reset_sw(void)
+{
+ u8 reg;
+
+ sw_reset = true;
+ reg = s5p_hdmi_reg_intc_get_enabled();
+
+ s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 0);
+ s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 0);
+
+ s5p_hdmi_reg_sw_hpd_enable(true);
+ s5p_hdmi_reg_set_hpd_onoff(false);
+ s5p_hdmi_reg_set_hpd_onoff(true);
+ s5p_hdmi_reg_sw_hpd_enable(false);
+
+ if (reg & 1<<HDMI_IRQ_HPD_PLUG)
+ s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 1);
+ if (reg & 1<<HDMI_IRQ_HPD_UNPLUG)
+ s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 1);
+
+ sw_reset = false;
+}
+
+static void s5p_hdcp_reset_auth(void)
+{
+ u8 reg;
+ unsigned long spin_flags;
+
+ if (s5p_hdmi_ctrl_status() == false ||
+ !s5p_hdmi_reg_get_hpd_status() || on_stop_process)
+ return;
+ spin_lock_irqsave(&hdcp_info.reset_lock, spin_flags);
+
+ hdcp_info.event = HDCP_EVENT_STOP;
+ hdcp_info.auth_status = NOT_AUTHENTICATED;
+
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_CTRL1);
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_CTRL2);
+ s5p_hdmi_reg_mute(true);
+
+ s5p_hdcp_encryption(false);
+
+ tvout_err("reset authentication\n");
+
+ reg = readb(hdmi_base + S5P_HDMI_STATUS_EN);
+ reg &= S5P_HDMI_INT_DIS_ALL;
+ writeb(reg, hdmi_base + S5P_HDMI_STATUS_EN);
+
+ writeb(S5P_HDMI_HDCP_CLR_ALL_RESULTS,
+ hdmi_base + S5P_HDMI_HDCP_CHECK_RESULT);
+
+ /* need some delay (at least 1 frame) */
+ mdelay(16);
+
+ s5p_hdcp_reset_sw();
+
+ reg = readb(hdmi_base + S5P_HDMI_STATUS_EN);
+ reg |= S5P_HDMI_WTFORACTIVERX_INT_OCC |
+ S5P_HDMI_WATCHDOG_INT_OCC |
+ S5P_HDMI_WRITE_INT_OCC |
+ S5P_HDMI_UPDATE_RI_INT_OCC;
+ writeb(reg, hdmi_base + S5P_HDMI_STATUS_EN);
+ writeb(S5P_HDMI_HDCP_CP_DESIRED_EN, hdmi_base + S5P_HDMI_HDCP_CTRL1);
+ spin_unlock_irqrestore(&hdcp_info.reset_lock, spin_flags);
+}
+
+static int s5p_hdcp_loadkey(void)
+{
+ u8 reg;
+ int cnt = 0;
+
+ writeb(S5P_HDMI_EFUSE_CTRL_HDCP_KEY_READ,
+ hdmi_base + S5P_HDMI_EFUSE_CTRL);
+
+ do {
+ reg = readb(hdmi_base + S5P_HDMI_EFUSE_STATUS);
+ if (reg & S5P_HDMI_EFUSE_ECC_DONE)
+ break;
+ cnt++;
+ mdelay(1);
+ } while (cnt < KEY_LOAD_RETRY_CNT);
+
+ if (cnt == KEY_LOAD_RETRY_CNT)
+ goto key_load_err;
+
+ reg = readb(hdmi_base + S5P_HDMI_EFUSE_STATUS);
+
+ if (reg & S5P_HDMI_EFUSE_ECC_FAIL)
+ goto key_load_err;
+
+ tvout_dbg("load key : OK\n");
+ return 0;
+key_load_err:
+ tvout_err("can't load key\n");
+ return -1;
+}
+
+static int s5p_hdmi_start_encryption(void)
+{
+ u8 reg;
+ u32 cnt = 0;
+
+ do {
+ reg = readb(hdmi_base + S5P_HDMI_SYS_STATUS);
+
+ if (reg & S5P_HDMI_AUTHEN_ACK_AUTH) {
+ s5p_hdcp_encryption(true);
+ break;
+ }
+
+ mdelay(1);
+
+ cnt++;
+ } while (cnt < ENCRYPT_CHECK_CNT);
+
+ if (cnt == ENCRYPT_CHECK_CNT)
+ goto encrypt_err;
+
+
+ tvout_dbg("encrypt : start\n");
+ return 0;
+
+encrypt_err:
+ s5p_hdcp_encryption(false);
+ tvout_err("encrypt : failed\n");
+ return -1;
+}
+
+static int s5p_hdmi_check_repeater(void)
+{
+ int reg = 0;
+ int cnt = 0, cnt2 = 0;
+
+ u8 bcaps = 0;
+ u8 status[BSTATUS_SIZE] = {0, 0};
+ u8 rx_v[SHA_1_HASH_SIZE] = {0};
+ u8 ksv_list[HDCP_MAX_DEVS * HDCP_KSV_SIZE] = {0};
+
+ u32 dev_cnt;
+
+ memset(rx_v, 0x0, SHA_1_HASH_SIZE);
+ memset(ksv_list, 0x0, HDCP_MAX_DEVS * HDCP_KSV_SIZE);
+
+ do {
+ if (s5p_hdcp_read_bcaps() < 0)
+ goto check_repeater_err;
+
+ bcaps = readb(hdmi_base + S5P_HDMI_HDCP_BCAPS);
+
+ if (bcaps & KSV_FIFO_READY)
+ break;
+
+ msleep(KSV_FIFO_CHK_DELAY);
+
+ cnt++;
+ } while (cnt < KSV_FIFO_RETRY_CNT);
+
+ if (cnt == KSV_FIFO_RETRY_CNT) {
+ tvout_dbg("repeater : ksv fifo not ready, timeout error");
+ tvout_dbg(", retries : %d\n", cnt);
+ return REPEATER_TIMEOUT_ERROR;
+ }
+
+ tvout_dbg("repeater : ksv fifo ready\n");
+ tvout_dbg(", retries : %d\n", cnt);
+
+
+ if (s5p_ddc_read(HDCP_BStatus, BSTATUS_SIZE, status) < 0)
+ goto check_repeater_err;
+
+ if (status[1] & MAX_CASCADE_EXCEEDED)
+ return MAX_CASCADE_EXCEEDED_ERROR;
+ else if (status[0] & MAX_DEVS_EXCEEDED)
+ return MAX_DEVS_EXCEEDED_ERROR;
+
+ writeb(status[0], hdmi_base + S5P_HDMI_HDCP_BSTATUS_0);
+ writeb(status[1], hdmi_base + S5P_HDMI_HDCP_BSTATUS_1);
+
+ tvout_dbg("status[0] :0x%02x\n", status[0]);
+ tvout_dbg("status[1] :0x%02x\n", status[1]);
+
+ dev_cnt = status[0] & 0x7f;
+
+ tvout_dbg("repeater : dev cnt = %d\n", dev_cnt);
+
+ if (dev_cnt) {
+
+ if (s5p_ddc_read(HDCP_KSVFIFO, dev_cnt * HDCP_KSV_SIZE,
+ ksv_list) < 0)
+ goto check_repeater_err;
+
+ cnt = 0;
+
+ do {
+ hdmi_write_l(&ksv_list[cnt*5], hdmi_base,
+ S5P_HDMI_HDCP_RX_KSV_0_0, HDCP_KSV_SIZE);
+
+ reg = S5P_HDMI_HDCP_KSV_WRITE_DONE;
+
+ if (cnt == dev_cnt - 1)
+ reg |= S5P_HDMI_HDCP_KSV_END;
+
+ writeb(reg, hdmi_base + S5P_HDMI_HDCP_KSV_LIST_CON);
+
+ if (cnt < dev_cnt - 1) {
+ cnt2 = 0;
+ do {
+ reg = readb(hdmi_base
+ + S5P_HDMI_HDCP_KSV_LIST_CON);
+
+ if (reg & S5P_HDMI_HDCP_KSV_READ)
+ break;
+ cnt2++;
+ } while (cnt2 < KSV_LIST_RETRY_CNT);
+
+ if (cnt2 == KSV_LIST_RETRY_CNT)
+ tvout_dbg("ksv list not readed\n");
+ }
+ cnt++;
+ } while (cnt < dev_cnt);
+ } else {
+ writeb(S5P_HDMI_HDCP_KSV_LIST_EMPTY,
+ hdmi_base + S5P_HDMI_HDCP_KSV_LIST_CON);
+ }
+
+ if (s5p_ddc_read(HDCP_SHA1, SHA_1_HASH_SIZE, rx_v) < 0)
+ goto check_repeater_err;
+
+#ifdef S5P_HDCP_DEBUG
+ for (i = 0; i < SHA_1_HASH_SIZE; i++)
+ tvout_dbg("[i2c] SHA-1 rx :: %02x\n", rx_v[i]);
+#endif
+
+ hdmi_write_l(rx_v, hdmi_base, S5P_HDMI_HDCP_RX_SHA1_0_0,
+ SHA_1_HASH_SIZE);
+
+ reg = readb(hdmi_base + S5P_HDMI_HDCP_SHA_RESULT);
+ if (reg & S5P_HDMI_HDCP_SHA_VALID_RD) {
+ if (reg & S5P_HDMI_HDCP_SHA_VALID) {
+ tvout_dbg("SHA-1 result : OK\n");
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_SHA_RESULT);
+ } else {
+ tvout_dbg("SHA-1 result : not vaild\n");
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_SHA_RESULT);
+ goto check_repeater_err;
+ }
+ } else {
+ tvout_dbg("SHA-1 result : not ready\n");
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_SHA_RESULT);
+ goto check_repeater_err;
+ }
+
+ tvout_dbg("check repeater : OK\n");
+ return 0;
+check_repeater_err:
+ tvout_err("check repeater : failed\n");
+ return -1;
+}
+
+int s5p_hdcp_stop(void)
+{
+ u32 sfr_val = 0;
+
+ tvout_dbg("HDCP ftn. Stop!!\n");
+
+ s5p_hdmi_reg_intc_enable(HDMI_IRQ_HDCP, 0);
+
+ hdcp_info.event = HDCP_EVENT_STOP;
+ hdcp_info.auth_status = NOT_AUTHENTICATED;
+ hdcp_info.hdcp_enable = false;
+
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_CTRL1);
+
+ s5p_hdmi_reg_sw_hpd_enable(false);
+
+ sfr_val = readb(hdmi_base + S5P_HDMI_STATUS_EN);
+ sfr_val &= S5P_HDMI_INT_DIS_ALL;
+ writeb(sfr_val, hdmi_base + S5P_HDMI_STATUS_EN);
+
+ sfr_val = readb(hdmi_base + S5P_HDMI_SYS_STATUS);
+ sfr_val |= S5P_HDMI_INT_EN_ALL;
+ writeb(sfr_val, hdmi_base + S5P_HDMI_SYS_STATUS);
+
+ tvout_dbg("Stop Encryption by Stop!!\n");
+ s5p_hdcp_encryption(false);
+
+ writeb(S5P_HDMI_HDCP_Ri_MATCH_RESULT_N,
+ hdmi_base + S5P_HDMI_HDCP_CHECK_RESULT);
+ writeb(S5P_HDMI_HDCP_CLR_ALL_RESULTS,
+ hdmi_base + S5P_HDMI_HDCP_CHECK_RESULT);
+
+ return 0;
+}
+
+int s5p_hdcp_start(void)
+{
+ u32 sfr_val;
+
+ hdcp_info.event = HDCP_EVENT_STOP;
+ hdcp_info.auth_status = NOT_AUTHENTICATED;
+
+ tvout_dbg("HDCP ftn. Start\n");
+
+ s5p_hdcp_reset_sw();
+
+ tvout_dbg("Stop Encryption by Start\n");
+
+ s5p_hdcp_encryption(false);
+
+ msleep(120);
+ if (s5p_hdcp_loadkey() < 0)
+ return -1;
+
+ writeb(S5P_HDMI_GCP_CON_NO_TRAN, hdmi_base + S5P_HDMI_GCP_CON);
+ writeb(S5P_HDMI_INT_EN_ALL, hdmi_base + S5P_HDMI_STATUS_EN);
+
+ sfr_val = S5P_HDMI_HDCP_CP_DESIRED_EN;
+ writeb(sfr_val, hdmi_base + S5P_HDMI_HDCP_CTRL1);
+
+ s5p_hdmi_reg_intc_enable(HDMI_IRQ_HDCP, 1);
+
+ hdcp_info.hdcp_enable = 1;
+
+ return 0;
+}
+
+static int s5p_hdcp_bksv(void)
+{
+ tvout_dbg("bksv start : start\n");
+
+ hdcp_info.auth_status = RECEIVER_READ_READY;
+
+ msleep(100);
+
+ if (s5p_hdcp_read_bcaps() < 0)
+ goto bksv_start_err;
+
+ hdcp_info.auth_status = BCAPS_READ_DONE;
+
+ if (s5p_hdcp_read_bksv() < 0)
+ goto bksv_start_err;
+
+ hdcp_info.auth_status = BKSV_READ_DONE;
+
+ tvout_dbg("bksv start : OK\n");
+
+ return 0;
+
+bksv_start_err:
+ tvout_err("bksv start : failed\n");
+ msleep(100);
+ return -1;
+}
+
+static int s5p_hdcp_second_auth(void)
+{
+ int ret = 0;
+
+ tvout_dbg("second auth : start\n");
+
+ if (!hdcp_info.hdcp_enable)
+ goto second_auth_err;
+
+ if (s5p_hdmi_ctrl_status() == false ||
+ !s5p_hdmi_reg_get_hpd_status() || on_stop_process)
+ goto second_auth_err;
+
+ ret = s5p_hdmi_check_repeater();
+ if (ret)
+ goto second_auth_err;
+
+ hdcp_info.auth_status = SECOND_AUTHENTICATION_DONE;
+ s5p_hdmi_start_encryption();
+
+ tvout_dbg("second auth : OK\n");
+ return 0;
+
+second_auth_err:
+ hdcp_info.auth_status = NOT_AUTHENTICATED;
+ tvout_err("second auth : failed\n");
+ return -1;
+}
+
+static int s5p_hdcp_write_aksv(void)
+{
+ tvout_dbg("aksv start : start\n");
+
+ if (hdcp_info.auth_status != BKSV_READ_DONE) {
+ tvout_dbg("aksv start : bksv is not ready\n");
+ goto aksv_write_err;
+ }
+ if (s5p_hdmi_ctrl_status() == false ||
+ !s5p_hdmi_reg_get_hpd_status() || on_stop_process)
+ goto aksv_write_err;
+
+ if (s5p_hdcp_write_key(AN_SZ, S5P_HDMI_HDCP_An_0_0, HDCP_An) < 0)
+ goto aksv_write_err;
+
+ hdcp_info.auth_status = AN_WRITE_DONE;
+
+ tvout_dbg("write an : done\n");
+
+ if (s5p_hdcp_write_key(AKSV_SZ, S5P_HDMI_HDCP_AKSV_0_0, HDCP_Aksv) < 0)
+ goto aksv_write_err;
+
+ msleep(100);
+
+ hdcp_info.auth_status = AKSV_WRITE_DONE;
+
+ tvout_dbg("write aksv : done\n");
+ tvout_dbg("aksv start : OK\n");
+ return 0;
+
+aksv_write_err:
+ tvout_err("aksv start : failed\n");
+ return -1;
+}
+
+static int s5p_hdcp_check_ri(void)
+{
+ tvout_dbg("ri check : start\n");
+
+ if (hdcp_info.auth_status < AKSV_WRITE_DONE) {
+ tvout_dbg("ri check : not ready\n");
+ goto check_ri_err;
+ }
+
+ if (s5p_hdmi_ctrl_status() == false ||
+ !s5p_hdmi_reg_get_hpd_status() || on_stop_process)
+ goto check_ri_err;
+
+ if (s5p_hdcp_read_ri() < 0)
+ goto check_ri_err;
+
+ if (hdcp_info.is_repeater)
+ hdcp_info.auth_status
+ = SECOND_AUTHENTICATION_RDY;
+ else {
+ hdcp_info.auth_status
+ = FIRST_AUTHENTICATION_DONE;
+ s5p_hdmi_start_encryption();
+ }
+
+ tvout_dbg("ri check : OK\n");
+ return 0;
+
+check_ri_err:
+ tvout_err("ri check : failed\n");
+ return -1;
+}
+
+static void s5p_hdcp_work(void *arg)
+{
+ if (!hdcp_info.hdcp_enable || s5p_hdmi_ctrl_status() == false ||
+ !s5p_hdmi_reg_get_hpd_status() || on_stop_process)
+ return;
+
+ if (hdcp_info.event & HDCP_EVENT_READ_BKSV_START) {
+ if (s5p_hdcp_bksv() < 0)
+ goto work_err;
+ else
+ hdcp_info.event &= ~HDCP_EVENT_READ_BKSV_START;
+ }
+
+ if (hdcp_info.event & HDCP_EVENT_SECOND_AUTH_START) {
+ if (s5p_hdcp_second_auth() < 0)
+ goto work_err;
+ else
+ hdcp_info.event &= ~HDCP_EVENT_SECOND_AUTH_START;
+ }
+
+ if (hdcp_info.event & HDCP_EVENT_WRITE_AKSV_START) {
+ if (s5p_hdcp_write_aksv() < 0)
+ goto work_err;
+ else
+ hdcp_info.event &= ~HDCP_EVENT_WRITE_AKSV_START;
+ }
+
+ if (hdcp_info.event & HDCP_EVENT_CHECK_RI_START) {
+ if (s5p_hdcp_check_ri() < 0)
+ goto work_err;
+ else
+ hdcp_info.event &= ~HDCP_EVENT_CHECK_RI_START;
+ }
+ return;
+work_err:
+ if (!hdcp_info.hdcp_enable || s5p_hdmi_ctrl_status() == false ||
+ !s5p_hdmi_reg_get_hpd_status() || on_stop_process) {
+ return;
+ }
+ s5p_hdcp_reset_auth();
+}
+
+irqreturn_t s5p_hdcp_irq_handler(int irq, void *dev_id)
+{
+ u32 event = 0;
+ u8 flag;
+
+ event = 0;
+
+ if (s5p_hdmi_ctrl_status() == false) {
+ hdcp_info.event = HDCP_EVENT_STOP;
+ hdcp_info.auth_status = NOT_AUTHENTICATED;
+ tvout_dbg("[WARNING] s5p_hdmi_ctrl_status fail\n");
+ return IRQ_HANDLED;
+ }
+
+ flag = readb(hdmi_base + S5P_HDMI_SYS_STATUS);
+ tvout_dbg("flag = 0x%x\n", flag);
+
+ if (flag & S5P_HDMI_WTFORACTIVERX_INT_OCC) {
+ event |= HDCP_EVENT_READ_BKSV_START;
+ writeb(flag | S5P_HDMI_WTFORACTIVERX_INT_OCC,
+ hdmi_base + S5P_HDMI_SYS_STATUS);
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_I2C_INT);
+ }
+
+ if (flag & S5P_HDMI_WRITE_INT_OCC) {
+ event |= HDCP_EVENT_WRITE_AKSV_START;
+ writeb(flag | S5P_HDMI_WRITE_INT_OCC,
+ hdmi_base + S5P_HDMI_SYS_STATUS);
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_AN_INT);
+ }
+
+ if (flag & S5P_HDMI_UPDATE_RI_INT_OCC) {
+ event |= HDCP_EVENT_CHECK_RI_START;
+ writeb(flag | S5P_HDMI_UPDATE_RI_INT_OCC,
+ hdmi_base + S5P_HDMI_SYS_STATUS);
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_RI_INT);
+ }
+
+ if (flag & S5P_HDMI_WATCHDOG_INT_OCC) {
+ event |= HDCP_EVENT_SECOND_AUTH_START;
+ writeb(flag | S5P_HDMI_WATCHDOG_INT_OCC,
+ hdmi_base + S5P_HDMI_SYS_STATUS);
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_WDT_INT);
+ }
+
+ if (!event) {
+ tvout_dbg("unknown irq\n");
+ return IRQ_HANDLED;
+ }
+
+ if (hdcp_info.hdcp_enable && s5p_hdmi_ctrl_status() == true &&
+ s5p_hdmi_reg_get_hpd_status() && !on_stop_process) {
+ hdcp_info.event |= event;
+ queue_work_on(0, hdcp_wq, &hdcp_info.work);
+ } else {
+ hdcp_info.event = HDCP_EVENT_STOP;
+ hdcp_info.auth_status = NOT_AUTHENTICATED;
+ }
+
+ return IRQ_HANDLED;
+}
+
+int s5p_hdcp_init(void)
+{
+ hdcp_wq = create_freezable_workqueue("hdcp work");
+ if (!hdcp_wq)
+ return -1;
+ INIT_WORK(&hdcp_info.work, (work_func_t) s5p_hdcp_work);
+
+ spin_lock_init(&hdcp_info.reset_lock);
+
+ s5p_hdmi_reg_intc_set_isr(s5p_hdcp_irq_handler,
+ (u8) HDMI_IRQ_HDCP);
+
+ return 0;
+}
+
+int s5p_hdcp_encrypt_stop(bool on)
+{
+ u32 reg;
+ unsigned long spin_flags;
+
+ tvout_dbg("\n");
+ spin_lock_irqsave(&hdcp_info.reset_lock, spin_flags);
+
+
+ if (s5p_hdmi_ctrl_status() == false) {
+ hdcp_info.event = HDCP_EVENT_STOP;
+ hdcp_info.auth_status = NOT_AUTHENTICATED;
+ spin_unlock_irqrestore(&hdcp_info.reset_lock, spin_flags);
+ return -1;
+ }
+
+ if (hdcp_info.hdcp_enable) {
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_I2C_INT);
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_AN_INT);
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_RI_INT);
+ writeb(0x0, hdmi_base + S5P_HDMI_HDCP_WDT_INT);
+
+ s5p_hdcp_encryption(false);
+
+ if (!sw_reset) {
+ reg = readb(hdmi_base + S5P_HDMI_HDCP_CTRL1);
+
+ if (on) {
+ writeb(reg | S5P_HDMI_HDCP_CP_DESIRED_EN,
+ hdmi_base + S5P_HDMI_HDCP_CTRL1);
+ s5p_hdmi_reg_intc_enable(HDMI_IRQ_HDCP, 1);
+ } else {
+ hdcp_info.event = HDCP_EVENT_STOP;
+ hdcp_info.auth_status = NOT_AUTHENTICATED;
+
+ writeb(reg & ~S5P_HDMI_HDCP_CP_DESIRED_EN,
+ hdmi_base + S5P_HDMI_HDCP_CTRL1);
+ s5p_hdmi_reg_intc_enable(HDMI_IRQ_HDCP, 0);
+ }
+ }
+
+ tvout_dbg("stop encryption by HPD\n");
+ }
+
+ spin_unlock_irqrestore(&hdcp_info.reset_lock, spin_flags);
+
+ return 0;
+}
diff --git a/drivers/media/video/samsung/tvout/hw_if/hdmi.c b/drivers/media/video/samsung/tvout/hw_if/hdmi.c
new file mode 100644
index 0000000..dfb3152
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/hw_if/hdmi.c
@@ -0,0 +1,2182 @@
+/* linux/drivers/media/video/samsung/tvout/hw_if/hdmi.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Functions for HDMI of Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <mach/map.h>
+#include <mach/regs-hdmi.h>
+#include <mach/regs-pmu.h>
+
+#include "../s5p_tvout_common_lib.h"
+#include "hw_if.h"
+
+#undef tvout_dbg
+
+#ifdef CONFIG_TVOUT_DEBUG
+#define tvout_dbg(fmt, ...) \
+do { \
+ if (unlikely(tvout_dbg_flag & (1 << DBG_FLAG_HDMI))) { \
+ printk(KERN_INFO "\t\t[HDMI] %s(): " fmt, \
+ __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
+#else
+#define tvout_dbg(fmt, ...)
+#endif
+
+
+/****************************************
+ * Definitions for HDMI_PHY
+ ***************************************/
+
+#define PHY_I2C_ADDRESS 0x70
+#define PHY_REG_MODE_SET_DONE 0x1F
+
+#define I2C_ACK (1 << 7)
+#define I2C_INT (1 << 5)
+#define I2C_PEND (1 << 4)
+#define I2C_INT_CLEAR (0 << 4)
+#define I2C_CLK (0x41)
+#define I2C_CLK_PEND_INT (I2C_CLK | I2C_INT_CLEAR | I2C_INT)
+#define I2C_ENABLE (1 << 4)
+#define I2C_START (1 << 5)
+#define I2C_MODE_MTX 0xC0
+#define I2C_MODE_MRX 0x80
+#define I2C_IDLE 0
+
+#define STATE_IDLE 0
+#define STATE_TX_EDDC_SEGADDR 1
+#define STATE_TX_EDDC_SEGNUM 2
+#define STATE_TX_DDC_ADDR 3
+#define STATE_TX_DDC_OFFSET 4
+#define STATE_RX_DDC_ADDR 5
+#define STATE_RX_DDC_DATA 6
+#define STATE_RX_ADDR 7
+#define STATE_RX_DATA 8
+#define STATE_TX_ADDR 9
+#define STATE_TX_DATA 10
+#define STATE_TX_STOP 11
+#define STATE_RX_STOP 12
+
+
+
+
+/*
+ * Software state machine for bit-banged I2C transfers to the HDMI PHY.
+ * Shared between s5p_hdmi_i2c_phy_read()/write() and the interrupt-wait
+ * polling loop; not protected by a lock (single-threaded use assumed —
+ * TODO confirm against callers).
+ */
+static struct {
+	s32 state;	/* one of the STATE_* values above */
+	u8 *buffer;	/* next byte to read into / write from */
+	s32 bytes;	/* bytes remaining in the transfer */
+} i2c_hdmi_phy_context;
+
+
+
+
+/****************************************
+ * Definitions for HDMI
+ ***************************************/
+#define HDMI_IRQ_TOTAL_NUM 6
+
+
+/* private data area */
+void __iomem *hdmi_base;
+void __iomem *i2c_hdmi_phy_base;
+
+irqreturn_t (*s5p_hdmi_isr_ftn[HDMI_IRQ_TOTAL_NUM])(int irq, void *);
+spinlock_t lock_hdmi;
+
+#ifdef CONFIG_HDMI_PHY_32N
+static u8 phy_config[][3][32] = {
+ {/* freq = 25.200 MHz */
+ {
+ 0x01, 0x51, 0x2a, 0x75, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0x52, 0x69, 0x75, 0x57, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x3b, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0x52, 0x3f, 0x35, 0x63, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0xbd, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 25.175 MHz */
+ {
+ 0x01, 0xd1, 0x1f, 0x50, 0x40, 0x20, 0x1e, 0x08,
+ 0x81, 0xa0, 0xbd, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x27, 0x51, 0x15, 0x40, 0x2b, 0x08,
+ 0x81, 0xa0, 0xec, 0xd8, 0x45, 0xa0, 0x34, 0x80,
+ 0x08, 0x80, 0x32, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x1f, 0x30, 0x23, 0x20, 0x1e, 0x08,
+ 0x81, 0xa0, 0xbd, 0xd8, 0x45, 0xa0, 0x34, 0x80,
+ 0x08, 0x80, 0x32, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 27 MHz */
+ {
+ 0x01, 0x51, 0x2d, 0x75, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x38, 0x74, 0x57, 0x08, 0x04, 0x08,
+ 0x80, 0x80, 0x52, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xb4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x22, 0x31, 0x63, 0x08, 0xfc, 0x08,
+ 0x86, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x98, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 27.027 MHz */
+ {
+ 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x38, 0x74, 0x57, 0x50, 0x31, 0x01,
+ 0x80, 0x80, 0x52, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xb6, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd4, 0x87, 0x31, 0x63, 0x64, 0x1b, 0x20,
+ 0x19, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x98, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 54 MHz */
+ {
+ 0x01, 0x51, 0x2d, 0x35, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe4, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x38, 0x35, 0x53, 0x08, 0x04, 0x08,
+ 0x88, 0xa0, 0x52, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xb6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x22, 0x11, 0x61, 0x08, 0xfc, 0x08,
+ 0x86, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x98, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 54.054 MHz */
+ {
+ 0x01, 0xd1, 0x2d, 0x32, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xe3, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd2, 0x70, 0x34, 0x53, 0x50, 0x31, 0x08,
+ 0x80, 0x80, 0x52, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xb6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd4, 0x87, 0x11, 0x61, 0x64, 0x1b, 0x20,
+ 0x19, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x98, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 74.250 MHz */
+ {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x27, 0x11, 0x51, 0x40, 0xd6, 0x08,
+ 0x81, 0xa0, 0xe8, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x84, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x2e, 0x12, 0x61, 0x40, 0x34, 0x08,
+ 0x82, 0xa0, 0x16, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xb9, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 74.176 MHz */
+ {
+ 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
+ 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x27, 0x14, 0x51, 0x5b, 0xa7, 0x08,
+ 0x84, 0xa0, 0xe8, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x85, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd2, 0x5d, 0x12, 0x61, 0x5b, 0xcd, 0x10,
+ 0x43, 0xa0, 0x16, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xba, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 148.500 MHz - Pre-emph + Higher Tx amp. */
+ {
+ 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
+ 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x27, 0x01, 0x50, 0x40, 0xd6, 0x08,
+ 0x81, 0xa0, 0xe8, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0xad, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x09, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x2e, 0x02, 0x60, 0x40, 0x34, 0x08,
+ 0x82, 0xa0, 0x16, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0xad, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xdd, 0x24, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 148.352 MHz */
+ {
+ 0x01, 0xd2, 0x3e, 0x00, 0x40, 0x5b, 0xef, 0x08,
+ 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x27, 0x04, 0x10, 0x5b, 0xa7, 0x08,
+ 0x84, 0xa0, 0xe8, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0xad, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x09, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd2, 0x5d, 0x02, 0x20, 0x5b, 0xcd, 0x10,
+ 0x43, 0xa0, 0x16, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0xad, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xdd, 0x24, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 108.108 MHz */
+ {
+ 0x01, 0xd1, 0x2d, 0x12, 0x40, 0x64, 0x12, 0x08,
+ 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd2, 0x70, 0x14, 0x51, 0x50, 0x31, 0x08,
+ 0x80, 0x80, 0x5e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x6c, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd4, 0x87, 0x01, 0x60, 0x64, 0x1b, 0x20,
+ 0x19, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x2f, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 72 MHz */
+ {
+ 0x01, 0x51, 0x1e, 0x15, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0xb4, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xab, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0x52, 0x4b, 0x15, 0x51, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0xe1, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x89, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0x51, 0x2d, 0x15, 0x61, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 25 MHz */
+ {
+ 0x01, 0xd1, 0x2a, 0x72, 0x40, 0x3c, 0xd8, 0x08,
+ 0x86, 0xa0, 0xfa, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xf6, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x27, 0x51, 0x55, 0x40, 0x08, 0x08,
+ 0x81, 0xa0, 0xea, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc5, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd2, 0x1f, 0x30, 0x63, 0x40, 0x20, 0x08,
+ 0x81, 0x80, 0xbc, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x08, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xa4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 65 MHz */
+ {
+ 0x01, 0xd1, 0x36, 0x34, 0x40, 0x0c, 0x04, 0x08,
+ 0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x22, 0x11, 0x51, 0x30, 0xf2, 0x08,
+ 0x86, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x97, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x29, 0x12, 0x61, 0x40, 0xd0, 0x08,
+ 0x87, 0xa0, 0xf4, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x7e, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 108 MHz */
+ {
+ 0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x38, 0x14, 0x51, 0x08, 0x04, 0x08,
+ 0x80, 0x80, 0x52, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x6c, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x22, 0x01, 0x60, 0x08, 0xfc, 0x08,
+ 0x86, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0x5a, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x2f, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
+ }, {/* freq = 162 MHz */
+ {
+ 0x01, 0x54, 0x87, 0x05, 0x40, 0x01, 0x00, 0x08,
+ 0x82, 0x80, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0xad, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0x2f, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x2a, 0x02, 0x50, 0x40, 0x18, 0x08,
+ 0x86, 0xa0, 0xfd, 0xd8, 0x45, 0xa0, 0xac, 0x80,
+ 0xad, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xf3, 0x24, 0x03, 0x00, 0x00, 0x01, 0x00,
+ }, {
+ 0x01, 0xd1, 0x33, 0x04, 0x60, 0x40, 0xd0, 0x08,
+ 0x85, 0xa0, 0x32, 0xd9, 0x45, 0xa0, 0xac, 0x80,
+ 0xad, 0x80, 0x11, 0x84, 0x02, 0x22, 0x44, 0x86,
+ 0x54, 0xca, 0x24, 0x03, 0x00, 0x00, 0x01, 0x00,
+ },
+ },
+};
+#else
+static const u8 phy_config[][3][32] = {
+ { /* freq = 25.200 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0x5f, 0xF1, 0x54, 0x7e,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xf3, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0x9f, 0xF6, 0x54, 0x9e,
+ 0x84, 0x00, 0x32, 0x38, 0x00, 0xB8, 0x10, 0xE0,
+ 0x22, 0x40, 0xc2, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xFf, 0xF3, 0x54, 0xbd,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0xA4, 0x10, 0xE0,
+ 0x22, 0x40, 0xa2, 0x26, 0x00, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 25.175 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x1e, 0x20,
+ 0x6B, 0x50, 0x10, 0x51, 0xf1, 0x31, 0x54, 0xbd,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xf3, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x2b, 0x40,
+ 0x6B, 0x50, 0x10, 0x51, 0xF2, 0x32, 0x54, 0xec,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xB8, 0x10, 0xE0,
+ 0x22, 0x40, 0xc2, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x1e, 0x20,
+ 0x6B, 0x10, 0x02, 0x51, 0xf1, 0x31, 0x54, 0xbd,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xA4, 0x10, 0xE0,
+ 0x22, 0x40, 0xa2, 0x26, 0x00, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 27 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDf, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xe3, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x02, 0x08,
+ 0x6A, 0x10, 0x02, 0x51, 0xCf, 0xF1, 0x54, 0xa9,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xB8, 0x10, 0xE0,
+ 0x22, 0x40, 0xb5, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xfc, 0x08,
+ 0x6B, 0x10, 0x02, 0x51, 0x2f, 0xF2, 0x54, 0xcb,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xA4, 0x10, 0xE0,
+ 0x22, 0x40, 0x97, 0x26, 0x00, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 27.027 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x51, 0xDf, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xe2, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x31, 0x50,
+ 0x6B, 0x10, 0x02, 0x51, 0x8f, 0xF3, 0x54, 0xa9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0xB8, 0x10, 0xE0,
+ 0x22, 0x40, 0xb5, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0x10, 0x10, 0x9C, 0x1b, 0x64,
+ 0x6F, 0x10, 0x02, 0x51, 0x7f, 0xF8, 0x54, 0xcb,
+ 0x84, 0x00, 0x32, 0x38, 0x00, 0xA4, 0x10, 0xE0,
+ 0x22, 0x40, 0x97, 0x26, 0x00, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 54 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x01, 0x51, 0xDf, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xe3, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x02, 0x08,
+ 0x6A, 0x10, 0x01, 0x51, 0xCf, 0xF1, 0x54, 0xa9,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x22, 0x40, 0xb5, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xfc, 0x08,
+ 0x6B, 0x10, 0x01, 0x51, 0x2f, 0xF2, 0x54, 0xcb,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x22, 0x40, 0x97, 0x26, 0x01, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 54.054 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xd4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x01, 0x51, 0xDf, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xe2, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xd4, 0x10, 0x9C, 0x31, 0x50,
+ 0x6B, 0x10, 0x01, 0x51, 0x8f, 0xF3, 0x54, 0xa9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x22, 0x40, 0xb5, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0x10, 0x10, 0x9C, 0x1b, 0x64,
+ 0x6F, 0x10, 0x01, 0x51, 0x7f, 0xF8, 0x54, 0xcb,
+ 0x84, 0x00, 0x32, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x22, 0x40, 0x97, 0x26, 0x01, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 74.250 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+ 0x6A, 0x10, 0x01, 0x51, 0xff, 0xF1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xd6, 0x40,
+ 0x6B, 0x10, 0x01, 0x51, 0x7f, 0xF2, 0x54, 0xe8,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x22, 0x40, 0x83, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x34, 0x40,
+ 0x6B, 0x10, 0x01, 0x51, 0xef, 0xF2, 0x54, 0x16,
+ 0x85, 0x00, 0x10, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x22, 0x40, 0xdc, 0x26, 0x02, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 74.176 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0x10, 0x10, 0x9C, 0xab, 0x5B,
+ 0x6F, 0x10, 0x01, 0x51, 0xbf, 0xF9, 0x54, 0xe8,
+ 0x84, 0x00, 0x32, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x22, 0x40, 0x84, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0xcd, 0x5B,
+ 0x6F, 0x10, 0x01, 0x51, 0xdf, 0xF5, 0x54, 0x16,
+ 0x85, 0x00, 0x30, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x22, 0x40, 0xdc, 0x26, 0x02, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 148.500 MHz - Pre-emph + Higher Tx amp. */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xd6, 0x40,
+ 0x6B, 0x18, 0x00, 0x51, 0x7f, 0xF2, 0x54, 0xe8,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x23, 0x41, 0x83, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x34, 0x40,
+ 0x6B, 0x18, 0x00, 0x51, 0xef, 0xF2, 0x54, 0x16,
+ 0x85, 0x00, 0x10, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x23, 0x41, 0x6d, 0x26, 0x02, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 148.352 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+ 0x6D, 0x18, 0x00, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa5, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0x10, 0x10, 0x9C, 0xab, 0x5B,
+ 0x6F, 0x18, 0x00, 0x51, 0xbf, 0xF9, 0x54, 0xe8,
+ 0x84, 0x00, 0x32, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x23, 0x41, 0x84, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0xcd, 0x5B,
+ 0x6F, 0x18, 0x00, 0x51, 0xdf, 0xF5, 0x54, 0x16,
+ 0x85, 0x00, 0x30, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x23, 0x41, 0x6d, 0x26, 0x02, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 108.108 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x18, 0x00, 0x51, 0xDf, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xe2, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x31, 0x50,
+ 0x6D, 0x18, 0x00, 0x51, 0x8f, 0xF3, 0x54, 0xa9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x22, 0x40, 0xb5, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0x10, 0x10, 0x9C, 0x1b, 0x64,
+ 0x6F, 0x18, 0x00, 0x51, 0x7f, 0xF8, 0x54, 0xcb,
+ 0x84, 0x00, 0x32, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x22, 0x40, 0x97, 0x26, 0x02, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 72 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x01, 0x51, 0xEf, 0xF1, 0x54, 0xb4,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xaa, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6F, 0x10, 0x01, 0x51, 0xBf, 0xF4, 0x54, 0xe1,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x22, 0x40, 0x88, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x18, 0x00, 0x51, 0xDf, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x22, 0x40, 0xe3, 0x26, 0x02, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 25 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x20, 0x40,
+ 0x6B, 0x50, 0x10, 0x51, 0xff, 0xF1, 0x54, 0xbc,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xf5, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x08, 0x40,
+ 0x6B, 0x50, 0x10, 0x51, 0x7f, 0xF2, 0x54, 0xea,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xB8, 0x10, 0xE0,
+ 0x22, 0x40, 0xc4, 0x26, 0x00, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x20, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xff, 0xF1, 0x54, 0xbc,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xA4, 0x10, 0xE0,
+ 0x22, 0x40, 0xa3, 0x26, 0x00, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 65 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x02, 0x0c,
+ 0x6B, 0x10, 0x01, 0x51, 0xBf, 0xF1, 0x54, 0xa3,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xbc, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf2, 0x30,
+ 0x6A, 0x10, 0x01, 0x51, 0x2f, 0xF2, 0x54, 0xcb,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x22, 0x40, 0x96, 0x26, 0x01, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xd0, 0x40,
+ 0x6B, 0x10, 0x01, 0x51, 0x9f, 0xF2, 0x54, 0xf4,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x22, 0x40, 0x7D, 0x26, 0x01, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 108 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6D, 0x18, 0x00, 0x51, 0xDf, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xe3, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x02, 0x08,
+ 0x6A, 0x18, 0x00, 0x51, 0xCf, 0xF1, 0x54, 0xa9,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x22, 0x40, 0xb5, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xfc, 0x08,
+ 0x6B, 0x18, 0x00, 0x51, 0x2f, 0xF2, 0x54, 0xcb,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x22, 0x40, 0x97, 0x26, 0x02, 0x00, 0x00, 0x80,
+ },
+ }, { /* freq = 162 MHz */
+ {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6F, 0x18, 0x00, 0x51, 0x7f, 0xF8, 0x54, 0xcb,
+ 0x84, 0x00, 0x32, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0x97, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0x18, 0x40,
+ 0x6B, 0x18, 0x00, 0x51, 0xAf, 0xF2, 0x54, 0xfd,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0xF8, 0x10, 0xE0,
+ 0x23, 0x41, 0x78, 0x26, 0x02, 0x00, 0x00, 0x80,
+ }, {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xd0, 0x40,
+ 0x6B, 0x18, 0x00, 0x51, 0x3f, 0xF3, 0x54, 0x30,
+ 0x85, 0x00, 0x10, 0x38, 0x00, 0xE4, 0x10, 0xE0,
+ 0x23, 0x41, 0x64, 0x26, 0x02, 0x00, 0x00, 0x80,
+ },
+ },
+};
+#endif
+
+#ifndef CONFIG_HDMI_PHY_32N
+/*
+ * s5p_hdmi_reg_core_reset - pulse the HDMI core reset line.
+ *
+ * Asserts CORE_RSTOUT low, holds it for 10 ms, then releases it.
+ * Only built for non-32N PHY configurations.
+ */
+static void s5p_hdmi_reg_core_reset(void)
+{
+	writeb(0x0, hdmi_base + S5P_HDMI_CORE_RSTOUT);
+
+	mdelay(10);
+
+	writeb(0x1, hdmi_base + S5P_HDMI_CORE_RSTOUT);
+}
+#endif
+
+/*
+ * s5p_hdmi_i2c_phy_interruptwait - busy-wait for an I2C pending interrupt.
+ *
+ * Polls HDMI_I2C_CON until the I2C_PEND bit is set, then reads
+ * HDMI_I2C_STAT once to latch/acknowledge the event.
+ *
+ * NOTE(review): this loop has no timeout — if the PHY never raises
+ * I2C_PEND the caller hangs forever. Consider a bounded retry count.
+ *
+ * Always returns 0 in the current implementation.
+ */
+static s32 s5p_hdmi_i2c_phy_interruptwait(void)
+{
+	u8 status, reg;
+	s32 retval = 0;
+
+	do {
+		status = readb(i2c_hdmi_phy_base + HDMI_I2C_CON);
+
+		if (status & I2C_PEND) {
+			/* Read STAT to consume the pending condition. */
+			reg = readb(i2c_hdmi_phy_base + HDMI_I2C_STAT);
+			break;
+		}
+
+	} while (1);
+
+	return retval;
+}
+
+/*
+ * s5p_hdmi_i2c_phy_read - master-receive transfer from the HDMI PHY.
+ * @addr:   7-bit I2C address in the upper bits (bit 0 is forced clear,
+ *          i.e. the R/W bit slot; the controller drives the read bit).
+ * @nbytes: number of bytes to read.
+ * @buffer: destination; must hold at least @nbytes bytes.
+ *
+ * Drives the i2c_hdmi_phy_context state machine: issue START in MRX
+ * mode, then on each pending interrupt pull a data byte. The ACK bit is
+ * dropped one byte early (when one byte remains) so the final byte is
+ * NAKed per I2C convention, then STOP is issued and the function polls
+ * until the START bit clears.
+ *
+ * Returns 0 on success, -1 on interrupt-wait failure or a bad state.
+ */
+static s32 s5p_hdmi_i2c_phy_read(u8 addr, u8 nbytes, u8 *buffer)
+{
+	u8 reg;
+	s32 ret = 0;
+	u32 proc = true;
+
+	i2c_hdmi_phy_context.state = STATE_RX_ADDR;
+	i2c_hdmi_phy_context.buffer = buffer;
+	i2c_hdmi_phy_context.bytes = nbytes;
+
+	/* Enable controller, load slave address, fire START (MRX mode). */
+	writeb(I2C_CLK | I2C_INT | I2C_ACK, i2c_hdmi_phy_base + HDMI_I2C_CON);
+	writeb(I2C_ENABLE | I2C_MODE_MRX, i2c_hdmi_phy_base + HDMI_I2C_STAT);
+	writeb(addr & 0xFE, i2c_hdmi_phy_base + HDMI_I2C_DS);
+	writeb(I2C_ENABLE | I2C_START | I2C_MODE_MRX,
+		i2c_hdmi_phy_base + HDMI_I2C_STAT);
+
+	while (proc) {
+
+		/* STOP state needs no further interrupt before cleanup. */
+		if (i2c_hdmi_phy_context.state != STATE_RX_STOP) {
+
+			if (s5p_hdmi_i2c_phy_interruptwait() != 0) {
+				tvout_err("interrupt wait failed!!!\n");
+				ret = -1;
+				break;
+			}
+
+		}
+
+		switch (i2c_hdmi_phy_context.state) {
+		case STATE_RX_DATA:
+			/* Consume one received byte. */
+			reg = readb(i2c_hdmi_phy_base + HDMI_I2C_DS);
+			*(i2c_hdmi_phy_context.buffer) = reg;
+
+			i2c_hdmi_phy_context.buffer++;
+			--(i2c_hdmi_phy_context.bytes);
+
+			/* One byte left: drop ACK so the last byte is NAKed. */
+			if (i2c_hdmi_phy_context.bytes == 1) {
+				i2c_hdmi_phy_context.state = STATE_RX_STOP;
+				writeb(I2C_CLK_PEND_INT,
+					i2c_hdmi_phy_base + HDMI_I2C_CON);
+			} else {
+				writeb(I2C_CLK_PEND_INT | I2C_ACK,
+					i2c_hdmi_phy_base + HDMI_I2C_CON);
+			}
+
+			break;
+
+		case STATE_RX_ADDR:
+			/* Address phase done; next interrupt carries data. */
+			i2c_hdmi_phy_context.state = STATE_RX_DATA;
+
+			if (i2c_hdmi_phy_context.bytes == 1) {
+				i2c_hdmi_phy_context.state = STATE_RX_STOP;
+				writeb(I2C_CLK_PEND_INT,
+					i2c_hdmi_phy_base + HDMI_I2C_CON);
+			} else {
+				writeb(I2C_CLK_PEND_INT | I2C_ACK,
+					i2c_hdmi_phy_base + HDMI_I2C_CON);
+			}
+
+			break;
+
+		case STATE_RX_STOP:
+			i2c_hdmi_phy_context.state = STATE_IDLE;
+
+			/* Fetch the final (NAKed) byte, then issue STOP. */
+			reg = readb(i2c_hdmi_phy_base + HDMI_I2C_DS);
+
+			*(i2c_hdmi_phy_context.buffer) = reg;
+
+			writeb(I2C_MODE_MRX|I2C_ENABLE,
+				i2c_hdmi_phy_base + HDMI_I2C_STAT);
+			writeb(I2C_CLK_PEND_INT,
+				i2c_hdmi_phy_base + HDMI_I2C_CON);
+			writeb(I2C_MODE_MRX,
+				i2c_hdmi_phy_base + HDMI_I2C_STAT);
+
+			/* Wait for the bus to release START (no timeout —
+			 * NOTE(review): can spin forever on a stuck bus). */
+			while (readb(i2c_hdmi_phy_base + HDMI_I2C_STAT) &
+					I2C_START)
+				usleep_range(1000, 1000);
+
+			proc = false;
+			break;
+
+		case STATE_IDLE:
+		default:
+			tvout_err("error state!!!\n");
+
+			ret = -1;
+
+			proc = false;
+			break;
+		}
+
+	}
+
+	return ret;
+}
+
+/*
+ * s5p_hdmi_i2c_phy_write - master-transmit transfer to the HDMI PHY.
+ * @addr:   7-bit I2C address in the upper bits (bit 0 forced clear = write).
+ * @nbytes: number of bytes to send.
+ * @buffer: source bytes.
+ *
+ * Mirrors s5p_hdmi_i2c_phy_read() in MTX mode: START + address, then on
+ * each pending interrupt push the next byte; when the buffer is drained
+ * issue STOP and poll until the START bit clears.
+ *
+ * Returns 0 on success, -1 on interrupt-wait failure or a bad state.
+ */
+static s32 s5p_hdmi_i2c_phy_write(u8 addr, u8 nbytes, u8 *buffer)
+{
+	u8 reg;
+	s32 ret = 0;
+	u32 proc = true;
+
+	i2c_hdmi_phy_context.state = STATE_TX_ADDR;
+	i2c_hdmi_phy_context.buffer = buffer;
+	i2c_hdmi_phy_context.bytes = nbytes;
+
+	/* Enable controller, load slave address, fire START (MTX mode). */
+	writeb(I2C_CLK | I2C_INT | I2C_ACK, i2c_hdmi_phy_base + HDMI_I2C_CON);
+	writeb(I2C_ENABLE | I2C_MODE_MTX, i2c_hdmi_phy_base + HDMI_I2C_STAT);
+	writeb(addr & 0xFE, i2c_hdmi_phy_base + HDMI_I2C_DS);
+	writeb(I2C_ENABLE | I2C_START | I2C_MODE_MTX,
+		i2c_hdmi_phy_base + HDMI_I2C_STAT);
+
+	while (proc) {
+
+		if (s5p_hdmi_i2c_phy_interruptwait() != 0) {
+			tvout_err("interrupt wait failed!!!\n");
+			ret = -1;
+
+			break;
+		}
+
+		switch (i2c_hdmi_phy_context.state) {
+		case STATE_TX_ADDR:
+		case STATE_TX_DATA:
+			/* Address ACKed (or previous byte sent): push next. */
+			i2c_hdmi_phy_context.state = STATE_TX_DATA;
+
+			reg = *(i2c_hdmi_phy_context.buffer);
+
+			writeb(reg, i2c_hdmi_phy_base + HDMI_I2C_DS);
+
+			i2c_hdmi_phy_context.buffer++;
+			--(i2c_hdmi_phy_context.bytes);
+
+			if (i2c_hdmi_phy_context.bytes == 0) {
+				i2c_hdmi_phy_context.state = STATE_TX_STOP;
+				writeb(I2C_CLK_PEND_INT,
+					i2c_hdmi_phy_base + HDMI_I2C_CON);
+			} else {
+				writeb(I2C_CLK_PEND_INT | I2C_ACK,
+					i2c_hdmi_phy_base + HDMI_I2C_CON);
+			}
+
+			break;
+
+		case STATE_TX_STOP:
+			/* Last byte clocked out: generate STOP and wait. */
+			i2c_hdmi_phy_context.state = STATE_IDLE;
+
+			writeb(I2C_MODE_MTX | I2C_ENABLE,
+				i2c_hdmi_phy_base + HDMI_I2C_STAT);
+			writeb(I2C_CLK_PEND_INT,
+				i2c_hdmi_phy_base + HDMI_I2C_CON);
+			writeb(I2C_MODE_MTX,
+				i2c_hdmi_phy_base + HDMI_I2C_STAT);
+
+			/* NOTE(review): unbounded wait for START to clear. */
+			while (readb(i2c_hdmi_phy_base + HDMI_I2C_STAT) &
+					I2C_START)
+				usleep_range(1000, 1000);
+
+			proc = false;
+			break;
+
+		case STATE_IDLE:
+		default:
+			tvout_err("error state!!!\n");
+
+			ret = -1;
+
+			proc = false;
+			break;
+		}
+
+	}
+
+	return ret;
+}
+
+#ifdef S5P_HDMI_DEBUG
+/*
+ * s5p_hdmi_print_phy_config - debug helper: dump the PHY register block.
+ *
+ * Reads back sizeof(phy_config[0][0]) bytes (one 32-byte config row)
+ * from the PHY over I2C and prints them 8 per line. Index starts at 1,
+ * so byte 0 of the read buffer is intentionally not printed.
+ *
+ * NOTE(review): the continuation printk()s carry no KERN_ level; the
+ * usual kernel idiom is pr_cont()/KERN_CONT for these.
+ */
+static void s5p_hdmi_print_phy_config(void)
+{
+	s32 size;
+	int i = 0;
+	u8 read_buffer[0x40] = {0, };
+	size = sizeof(phy_config[0][0])
+		/ sizeof(phy_config[0][0][0]);
+
+
+	/* read data */
+	if (s5p_hdmi_i2c_phy_read(PHY_I2C_ADDRESS, size, read_buffer) != 0) {
+		tvout_err("s5p_hdmi_i2c_phy_read failed.\n");
+		return;
+	}
+
+	printk(KERN_WARNING "read buffer :\n");
+
+	for (i = 1; i < size; i++) {
+		printk("0x%02x", read_buffer[i]);
+
+		if (i % 8)
+			printk(" ");
+		else
+			printk("\n");
+	}
+	printk(KERN_WARNING "\n");
+}
+#else
+/* No-op when S5P_HDMI_DEBUG is disabled. */
+static inline void s5p_hdmi_print_phy_config(void) {}
+#endif
+
+#ifdef CONFIG_SND_SAMSUNG_SPDIF
+/*
+ * s5p_hdmi_audio_set_config - route S/PDIF audio into the HDMI block.
+ * @audio_codec: PCM or AC3; anything else yields data_type 0xff
+ *               (NOTE(review): 0xff is written to the config register
+ *               as-is — confirm this is a safe "unknown" encoding).
+ *
+ * Opens the S/PDIF path through the HDMI I2S mux and programs the
+ * SPDIFIN configuration registers (filtering, word length, burst size,
+ * 32-bit data alignment, no clock divide).
+ */
+static void s5p_hdmi_audio_set_config(
+	enum s5p_tvout_audio_codec_type audio_codec)
+{
+	u32 data_type = (audio_codec == PCM) ?
+			S5P_HDMI_SPDIFIN_CFG_LINEAR_PCM_TYPE :
+			(audio_codec == AC3) ?
+				S5P_HDMI_SPDIFIN_CFG_NO_LINEAR_PCM_TYPE : 0xff;
+
+	tvout_dbg("audio codec type = %s\n",
+		(audio_codec & PCM) ? "PCM" :
+		(audio_codec & AC3) ? "AC3" :
+		(audio_codec & MP3) ? "MP3" :
+		(audio_codec & WMA) ? "WMA" : "Unknown");
+
+	/* open SPDIF path on HDMI_I2S */
+	/* NOTE(review): 32-bit readl paired with 8-bit writeb on the same
+	 * register — works only if the upper bits are don't-care; verify. */
+	writeb(S5P_HDMI_I2S_CLK_EN, hdmi_base + S5P_HDMI_I2S_CLK_CON);
+	writeb(readl(hdmi_base + S5P_HDMI_I2S_MUX_CON) |
+		S5P_HDMI_I2S_CUV_I2S_ENABLE |
+		S5P_HDMI_I2S_MUX_ENABLE,
+		hdmi_base + S5P_HDMI_I2S_MUX_CON);
+	writeb(S5P_HDMI_I2S_CH_ALL_EN, hdmi_base + S5P_HDMI_I2S_MUX_CH);
+	writeb(S5P_HDMI_I2S_CUV_RL_EN, hdmi_base + S5P_HDMI_I2S_MUX_CUV);
+
+	writeb(S5P_HDMI_SPDIFIN_CFG_FILTER_2_SAMPLE | data_type |
+		S5P_HDMI_SPDIFIN_CFG_PCPD_MANUAL_SET |
+		S5P_HDMI_SPDIFIN_CFG_WORD_LENGTH_M_SET |
+		S5P_HDMI_SPDIFIN_CFG_U_V_C_P_REPORT |
+		S5P_HDMI_SPDIFIN_CFG_BURST_SIZE_2 |
+		S5P_HDMI_SPDIFIN_CFG_DATA_ALIGN_32BIT,
+		hdmi_base + S5P_HDMI_SPDIFIN_CONFIG_1);
+
+	writeb(S5P_HDMI_SPDIFIN_CFG2_NO_CLK_DIV,
+		hdmi_base + S5P_HDMI_SPDIFIN_CONFIG_2);
+}
+
+/*
+ * s5p_hdmi_audio_clock_enable - turn on the SPDIFIN clock and put the
+ * block into status-check operating mode.
+ */
+static void s5p_hdmi_audio_clock_enable(void)
+{
+	writeb(S5P_HDMI_SPDIFIN_CLK_ON, hdmi_base + S5P_HDMI_SPDIFIN_CLK_CTRL);
+	writeb(S5P_HDMI_SPDIFIN_STATUS_CHK_OP_MODE,
+		hdmi_base + S5P_HDMI_SPDIFIN_OP_CTRL);
+}
+
+/*
+ * s5p_hdmi_audio_set_repetition_time - program SPDIFIN burst repetition.
+ * @audio_codec:     AC3 gets a repetition count of 1536*2-1 = 3071
+ *                   (one AC-3 frame of 1536 samples, two sub-frames);
+ *                   all other codecs (PCM) use 0.
+ * @bits:            unused in the current implementation.
+ * @frame_size_code: 16-bit burst frame size, split across USER_VALUE_3/4.
+ *
+ * The repetition count is split as low nibble into USER_VALUE_1[7:4]
+ * and the remaining bits into USER_VALUE_2, alongside a fixed 24-bit
+ * word-length code in USER_VALUE_1[3:0].
+ */
+static void s5p_hdmi_audio_set_repetition_time(
+	enum s5p_tvout_audio_codec_type audio_codec,
+	u32 bits, u32 frame_size_code)
+{
+	/* Only 4'b1011 24bit */
+	u32 wl = 5 << 1 | 1;
+	u32 rpt_cnt = (audio_codec == AC3) ? 1536 * 2 - 1 : 0;
+
+	tvout_dbg("repetition count = %d\n", rpt_cnt);
+
+	/* 24bit and manual mode */
+	writeb(((rpt_cnt & 0xf) << 4) | wl,
+		hdmi_base + S5P_HDMI_SPDIFIN_USER_VALUE_1);
+	/* if PCM this value is 0 */
+	writeb((rpt_cnt >> 4) & 0xff,
+		hdmi_base + S5P_HDMI_SPDIFIN_USER_VALUE_2);
+	/* if PCM this value is 0 */
+	writeb(frame_size_code & 0xff,
+		hdmi_base + S5P_HDMI_SPDIFIN_USER_VALUE_3);
+	/* if PCM this value is 0 */
+	writeb((frame_size_code >> 8) & 0xff,
+		hdmi_base + S5P_HDMI_SPDIFIN_USER_VALUE_4);
+}
+
+/*
+ * s5p_hdmi_audio_irq_enable - write the SPDIFIN interrupt mask.
+ * @irq_en: bitmask written verbatim to the IRQ mask register.
+ */
+static void s5p_hdmi_audio_irq_enable(u32 irq_en)
+{
+	writeb(irq_en, hdmi_base + S5P_HDMI_SPDIFIN_IRQ_MASK);
+}
+#else
+/*
+ * s5p_hdmi_audio_i2s_config - configure the HDMI I2S audio input path.
+ * @audio_codec:     unused here (path is fixed to linear PCM over I2S).
+ * @sample_rate:     44100/48000/32000/96000 Hz map to the channel-status
+ *                   frequency code; anything else falls back to code 0
+ *                   (44.1 kHz — NOTE(review): silent fallback, confirm).
+ * @bits_per_sample: 20 or 24 select multi-byte data modes; any other
+ *                   value falls back to the basic 16-bit-style mode.
+ * @frame_size_code: unused in this (non-SPDIF) build.
+ * @audio:           provides .channel (2 = stereo, else treated as 5.1).
+ *
+ * Programs the I2S mux, pin-select, clocking, bit format and the IEC
+ * channel-status registers, then triggers a channel-status reload.
+ */
+static void s5p_hdmi_audio_i2s_config(
+		enum s5p_tvout_audio_codec_type audio_codec,
+		u32 sample_rate, u32 bits_per_sample,
+		u32 frame_size_code,
+		struct s5p_hdmi_audio *audio)
+{
+	u32 data_num, bit_ch, sample_frq;
+
+	if (bits_per_sample == 20) {
+		data_num = 2;
+		bit_ch = 1;
+	} else if (bits_per_sample == 24) {
+		data_num = 3;
+		bit_ch = 1;
+	} else {
+		data_num = 1;
+		bit_ch = 0;
+	}
+
+	writeb((S5P_HDMI_I2S_IN_DISABLE | S5P_HDMI_I2S_AUD_I2S |
+		S5P_HDMI_I2S_CUV_I2S_ENABLE | S5P_HDMI_I2S_MUX_ENABLE),
+		hdmi_base + S5P_HDMI_I2S_MUX_CON);
+
+	writeb(S5P_HDMI_I2S_CH0_EN | S5P_HDMI_I2S_CH1_EN | S5P_HDMI_I2S_CH2_EN,
+		hdmi_base + S5P_HDMI_I2S_MUX_CH);
+
+	writeb(S5P_HDMI_I2S_CUV_RL_EN, hdmi_base + S5P_HDMI_I2S_MUX_CUV);
+
+	/* IEC 60958 channel-status sampling-frequency code. */
+	sample_frq = (sample_rate == 44100) ? 0 :
+			(sample_rate == 48000) ? 2 :
+			(sample_rate == 32000) ? 3 :
+			(sample_rate == 96000) ? 0xa : 0x0;
+
+	/* Toggle the I2S clock off/on to restart it cleanly. */
+	/* readl(hdmi_base + S5P_HDMI_YMAX) */
+	writeb(S5P_HDMI_I2S_CLK_DIS, hdmi_base + S5P_HDMI_I2S_CLK_CON);
+	writeb(S5P_HDMI_I2S_CLK_EN, hdmi_base + S5P_HDMI_I2S_CLK_CON);
+
+	/* NOTE(review): 32-bit readl mixed with 8-bit writeb — verify the
+	 * upper register bits are don't-care. */
+	writeb(readl(hdmi_base + S5P_HDMI_I2S_DSD_CON) | 0x01,
+		hdmi_base + S5P_HDMI_I2S_DSD_CON);
+
+	/* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
+	writeb(S5P_HDMI_I2S_SEL_SCLK(5) | S5P_HDMI_I2S_SEL_LRCK(6),
+		hdmi_base + S5P_HDMI_I2S_PIN_SEL_0);
+	if (audio->channel == 2)
+		/* I2S 2 channel */
+		writeb(S5P_HDMI_I2S_SEL_SDATA1(1) | S5P_HDMI_I2S_SEL_SDATA2(4),
+			hdmi_base + S5P_HDMI_I2S_PIN_SEL_1);
+	else
+		/* I2S 5.1 channel */
+		writeb(S5P_HDMI_I2S_SEL_SDATA1(3) | S5P_HDMI_I2S_SEL_SDATA2(4),
+			hdmi_base + S5P_HDMI_I2S_PIN_SEL_1);
+
+	writeb(S5P_HDMI_I2S_SEL_SDATA3(1) | S5P_HDMI_I2S_SEL_SDATA2(2),
+		hdmi_base + S5P_HDMI_I2S_PIN_SEL_2);
+	writeb(S5P_HDMI_I2S_SEL_DSD(0), hdmi_base + S5P_HDMI_I2S_PIN_SEL_3);
+
+	/* I2S_CON_1 & 2 */
+	writeb(S5P_HDMI_I2S_SCLK_FALLING_EDGE | S5P_HDMI_I2S_L_CH_LOW_POL,
+		hdmi_base + S5P_HDMI_I2S_CON_1);
+	writeb(S5P_HDMI_I2S_MSB_FIRST_MODE |
+		S5P_HDMI_I2S_SET_BIT_CH(bit_ch) |
+		S5P_HDMI_I2S_SET_SDATA_BIT(data_num) |
+		S5P_HDMI_I2S_BASIC_FORMAT,
+		hdmi_base + S5P_HDMI_I2S_CON_2);
+
+	/* Configure register related to CUV information */
+	writeb(S5P_HDMI_I2S_CH_STATUS_MODE_0 |
+		S5P_HDMI_I2S_2AUD_CH_WITHOUT_PREEMPH |
+		S5P_HDMI_I2S_COPYRIGHT |
+		S5P_HDMI_I2S_LINEAR_PCM |
+		S5P_HDMI_I2S_CONSUMER_FORMAT,
+		hdmi_base + S5P_HDMI_I2S_CH_ST_0);
+	writeb(S5P_HDMI_I2S_CD_PLAYER,
+		hdmi_base + S5P_HDMI_I2S_CH_ST_1);
+
+	if (audio->channel == 2)
+		/* 2-channel: source number only, channel count left at 0 */
+		writeb(S5P_HDMI_I2S_SET_SOURCE_NUM(0),
+			hdmi_base + S5P_HDMI_I2S_CH_ST_2);
+	else
+		/* 5.1: additionally report 6 channels in channel status */
+		writeb(S5P_HDMI_I2S_SET_SOURCE_NUM(0) |
+			S5P_HDMI_I2S_SET_CHANNEL_NUM(0x6),
+			hdmi_base + S5P_HDMI_I2S_CH_ST_2);
+
+	writeb(S5P_HDMI_I2S_CLK_ACCUR_LEVEL_2 |
+		S5P_HDMI_I2S_SET_SAMPLING_FREQ(sample_frq),
+		hdmi_base + S5P_HDMI_I2S_CH_ST_3);
+	writeb(S5P_HDMI_I2S_ORG_SAMPLING_FREQ_44_1 |
+		S5P_HDMI_I2S_WORD_LENGTH_MAX24_24BITS |
+		S5P_HDMI_I2S_WORD_LENGTH_MAX_24BITS,
+		hdmi_base + S5P_HDMI_I2S_CH_ST_4);
+
+	/* Latch the new channel-status bytes into the transmitter. */
+	writeb(S5P_HDMI_I2S_CH_STATUS_RELOAD,
+		hdmi_base + S5P_HDMI_I2S_CH_ST_CON);
+}
+#endif
+
+/*
+ * Fold 'size' bytes of 'data' onto the running value 'sum' and return the
+ * two's-complement checksum byte, i.e. the value that makes the byte-wise
+ * total equal 0 modulo 256 (HDMI infoframe checksum rule).
+ */
+static u8 s5p_hdmi_checksum(int sum, int size, u8 *data)
+{
+	u32 idx = 0;
+
+	while (idx < (u32)size) {
+		sum += (u32)(data[idx]);
+		idx++;
+	}
+
+	return (u8)(0x100 - (sum & 0xff));
+}
+
+
+/*
+ * Clear (on) or set (off) a single power-control bit in a PHY register.
+ *
+ * @addr:        PHY register offset (also indexes the shadow buffer).
+ * @offset:      bit position within that register.
+ * @read_buffer: shadow copy of the PHY register file; updated in place
+ *               so later calls see this write.
+ *
+ * Returns 0 on success, -EINVAL if the I2C write to the PHY fails.
+ * Note the inverted polarity: "on" clears the bit (active-low power-down).
+ */
+static int s5p_hdmi_phy_control(bool on, u8 addr, u8 offset, u8 *read_buffer)
+{
+	u8 buff[2] = {0};
+
+	buff[0] = addr;
+	buff[1] = (on) ? (read_buffer[addr] & (~(1 << offset))) :
+			(read_buffer[addr] | (1 << offset));
+	read_buffer[addr] = buff[1];
+
+	if (s5p_hdmi_i2c_phy_write(PHY_I2C_ADDRESS, 2, buff) != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Enable or disable the PHY oscillator pad over I2C.
+ *
+ * The register offset and bit encoding differ between Exynos4212/4412
+ * (register 0x0b, fixed magic values) and earlier SoCs (register 0x19,
+ * 2-bit field at bits 7:6). The shadow buffer is updated to match.
+ * Returns 0 on success, -EINVAL on I2C failure.
+ */
+static int s5p_hdmi_phy_enable_oscpad(bool on, u8 *read_buffer)
+{
+	u8 buff[2];
+
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+	buff[0] = 0x0b;
+	if (on)
+		buff[1] = 0xd8;
+	else
+		buff[1] = 0x18;
+	read_buffer[0x0b] = buff[1];
+#else
+	buff[0] = 0x19;
+	if (on)
+		buff[1] = (read_buffer[0x19] & (~(3<<6))) | (1<<6);
+	else
+		buff[1] = (read_buffer[0x19] & (~(3<<6))) | (2<<6);
+	read_buffer[0x19] = buff[1];
+#endif
+
+	if (s5p_hdmi_i2c_phy_write(PHY_I2C_ADDRESS, 2, buff) != 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * Return true when the HDMI PHY power bit (bit 0) of the PMU control
+ * register is set.
+ *
+ * Fix: 'reg' was read uninitialized when CONFIG_ARCH_EXYNOS4 is not
+ * defined (undefined behavior); initialize it to 0 so the PHY reports
+ * "disabled" on configurations that never read the register.
+ */
+static bool s5p_hdmi_phy_is_enable(void)
+{
+	u32 reg = 0;
+
+#ifdef CONFIG_ARCH_EXYNOS4
+	reg = readl(S5P_HDMI_PHY_CONTROL);
+#endif
+
+	return reg & (1 << 0);
+}
+
+/*
+ * Set or clear the HDMI PHY power bit (bit 0) in the PMU control register.
+ *
+ * Fix: the register is 32 bits wide and is read with readl() just above,
+ * but was written back with writeb(), which only stores the low byte;
+ * use writel() so the read-modify-write is full width.
+ */
+static void s5p_hdmi_phy_enable(bool on)
+{
+	u32 reg;
+
+#ifdef CONFIG_ARCH_EXYNOS4
+	reg = readl(S5P_HDMI_PHY_CONTROL);
+
+	if (on)
+		reg |= (1 << 0);
+	else
+		reg &= ~(1 << 0);
+
+	writel(reg, S5P_HDMI_PHY_CONTROL);
+#endif
+
+}
+
+/*
+ * Pulse the HDMI PHY software reset line.
+ *
+ * The HDMI clock is enabled around the pulse so the reset register is
+ * accessible; the 10 ms delay holds the PHY in reset between the
+ * assert (0x1) and de-assert (0x0) writes.
+ */
+void s5p_hdmi_reg_sw_reset(void)
+{
+	tvout_dbg("\n");
+	s5p_hdmi_ctrl_clock(1);
+
+	writeb(0x1, hdmi_base + S5P_HDMI_PHY_RSTOUT);
+	mdelay(10);
+	writeb(0x0, hdmi_base + S5P_HDMI_PHY_RSTOUT);
+
+	s5p_hdmi_ctrl_clock(0);
+}
+
+/*
+ * Power the HDMI PHY up or down.
+ *
+ * Power-up: enable the PHY in the PMU, software-reset it, point the PHY's
+ * I2C register window at offset 0 (first byte of phy_config), read back
+ * the whole register file into a shadow buffer, then clear the SoC-specific
+ * set of power-down bits via s5p_hdmi_phy_control().
+ * Power-down: mirror sequence — read the register file, disable the
+ * oscillator pad, set the power-down bits, then gate the PHY in the PMU.
+ * Both directions are no-ops if the PHY is already in the requested state.
+ *
+ * Returns 0 on success, -1 on any PHY I2C failure.
+ */
+int s5p_hdmi_phy_power(bool on)
+{
+	u32 size;
+	u8 *buffer;
+	u8 read_buffer[0x40] = {0, };
+
+	/* number of bytes in one phy_config register table */
+	size = sizeof(phy_config[0][0])
+		/ sizeof(phy_config[0][0][0]);
+
+	/* single-byte write of the table's first entry selects the
+	 * register offset for the subsequent bulk read */
+	buffer = (u8 *) phy_config[0][0];
+
+	tvout_dbg("(on:%d)\n", on);
+	if (on) {
+		if (!s5p_hdmi_phy_is_enable()) {
+			s5p_hdmi_phy_enable(1);
+			s5p_hdmi_reg_sw_reset();
+
+			if (s5p_hdmi_i2c_phy_write(
+				PHY_I2C_ADDRESS, 1, buffer) != 0)
+				goto ret_on_err;
+
+			if (s5p_hdmi_i2c_phy_read(
+				PHY_I2C_ADDRESS, size, read_buffer) != 0) {
+				tvout_err("s5p_hdmi_i2c_phy_read failed.\n");
+				goto ret_on_err;
+			}
+
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+			/* clear the power-down bits (register 0x1d) */
+			s5p_hdmi_phy_control(true, 0x1d, 0x7, read_buffer);
+			s5p_hdmi_phy_control(true, 0x1d, 0x0, read_buffer);
+			s5p_hdmi_phy_control(true, 0x1d, 0x1, read_buffer);
+			s5p_hdmi_phy_control(true, 0x1d, 0x2, read_buffer);
+			s5p_hdmi_phy_control(true, 0x1d, 0x4, read_buffer);
+			s5p_hdmi_phy_control(true, 0x1d, 0x5, read_buffer);
+			s5p_hdmi_phy_control(true, 0x1d, 0x6, read_buffer);
+#else
+			s5p_hdmi_phy_control(true, 0x1, 0x5, read_buffer);
+			s5p_hdmi_phy_control(true, 0x1, 0x7, read_buffer);
+			s5p_hdmi_phy_control(true, 0x5, 0x5, read_buffer);
+			s5p_hdmi_phy_control(true, 0x17, 0x0, read_buffer);
+			s5p_hdmi_phy_control(true, 0x17, 0x1, read_buffer);
+#endif
+
+			s5p_hdmi_print_phy_config();
+		}
+	} else {
+		if (s5p_hdmi_phy_is_enable()) {
+			if (s5p_hdmi_i2c_phy_write(
+				PHY_I2C_ADDRESS, 1, buffer) != 0)
+				goto ret_on_err;
+
+			if (s5p_hdmi_i2c_phy_read(
+				PHY_I2C_ADDRESS, size, read_buffer) != 0) {
+				tvout_err("s5p_hdmi_i2c_phy_read failed.\n");
+				goto ret_on_err;
+			}
+			/* Disable OSC pad */
+			s5p_hdmi_phy_enable_oscpad(false, read_buffer);
+
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+			/* set the power-down bits */
+			s5p_hdmi_phy_control(false, 0x1d, 0x7, read_buffer);
+			s5p_hdmi_phy_control(false, 0x1d, 0x0, read_buffer);
+			s5p_hdmi_phy_control(false, 0x1d, 0x1, read_buffer);
+			s5p_hdmi_phy_control(false, 0x1d, 0x2, read_buffer);
+			s5p_hdmi_phy_control(false, 0x1d, 0x4, read_buffer);
+			s5p_hdmi_phy_control(false, 0x1d, 0x5, read_buffer);
+			s5p_hdmi_phy_control(false, 0x1d, 0x6, read_buffer);
+			s5p_hdmi_phy_control(false, 0x4, 0x3, read_buffer);
+#else
+			s5p_hdmi_phy_control(false, 0x1, 0x5, read_buffer);
+			s5p_hdmi_phy_control(false, 0x1, 0x7, read_buffer);
+			s5p_hdmi_phy_control(false, 0x5, 0x5, read_buffer);
+			s5p_hdmi_phy_control(false, 0x17, 0x0, read_buffer);
+			s5p_hdmi_phy_control(false, 0x17, 0x1, read_buffer);
+#endif
+
+			s5p_hdmi_print_phy_config();
+
+			s5p_hdmi_phy_enable(0);
+		}
+	}
+
+	return 0;
+
+ret_on_err:
+	return -1;
+}
+
+/*
+ * Load the PHY configuration table for the requested pixel clock (freq)
+ * and color depth (24/30/36 bpp), then wait for the PHY to report ready.
+ *
+ * Sequence: clear MODE_SET_DONE so the PHY accepts new settings, write
+ * the whole phy_config[freq][depth] table over I2C, re-assert
+ * MODE_SET_DONE (32N PHYs) or kick the core reset (others), and spin
+ * until the PHY READY status bit is observed.
+ *
+ * Returns 0 on success, -1 on an unsupported depth or I2C failure.
+ * NOTE(review): the READY polls are unbounded busy-wait loops — they hang
+ * if the PHY never locks; confirm whether a timeout is needed here.
+ */
+s32 s5p_hdmi_phy_config(
+		enum phy_freq freq, enum s5p_hdmi_color_depth cd)
+{
+	s32 index;
+	s32 size;
+	u8 buffer[32] = {0, };
+	u8 reg;
+
+	/* color depth selects the second index of phy_config[][] */
+	switch (cd) {
+	case HDMI_CD_24:
+		index = 0;
+		break;
+
+	case HDMI_CD_30:
+		index = 1;
+		break;
+
+	case HDMI_CD_36:
+		index = 2;
+		break;
+
+	default:
+		return -1;
+	}
+
+	/* unlock the PHY for reconfiguration */
+	buffer[0] = PHY_REG_MODE_SET_DONE;
+	buffer[1] = 0x00;
+
+	if (s5p_hdmi_i2c_phy_write(PHY_I2C_ADDRESS, 2, buffer) != 0) {
+		tvout_err("s5p_hdmi_i2c_phy_write failed.\n");
+		return -1;
+	}
+
+	writeb(0x5, i2c_hdmi_phy_base + HDMI_I2C_LC);
+
+	size = sizeof(phy_config[freq][index])
+		/ sizeof(phy_config[freq][index][0]);
+
+	memcpy(buffer, phy_config[freq][index], sizeof(buffer));
+
+	if (s5p_hdmi_i2c_phy_write(PHY_I2C_ADDRESS, size, buffer) != 0)
+		return -1;
+
+#ifdef CONFIG_HDMI_PHY_32N
+	/* re-lock: mark the mode set as done */
+	buffer[0] = PHY_REG_MODE_SET_DONE;
+	buffer[1] = 0x80;
+
+	if (s5p_hdmi_i2c_phy_write(PHY_I2C_ADDRESS, 2, buffer) != 0) {
+		tvout_err("s5p_hdmi_i2c_phy_write failed.\n");
+		return -1;
+	}
+#else
+	buffer[0] = 0x01;
+
+	if (s5p_hdmi_i2c_phy_write(PHY_I2C_ADDRESS, 1, buffer) != 0) {
+		tvout_err("s5p_hdmi_i2c_phy_write failed.\n");
+		return -1;
+	}
+#endif
+
+	s5p_hdmi_print_phy_config();
+
+#ifndef CONFIG_HDMI_PHY_32N
+	s5p_hdmi_reg_core_reset();
+#endif
+
+	/* busy-wait for PLL lock / PHY ready */
+#ifdef CONFIG_HDMI_PHY_32N
+	do {
+		reg = readb(hdmi_base + S5P_HDMI_PHY_STATUS0);
+	} while (!(reg & S5P_HDMI_PHY_STATUS_READY));
+#else
+	do {
+		reg = readb(hdmi_base + S5P_HDMI_PHY_STATUS);
+	} while (!(reg & S5P_HDMI_PHY_STATUS_READY));
+#endif
+
+	/* clear any pending PHY-I2C interrupt and return the bus to idle */
+	writeb(I2C_CLK_PEND_INT, i2c_hdmi_phy_base + HDMI_I2C_CON);
+	writeb(I2C_IDLE, i2c_hdmi_phy_base + HDMI_I2C_STAT);
+
+	return 0;
+}
+
+/*
+ * Store the General Control Packet color-depth code for 'depth' into
+ * gcp[1]; unknown depths leave the buffer untouched.
+ */
+void s5p_hdmi_set_gcp(enum s5p_hdmi_color_depth depth, u8 *gcp)
+{
+	if (depth == HDMI_CD_48)
+		gcp[1] = S5P_HDMI_GCP_48BPP;
+	else if (depth == HDMI_CD_36)
+		gcp[1] = S5P_HDMI_GCP_36BPP;
+	else if (depth == HDMI_CD_30)
+		gcp[1] = S5P_HDMI_GCP_30BPP;
+	else if (depth == HDMI_CD_24)
+		gcp[1] = S5P_HDMI_GCP_24BPP;
+}
+
+/*
+ * Program the Audio Clock Regeneration packet.
+ *
+ * acr[1..3] hold CTS and acr[4..6] hold N, each big-endian 24-bit;
+ * CTS is written to both the measured (MCTS) and the fixed (CTS)
+ * register sets, then ACR transmission mode 4 is selected.
+ */
+void s5p_hdmi_reg_acr(u8 *acr)
+{
+	u32 n = acr[4] << 16 | acr[5] << 8 | acr[6];
+	u32 cts = acr[1] << 16 | acr[2] << 8 | acr[3];
+
+	hdmi_write_24(n, hdmi_base + S5P_HDMI_ACR_N0);
+	hdmi_write_24(cts, hdmi_base + S5P_HDMI_ACR_MCTS0);
+	hdmi_write_24(cts, hdmi_base + S5P_HDMI_ACR_CTS0);
+
+	writeb(4, hdmi_base + S5P_HDMI_ACR_CON);
+}
+
+/*
+ * Configure the Audio Sample Packet layout.
+ *
+ * 2-channel audio uses the two-channel packet mode with all sample
+ * packets on one lane and every speaker pair fed from PCM0; multi-channel
+ * (5.1) enables sample lanes AUD0-AUD2 and maps each CHCFG register to
+ * its own PCM pair. The 'asp' argument is currently unused.
+ */
+void s5p_hdmi_reg_asp(u8 *asp, struct s5p_hdmi_audio *audio)
+{
+	if (audio->channel == 2)
+		writeb(S5P_HDMI_AUD_NO_DST_DOUBLE | S5P_HDMI_AUD_TYPE_SAMPLE |
+			S5P_HDMI_AUD_MODE_TWO_CH | S5P_HDMI_AUD_SP_ALL_DIS,
+			hdmi_base + S5P_HDMI_ASP_CON);
+	else
+		writeb(S5P_HDMI_AUD_MODE_MULTI_CH | S5P_HDMI_AUD_SP_AUD2_EN |
+			S5P_HDMI_AUD_SP_AUD1_EN | S5P_HDMI_AUD_SP_AUD0_EN,
+			hdmi_base + S5P_HDMI_ASP_CON);
+
+	writeb(S5P_HDMI_ASP_SP_FLAT_AUD_SAMPLE,
+		hdmi_base + S5P_HDMI_ASP_SP_FLAT);
+
+	if (audio->channel == 2) {
+		writeb(S5P_HDMI_SPK0R_SEL_I_PCM0R | S5P_HDMI_SPK0L_SEL_I_PCM0L,
+			hdmi_base + S5P_HDMI_ASP_CHCFG0);
+		writeb(S5P_HDMI_SPK0R_SEL_I_PCM0R | S5P_HDMI_SPK0L_SEL_I_PCM0L,
+			hdmi_base + S5P_HDMI_ASP_CHCFG1);
+		writeb(S5P_HDMI_SPK0R_SEL_I_PCM0R | S5P_HDMI_SPK0L_SEL_I_PCM0L,
+			hdmi_base + S5P_HDMI_ASP_CHCFG2);
+		writeb(S5P_HDMI_SPK0R_SEL_I_PCM0R | S5P_HDMI_SPK0L_SEL_I_PCM0L,
+			hdmi_base + S5P_HDMI_ASP_CHCFG3);
+	} else {
+		writeb(S5P_HDMI_SPK0R_SEL_I_PCM0R | S5P_HDMI_SPK0L_SEL_I_PCM0L,
+			hdmi_base + S5P_HDMI_ASP_CHCFG0);
+		/* NOTE(review): L/R appear swapped for PCM1 relative to the
+		 * other pairs — confirm intended mapping. */
+		writeb(S5P_HDMI_SPK0R_SEL_I_PCM1L | S5P_HDMI_SPK0L_SEL_I_PCM1R,
+			hdmi_base + S5P_HDMI_ASP_CHCFG1);
+		writeb(S5P_HDMI_SPK0R_SEL_I_PCM2R | S5P_HDMI_SPK0L_SEL_I_PCM2L,
+			hdmi_base + S5P_HDMI_ASP_CHCFG2);
+		writeb(S5P_HDMI_SPK0R_SEL_I_PCM3R | S5P_HDMI_SPK0L_SEL_I_PCM3L,
+			hdmi_base + S5P_HDMI_ASP_CHCFG3);
+	}
+}
+
+/*
+ * Program General Control Packet byte 2 (color-depth code) and, for
+ * interlaced modes (i_p != 0), enable GCP transmission on both the first
+ * and second vsync; progressive modes disable those two triggers.
+ */
+void s5p_hdmi_reg_gcp(u8 i_p, u8 *gcp)
+{
+	u32 gcp_con;
+
+	writeb(gcp[2], hdmi_base + S5P_HDMI_GCP_BYTE2);
+
+	gcp_con = readb(hdmi_base + S5P_HDMI_GCP_CON);
+
+	if (i_p)
+		gcp_con |= S5P_HDMI_GCP_CON_EN_1ST_VSYNC |
+				S5P_HDMI_GCP_CON_EN_2ST_VSYNC;
+	else
+		gcp_con &= (~(S5P_HDMI_GCP_CON_EN_1ST_VSYNC |
+				S5P_HDMI_GCP_CON_EN_2ST_VSYNC));
+
+	writeb(gcp_con, hdmi_base + S5P_HDMI_GCP_CON);
+
+}
+
+/* Program only the ACP packet type from header byte 1; the packet body
+ * ('acp') is currently not written to hardware. */
+void s5p_hdmi_reg_acp(u8 *header, u8 *acp)
+{
+	writeb(header[1], hdmi_base + S5P_HDMI_ACP_TYPE);
+}
+
+/* No-op: ISRC packet programming is not implemented in this driver. */
+void s5p_hdmi_reg_isrc(u8 *isrc1, u8 *isrc2)
+{
+}
+
+/* No-op: Gamut Metadata Packet programming is not implemented. */
+void s5p_hdmi_reg_gmp(u8 *gmp)
+{
+}
+
+#ifdef CONFIG_HDMI_14A_3D
+
+#define VENDOR_HEADER00 0x81
+#define VENDOR_HEADER01 0x01
+#define VENDOR_HEADER02 0x05
+#define VENDOR_INFOFRAME_HEADER (0x1 + 0x01 + 0x06)
+#define VENDOR_PACKET_BYTE_LENGTH 0x06
+#define TRANSMIT_EVERY_VSYNC (1<<1)
+
+/*
+ * Program one infoframe packet (HDMI 1.4a / 3D build).
+ *
+ * For HDMI_VSI_INFO the vendor-specific infoframe is written with fixed
+ * payloads per 3D packing (frame-packing, top-bottom, side-by-side-half)
+ * and set to transmit every vsync; a non-3D type zeroes the VSI payload
+ * and returns early. For AVI/SPD/AUI/MPG frames only the header is
+ * written here and sum_addr/start_addr select where the common
+ * checksum-and-payload write at the bottom lands.
+ *
+ * NOTE(review): ucChecksum is computed in every VSI branch but never
+ * written to a register — looks dead; confirm whether the hardware
+ * auto-generates the VSI checksum.
+ */
+void s5p_hdmi_reg_infoframe(struct s5p_hdmi_infoframe *info,
+		u8 *data, u8 type_3D)
+{
+	u32 start_addr = 0, sum_addr = 0;
+	u8 sum;
+	u32 uSpdCon;
+	u8 ucChecksum, i;
+
+	switch (info->type) {
+	case HDMI_VSI_INFO:
+		writeb((u8)VENDOR_HEADER00, hdmi_base + S5P_HDMI_VSI_HEADER0);
+		writeb((u8)VENDOR_HEADER01, hdmi_base + S5P_HDMI_VSI_HEADER1);
+
+		if (type_3D == HDMI_3D_FP_FORMAT) {
+			/* frame-packing payload */
+			writeb((u8)VENDOR_HEADER02,
+				hdmi_base + S5P_HDMI_VSI_HEADER2);
+			ucChecksum = VENDOR_HEADER00 +
+				VENDOR_HEADER01 + VENDOR_HEADER02;
+
+			for (i = 0; i < VENDOR_PACKET_BYTE_LENGTH; i++)
+				ucChecksum += readb(hdmi_base +
+					S5P_HDMI_VSI_DATA01+4*i);
+
+			writeb((u8)0x2a, hdmi_base + S5P_HDMI_VSI_DATA00);
+			writeb((u8)0x03, hdmi_base + S5P_HDMI_VSI_DATA01);
+			writeb((u8)0x0c, hdmi_base + S5P_HDMI_VSI_DATA02);
+			writeb((u8)0x00, hdmi_base + S5P_HDMI_VSI_DATA03);
+			writeb((u8)0x40, hdmi_base + S5P_HDMI_VSI_DATA04);
+			writeb((u8)0x00, hdmi_base + S5P_HDMI_VSI_DATA05);
+
+		} else if (type_3D == HDMI_3D_TB_FORMAT) {
+			/* top-and-bottom payload */
+			writeb((u8)VENDOR_HEADER02, hdmi_base +
+				S5P_HDMI_VSI_HEADER2);
+			ucChecksum = VENDOR_HEADER00 +
+				VENDOR_HEADER01 + VENDOR_HEADER02;
+
+			for (i = 0; i < VENDOR_PACKET_BYTE_LENGTH; i++)
+				ucChecksum += readb(hdmi_base +
+					S5P_HDMI_VSI_DATA01+4*i);
+
+			writeb((u8)0xca, hdmi_base + S5P_HDMI_VSI_DATA00);
+			writeb((u8)0x03, hdmi_base + S5P_HDMI_VSI_DATA01);
+			writeb((u8)0x0c, hdmi_base + S5P_HDMI_VSI_DATA02);
+			writeb((u8)0x00, hdmi_base + S5P_HDMI_VSI_DATA03);
+			writeb((u8)0x40, hdmi_base + S5P_HDMI_VSI_DATA04);
+			writeb((u8)0x60, hdmi_base + S5P_HDMI_VSI_DATA05);
+
+		} else if (type_3D == HDMI_3D_SSH_FORMAT) {
+			/* side-by-side (half) payload; one extra byte */
+			writeb((u8)0x06, hdmi_base + S5P_HDMI_VSI_HEADER2);
+			ucChecksum = VENDOR_HEADER00 + VENDOR_HEADER01 + 0x06;
+
+			for (i = 0; i < 7; i++)
+				ucChecksum += readb(hdmi_base +
+					S5P_HDMI_VSI_DATA01+4*i);
+
+			writeb((u8)0x99, hdmi_base + S5P_HDMI_VSI_DATA00);
+			writeb((u8)0x03, hdmi_base + S5P_HDMI_VSI_DATA01);
+			writeb((u8)0x0c, hdmi_base + S5P_HDMI_VSI_DATA02);
+			writeb((u8)0x00, hdmi_base + S5P_HDMI_VSI_DATA03);
+			writeb((u8)0x40, hdmi_base + S5P_HDMI_VSI_DATA04);
+			writeb((u8)0x80, hdmi_base + S5P_HDMI_VSI_DATA05);
+			writeb((u8)0x10, hdmi_base + S5P_HDMI_VSI_DATA06);
+
+		} else {
+			/* not a known 3D type: clear the VSI and bail out */
+			writeb((u8)0x0, hdmi_base + S5P_HDMI_VSI_HEADER2);
+			ucChecksum = VENDOR_HEADER00 + VENDOR_HEADER01 + 0x06;
+
+			for (i = 0; i < 7; i++)
+				ucChecksum += readb(hdmi_base +
+					S5P_HDMI_VSI_DATA01+4*i);
+
+			writeb((u8)0x0, hdmi_base + S5P_HDMI_VSI_DATA00);
+			writeb((u8)0x0, hdmi_base + S5P_HDMI_VSI_DATA01);
+			writeb((u8)0x0, hdmi_base + S5P_HDMI_VSI_DATA02);
+			writeb((u8)0x0, hdmi_base + S5P_HDMI_VSI_DATA03);
+			writeb((u8)0x0, hdmi_base + S5P_HDMI_VSI_DATA04);
+			writeb((u8)0x0, hdmi_base + S5P_HDMI_VSI_DATA05);
+			writeb((u8)0x0, hdmi_base + S5P_HDMI_VSI_DATA06);
+			tvout_dbg("2D format is supported.\n");
+			return ;
+		}
+
+		/* transmit the VSI on every vsync */
+		uSpdCon = readb(hdmi_base + S5P_HDMI_VSI_CON);
+		uSpdCon = (uSpdCon&(~(3<<0)))|(TRANSMIT_EVERY_VSYNC);
+		writeb((u8)uSpdCon, hdmi_base + S5P_HDMI_VSI_CON);
+		break;
+	case HDMI_AVI_INFO:
+		writeb((u8)0x82, hdmi_base + S5P_HDMI_AVI_HEADER0);
+		writeb((u8)0x02, hdmi_base + S5P_HDMI_AVI_HEADER1);
+		writeb((u8)0x0d, hdmi_base + S5P_HDMI_AVI_HEADER2);
+
+		sum_addr = S5P_HDMI_AVI_CHECK_SUM;
+		start_addr = S5P_HDMI_AVI_BYTE1;
+		break;
+	case HDMI_SPD_INFO:
+		sum_addr = S5P_HDMI_SPD_DATA00;
+		start_addr = S5P_HDMI_SPD_DATA01 + 4;
+		/* write header */
+		writeb((u8)info->type, hdmi_base + S5P_HDMI_SPD_HEADER0);
+		writeb((u8)info->version, hdmi_base + S5P_HDMI_SPD_HEADER1);
+		writeb((u8)info->length, hdmi_base + S5P_HDMI_SPD_HEADER2);
+		break;
+	case HDMI_AUI_INFO:
+		writeb((u8)0x84, hdmi_base + S5P_HDMI_AUI_HEADER0);
+		writeb((u8)0x01, hdmi_base + S5P_HDMI_AUI_HEADER1);
+		writeb((u8)0x0a, hdmi_base + S5P_HDMI_AUI_HEADER2);
+		sum_addr = S5P_HDMI_AUI_CHECK_SUM;
+		start_addr = S5P_HDMI_AUI_BYTE1;
+		break;
+	case HDMI_MPG_INFO:
+		sum_addr = S5P_HDMI_MPG_CHECK_SUM;
+		start_addr = S5P_HDMI_MPG_BYTE1;
+		break;
+	default:
+		tvout_dbg("undefined infoframe\n");
+		return;
+	}
+
+	/* calculate checksum */
+	sum = (u8)info->type + info->version + info->length;
+	sum = s5p_hdmi_checksum(sum, info->length, data);
+
+	/* write checksum */
+	writeb(sum, hdmi_base + sum_addr);
+	/* write data */
+	hdmi_write_l(data, hdmi_base, start_addr, info->length);
+}
+
+/*
+ * Program the timing generator from precomputed values (HDMI 1.4a build).
+ *
+ * All H/V sizes, active starts and vsync/field positions come straight
+ * from the s5p_hdmi_v_format table; the FIELD bit in TG_CMD is set for
+ * interlaced frames and cleared otherwise.
+ */
+void s5p_hdmi_reg_tg(struct s5p_hdmi_v_format *v)
+{
+	u8 tg;
+	struct s5p_hdmi_v_frame *frame = &(v->frame);
+
+	hdmi_write_16(v->tg_H_FSZ, hdmi_base + S5P_HDMI_TG_H_FSZ_L);
+	hdmi_write_16(v->tg_HACT_ST, hdmi_base + S5P_HDMI_TG_HACT_ST_L);
+	hdmi_write_16(v->tg_HACT_SZ, hdmi_base + S5P_HDMI_TG_HACT_SZ_L);
+
+	hdmi_write_16(v->tg_V_FSZ, hdmi_base + S5P_HDMI_TG_V_FSZ_L);
+	hdmi_write_16(v->tg_VACT_SZ, hdmi_base + S5P_HDMI_TG_VACT_SZ_L);
+	hdmi_write_16(v->tg_VACT_ST, hdmi_base + S5P_HDMI_TG_VACT_ST_L);
+	hdmi_write_16(v->tg_VACT_ST2, hdmi_base + S5P_HDMI_TG_VACT_ST2_L);
+	hdmi_write_16(v->tg_VACT_ST3, hdmi_base + S5P_HDMI_TG_VACT_ST3_L);
+	hdmi_write_16(v->tg_VACT_ST4, hdmi_base + S5P_HDMI_TG_VACT_ST4_L);
+
+	hdmi_write_16(v->tg_VSYNC_BOT_HDMI, hdmi_base +
+		S5P_HDMI_TG_VSYNC_BOT_HDMI_L);
+	hdmi_write_16(v->tg_VSYNC_TOP_HDMI, hdmi_base +
+		S5P_HDMI_TG_VSYNC_TOP_HDMI_L);
+	hdmi_write_16(v->tg_FIELD_TOP_HDMI, hdmi_base +
+		S5P_HDMI_TG_FIELD_TOP_HDMI_L);
+	hdmi_write_16(v->tg_FIELD_BOT_HDMI, hdmi_base +
+		S5P_HDMI_TG_FIELD_BOT_HDMI_L);
+
+	/* write reg default value */
+	hdmi_write_16(v->tg_VSYNC, hdmi_base + S5P_HDMI_TG_VSYNC_L);
+	hdmi_write_16(v->tg_VSYNC2, hdmi_base + S5P_HDMI_TG_VSYNC2_L);
+	hdmi_write_16(v->tg_FIELD_CHG, hdmi_base + S5P_HDMI_TG_FIELD_CHG_L);
+
+	tg = readb(hdmi_base + S5P_HDMI_TG_CMD);
+
+	hdmi_bit_set(frame->interlaced, tg, S5P_HDMI_FIELD);
+
+	writeb(tg, hdmi_base + S5P_HDMI_TG_CMD);
+}
+
+/*
+ * Program the video timing registers (HDMI 1.4a build).
+ *
+ * Each 16-bit timing value from the frame table is split into a low and
+ * high byte register pair. Register pairs whose value is 0xffff are the
+ * hardware's "unused" sentinel for timing slots this mode does not need
+ * (extra blank fields, extra vsync lines, extra VACT spaces).
+ */
+void s5p_hdmi_reg_v_timing(struct s5p_hdmi_v_format *v)
+{
+	u32 uTemp32;
+
+	struct s5p_hdmi_v_frame *frame = &(v->frame);
+
+	uTemp32 = frame->vH_Line;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_H_LINE_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_H_LINE_1);
+
+	uTemp32 = frame->vV_Line;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_LINE_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_LINE_1);
+
+	uTemp32 = frame->vH_SYNC_START;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_H_SYNC_START_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_H_SYNC_START_1);
+
+	uTemp32 = frame->vH_SYNC_END;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_H_SYNC_END_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_H_SYNC_END_1);
+
+	uTemp32 = frame->vV1_Blank;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V1_BLANK_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V1_BLANK_1);
+
+	uTemp32 = frame->vV2_Blank;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V2_BLANK_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V2_BLANK_1);
+
+	uTemp32 = frame->vHBlank;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_H_BLANK_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_H_BLANK_1);
+
+	uTemp32 = frame->VBLANK_F0;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_BLANK_F0_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_BLANK_F0_1);
+
+	uTemp32 = frame->VBLANK_F1;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_BLANK_F1_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_BLANK_F1_1);
+
+	/* F2..F5 blank fields unused for these modes: write the sentinel */
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_BLANK_F2_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_BLANK_F2_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_BLANK_F3_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_BLANK_F3_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_BLANK_F4_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_BLANK_F4_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_BLANK_F5_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_BLANK_F5_1);
+
+	uTemp32 = frame->vVSYNC_LINE_BEF_1;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_SYNC_LINE_BEF_1_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_SYNC_LINE_BEF_1_1);
+
+	uTemp32 = frame->vVSYNC_LINE_BEF_2;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_SYNC_LINE_BEF_2_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_SYNC_LINE_BEF_2_1);
+
+	uTemp32 = frame->vVSYNC_LINE_AFT_1;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_1_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_1_1);
+
+	uTemp32 = frame->vVSYNC_LINE_AFT_2;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_2_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_2_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_3_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_3_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_4_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_4_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_5_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_5_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_6_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_V_SYNC_LINE_AFT_6_1);
+
+	uTemp32 = frame->vVSYNC_LINE_AFT_PXL_1;
+	writeb((u8)(uTemp32&0xff), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_1_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_1_1);
+
+	uTemp32 = frame->vVSYNC_LINE_AFT_PXL_2;
+	writeb((u8)(uTemp32&0xff), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_2_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_2_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_3_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_3_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_4_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_4_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_5_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_5_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_6_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base +
+		S5P_HDMI_V_SYNC_LINE_AFT_PXL_6_1);
+
+	/* 3D frame-packing active-space lines */
+	uTemp32 = frame->vVACT_SPACE_1;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_VACT_SPACE_1_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_VACT_SPACE_1_1);
+
+	uTemp32 = frame->vVACT_SPACE_2;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_VACT_SPACE_2_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_VACT_SPACE_2_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_VACT_SPACE_3_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_VACT_SPACE_3_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_VACT_SPACE_4_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_VACT_SPACE_4_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_VACT_SPACE_5_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_VACT_SPACE_5_1);
+
+	uTemp32 = 0xffff;
+	writeb((u8)(uTemp32&0xff), hdmi_base + S5P_HDMI_VACT_SPACE_6_0);
+	writeb((u8)(uTemp32 >> 8), hdmi_base + S5P_HDMI_VACT_SPACE_6_1);
+
+	writeb(frame->Hsync_polarity, hdmi_base + S5P_HDMI_HSYNC_POL);
+
+	writeb(frame->Vsync_polarity, hdmi_base + S5P_HDMI_VSYNC_POL);
+
+	writeb(frame->interlaced, hdmi_base + S5P_HDMI_INT_PRO_MODE);
+}
+
+/*
+ * Set the blue-screen fill color; each 16-bit component is written as a
+ * high-byte/low-byte register pair.
+ *
+ * Fix: the low byte of 'b' was written to S5P_HDMI_BLUE_SCREEN_B_0 a
+ * second time (overwriting the high byte) instead of to the _B_1
+ * register, as the G and R pairs below demonstrate.
+ */
+void s5p_hdmi_reg_bluescreen_clr(u16 b, u16 g, u16 r)
+{
+	writeb((u8)(b>>8), hdmi_base + S5P_HDMI_BLUE_SCREEN_B_0);
+	writeb((u8)(b&0xff), hdmi_base + S5P_HDMI_BLUE_SCREEN_B_1);
+	writeb((u8)(g>>8), hdmi_base + S5P_HDMI_BLUE_SCREEN_G_0);
+	writeb((u8)(g&0xff), hdmi_base + S5P_HDMI_BLUE_SCREEN_G_1);
+	writeb((u8)(r>>8), hdmi_base + S5P_HDMI_BLUE_SCREEN_R_0);
+	writeb((u8)(r&0xff), hdmi_base + S5P_HDMI_BLUE_SCREEN_R_1);
+}
+
+#else
+/*
+ * Program one infoframe packet (pre-1.4a register layout).
+ *
+ * Selects checksum and payload register addresses per packet type
+ * (SPD also writes its header; VSI is not supported and does nothing),
+ * then writes the computed checksum followed by the payload bytes.
+ */
+void s5p_hdmi_reg_infoframe(struct s5p_hdmi_infoframe *info, u8 *data)
+{
+	u32 start_addr = 0, sum_addr = 0;
+	u8 sum;
+
+	switch (info->type) {
+	case HDMI_VSI_INFO:
+		break;
+	case HDMI_AVI_INFO:
+		sum_addr = S5P_HDMI_AVI_CHECK_SUM;
+		start_addr = S5P_HDMI_AVI_DATA;
+		break;
+	case HDMI_SPD_INFO:
+		sum_addr = S5P_HDMI_SPD_DATA;
+		start_addr = S5P_HDMI_SPD_DATA + 4;
+		/* write header */
+		writeb((u8)info->type, hdmi_base + S5P_HDMI_SPD_HEADER);
+		writeb((u8)info->version, hdmi_base + S5P_HDMI_SPD_HEADER + 4);
+		writeb((u8)info->length, hdmi_base + S5P_HDMI_SPD_HEADER + 8);
+		break;
+	case HDMI_AUI_INFO:
+		sum_addr = S5P_HDMI_AUI_CHECK_SUM;
+		start_addr = S5P_HDMI_AUI_BYTE1;
+		break;
+	case HDMI_MPG_INFO:
+		sum_addr = S5P_HDMI_MPG_CHECK_SUM;
+		start_addr = S5P_HDMI_MPG_DATA;
+		break;
+	default:
+		tvout_dbg("undefined infoframe\n");
+		return;
+	}
+
+	/* calculate checksum */
+	sum = (u8)info->type + info->version + info->length;
+	sum = s5p_hdmi_checksum(sum, info->length, data);
+
+	/* write checksum */
+	writeb(sum, hdmi_base + sum_addr);
+
+	/* write data */
+	hdmi_write_l(data, hdmi_base, start_addr, info->length);
+}
+
+/*
+ * Program the timing generator (pre-1.4a register layout).
+ *
+ * Horizontal/vertical sizes are derived from the frame description;
+ * the VACT start, VACT_ST2 and bottom-vsync values use interlace-specific
+ * constants (0x249/0x233 etc. are hardware defaults for interlaced
+ * modes). The FIELD bit of TG_CMD mirrors frame->i_p.
+ */
+void s5p_hdmi_reg_tg(struct s5p_hdmi_v_frame *frame)
+{
+	u16 reg;
+	u8 tg;
+
+	hdmi_write_16(frame->h_total, hdmi_base + S5P_HDMI_TG_H_FSZ_L);
+	hdmi_write_16((frame->h_blank)-1, hdmi_base + S5P_HDMI_TG_HACT_ST_L);
+	hdmi_write_16((frame->h_active)+1, hdmi_base + S5P_HDMI_TG_HACT_SZ_L);
+
+	hdmi_write_16(frame->v_total, hdmi_base + S5P_HDMI_TG_V_FSZ_L);
+	hdmi_write_16(frame->v_active, hdmi_base + S5P_HDMI_TG_VACT_SZ_L);
+
+
+	/* interlaced: VACT start is half the per-field blank */
+	reg = (frame->i_p) ? (frame->v_total - frame->v_active*2) / 2 :
+			frame->v_total - frame->v_active;
+	hdmi_write_16(reg, hdmi_base + S5P_HDMI_TG_VACT_ST_L);
+
+	reg = (frame->i_p) ? 0x249 : 0x248;
+	hdmi_write_16(reg, hdmi_base + S5P_HDMI_TG_VACT_ST2_L);
+
+	reg = (frame->i_p) ? 0x233 : 1;
+	hdmi_write_16(reg, hdmi_base + S5P_HDMI_TG_VSYNC_BOT_HDMI_L);
+
+	/* write reg default value */
+	hdmi_write_16(0x1, hdmi_base + S5P_HDMI_TG_VSYNC_L);
+	hdmi_write_16(0x233, hdmi_base + S5P_HDMI_TG_VSYNC2_L);
+	hdmi_write_16(0x233, hdmi_base + S5P_HDMI_TG_FIELD_CHG_L);
+	hdmi_write_16(0x1, hdmi_base + S5P_HDMI_TG_VSYNC_TOP_HDMI_L);
+	hdmi_write_16(0x1, hdmi_base + S5P_HDMI_TG_FIELD_TOP_HDMI_L);
+	hdmi_write_16(0x233, hdmi_base + S5P_HDMI_TG_FIELD_BOT_HDMI_L);
+
+	tg = readb(hdmi_base + S5P_HDMI_TG_CMD);
+
+	hdmi_bit_set(frame->i_p, tg, S5P_HDMI_FIELD);
+
+	writeb(tg, hdmi_base + S5P_HDMI_TG_CMD);
+}
+
+/*
+ * Program the video timing registers (pre-1.4a packed-field layout).
+ *
+ * Blank, line and sync values are packed into 24-bit fields before
+ * being written; interlaced modes additionally program the field blank
+ * and bottom/horizontal-position vsync generators, while progressive
+ * modes write the documented "disabled" values (0x0 / 0x1001).
+ */
+void s5p_hdmi_reg_v_timing(struct s5p_hdmi_v_format *v)
+{
+	u32 reg32;
+
+	struct s5p_hdmi_v_frame *frame = &(v->frame);
+
+	writeb(frame->polarity, hdmi_base + S5P_HDMI_SYNC_MODE);
+	writeb(frame->i_p, hdmi_base + S5P_HDMI_INT_PRO_MODE);
+
+	hdmi_write_16(frame->h_blank, hdmi_base + S5P_HDMI_H_BLANK_0);
+
+	/* [21:11] = v-blank start, [10:0] = v-blank end */
+	reg32 = (frame->v_blank << 11) | (frame->v_blank + frame->v_active);
+	hdmi_write_24(reg32, hdmi_base + S5P_HDMI_V_BLANK_0);
+
+	reg32 = (frame->h_total << 12) | frame->v_total;
+	hdmi_write_24(reg32, hdmi_base + S5P_HDMI_H_V_LINE_0);
+
+	reg32 = frame->polarity << 20 | v->h_sync.end << 10 | v->h_sync.begin;
+	hdmi_write_24(reg32, hdmi_base + S5P_HDMI_H_SYNC_GEN_0);
+
+	reg32 = v->v_sync_top.begin << 12 | v->v_sync_top.end;
+	hdmi_write_24(reg32, hdmi_base + S5P_HDMI_V_SYNC_GEN_1_0);
+
+	if (frame->i_p) {
+		reg32 = v->v_blank_f.end << 11 | v->v_blank_f.begin;
+		hdmi_write_24(reg32, hdmi_base + S5P_HDMI_V_BLANK_F_0);
+
+		reg32 = v->v_sync_bottom.begin << 12 | v->v_sync_bottom.end;
+		hdmi_write_24(reg32, hdmi_base + S5P_HDMI_V_SYNC_GEN_2_0);
+
+		reg32 = v->v_sync_h_pos.begin << 12 | v->v_sync_h_pos.end;
+		hdmi_write_24(reg32, hdmi_base + S5P_HDMI_V_SYNC_GEN_3_0);
+	} else {
+		hdmi_write_24(0x0, hdmi_base + S5P_HDMI_V_BLANK_F_0);
+		hdmi_write_24(0x1001, hdmi_base + S5P_HDMI_V_SYNC_GEN_2_0);
+		hdmi_write_24(0x1001, hdmi_base + S5P_HDMI_V_SYNC_GEN_3_0);
+	}
+}
+
+/* Set the blue-screen fill color, one byte per component (YCbCr order
+ * when the output is YUV, BGR order when RGB). */
+void s5p_hdmi_reg_bluescreen_clr(u8 cb_b, u8 y_g, u8 cr_r)
+{
+	writeb(cb_b, hdmi_base + S5P_HDMI_BLUE_SCREEN_0);
+	writeb(y_g, hdmi_base + S5P_HDMI_BLUE_SCREEN_1);
+	writeb(cr_r, hdmi_base + S5P_HDMI_BLUE_SCREEN_2);
+}
+#endif
+
+/*
+ * Enable or disable the blue-screen output via the BLUE_SCR_EN bit of
+ * CON_0.
+ *
+ * Fix: CON_0 is a byte-wide register accessed with readb()/writeb()
+ * everywhere else in this file; the 32-bit readl() into a u8 here was
+ * replaced with readb() for a consistent access width.
+ */
+void s5p_hdmi_reg_bluescreen(bool en)
+{
+	u8 reg = readb(hdmi_base + S5P_HDMI_CON_0);
+
+	hdmi_bit_set(en, reg, S5P_HDMI_BLUE_SCR_EN);
+
+	writeb(reg, hdmi_base + S5P_HDMI_CON_0);
+}
+
+/* Program the output pixel-limiter range: luma clamped to [y_min, y_max],
+ * chroma to [c_min, c_max]. */
+void s5p_hdmi_reg_clr_range(u8 y_min, u8 y_max, u8 c_min, u8 c_max)
+{
+	writeb(y_max, hdmi_base + S5P_HDMI_YMAX);
+	writeb(y_min, hdmi_base + S5P_HDMI_YMIN);
+	writeb(c_max, hdmi_base + S5P_HDMI_CMAX);
+	writeb(c_min, hdmi_base + S5P_HDMI_CMIN);
+}
+
+/*
+ * Update three control bits of the timing-generator command register:
+ * sync-source type (time), BT.656 sync extraction (bt656), and the
+ * TG enable itself (tg). Other bits are preserved.
+ */
+void s5p_hdmi_reg_tg_cmd(bool time, bool bt656, bool tg)
+{
+	u8 reg = 0;
+
+	reg = readb(hdmi_base + S5P_HDMI_TG_CMD);
+
+	hdmi_bit_set(time, reg, S5P_HDMI_GETSYNC_TYPE);
+	hdmi_bit_set(bt656, reg, S5P_HDMI_GETSYNC);
+	hdmi_bit_set(tg, reg, S5P_HDMI_TG);
+
+	writeb(reg, hdmi_base + S5P_HDMI_TG_CMD);
+}
+
+/*
+ * Enable or disable the HDMI transmitter core.
+ *
+ * Disabling also clears the audio-sample-packet enable, then busy-waits
+ * until the hardware reports the enable bit has actually dropped.
+ * NOTE(review): the wait loop has no timeout — confirm the EN bit is
+ * guaranteed to clear.
+ */
+void s5p_hdmi_reg_enable(bool en)
+{
+	u8 reg;
+
+	reg = readb(hdmi_base + S5P_HDMI_CON_0);
+
+	if (en)
+		reg |= S5P_HDMI_EN;
+	else
+		reg &= ~(S5P_HDMI_EN | S5P_HDMI_ASP_EN);
+
+	writeb(reg, hdmi_base + S5P_HDMI_CON_0);
+
+	if (!en) {
+		do {
+			reg = readb(hdmi_base + S5P_HDMI_CON_0);
+		} while (reg & S5P_HDMI_EN);
+	}
+}
+
+/* Return the raw interrupt flag register (layout differs per IP rev). */
+u8 s5p_hdmi_reg_intc_status(void)
+{
+#ifdef CONFIG_HDMI_14A_3D
+	return readb(hdmi_base + S5P_HDMI_INTC_FLAG0);
+#else
+	return readb(hdmi_base + S5P_HDMI_INTC_FLAG);
+#endif
+}
+
+/* Return the interrupt enable/control register (per-revision address). */
+u8 s5p_hdmi_reg_intc_get_enabled(void)
+{
+#ifdef CONFIG_HDMI_14A_3D
+	return readb(hdmi_base + S5P_HDMI_INTC_CON0);
+#else
+	return readb(hdmi_base + S5P_HDMI_INTC_CON);
+#endif
+}
+
+/* Acknowledge one interrupt source by writing its flag bit back
+ * (write-1-to-clear), preserving the other pending flags. */
+void s5p_hdmi_reg_intc_clear_pending(enum s5p_hdmi_interrrupt intr)
+{
+	u8 reg;
+#ifdef CONFIG_HDMI_14A_3D
+	reg = readb(hdmi_base + S5P_HDMI_INTC_FLAG0);
+	writeb(reg | (1 << intr), hdmi_base + S5P_HDMI_INTC_FLAG0);
+#else
+	reg = readb(hdmi_base + S5P_HDMI_INTC_FLAG);
+	writeb(reg | (1 << intr), hdmi_base + S5P_HDMI_INTC_FLAG);
+#endif
+}
+
+
+/* Select between software-driven HPD (enable=true sets SEL_I_HPD) and
+ * the hardware HPD pin, leaving the other HPD bits unchanged. */
+void s5p_hdmi_reg_sw_hpd_enable(bool enable)
+{
+	u8 reg;
+
+	reg = readb(hdmi_base + S5P_HDMI_HPD);
+	reg &= ~S5P_HDMI_HPD_SEL_I_HPD;
+
+	if (enable)
+		writeb(reg | S5P_HDMI_HPD_SEL_I_HPD, hdmi_base + S5P_HDMI_HPD);
+	else
+		writeb(reg, hdmi_base + S5P_HDMI_HPD);
+}
+
+/*
+ * Force the software HPD state to plugged (on_off=true) or unplugged.
+ *
+ * Fix: the HPD register is accessed byte-wide with readb()/writeb() in
+ * s5p_hdmi_reg_sw_hpd_enable() above; the 32-bit writel() here was
+ * replaced with writeb() to match the register's access width.
+ */
+void s5p_hdmi_reg_set_hpd_onoff(bool on_off)
+{
+	u8 reg;
+
+	reg = readb(hdmi_base + S5P_HDMI_HPD);
+	reg &= ~S5P_HDMI_SW_HPD_PLUGGED;
+
+	if (on_off)
+		writeb(reg | S5P_HDMI_SW_HPD_PLUGGED,
+			hdmi_base + S5P_HDMI_HPD);
+	else
+		writeb(reg | S5P_HDMI_SW_HPD_UNPLUGGED,
+			hdmi_base + S5P_HDMI_HPD);
+
+}
+
+/* Return the raw hot-plug-detect status register. */
+u8 s5p_hdmi_reg_get_hpd_status(void)
+{
+	return readb(hdmi_base + S5P_HDMI_HPD_STATUS);
+}
+
+/* Kick HPD event generation by writing all-ones to the HPD_GEN register
+ * (address differs between IP revisions). */
+void s5p_hdmi_reg_hpd_gen(void)
+{
+#ifdef CONFIG_HDMI_14A_3D
+	writeb(0xFF, hdmi_base + S5P_HDMI_HPD_GEN0);
+#else
+	writeb(0xFF, hdmi_base + S5P_HDMI_HPD_GEN);
+#endif
+}
+
+/*
+ * Register a handler for HDMI interrupt source 'num' in the dispatch
+ * table used by s5p_hdmi_irq(). An existing handler for the same source
+ * is silently overwritten (only logged at debug level).
+ *
+ * Returns 0 on success, -1 for a NULL handler or out-of-range source.
+ */
+int s5p_hdmi_reg_intc_set_isr(irqreturn_t (*isr)(int, void *), u8 num)
+{
+	if (!isr) {
+		tvout_err("invalid irq routine\n");
+		return -1;
+	}
+
+	if (num >= HDMI_IRQ_TOTAL_NUM) {
+		tvout_err("max irq_num exceeded\n");
+		return -1;
+	}
+
+	if (s5p_hdmi_isr_ftn[num])
+		tvout_dbg("irq %d already registered\n", num);
+
+	s5p_hdmi_isr_ftn[num] = isr;
+
+	tvout_dbg("success to register irq : %d\n", num);
+
+	return 0;
+}
+EXPORT_SYMBOL(s5p_hdmi_reg_intc_set_isr);
+EXPORT_SYMBOL(s5p_hdmi_reg_intc_set_isr);
+
+/*
+ * Enable or disable one HDMI interrupt source.
+ *
+ * The global interrupt enable bit is raised with the first source
+ * enabled; when the mask reads back as empty after disabling a source,
+ * the global bit is dropped too (the &= on an already-zero value is a
+ * no-op kept for symmetry).
+ */
+void s5p_hdmi_reg_intc_enable(enum s5p_hdmi_interrrupt intr, u8 en)
+{
+	u8 reg;
+
+	reg = s5p_hdmi_reg_intc_get_enabled();
+
+	if (en) {
+		if (!reg)
+			reg |= S5P_HDMI_INTC_EN_GLOBAL;
+
+		reg |= (1 << intr);
+	} else {
+		reg &= ~(1 << intr);
+
+		if (!reg)
+			reg &= ~S5P_HDMI_INTC_EN_GLOBAL;
+	}
+#ifdef CONFIG_HDMI_14A_3D
+	writeb(reg, hdmi_base + S5P_HDMI_INTC_CON0);
+#else
+	writeb(reg, hdmi_base + S5P_HDMI_INTC_CON);
+#endif
+}
+
+/*
+ * Enable or disable audio-sample-packet output and AUI transmission.
+ *
+ * Enabling is refused while the link is in DVI mode (DVI carries no
+ * audio) and, when CONFIG_HDMI_EARJACK_MUTE is set, while audio is
+ * routed to the external jack.
+ */
+void s5p_hdmi_reg_audio_enable(u8 en)
+{
+	u8 con, mod;
+	con = readb(hdmi_base + S5P_HDMI_CON_0);
+	mod = readb(hdmi_base + S5P_HDMI_MODE_SEL);
+
+	if (en) {
+#ifndef CONFIG_HDMI_EARJACK_MUTE
+		if (mod & S5P_HDMI_DVI_MODE_EN)
+#else
+		if ((mod & S5P_HDMI_DVI_MODE_EN) || hdmi_audio_ext)
+#endif
+			return;
+
+		con |= S5P_HDMI_ASP_EN;
+		writeb(HDMI_TRANS_EVERY_SYNC, hdmi_base + S5P_HDMI_AUI_CON);
+	} else {
+		con &= ~S5P_HDMI_ASP_EN;
+		writeb(HDMI_DO_NOT_TANS, hdmi_base + S5P_HDMI_AUI_CON);
+	}
+
+	writeb(con, hdmi_base + S5P_HDMI_CON_0);
+}
+
+/*
+ * Initialize the HDMI audio input path: SPDIF-in (config, repetition
+ * time, overflow IRQ, clock) when CONFIG_SND_SAMSUNG_SPDIF is set,
+ * otherwise the internal I2S receiver. Always returns 0.
+ */
+int s5p_hdmi_audio_init(
+	enum s5p_tvout_audio_codec_type audio_codec,
+	u32 sample_rate, u32 bits, u32 frame_size_code,
+	struct s5p_hdmi_audio *audio)
+{
+#ifdef CONFIG_SND_SAMSUNG_SPDIF
+	s5p_hdmi_audio_set_config(audio_codec);
+	s5p_hdmi_audio_set_repetition_time(audio_codec, bits, frame_size_code);
+	s5p_hdmi_audio_irq_enable(S5P_HDMI_SPDIFIN_IRQ_OVERFLOW_EN);
+	s5p_hdmi_audio_clock_enable();
+#else
+	s5p_hdmi_audio_i2s_config(audio_codec, sample_rate, bits,
+				frame_size_code, audio);
+#endif
+	return 0;
+}
+
+/* Mute (en=true) by showing the blue screen and disabling audio;
+ * unmute reverses both. */
+void s5p_hdmi_reg_mute(bool en)
+{
+	s5p_hdmi_reg_bluescreen(en);
+
+	s5p_hdmi_reg_audio_enable(!en);
+}
+
+/*
+ * Top-level HDMI interrupt handler.
+ *
+ * Reads the flag register under lock_hdmi and dispatches each pending
+ * source to the handler registered via s5p_hdmi_reg_intc_set_isr().
+ * A zero flag register is logged and ignored. Always returns
+ * IRQ_HANDLED, even for spurious or unregistered sources.
+ */
+irqreturn_t s5p_hdmi_irq(int irq, void *dev_id)
+{
+	u8 state, num = 0;
+	unsigned long spin_flags;
+
+	spin_lock_irqsave(&lock_hdmi, spin_flags);
+
+#ifdef CONFIG_HDMI_14A_3D
+	state = readb(hdmi_base + S5P_HDMI_INTC_FLAG0);
+#else
+	state = readb(hdmi_base + S5P_HDMI_INTC_FLAG);
+#endif
+
+	if (!state) {
+		tvout_err("undefined irq : %d\n", state);
+		goto irq_handled;
+	}
+
+	for (num = 0; num < HDMI_IRQ_TOTAL_NUM; num++) {
+
+		if (!(state & (1 << num)))
+			continue;
+
+		if (s5p_hdmi_isr_ftn[num]) {
+			tvout_dbg("call by s5p_hdmi_isr_ftn num : %d\n", num);
+			(s5p_hdmi_isr_ftn[num])(num, NULL);
+		} else
+			tvout_dbg("unregistered irq : %d\n", num);
+	}
+
+irq_handled:
+	spin_unlock_irqrestore(&lock_hdmi, spin_flags);
+
+	return IRQ_HANDLED;
+}
+
+/* Record the mapped HDMI register base and initialize the IRQ lock. */
+void s5p_hdmi_init(void __iomem *hdmi_addr)
+{
+	hdmi_base = hdmi_addr;
+	spin_lock_init(&lock_hdmi);
+}
+
+/* Record the mapped PHY-I2C register base and, if valid, program the
+ * I2C line-control register with its working value (0x5). */
+void s5p_hdmi_phy_init(void __iomem *hdmi_phy_addr)
+{
+	i2c_hdmi_phy_base = hdmi_phy_addr;
+	if (i2c_hdmi_phy_base != NULL)
+		writeb(0x5, i2c_hdmi_phy_base + HDMI_I2C_LC);
+}
+
+/* Apply the output configuration: pixel limiter, preamble and
+ * HDMI/DVI mode select. */
+void s5p_hdmi_reg_output(struct s5p_hdmi_o_reg *reg)
+{
+	writeb(reg->pxl_limit, hdmi_base + S5P_HDMI_CON_1);
+	writeb(reg->preemble, hdmi_base + S5P_HDMI_CON_2);
+	writeb(reg->mode, hdmi_base + S5P_HDMI_MODE_SEL);
+}
+
+/*
+ * Program the transmission mode of every data-island packet type.
+ * AVI/MPG/SPD/GMP/AUI modes are written directly; GCP/ISRC/ACP/ACR
+ * modes are OR-merged into the existing control register contents.
+ *
+ * Fix: the ACR read-modify-write read back S5P_HDMI_ACP_CON (a
+ * copy-paste of the line above) while writing S5P_HDMI_ACR_CON; it now
+ * reads ACR_CON so the merge targets the correct register.
+ */
+void s5p_hdmi_reg_packet_trans(struct s5p_hdmi_o_trans *trans)
+{
+	u8 reg;
+
+	writeb(trans->avi, hdmi_base + S5P_HDMI_AVI_CON);
+	writeb(trans->mpg, hdmi_base + S5P_HDMI_MPG_CON);
+	writeb(trans->spd, hdmi_base + S5P_HDMI_SPD_CON);
+	writeb(trans->gmp, hdmi_base + S5P_HDMI_GAMUT_CON);
+	writeb(trans->aui, hdmi_base + S5P_HDMI_AUI_CON);
+
+	reg = trans->gcp | readb(hdmi_base + S5P_HDMI_GCP_CON);
+	writeb(reg, hdmi_base + S5P_HDMI_GCP_CON);
+
+	reg = trans->isrc | readb(hdmi_base + S5P_HDMI_ISRC_CON);
+	writeb(reg, hdmi_base + S5P_HDMI_ISRC_CON);
+
+	reg = trans->acp | readb(hdmi_base + S5P_HDMI_ACP_CON);
+	writeb(reg, hdmi_base + S5P_HDMI_ACP_CON);
+
+	reg = trans->acr | readb(hdmi_base + S5P_HDMI_ACR_CON);
+	writeb(reg, hdmi_base + S5P_HDMI_ACR_CON);
+}
diff --git a/drivers/media/video/samsung/tvout/hw_if/hw_if.h b/drivers/media/video/samsung/tvout/hw_if/hw_if.h
new file mode 100644
index 0000000..11bda99
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/hw_if/hw_if.h
@@ -0,0 +1,1005 @@
+/* linux/drivers/media/video/samsung/tvout/hw_if/hw_if.h
+ *
+ * Copyright (c) 2010 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Header file for interface of Samsung TVOUT-related hardware
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _SAMSUNG_TVOUT_HW_IF_H_
+#define _SAMSUNG_TVOUT_HW_IF_H_ __FILE__
+
+/*****************************************************************************
+ * This file includes declarations for external functions of
+ * Samsung TVOUT-related hardware. So only external functions
+ * to be used by higher layer must exist in this file.
+ *
+ * Higher layer must use only the declarations included in this file.
+ ****************************************************************************/
+
+#include <linux/irqreturn.h>
+#include <linux/stddef.h>
+
+#include "../s5p_tvout_common_lib.h"
+
+/*****************************************************************************
+ * Common
+ ****************************************************************************/
+enum s5p_tvout_endian {
+ TVOUT_LITTLE_ENDIAN = 0,
+ TVOUT_BIG_ENDIAN = 1
+};
+
+
+
+/*****************************************************************************
+ * for MIXER
+ ****************************************************************************/
+enum s5p_mixer_layer {
+ MIXER_VIDEO_LAYER = 2,
+ MIXER_GPR0_LAYER = 0,
+ MIXER_GPR1_LAYER = 1
+};
+
+enum s5p_mixer_bg_color_num {
+ MIXER_BG_COLOR_0 = 0,
+ MIXER_BG_COLOR_1 = 1,
+ MIXER_BG_COLOR_2 = 2
+};
+
+enum s5p_mixer_color_fmt {
+ MIXER_RGB565 = 4,
+ MIXER_RGB1555 = 5,
+ MIXER_RGB4444 = 6,
+ MIXER_RGB8888 = 7
+};
+
+enum s5p_mixer_rgb {
+ MIXER_RGB601_0_255 = 0,
+ MIXER_RGB601_16_235,
+ MIXER_RGB709_0_255,
+ MIXER_RGB709_16_235
+};
+
+enum s5p_mixer_out_type {
+ MIXER_YUV444,
+ MIXER_RGB888
+};
+
+extern int s5p_mixer_set_show(enum s5p_mixer_layer layer, bool show);
+extern int s5p_mixer_set_priority(enum s5p_mixer_layer layer, u32 priority);
+extern void s5p_mixer_set_pre_mul_mode(enum s5p_mixer_layer layer, bool enable);
+extern int s5p_mixer_set_pixel_blend(enum s5p_mixer_layer layer, bool enable);
+extern int s5p_mixer_set_layer_blend(enum s5p_mixer_layer layer, bool enable);
+extern int s5p_mixer_set_alpha(enum s5p_mixer_layer layer, u32 alpha);
+extern int s5p_mixer_set_grp_base_address(enum s5p_mixer_layer layer,
+ u32 baseaddr);
+extern int s5p_mixer_set_grp_layer_dst_pos(enum s5p_mixer_layer layer,
+ u32 dst_offs_x, u32 dst_offs_y);
+/*
+ * Fix: parameter order now matches the definition in hw_if/mixer.c
+ * (src_offs_x, src_offs_y, span, width, height).  The old prototype
+ * listed (span, width, height, src_offs_x, src_offs_y); since every
+ * parameter is u32 the compiler could not flag callers passing
+ * arguments in the wrong order.
+ */
+extern int s5p_mixer_set_grp_layer_src_pos(enum s5p_mixer_layer layer,
+	u32 src_offs_x, u32 src_offs_y, u32 span, u32 width, u32 height);
+extern void s5p_mixer_set_bg_color(enum s5p_mixer_bg_color_num colornum,
+ u32 color_y, u32 color_cb, u32 color_cr);
+extern void s5p_mixer_set_video_limiter(u32 upper_y, u32 lower_y,
+ u32 upper_c, u32 lower_c, bool enable);
+extern void s5p_mixer_init_status_reg(enum s5p_mixer_burst_mode burst,
+ enum s5p_tvout_endian endian);
+extern int s5p_mixer_init_display_mode(enum s5p_tvout_disp_mode mode,
+ enum s5p_tvout_o_mode output_mode, enum s5p_mixer_rgb
+ rgb_type);
+extern void s5p_mixer_scaling(enum s5p_mixer_layer layer,
+ struct s5ptvfb_user_scaling scaling);
+extern void s5p_mixer_set_color_format(enum s5p_mixer_layer layer,
+ enum s5p_mixer_color_fmt format);
+extern void s5p_mixer_set_chroma_key(enum s5p_mixer_layer layer, bool enabled,
+ u32 key);
+extern void s5p_mixer_init_bg_dither_enable(bool cr_dither_enable,
+ bool cdither_enable, bool y_dither_enable);
+extern void s5p_mixer_init_csc_coef_default(enum s5p_mixer_rgb csc_type);
+extern void s5p_mixer_start(void);
+extern void s5p_mixer_stop(void);
+extern void s5p_mixer_set_underflow_int_enable(enum s5p_mixer_layer layer,
+ bool en);
+extern void s5p_mixer_set_vsync_interrupt(bool);
+extern void s5p_mixer_clear_pend_all(void);
+extern irqreturn_t s5p_mixer_irq(int irq, void *dev_id);
+extern void s5p_mixer_init(void __iomem *addr);
+
+
+/*****************************************************************************
+ * for HDMI
+ ****************************************************************************/
+#define hdmi_mask_8(x) ((x) & 0xFF)
+#define hdmi_mask_16(x) (((x) >> 8) & 0xFF)
+#define hdmi_mask_24(x) (((x) >> 16) & 0xFF)
+#define hdmi_mask_32(x) (((x) >> 24) & 0xFF)
+
+/*
+ * Multi-byte register writers: HDMI registers are byte-wide and laid
+ * out on 32-bit boundaries, so a value is written LSB first at
+ * offsets y, y+4, y+8, y+12.
+ *
+ * Fix: the do { } while (0) bodies are no longer semicolon terminated
+ * (the caller supplies the ';'), so the macros are safe inside
+ * unbraced if/else branches; macro arguments are now parenthesized.
+ */
+#define hdmi_write_16(x, y) \
+	do { \
+		writeb(hdmi_mask_8(x), (y)); \
+		writeb(hdmi_mask_16(x), (y) + 4); \
+	} while (0)
+
+#define hdmi_write_24(x, y) \
+	do { \
+		writeb(hdmi_mask_8(x), (y)); \
+		writeb(hdmi_mask_16(x), (y) + 4); \
+		writeb(hdmi_mask_24(x), (y) + 8); \
+	} while (0)
+
+#define hdmi_write_32(x, y) \
+	do { \
+		writeb(hdmi_mask_8(x), (y)); \
+		writeb(hdmi_mask_16(x), (y) + 4); \
+		writeb(hdmi_mask_24(x), (y) + 8); \
+		writeb(hdmi_mask_32(x), (y) + 12); \
+	} while (0)
+
+/* Copy 'count' bytes from buff into registers base+start, base+start+4, ... */
+#define hdmi_write_l(buff, base, start, count) \
+	do { \
+		u8 *ptr = (buff); \
+		int i = 0; \
+		int a = (start); \
+		do { \
+			writeb(ptr[i], (base) + a); \
+			a += 4; \
+			i++; \
+		} while (i <= ((count) - 1)); \
+	} while (0)
+
+/* Read 'count' bytes from registers base+start, base+start+4, ... into buff */
+#define hdmi_read_l(buff, base, start, count) \
+	do { \
+		u8 *ptr = (buff); \
+		int i = 0; \
+		int a = (start); \
+		do { \
+			ptr[i] = readb((base) + a); \
+			a += 4; \
+			i++; \
+		} while (i <= ((count) - 1)); \
+	} while (0)
+
+/*
+ * Set (en != 0) or clear the bits of 'val' in 'reg'.
+ * Fix: 'val' is parenthesized — the old '~val' mis-negated compound
+ * arguments such as (A | B).
+ */
+#define hdmi_bit_set(en, reg, val) \
+	do { \
+		if (en) \
+			reg |= (val); \
+		else \
+			reg &= ~(val); \
+	} while (0)
+
+enum s5p_hdmi_transmit {
+ HDMI_DO_NOT_TANS,
+ HDMI_TRANS_ONCE,
+ HDMI_TRANS_EVERY_SYNC,
+};
+
+enum s5p_tvout_audio_codec_type {
+ PCM = 1,
+ AC3,
+ MP3,
+ WMA
+};
+
+enum s5p_hdmi_infoframe_type {
+ HDMI_VSI_INFO = 0x81,
+ HDMI_AVI_INFO,
+ HDMI_SPD_INFO,
+ HDMI_AUI_INFO,
+ HDMI_MPG_INFO,
+};
+
+enum s5p_hdmi_color_depth {
+ HDMI_CD_48,
+ HDMI_CD_36,
+ HDMI_CD_30,
+ HDMI_CD_24
+};
+
+enum s5p_hdmi_q_range {
+ HDMI_Q_DEFAULT = 0,
+ HDMI_Q_LIMITED_RANGE,
+ HDMI_Q_FULL_RANGE,
+ HDMI_Q_RESERVED,
+};
+
+enum s5p_hdmi_avi_yq {
+ HDMI_AVI_YQ_LIMITED_RANGE = 0,
+ HDMI_AVI_YQ_FULL_RANGE,
+};
+
+enum s5p_hdmi_interrrupt {
+ HDMI_IRQ_PIN_POLAR_CTL = 7,
+ HDMI_IRQ_GLOBAL = 6,
+ HDMI_IRQ_I2S = 5,
+ HDMI_IRQ_CEC = 4,
+ HDMI_IRQ_HPD_PLUG = 3,
+ HDMI_IRQ_HPD_UNPLUG = 2,
+ HDMI_IRQ_SPDIF = 1,
+ HDMI_IRQ_HDCP = 0
+};
+
+enum phy_freq {
+ ePHY_FREQ_25_200,
+ ePHY_FREQ_25_175,
+ ePHY_FREQ_27,
+ ePHY_FREQ_27_027,
+ ePHY_FREQ_54,
+ ePHY_FREQ_54_054,
+ ePHY_FREQ_74_250,
+ ePHY_FREQ_74_176,
+ ePHY_FREQ_148_500,
+ ePHY_FREQ_148_352,
+ ePHY_FREQ_108_108,
+ ePHY_FREQ_72,
+ ePHY_FREQ_25,
+ ePHY_FREQ_65,
+ ePHY_FREQ_108,
+ ePHY_FREQ_162
+};
+
+struct s5p_hdmi_infoframe {
+ enum s5p_hdmi_infoframe_type type;
+ u8 version;
+ u8 length;
+};
+
+struct s5p_hdmi_o_trans {
+ enum s5p_hdmi_transmit avi;
+ enum s5p_hdmi_transmit mpg;
+ enum s5p_hdmi_transmit spd;
+ enum s5p_hdmi_transmit gcp;
+ enum s5p_hdmi_transmit gmp;
+ enum s5p_hdmi_transmit isrc;
+ enum s5p_hdmi_transmit acp;
+ enum s5p_hdmi_transmit aui;
+ enum s5p_hdmi_transmit acr;
+};
+
+struct s5p_hdmi_o_reg {
+ u8 pxl_fmt;
+ u8 preemble;
+ u8 mode;
+ u8 pxl_limit;
+ u8 dvi;
+};
+
+struct s5p_hdmi_v_frame {
+#ifdef CONFIG_HDMI_14A_3D
+ u32 vH_Line;
+ u32 vV_Line;
+ u32 vH_SYNC_START;
+ u32 vH_SYNC_END;
+ u32 vV1_Blank;
+ u32 vV2_Blank;
+ u16 vHBlank;
+ u32 VBLANK_F0;
+ u32 VBLANK_F1;
+ u32 vVSYNC_LINE_BEF_1;
+ u32 vVSYNC_LINE_BEF_2;
+ u32 vVSYNC_LINE_AFT_1;
+ u32 vVSYNC_LINE_AFT_2;
+ u32 vVSYNC_LINE_AFT_PXL_1;
+ u32 vVSYNC_LINE_AFT_PXL_2;
+ u32 vVACT_SPACE_1;
+ u32 vVACT_SPACE_2;
+ u8 Hsync_polarity;
+ u8 Vsync_polarity;
+ u8 interlaced;
+ u8 vAVI_VIC;
+ u8 vAVI_VIC_16_9;
+ u8 repetition;
+#else
+ u8 vic;
+ u8 vic_16_9;
+ u8 repetition;
+ u8 polarity;
+ u8 i_p;
+
+ u16 h_active;
+ u16 v_active;
+
+ u16 h_total;
+ u16 h_blank;
+
+ u16 v_total;
+ u16 v_blank;
+#endif
+ enum phy_freq pixel_clock;
+};
+
+enum s5p_hdmi_audio_type {
+ HDMI_GENERIC_AUDIO,
+ HDMI_60958_AUDIO,
+ HDMI_DVD_AUDIO,
+ HDMI_SUPER_AUDIO,
+};
+
+struct s5p_hdmi_audio {
+ enum s5p_hdmi_audio_type type;
+ u32 freq;
+ u32 bit;
+ u32 channel;
+
+ u8 on;
+};
+
+struct s5p_hdmi_tg_sync {
+ u16 begin;
+ u16 end;
+};
+
+struct s5p_hdmi_v_format {
+ struct s5p_hdmi_v_frame frame;
+
+#ifdef CONFIG_HDMI_14A_3D
+ u16 tg_H_FSZ;
+ u16 tg_HACT_ST;
+ u16 tg_HACT_SZ;
+ u16 tg_V_FSZ;
+ u16 tg_VSYNC;
+ u16 tg_VSYNC2;
+ u16 tg_VACT_ST;
+ u16 tg_VACT_SZ;
+ u16 tg_FIELD_CHG;
+ u16 tg_VACT_ST2;
+ u16 tg_VACT_ST3;
+ u16 tg_VACT_ST4;
+ u16 tg_VSYNC_TOP_HDMI;
+ u16 tg_VSYNC_BOT_HDMI;
+ u16 tg_FIELD_TOP_HDMI;
+ u16 tg_FIELD_BOT_HDMI;
+#else
+ struct s5p_hdmi_tg_sync h_sync;
+ struct s5p_hdmi_tg_sync v_sync_top;
+ struct s5p_hdmi_tg_sync v_sync_bottom;
+ struct s5p_hdmi_tg_sync v_sync_h_pos;
+
+ struct s5p_hdmi_tg_sync v_blank_f;
+#endif
+ u8 mhl_hsync;
+ u8 mhl_vsync;
+};
+
+extern int s5p_hdmi_phy_power(bool on);
+extern s32 s5p_hdmi_phy_config(
+ enum phy_freq freq, enum s5p_hdmi_color_depth cd);
+
+extern void s5p_hdmi_set_gcp(enum s5p_hdmi_color_depth depth, u8 *gcp);
+extern void s5p_hdmi_reg_acr(u8 *acr);
+extern void s5p_hdmi_reg_asp(u8 *asp, struct s5p_hdmi_audio *audio);
+extern void s5p_hdmi_reg_gcp(u8 i_p, u8 *gcp);
+extern void s5p_hdmi_reg_acp(u8 *header, u8 *acp);
+extern void s5p_hdmi_reg_isrc(u8 *isrc1, u8 *isrc2);
+extern void s5p_hdmi_reg_gmp(u8 *gmp);
+#ifdef CONFIG_HDMI_14A_3D
+extern void s5p_hdmi_reg_infoframe(
+ struct s5p_hdmi_infoframe *info, u8 *data, u8 type_3D);
+extern void s5p_hdmi_reg_tg(struct s5p_hdmi_v_format *v);
+#else
+extern void s5p_hdmi_reg_infoframe(struct s5p_hdmi_infoframe *info, u8 *data);
+extern void s5p_hdmi_reg_tg(struct s5p_hdmi_v_frame *frame);
+#endif
+extern void s5p_hdmi_reg_v_timing(struct s5p_hdmi_v_format *v);
+#ifdef CONFIG_HDMI_14A_3D
+extern void s5p_hdmi_reg_bluescreen_clr(u16 b, u16 g, u16 r);
+#else
+extern void s5p_hdmi_reg_bluescreen_clr(u8 cb_b, u8 y_g, u8 cr_r);
+#endif
+extern void s5p_hdmi_reg_bluescreen(bool en);
+extern void s5p_hdmi_reg_clr_range(u8 y_min, u8 y_max, u8 c_min, u8 c_max);
+extern void s5p_hdmi_reg_tg_cmd(bool time, bool bt656, bool tg);
+extern void s5p_hdmi_reg_enable(bool en);
+extern u8 s5p_hdmi_reg_intc_status(void);
+extern u8 s5p_hdmi_reg_intc_get_enabled(void);
+extern void s5p_hdmi_reg_intc_clear_pending(enum s5p_hdmi_interrrupt intr);
+extern void s5p_hdmi_reg_sw_hpd_enable(bool enable);
+extern void s5p_hdmi_reg_set_hpd_onoff(bool on_off);
+extern u8 s5p_hdmi_reg_get_hpd_status(void);
+extern void s5p_hdmi_reg_hpd_gen(void);
+extern int s5p_hdmi_reg_intc_set_isr(irqreturn_t (*isr)(int, void *), u8 num);
+extern void s5p_hdmi_reg_intc_enable(enum s5p_hdmi_interrrupt intr, u8 en);
+#ifdef CONFIG_HDMI_EARJACK_MUTE
+extern bool hdmi_audio_ext;
+#endif
+extern void s5p_hdmi_reg_audio_enable(u8 en);
+extern int s5p_hdmi_audio_init(
+ enum s5p_tvout_audio_codec_type audio_codec,
+ u32 sample_rate, u32 bits, u32 frame_size_code,
+ struct s5p_hdmi_audio *audio);
+extern irqreturn_t s5p_hdmi_irq(int irq, void *dev_id);
+extern void s5p_hdmi_init(void __iomem *hdmi_addr);
+extern void s5p_hdmi_phy_init(void __iomem *hdmi_phy_addr);
+extern void s5p_hdmi_reg_output(struct s5p_hdmi_o_reg *reg);
+extern void s5p_hdmi_reg_packet_trans(struct s5p_hdmi_o_trans *trans);
+extern void s5p_hdmi_reg_mute(bool en);
+
+
+
+
+
+/*****************************************************************************
+ * for SDO
+ ****************************************************************************/
+#ifdef CONFIG_ANALOG_TVENC
+
+enum s5p_sdo_level {
+ SDO_LEVEL_0IRE,
+ SDO_LEVEL_75IRE
+};
+
+enum s5p_sdo_vsync_ratio {
+ SDO_VTOS_RATIO_10_4,
+ SDO_VTOS_RATIO_7_3
+};
+
+enum s5p_sdo_order {
+ SDO_O_ORDER_COMPONENT_RGB_PRYPB,
+ SDO_O_ORDER_COMPONENT_RBG_PRPBY,
+ SDO_O_ORDER_COMPONENT_BGR_PBYPR,
+ SDO_O_ORDER_COMPONENT_BRG_PBPRY,
+ SDO_O_ORDER_COMPONENT_GRB_YPRPB,
+ SDO_O_ORDER_COMPONENT_GBR_YPBPR,
+ SDO_O_ORDER_COMPOSITE_CVBS_Y_C,
+ SDO_O_ORDER_COMPOSITE_CVBS_C_Y,
+ SDO_O_ORDER_COMPOSITE_Y_C_CVBS,
+ SDO_O_ORDER_COMPOSITE_Y_CVBS_C,
+ SDO_O_ORDER_COMPOSITE_C_CVBS_Y,
+ SDO_O_ORDER_COMPOSITE_C_Y_CVBS
+};
+
+enum s5p_sdo_sync_sig_pin {
+ SDO_SYNC_SIG_NO,
+ SDO_SYNC_SIG_YG,
+ SDO_SYNC_SIG_ALL
+};
+
+enum s5p_sdo_closed_caption_type {
+ SDO_NO_INS,
+ SDO_INS_1,
+ SDO_INS_2,
+ SDO_INS_OTHERS
+};
+
+enum s5p_sdo_525_copy_permit {
+ SDO_525_COPY_PERMIT,
+ SDO_525_ONECOPY_PERMIT,
+ SDO_525_NOCOPY_PERMIT
+};
+
+enum s5p_sdo_525_mv_psp {
+ SDO_525_MV_PSP_OFF,
+ SDO_525_MV_PSP_ON_2LINE_BURST,
+ SDO_525_MV_PSP_ON_BURST_OFF,
+ SDO_525_MV_PSP_ON_4LINE_BURST,
+};
+
+enum s5p_sdo_525_copy_info {
+ SDO_525_COPY_INFO,
+ SDO_525_DEFAULT,
+};
+
+enum s5p_sdo_525_aspect_ratio {
+ SDO_525_4_3_NORMAL,
+ SDO_525_16_9_ANAMORPIC,
+ SDO_525_4_3_LETTERBOX
+};
+
+enum s5p_sdo_625_subtitles {
+ SDO_625_NO_OPEN_SUBTITLES,
+ SDO_625_INACT_OPEN_SUBTITLES,
+ SDO_625_OUTACT_OPEN_SUBTITLES
+};
+
+enum s5p_sdo_625_camera_film {
+ SDO_625_CAMERA,
+ SDO_625_FILM
+};
+
+enum s5p_sdo_625_color_encoding {
+ SDO_625_NORMAL_PAL,
+ SDO_625_MOTION_ADAPTIVE_COLORPLUS
+};
+
+enum s5p_sdo_625_aspect_ratio {
+ SDO_625_4_3_FULL_576,
+ SDO_625_14_9_LETTERBOX_CENTER_504,
+ SDO_625_14_9_LETTERBOX_TOP_504,
+ SDO_625_16_9_LETTERBOX_CENTER_430,
+ SDO_625_16_9_LETTERBOX_TOP_430,
+ SDO_625_16_9_LETTERBOX_CENTER,
+ SDO_625_14_9_FULL_CENTER_576,
+ SDO_625_16_9_ANAMORPIC_576
+};
+
+struct s5p_sdo_cvbs_compensation {
+ bool cvbs_color_compen;
+ u32 y_lower_mid;
+ u32 y_bottom;
+ u32 y_top;
+ u32 y_upper_mid;
+ u32 radius;
+};
+
+struct s5p_sdo_bright_hue_saturation {
+ bool bright_hue_sat_adj;
+ u32 gain_brightness;
+ u32 offset_brightness;
+ u32 gain0_cb_hue_sat;
+ u32 gain1_cb_hue_sat;
+ u32 gain0_cr_hue_sat;
+ u32 gain1_cr_hue_sat;
+ u32 offset_cb_hue_sat;
+ u32 offset_cr_hue_sat;
+};
+
+struct s5p_sdo_525_data {
+ bool analog_on;
+ enum s5p_sdo_525_copy_permit copy_permit;
+ enum s5p_sdo_525_mv_psp mv_psp;
+ enum s5p_sdo_525_copy_info copy_info;
+ enum s5p_sdo_525_aspect_ratio display_ratio;
+};
+
+struct s5p_sdo_625_data {
+ bool surround_sound;
+ bool copyright;
+ bool copy_protection;
+ bool text_subtitles;
+ enum s5p_sdo_625_subtitles open_subtitles;
+ enum s5p_sdo_625_camera_film camera_film;
+ enum s5p_sdo_625_color_encoding color_encoding;
+ bool helper_signal;
+ enum s5p_sdo_625_aspect_ratio display_ratio;
+};
+
+extern int s5p_sdo_set_video_scale_cfg(
+ enum s5p_sdo_level composite_level,
+ enum s5p_sdo_vsync_ratio composite_ratio);
+extern int s5p_sdo_set_vbi(
+ bool wss_cvbs, enum s5p_sdo_closed_caption_type caption_cvbs);
+extern void s5p_sdo_set_offset_gain(u32 offset, u32 gain);
+extern void s5p_sdo_set_delay(
+ u32 delay_y, u32 offset_video_start, u32 offset_video_end);
+extern void s5p_sdo_set_schlock(bool color_sucarrier_pha_adj);
+extern void s5p_sdo_set_brightness_hue_saturation(
+ struct s5p_sdo_bright_hue_saturation bri_hue_sat);
+extern void s5p_sdo_set_cvbs_color_compensation(
+ struct s5p_sdo_cvbs_compensation cvbs_comp);
+extern void s5p_sdo_set_component_porch(
+ u32 back_525, u32 front_525, u32 back_625, u32 front_625);
+extern void s5p_sdo_set_ch_xtalk_cancel_coef(u32 coeff2, u32 coeff1);
+extern void s5p_sdo_set_closed_caption(u32 display_cc, u32 non_display_cc);
+
+extern int s5p_sdo_set_wss525_data(struct s5p_sdo_525_data wss525);
+extern int s5p_sdo_set_wss625_data(struct s5p_sdo_625_data wss625);
+extern int s5p_sdo_set_cgmsa525_data(struct s5p_sdo_525_data cgmsa525);
+extern int s5p_sdo_set_cgmsa625_data(struct s5p_sdo_625_data cgmsa625);
+extern int s5p_sdo_set_display_mode(
+ enum s5p_tvout_disp_mode disp_mode, enum s5p_sdo_order order);
+extern void s5p_sdo_clock_on(bool on);
+extern void s5p_sdo_dac_on(bool on);
+extern void s5p_sdo_sw_reset(bool active);
+extern void s5p_sdo_set_interrupt_enable(bool vsync_intc_en);
+extern void s5p_sdo_clear_interrupt_pending(void);
+extern void s5p_sdo_init(void __iomem *addr);
+#endif
+
+/*****************************************************************************
+ * for VP
+ ****************************************************************************/
+enum s5p_vp_field {
+ VP_TOP_FIELD,
+ VP_BOTTOM_FIELD
+};
+
+enum s5p_vp_line_eq {
+ VP_LINE_EQ_0,
+ VP_LINE_EQ_1,
+ VP_LINE_EQ_2,
+ VP_LINE_EQ_3,
+ VP_LINE_EQ_4,
+ VP_LINE_EQ_5,
+ VP_LINE_EQ_6,
+ VP_LINE_EQ_7,
+ VP_LINE_EQ_DEFAULT
+};
+
+enum s5p_vp_mem_type {
+ VP_YUV420_NV12,
+ VP_YUV420_NV21
+};
+
+enum s5p_vp_mem_mode {
+ VP_LINEAR_MODE,
+ VP_2D_TILE_MODE
+};
+
+enum s5p_vp_chroma_expansion {
+ VP_C_TOP,
+ VP_C_TOP_BOTTOM
+};
+
+enum s5p_vp_pxl_rate {
+ VP_PXL_PER_RATE_1_1,
+ VP_PXL_PER_RATE_1_2,
+ VP_PXL_PER_RATE_1_3,
+ VP_PXL_PER_RATE_1_4
+};
+
+enum s5p_vp_sharpness_control {
+ VP_SHARPNESS_NO,
+ VP_SHARPNESS_MIN,
+ VP_SHARPNESS_MOD,
+ VP_SHARPNESS_MAX
+};
+
+enum s5p_vp_csc_type {
+ VP_CSC_SD_HD,
+ VP_CSC_HD_SD
+};
+
+enum s5p_vp_csc_coeff {
+ VP_CSC_Y2Y_COEF,
+ VP_CSC_CB2Y_COEF,
+ VP_CSC_CR2Y_COEF,
+ VP_CSC_Y2CB_COEF,
+ VP_CSC_CB2CB_COEF,
+ VP_CSC_CR2CB_COEF,
+ VP_CSC_Y2CR_COEF,
+ VP_CSC_CB2CR_COEF,
+ VP_CSC_CR2CR_COEF
+};
+
+
+extern void s5p_vp_set_poly_filter_coef_default(
+ u32 src_width, u32 src_height,
+ u32 dst_width, u32 dst_height, bool ipc_2d);
+extern void s5p_vp_set_field_id(enum s5p_vp_field mode);
+extern int s5p_vp_set_top_field_address(u32 top_y_addr, u32 top_c_addr);
+extern int s5p_vp_set_bottom_field_address(
+ u32 bottom_y_addr, u32 bottom_c_addr);
+extern int s5p_vp_set_img_size(u32 img_width, u32 img_height);
+extern void s5p_vp_set_src_position(
+ u32 src_off_x, u32 src_x_fract_step, u32 src_off_y);
+extern void s5p_vp_set_dest_position(u32 dst_off_x, u32 dst_off_y);
+extern void s5p_vp_set_src_dest_size(
+ u32 src_width, u32 src_height,
+ u32 dst_width, u32 dst_height, bool ipc_2d);
+extern void s5p_vp_set_op_mode(
+ bool line_skip,
+ enum s5p_vp_mem_type mem_type,
+ enum s5p_vp_mem_mode mem_mode,
+ enum s5p_vp_chroma_expansion chroma_exp,
+ bool auto_toggling);
+extern void s5p_vp_set_pixel_rate_control(enum s5p_vp_pxl_rate rate);
+extern void s5p_vp_set_endian(enum s5p_tvout_endian endian);
+extern void s5p_vp_set_bypass_post_process(bool bypass);
+extern void s5p_vp_set_saturation(u32 sat);
+extern void s5p_vp_set_sharpness(
+ u32 th_h_noise, enum s5p_vp_sharpness_control sharpness);
+extern void s5p_vp_set_brightness_contrast(u16 b, u8 c);
+extern void s5p_vp_set_brightness_offset(u32 offset);
+extern int s5p_vp_set_brightness_contrast_control(
+ enum s5p_vp_line_eq eq_num, u32 intc, u32 slope);
+extern void s5p_vp_set_csc_control(bool sub_y_offset_en, bool csc_en);
+extern int s5p_vp_set_csc_coef(enum s5p_vp_csc_coeff csc_coeff, u32 coeff);
+extern int s5p_vp_set_csc_coef_default(enum s5p_vp_csc_type csc_type);
+extern int s5p_vp_update(void);
+extern int s5p_vp_get_update_status(void);
+extern void s5p_vp_sw_reset(void);
+extern int s5p_vp_start(void);
+extern int s5p_vp_stop(void);
+extern void s5p_vp_init(void __iomem *addr);
+
+/*****************************************************************************
+ * for CEC
+ ****************************************************************************/
+enum cec_state {
+ STATE_RX,
+ STATE_TX,
+ STATE_DONE,
+ STATE_ERROR
+};
+
+struct cec_rx_struct {
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ atomic_t state;
+ u8 *buffer;
+ unsigned int size;
+};
+
+struct cec_tx_struct {
+ wait_queue_head_t waitq;
+ atomic_t state;
+};
+
+extern struct cec_rx_struct cec_rx_struct;
+extern struct cec_tx_struct cec_tx_struct;
+
+void s5p_cec_set_divider(void);
+void s5p_cec_enable_rx(void);
+void s5p_cec_mask_rx_interrupts(void);
+void s5p_cec_unmask_rx_interrupts(void);
+void s5p_cec_mask_tx_interrupts(void);
+void s5p_cec_unmask_tx_interrupts(void);
+void s5p_cec_reset(void);
+void s5p_cec_tx_reset(void);
+void s5p_cec_rx_reset(void);
+void s5p_cec_threshold(void);
+void s5p_cec_set_tx_state(enum cec_state state);
+void s5p_cec_set_rx_state(enum cec_state state);
+void s5p_cec_copy_packet(char *data, size_t count);
+void s5p_cec_set_addr(u32 addr);
+u32 s5p_cec_get_status(void);
+void s5p_clr_pending_tx(void);
+void s5p_clr_pending_rx(void);
+void s5p_cec_get_rx_buf(u32 size, u8 *buffer);
+int __init s5p_cec_mem_probe(struct platform_device *pdev);
+
+
+
+/*****************************************************************************
+ * for HDCP
+ ****************************************************************************/
+extern int s5p_hdcp_encrypt_stop(bool on);
+extern int __init s5p_hdcp_init(void);
+extern int s5p_hdcp_start(void);
+extern int s5p_hdcp_stop(void);
+
+/****************************************
+ * Definitions for sdo ctrl class
+ ***************************************/
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+#define BUSFREQ_400MHZ 400200
+#define BUSFREQ_133MHZ 133133
+#endif
+
+#ifdef CONFIG_ANALOG_TVENC
+
+enum {
+ SDO_PCLK = 0,
+ SDO_MUX,
+ SDO_NO_OF_CLK
+};
+
+struct s5p_sdo_vscale_cfg {
+ enum s5p_sdo_level composite_level;
+ enum s5p_sdo_vsync_ratio composite_ratio;
+};
+
+struct s5p_sdo_vbi {
+ bool wss_cvbs;
+ enum s5p_sdo_closed_caption_type caption_cvbs;
+};
+
+struct s5p_sdo_offset_gain {
+ u32 offset;
+ u32 gain;
+};
+
+struct s5p_sdo_delay {
+ u32 delay_y;
+ u32 offset_video_start;
+ u32 offset_video_end;
+};
+
+struct s5p_sdo_component_porch {
+ u32 back_525;
+ u32 front_525;
+ u32 back_625;
+ u32 front_625;
+};
+
+struct s5p_sdo_ch_xtalk_cancellat_coeff {
+ u32 coeff1;
+ u32 coeff2;
+};
+
+struct s5p_sdo_closed_caption {
+ u32 display_cc;
+ u32 nondisplay_cc;
+};
+
+#endif
+
+
+
+/****************************************
+ * Definitions for hdmi ctrl class
+ ***************************************/
+
+#define AVI_SAME_WITH_PICTURE_AR (0x1<<3)
+
+enum {
+ HDMI_PCLK = 0,
+ HDMI_MUX,
+ HDMI_NO_OF_CLK
+};
+
+enum {
+ HDMI = 0,
+ HDMI_PHY,
+ HDMI_NO_OF_MEM_RES
+};
+
+enum s5p_hdmi_pic_aspect {
+ HDMI_PIC_RATIO_4_3 = 1,
+ HDMI_PIC_RATIO_16_9 = 2
+};
+
+enum s5p_hdmi_colorimetry {
+ HDMI_CLRIMETRY_NO = 0x00,
+ HDMI_CLRIMETRY_601 = 0x40,
+ HDMI_CLRIMETRY_709 = 0x80,
+ HDMI_CLRIMETRY_X_VAL = 0xc0,
+};
+
+enum s5p_hdmi_v_mode {
+ v640x480p_60Hz,
+ v720x480p_60Hz,
+ v1280x720p_60Hz,
+ v1920x1080i_60Hz,
+ v720x480i_60Hz,
+ v720x240p_60Hz,
+ v2880x480i_60Hz,
+ v2880x240p_60Hz,
+ v1440x480p_60Hz,
+ v1920x1080p_60Hz,
+ v720x576p_50Hz,
+ v1280x720p_50Hz,
+ v1920x1080i_50Hz,
+ v720x576i_50Hz,
+ v720x288p_50Hz,
+ v2880x576i_50Hz,
+ v2880x288p_50Hz,
+ v1440x576p_50Hz,
+ v1920x1080p_50Hz,
+ v1920x1080p_24Hz,
+ v1920x1080p_25Hz,
+ v1920x1080p_30Hz,
+ v2880x480p_60Hz,
+ v2880x576p_50Hz,
+ v1920x1080i_50Hz_1250,
+ v1920x1080i_100Hz,
+ v1280x720p_100Hz,
+ v720x576p_100Hz,
+ v720x576i_100Hz,
+ v1920x1080i_120Hz,
+ v1280x720p_120Hz,
+ v720x480p_120Hz,
+ v720x480i_120Hz,
+ v720x576p_200Hz,
+ v720x576i_200Hz,
+ v720x480p_240Hz,
+ v720x480i_240Hz,
+ v720x480p_59Hz,
+ v1280x720p_59Hz,
+ v1920x1080i_59Hz,
+ v1920x1080p_59Hz,
+#ifdef CONFIG_HDMI_14A_3D
+ v1280x720p_60Hz_SBS_HALF,
+ v1280x720p_59Hz_SBS_HALF,
+ v1280x720p_50Hz_TB,
+ v1920x1080p_24Hz_TB,
+ v1920x1080p_23Hz_TB,
+#endif
+};
+
+#ifdef CONFIG_HDMI_14A_3D
+struct s5p_hdmi_bluescreen {
+ bool enable;
+ u16 b;
+ u16 g;
+ u16 r;
+};
+#else
+struct s5p_hdmi_bluescreen {
+ bool enable;
+ u8 cb_b;
+ u8 y_g;
+ u8 cr_r;
+};
+#endif
+
+struct s5p_hdmi_packet {
+ u8 acr[7];
+ u8 asp[7];
+ u8 gcp[7];
+ u8 acp[28];
+ u8 isrc1[16];
+ u8 isrc2[16];
+ u8 obas[7];
+ u8 dst[28];
+ u8 gmp[28];
+
+ u8 spd_vendor[8];
+ u8 spd_product[16];
+
+ u8 vsi[27];
+ u8 avi[27];
+ u8 spd[27];
+ u8 aui[27];
+ u8 mpg[27];
+
+ struct s5p_hdmi_infoframe vsi_info;
+ struct s5p_hdmi_infoframe avi_info;
+ struct s5p_hdmi_infoframe spd_info;
+ struct s5p_hdmi_infoframe aui_info;
+ struct s5p_hdmi_infoframe mpg_info;
+
+ u8 h_asp[3];
+ u8 h_acp[3];
+ u8 h_isrc[3];
+};
+
+struct s5p_hdmi_color_range {
+ u8 y_min;
+ u8 y_max;
+ u8 c_min;
+ u8 c_max;
+};
+
+struct s5p_hdmi_tg {
+ bool correction_en;
+ bool bt656_en;
+};
+
+struct s5p_hdmi_video {
+ struct s5p_hdmi_color_range color_r;
+ enum s5p_hdmi_pic_aspect aspect;
+ enum s5p_hdmi_colorimetry colorimetry;
+ enum s5p_hdmi_color_depth depth;
+ enum s5p_hdmi_q_range q_range;
+};
+
+struct s5p_hdmi_o_params {
+ struct s5p_hdmi_o_trans trans;
+ struct s5p_hdmi_o_reg reg;
+};
+
+struct s5p_hdmi_ctrl_private_data {
+ u8 vendor[8];
+ u8 product[16];
+
+ enum s5p_tvout_o_mode out;
+ enum s5p_hdmi_v_mode mode;
+
+ struct s5p_hdmi_bluescreen blue_screen;
+ struct s5p_hdmi_packet packet;
+ struct s5p_hdmi_tg tg;
+ struct s5p_hdmi_audio audio;
+ struct s5p_hdmi_video video;
+
+ bool hpd_status;
+ bool hdcp_en;
+
+ bool av_mute;
+
+ bool running;
+ char *pow_name;
+ struct s5p_tvout_clk_info clk[HDMI_NO_OF_CLK];
+ struct reg_mem_info reg_mem[HDMI_NO_OF_MEM_RES];
+ struct irq_info irq;
+};
+
+/****************************************
+ * Definitions for tvif ctrl class
+ ***************************************/
+struct s5p_tvif_ctrl_private_data {
+ enum s5p_tvout_disp_mode curr_std;
+ enum s5p_tvout_o_mode curr_if;
+
+ bool running;
+
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+ struct device *bus_dev; /* for BusFreq with Opp */
+#endif
+ struct device *dev; /* hpd device pointer */
+};
+
+#endif /* _SAMSUNG_TVOUT_HW_IF_H_ */
diff --git a/drivers/media/video/samsung/tvout/hw_if/mixer.c b/drivers/media/video/samsung/tvout/hw_if/mixer.c
new file mode 100644
index 0000000..b2753f2
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/hw_if/mixer.c
@@ -0,0 +1,874 @@
+/* linux/drivers/media/video/samsung/tvout/hw_if/mixer.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Mixer raw ftn file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/sched.h>
+
+#include <mach/regs-mixer.h>
+
+#include "../s5p_tvout_common_lib.h"
+#include "../s5p_tvout_ctrl.h"
+#include "hw_if.h"
+
+#undef tvout_dbg
+
+#ifdef CONFIG_MIXER_DEBUG
+#define tvout_dbg(fmt, ...) \
+ printk(KERN_INFO "\t[MIXER] %s(): " fmt, \
+ __func__, ##__VA_ARGS__)
+#else
+#define tvout_dbg(fmt, ...)
+#endif
+
+void __iomem *mixer_base;
+spinlock_t lock_mixer;
+
+
+extern int s5p_vp_ctrl_get_src_addr(u32* top_y_addr, u32* top_c_addr);
+/*
+ * Show or hide one mixer layer by setting/clearing its enable bit in
+ * the mixer configuration register (S5P_MXR_CFG).
+ *
+ * Returns 0 on success, -1 for an unknown layer id.
+ */
+int s5p_mixer_set_show(enum s5p_mixer_layer layer, bool show)
+{
+	u32 mxr_config;
+
+	tvout_dbg("%d, %d\n", layer, show);
+
+	switch (layer) {
+	case MIXER_VIDEO_LAYER:
+		mxr_config = (show) ?
+				(readl(mixer_base + S5P_MXR_CFG) |
+					S5P_MXR_CFG_VIDEO_ENABLE) :
+				(readl(mixer_base + S5P_MXR_CFG) &
+					~S5P_MXR_CFG_VIDEO_ENABLE);
+		break;
+
+	case MIXER_GPR0_LAYER:
+		mxr_config = (show) ?
+				(readl(mixer_base + S5P_MXR_CFG) |
+					S5P_MXR_CFG_GRAPHIC0_ENABLE) :
+				(readl(mixer_base + S5P_MXR_CFG) &
+					~S5P_MXR_CFG_GRAPHIC0_ENABLE);
+		break;
+
+	case MIXER_GPR1_LAYER:
+		mxr_config = (show) ?
+				(readl(mixer_base + S5P_MXR_CFG) |
+					S5P_MXR_CFG_GRAPHIC1_ENABLE) :
+				(readl(mixer_base + S5P_MXR_CFG) &
+					~S5P_MXR_CFG_GRAPHIC1_ENABLE);
+		break;
+
+	default:
+		tvout_err("invalid layer parameter = %d\n", layer);
+		return -1;
+	}
+
+	writel(mxr_config, mixer_base + S5P_MXR_CFG);
+
+	return 0;
+}
+
+/*
+ * Set the blending priority of one mixer layer via a read-modify-write
+ * of S5P_MXR_LAYER_CFG: the layer's priority field is cleared with the
+ * matching *_PRIORITY_CLR() helper, then the new value is OR-ed in.
+ *
+ * Returns 0 on success, -1 for an unknown layer id.
+ */
+int s5p_mixer_set_priority(enum s5p_mixer_layer layer, u32 priority)
+{
+	u32 layer_cfg;
+
+	tvout_dbg("%d, %d\n", layer, priority);
+
+	switch (layer) {
+	case MIXER_VIDEO_LAYER:
+		layer_cfg = S5P_MXR_LAYER_CFG_VID_PRIORITY_CLR(
+			readl(mixer_base + S5P_MXR_LAYER_CFG)) |
+			S5P_MXR_LAYER_CFG_VID_PRIORITY(priority);
+		break;
+
+	case MIXER_GPR0_LAYER:
+		layer_cfg = S5P_MXR_LAYER_CFG_GRP0_PRIORITY_CLR(
+			readl(mixer_base + S5P_MXR_LAYER_CFG)) |
+			S5P_MXR_LAYER_CFG_GRP0_PRIORITY(priority);
+		break;
+
+	case MIXER_GPR1_LAYER:
+		layer_cfg = S5P_MXR_LAYER_CFG_GRP1_PRIORITY_CLR(
+			readl(mixer_base + S5P_MXR_LAYER_CFG)) |
+			S5P_MXR_LAYER_CFG_GRP1_PRIORITY(priority);
+		break;
+
+	default:
+		tvout_err("invalid layer parameter = %d\n", layer);
+		return -1;
+	}
+
+	writel(layer_cfg, mixer_base + S5P_MXR_LAYER_CFG);
+
+	return 0;
+}
+
+/*
+ * Enable/disable pre-multiplied-alpha mode for a graphic layer by
+ * toggling S5P_MXR_PRE_MUL_MODE in the layer's GRAPHICn_CFG register.
+ * The video layer has no such mode and is silently ignored.
+ */
+void s5p_mixer_set_pre_mul_mode(enum s5p_mixer_layer layer, bool enable)
+{
+	u32 reg;
+
+	switch (layer) {
+	case MIXER_GPR0_LAYER:
+		reg = readl(mixer_base + S5P_MXR_GRAPHIC0_CFG);
+
+		if (enable)
+			reg |= S5P_MXR_PRE_MUL_MODE;
+		else
+			reg &= ~S5P_MXR_PRE_MUL_MODE;
+
+		writel(reg, mixer_base + S5P_MXR_GRAPHIC0_CFG);
+		break;
+	case MIXER_GPR1_LAYER:
+		reg = readl(mixer_base + S5P_MXR_GRAPHIC1_CFG);
+
+		if (enable)
+			reg |= S5P_MXR_PRE_MUL_MODE;
+		else
+			reg &= ~S5P_MXR_PRE_MUL_MODE;
+
+		writel(reg, mixer_base + S5P_MXR_GRAPHIC1_CFG);
+		break;
+	case MIXER_VIDEO_LAYER:
+		/* not applicable to the video layer — intentional no-op */
+		break;
+	}
+}
+
+/*
+ * Enable/disable per-pixel alpha blending for a graphic layer
+ * (GRAPHICn_CFG).  The enable bit is cleared first, then either the
+ * ENABLE or DISABLE constant is OR-ed in.
+ * NOTE(review): OR-ing S5P_MXR_PIXEL_BLEND_DISABLE only works if that
+ * constant is 0 — confirm against the register definitions.
+ *
+ * Returns 0 on success, -1 for an unknown/unsupported layer id
+ * (the video layer has no per-pixel blend control).
+ */
+int s5p_mixer_set_pixel_blend(enum s5p_mixer_layer layer, bool enable)
+{
+	u32 temp_reg;
+
+	tvout_dbg("%d, %d\n", layer, enable);
+
+	switch (layer) {
+	case MIXER_GPR0_LAYER:
+		temp_reg = readl(mixer_base + S5P_MXR_GRAPHIC0_CFG)
+			& (~S5P_MXR_PIXEL_BLEND_ENABLE) ;
+
+		if (enable)
+			temp_reg |= S5P_MXR_PIXEL_BLEND_ENABLE;
+		else
+			temp_reg |= S5P_MXR_PIXEL_BLEND_DISABLE;
+
+		writel(temp_reg, mixer_base + S5P_MXR_GRAPHIC0_CFG);
+		break;
+
+	case MIXER_GPR1_LAYER:
+		temp_reg = readl(mixer_base + S5P_MXR_GRAPHIC1_CFG)
+			& (~S5P_MXR_PIXEL_BLEND_ENABLE) ;
+
+		if (enable)
+			temp_reg |= S5P_MXR_PIXEL_BLEND_ENABLE;
+		else
+			temp_reg |= S5P_MXR_PIXEL_BLEND_DISABLE;
+
+		writel(temp_reg, mixer_base + S5P_MXR_GRAPHIC1_CFG);
+		break;
+
+	default:
+		tvout_err("invalid layer parameter = %d\n", layer);
+
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable/disable whole-layer alpha blending for the video layer
+ * (VIDEO_CFG) or a graphic layer (GRAPHICn_CFG): the blend-enable bit
+ * is cleared, then the ENABLE or DISABLE constant is OR-ed in.
+ * NOTE(review): as with s5p_mixer_set_pixel_blend, OR-ing the *_DIS/
+ * *_DISABLE constants assumes they are 0 — confirm in the register map.
+ *
+ * Returns 0 on success, -1 for an unknown layer id.
+ */
+int s5p_mixer_set_layer_blend(enum s5p_mixer_layer layer, bool enable)
+{
+	u32 temp_reg;
+
+	tvout_dbg("%d, %d\n", layer, enable);
+
+	switch (layer) {
+	case MIXER_VIDEO_LAYER:
+		temp_reg = readl(mixer_base + S5P_MXR_VIDEO_CFG)
+			& (~S5P_MXR_VIDEO_CFG_BLEND_EN) ;
+
+		if (enable)
+			temp_reg |= S5P_MXR_VIDEO_CFG_BLEND_EN;
+		else
+			temp_reg |= S5P_MXR_VIDEO_CFG_BLEND_DIS;
+
+		writel(temp_reg, mixer_base + S5P_MXR_VIDEO_CFG);
+		break;
+
+	case MIXER_GPR0_LAYER:
+		temp_reg = readl(mixer_base + S5P_MXR_GRAPHIC0_CFG)
+			& (~S5P_MXR_WIN_BLEND_ENABLE) ;
+
+		if (enable)
+			temp_reg |= S5P_MXR_WIN_BLEND_ENABLE;
+		else
+			temp_reg |= S5P_MXR_WIN_BLEND_DISABLE;
+
+		writel(temp_reg, mixer_base + S5P_MXR_GRAPHIC0_CFG);
+		break;
+
+	case MIXER_GPR1_LAYER:
+		temp_reg = readl(mixer_base + S5P_MXR_GRAPHIC1_CFG)
+			& (~S5P_MXR_WIN_BLEND_ENABLE) ;
+
+		if (enable)
+			temp_reg |= S5P_MXR_WIN_BLEND_ENABLE;
+		else
+			temp_reg |= S5P_MXR_WIN_BLEND_DISABLE;
+
+		writel(temp_reg, mixer_base + S5P_MXR_GRAPHIC1_CFG);
+		break;
+
+	default:
+		tvout_err("invalid layer parameter = %d\n", layer);
+
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the layer-wide alpha value for the video or a graphic layer
+ * via read-modify-write of the layer's CFG register.
+ * NOTE(review): the graphic-layer branches clear the field with
+ * S5P_MXR_VIDEO_CFG_ALPHA_MASK — presumably the alpha field occupies
+ * the same bit positions in VIDEO_CFG and GRAPHICn_CFG; confirm
+ * against the mixer register map.
+ *
+ * Returns 0 on success, -1 for an unknown layer id.
+ */
+int s5p_mixer_set_alpha(enum s5p_mixer_layer layer, u32 alpha)
+{
+	u32 temp_reg;
+
+	tvout_dbg("%d, %d\n", layer, alpha);
+
+	switch (layer) {
+	case MIXER_VIDEO_LAYER:
+		temp_reg = readl(mixer_base + S5P_MXR_VIDEO_CFG)
+			& (~S5P_MXR_VIDEO_CFG_ALPHA_MASK) ;
+		temp_reg |= S5P_MXR_VIDEO_CFG_ALPHA_VALUE(alpha);
+		writel(temp_reg, mixer_base + S5P_MXR_VIDEO_CFG);
+		break;
+
+	case MIXER_GPR0_LAYER:
+		temp_reg = readl(mixer_base + S5P_MXR_GRAPHIC0_CFG)
+			& (~S5P_MXR_VIDEO_CFG_ALPHA_MASK) ;
+		temp_reg |= S5P_MXR_GRP_ALPHA_VALUE(alpha);
+		writel(temp_reg, mixer_base + S5P_MXR_GRAPHIC0_CFG);
+		break;
+
+	case MIXER_GPR1_LAYER:
+		temp_reg = readl(mixer_base + S5P_MXR_GRAPHIC1_CFG)
+			& (~S5P_MXR_VIDEO_CFG_ALPHA_MASK) ;
+		temp_reg |= S5P_MXR_GRP_ALPHA_VALUE(alpha);
+		writel(temp_reg, mixer_base + S5P_MXR_GRAPHIC1_CFG);
+		break;
+
+	default:
+		tvout_err("invalid layer parameter = %d\n", layer);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the framebuffer base address of a graphic layer.  The
+ * address must be word aligned (checked with S5P_MXR_GRP_ADDR_ILLEGAL).
+ *
+ * Returns 0 on success, -1 on a misaligned address or unknown layer.
+ */
+int s5p_mixer_set_grp_base_address(enum s5p_mixer_layer layer, u32 base_addr)
+{
+	tvout_dbg("%d, 0x%x\n", layer, base_addr);
+
+	if (S5P_MXR_GRP_ADDR_ILLEGAL(base_addr)) {
+		/* Fix: print the address in hex (was %d, signed decimal) */
+		tvout_err("address is not word align = 0x%x\n", base_addr);
+		return -1;
+	}
+
+	switch (layer) {
+	case MIXER_GPR0_LAYER:
+		writel(S5P_MXR_GPR_BASE(base_addr),
+			mixer_base + S5P_MXR_GRAPHIC0_BASE);
+		break;
+
+	case MIXER_GPR1_LAYER:
+		writel(S5P_MXR_GPR_BASE(base_addr),
+			mixer_base + S5P_MXR_GRAPHIC1_BASE);
+		break;
+
+	default:
+		tvout_err("invalid layer parameter = %d\n", layer);
+		return -1;
+	}
+
+	return 0;
+}
+
+int s5p_mixer_set_grp_layer_dst_pos(enum s5p_mixer_layer layer,
+ u32 dst_offs_x, u32 dst_offs_y)
+{
+ tvout_dbg("%d, %d, %d\n", layer, dst_offs_x, dst_offs_y);
+
+ switch (layer) {
+ case MIXER_GPR0_LAYER:
+ writel(S5P_MXR_GRP_DESTX(dst_offs_x) |
+ S5P_MXR_GRP_DESTY(dst_offs_y),
+ mixer_base + S5P_MXR_GRAPHIC0_DXY);
+ break;
+
+ case MIXER_GPR1_LAYER:
+ writel(S5P_MXR_GRP_DESTX(dst_offs_x) |
+ S5P_MXR_GRP_DESTY(dst_offs_y),
+ mixer_base + S5P_MXR_GRAPHIC1_DXY);
+ break;
+
+ default:
+ tvout_err("invalid layer parameter = %d\n", layer);
+ return -1;
+ }
+
+ return 0;
+}
+
+int s5p_mixer_set_grp_layer_src_pos(enum s5p_mixer_layer layer, u32 src_offs_x,
+ u32 src_offs_y, u32 span, u32 width, u32 height)
+{
+ tvout_dbg("%d, %d, %d, %d, %d, %d\n", layer, span, width, height,
+ src_offs_x, src_offs_y);
+
+ switch (layer) {
+ case MIXER_GPR0_LAYER:
+ writel(S5P_MXR_GRP_SPAN(span),
+ mixer_base + S5P_MXR_GRAPHIC0_SPAN);
+ writel(S5P_MXR_GRP_WIDTH(width) | S5P_MXR_GRP_HEIGHT(height),
+ mixer_base + S5P_MXR_GRAPHIC0_WH);
+ writel(S5P_MXR_GRP_STARTX(src_offs_x) |
+ S5P_MXR_GRP_STARTY(src_offs_y),
+ mixer_base + S5P_MXR_GRAPHIC0_SXY);
+ break;
+
+ case MIXER_GPR1_LAYER:
+ writel(S5P_MXR_GRP_SPAN(span),
+ mixer_base + S5P_MXR_GRAPHIC1_SPAN);
+ writel(S5P_MXR_GRP_WIDTH(width) | S5P_MXR_GRP_HEIGHT(height),
+ mixer_base + S5P_MXR_GRAPHIC1_WH);
+ writel(S5P_MXR_GRP_STARTX(src_offs_x) |
+ S5P_MXR_GRP_STARTY(src_offs_y),
+ mixer_base + S5P_MXR_GRAPHIC1_SXY);
+ break;
+
+ default:
+ tvout_err(" invalid layer parameter = %d\n", layer);
+ return -1;
+ }
+
+ return 0;
+}
+
+void s5p_mixer_set_bg_color(enum s5p_mixer_bg_color_num colornum,
+ u32 color_y, u32 color_cb, u32 color_cr)
+{
+ u32 reg_value;
+
+ reg_value = S5P_MXR_BG_COLOR_Y(color_y) |
+ S5P_MXR_BG_COLOR_CB(color_cb) |
+ S5P_MXR_BG_COLOR_CR(color_cr);
+
+ switch (colornum) {
+ case MIXER_BG_COLOR_0:
+ writel(reg_value, mixer_base + S5P_MXR_BG_COLOR0);
+ break;
+
+ case MIXER_BG_COLOR_1:
+ writel(reg_value, mixer_base + S5P_MXR_BG_COLOR1);
+ break;
+
+ case MIXER_BG_COLOR_2:
+ writel(reg_value, mixer_base + S5P_MXR_BG_COLOR2);
+ break;
+ }
+}
+void s5p_mixer_set_video_limiter(u32 y_min, u32 y_max,
+ u32 c_min, u32 c_max, bool enable)
+{
+ u32 reg_value;
+
+ reg_value = readl(mixer_base + S5P_MXR_VIDEO_CFG)
+ & (~S5P_MXR_VIDEO_CFG_LIMITER_EN) ;
+
+ if (enable)
+ reg_value |= S5P_MXR_VIDEO_CFG_LIMITER_EN;
+ else
+ reg_value |= S5P_MXR_VIDEO_CFG_LIMITER_DIS;
+
+ writel(reg_value, mixer_base + S5P_MXR_VIDEO_CFG);
+
+ reg_value = S5P_MXR_VIDEO_LIMITER_PARA_Y_UPPER(y_max) |
+ S5P_MXR_VIDEO_LIMITER_PARA_Y_LOWER(y_min) |
+ S5P_MXR_VIDEO_LIMITER_PARA_C_UPPER(c_max) |
+ S5P_MXR_VIDEO_LIMITER_PARA_C_LOWER(c_min);
+
+ writel(reg_value, mixer_base + S5P_MXR_VIDEO_LIMITER_PARA_CFG);
+
+}
+
+void s5p_mixer_init_status_reg(enum s5p_mixer_burst_mode burst,
+ enum s5p_tvout_endian endian)
+{
+ u32 temp_reg = 0;
+
+ temp_reg = S5P_MXR_STATUS_SYNC_ENABLE | S5P_MXR_STATUS_OPERATING;
+
+ switch (burst) {
+ case MIXER_BURST_8:
+ temp_reg |= S5P_MXR_STATUS_8_BURST;
+ break;
+ case MIXER_BURST_16:
+ temp_reg |= S5P_MXR_STATUS_16_BURST;
+ break;
+ }
+
+ switch (endian) {
+ case TVOUT_BIG_ENDIAN:
+ temp_reg |= S5P_MXR_STATUS_BIG_ENDIAN;
+ break;
+ case TVOUT_LITTLE_ENDIAN:
+ temp_reg |= S5P_MXR_STATUS_LITTLE_ENDIAN;
+ break;
+ }
+
+ writel(temp_reg, mixer_base + S5P_MXR_STATUS);
+}
+
/*
 * s5p_mixer_init_display_mode - configure the mixer CFG register for a TV
 * standard/resolution and an output path.
 * @mode: display standard (SD NTSC/PAL, 480p..1080p, HDMI-1.4a 3D modes)
 * @output_mode: physical output (composite, HDMI YUV/RGB, DVI)
 * @rgb_type: RGB color-range selection, written into CFG bits [10:9]
 *
 * Returns 0 on success, -1 for an unknown @mode or @output_mode.
 */
int s5p_mixer_init_display_mode(enum s5p_tvout_disp_mode mode,
				enum s5p_tvout_o_mode output_mode,
				enum s5p_mixer_rgb rgb_type)
{
	u32 temp_reg = readl(mixer_base + S5P_MXR_CFG);

	tvout_dbg("%d, %d\n", mode, output_mode);

	/* First pass: SD/HD select, PAL/NTSC select, scan mode. */
	switch (mode) {
	case TVOUT_NTSC_M:
	case TVOUT_NTSC_443:
		temp_reg &= ~S5P_MXR_CFG_HD;
		temp_reg &= ~S5P_MXR_CFG_PAL;
		/*
		 * NOTE(review): "&= S5P_MXR_CFG_INTERLACE" (no '~') keeps
		 * ONLY the bits in that mask and clears everything else,
		 * which is unusual next to the ~-masked lines above.  The
		 * same pattern appears in every interlaced case below —
		 * confirm against the regs-mixer header that this is the
		 * intended way to select interlaced scan.
		 */
		temp_reg &= S5P_MXR_CFG_INTERLACE;
		break;

	case TVOUT_PAL_BDGHI:
	case TVOUT_PAL_M:
	case TVOUT_PAL_N:
	case TVOUT_PAL_NC:
	case TVOUT_PAL_60:
		temp_reg &= ~S5P_MXR_CFG_HD;
		temp_reg |= S5P_MXR_CFG_PAL;
		temp_reg &= S5P_MXR_CFG_INTERLACE;
		break;

	case TVOUT_480P_60_16_9:
	case TVOUT_480P_60_4_3:
	case TVOUT_480P_59:
		temp_reg &= ~S5P_MXR_CFG_HD;
		temp_reg &= ~S5P_MXR_CFG_PAL;
		temp_reg |= S5P_MXR_CFG_PROGRASSIVE;
		break;

	case TVOUT_576P_50_16_9:
	case TVOUT_576P_50_4_3:
		temp_reg &= ~S5P_MXR_CFG_HD;
		temp_reg |= S5P_MXR_CFG_PAL;
		temp_reg |= S5P_MXR_CFG_PROGRASSIVE;
		break;

	case TVOUT_720P_50:
	case TVOUT_720P_59:
	case TVOUT_720P_60:
		temp_reg |= S5P_MXR_CFG_HD;
		temp_reg &= ~S5P_MXR_CFG_HD_1080I;
		temp_reg |= S5P_MXR_CFG_PROGRASSIVE;
		break;

#ifdef CONFIG_HDMI_14A_3D
	/* 3D 720p modes share the plain 720p configuration */
	case TVOUT_720P_60_SBS_HALF:
	case TVOUT_720P_59_SBS_HALF:
	case TVOUT_720P_50_TB:
		temp_reg |= S5P_MXR_CFG_HD;
		temp_reg &= ~S5P_MXR_CFG_HD_1080I;
		temp_reg |= S5P_MXR_CFG_PROGRASSIVE;
		break;
#endif

	case TVOUT_1080I_50:
	case TVOUT_1080I_59:
	case TVOUT_1080I_60:
		temp_reg |= S5P_MXR_CFG_HD;
		temp_reg |= S5P_MXR_CFG_HD_1080I;
		temp_reg &= S5P_MXR_CFG_INTERLACE;
		break;

	case TVOUT_1080P_50:
	case TVOUT_1080P_59:
	case TVOUT_1080P_60:
	case TVOUT_1080P_30:
		temp_reg |= S5P_MXR_CFG_HD;
		temp_reg |= S5P_MXR_CFG_HD_1080P;
		temp_reg |= S5P_MXR_CFG_PROGRASSIVE;
		break;

#ifdef CONFIG_HDMI_14A_3D
	/* 3D 1080p modes share the plain 1080p configuration */
	case TVOUT_1080P_24_TB:
	case TVOUT_1080P_23_TB:
		temp_reg |= S5P_MXR_CFG_HD;
		temp_reg |= S5P_MXR_CFG_HD_1080P;
		temp_reg |= S5P_MXR_CFG_PROGRASSIVE;
		break;
#endif
	default:
		tvout_err("invalid mode parameter = %d\n", mode);
		return -1;
	}

	/* Second pass: output path and CFG bit 8 (output color format). */
	switch (output_mode) {
	case TVOUT_COMPOSITE:
		/* NOTE(review): plain AND mask again — see note above. */
		temp_reg &= S5P_MXR_CFG_TV_OUT;
		temp_reg &= ~(0x1<<8);
		temp_reg |= MIXER_YUV444<<8;
		break;

	case TVOUT_HDMI_RGB:
	case TVOUT_DVI:
		temp_reg |= S5P_MXR_CFG_HDMI_OUT;
		temp_reg &= ~(0x1<<8);
		temp_reg |= MIXER_RGB888<<8;
		break;

	case TVOUT_HDMI:
		temp_reg |= S5P_MXR_CFG_HDMI_OUT;
		temp_reg &= ~(0x1<<8);
		temp_reg |= MIXER_YUV444<<8;
		break;

	default:
		/*
		 * NOTE(review): this error path reports @mode, but it is
		 * @output_mode that failed validation here — the log value
		 * is misleading.
		 */
		tvout_err("invalid mode parameter = %d\n", mode);
		return -1;
	}

	/* CFG[10:9]: RGB color-range type. Bits are OR-ed in without being
	 * cleared first, so this relies on the reset/previous value. */
	if (0 <= rgb_type && rgb_type <= 3)
		temp_reg |= rgb_type<<9;
	else
		printk(KERN_INFO "Wrong rgb type!!\n");

	/* NOTE(review): KERN_INFO inside the tvout_dbg format string gets
	 * printed literally by the wrapper — probably unintended. */
	tvout_dbg(KERN_INFO "Color range RGB Type : %x\n", rgb_type);
	writel(temp_reg, mixer_base + S5P_MXR_CFG);

	return 0;
}
+
+void s5p_mixer_scaling(enum s5p_mixer_layer layer,
+ struct s5ptvfb_user_scaling scaling)
+{
+ u32 reg, ver_val = 0, hor_val = 0;
+
+ switch (scaling.ver) {
+ case VERTICAL_X1:
+ ver_val = 0;
+ break;
+ case VERTICAL_X2:
+ ver_val = 1;
+ break;
+ }
+
+ switch (scaling.hor) {
+ case HORIZONTAL_X1:
+ hor_val = 0;
+ break;
+ case HORIZONTAL_X2:
+ hor_val = 1;
+ break;
+ }
+
+ switch (layer) {
+ case MIXER_GPR0_LAYER:
+ reg = readl(mixer_base + S5P_MXR_GRAPHIC0_WH);
+ reg |= S5P_MXR_GRP_V_SCALE(ver_val);
+ reg |= S5P_MXR_GRP_H_SCALE(hor_val);
+ writel(reg, mixer_base + S5P_MXR_GRAPHIC0_WH);
+ break;
+ case MIXER_GPR1_LAYER:
+ reg = readl(mixer_base + S5P_MXR_GRAPHIC1_WH);
+ reg |= S5P_MXR_GRP_V_SCALE(ver_val);
+ reg |= S5P_MXR_GRP_H_SCALE(hor_val);
+ writel(reg, mixer_base + S5P_MXR_GRAPHIC1_WH);
+ break;
+ case MIXER_VIDEO_LAYER:
+ break;
+ }
+}
+
+void s5p_mixer_set_color_format(enum s5p_mixer_layer layer,
+ enum s5p_mixer_color_fmt format)
+{
+ u32 reg;
+
+ switch (layer) {
+ case MIXER_GPR0_LAYER:
+ reg = readl(mixer_base + S5P_MXR_GRAPHIC0_CFG);
+ reg &= ~(S5P_MXR_EG_COLOR_FORMAT(0xf));
+ reg |= S5P_MXR_EG_COLOR_FORMAT(format);
+ writel(reg, mixer_base + S5P_MXR_GRAPHIC0_CFG);
+ break;
+ case MIXER_GPR1_LAYER:
+ reg = readl(mixer_base + S5P_MXR_GRAPHIC1_CFG);
+ reg &= ~(S5P_MXR_EG_COLOR_FORMAT(0xf));
+ reg |= S5P_MXR_EG_COLOR_FORMAT(format);
+ writel(reg, mixer_base + S5P_MXR_GRAPHIC1_CFG);
+ break;
+ case MIXER_VIDEO_LAYER:
+ break;
+ }
+}
+
+void s5p_mixer_set_chroma_key(enum s5p_mixer_layer layer, bool enabled, u32 key)
+{
+ u32 reg;
+
+ switch (layer) {
+ case MIXER_GPR0_LAYER:
+ reg = readl(mixer_base + S5P_MXR_GRAPHIC0_CFG);
+
+ if (enabled)
+ reg &= ~S5P_MXR_BLANK_CHANGE_NEW_PIXEL;
+ else
+ reg |= S5P_MXR_BLANK_CHANGE_NEW_PIXEL;
+
+ writel(reg, mixer_base + S5P_MXR_GRAPHIC0_CFG);
+ writel(S5P_MXR_GPR_BLANK_COLOR(key),
+ mixer_base + S5P_MXR_GRAPHIC0_BLANK);
+ break;
+ case MIXER_GPR1_LAYER:
+ reg = readl(mixer_base + S5P_MXR_GRAPHIC1_CFG);
+
+ if (enabled)
+ reg &= ~S5P_MXR_BLANK_CHANGE_NEW_PIXEL;
+ else
+ reg |= S5P_MXR_BLANK_CHANGE_NEW_PIXEL;
+
+ writel(reg, mixer_base + S5P_MXR_GRAPHIC1_CFG);
+ writel(S5P_MXR_GPR_BLANK_COLOR(key),
+ mixer_base + S5P_MXR_GRAPHIC1_BLANK);
+ break;
+ case MIXER_VIDEO_LAYER:
+ break;
+ }
+}
+
+void s5p_mixer_init_bg_dither_enable(bool cr_dither_enable,
+ bool cb_dither_enable,
+ bool y_dither_enable)
+{
+ u32 temp_reg = 0;
+
+ tvout_dbg("%d, %d, %d\n", cr_dither_enable, cb_dither_enable,
+ y_dither_enable);
+
+ temp_reg = (cr_dither_enable) ?
+ (temp_reg | S5P_MXR_BG_CR_DIHER_EN) :
+ (temp_reg & ~S5P_MXR_BG_CR_DIHER_EN);
+ temp_reg = (cb_dither_enable) ?
+ (temp_reg | S5P_MXR_BG_CB_DIHER_EN) :
+ (temp_reg & ~S5P_MXR_BG_CB_DIHER_EN);
+ temp_reg = (y_dither_enable) ?
+ (temp_reg | S5P_MXR_BG_Y_DIHER_EN) :
+ (temp_reg & ~S5P_MXR_BG_Y_DIHER_EN);
+
+ writel(temp_reg, mixer_base + S5P_MXR_BG_CFG);
+
+}
+
/*
 * s5p_mixer_init_csc_coef_default - load one of four canned RGB->YCbCr
 * color-space-conversion matrices into the mixer.
 * @csc_type: selects BT.601 or BT.709 coefficients, limited (16-235) or
 *            full (0-255) range
 *
 * Each coefficient register packs three 10-bit matrix entries (plus a
 * range flag in bit 30 of the Y register). The constants are the fixed
 * hardware coefficient sets; values >= 512 encode negative coefficients
 * in two's complement within the 10-bit fields.
 */
void s5p_mixer_init_csc_coef_default(enum s5p_mixer_rgb csc_type)
{
	tvout_dbg("%d\n", csc_type);

	switch (csc_type) {
	case MIXER_RGB601_16_235:
		/* BT.601, limited range */
		writel((0 << 30) | (153 << 20) | (300 << 10) | (58 << 0),
			mixer_base + S5P_MXR_CM_COEFF_Y);
		writel((936 << 20) | (851 << 10) | (262 << 0),
			mixer_base + S5P_MXR_CM_COEFF_CB);
		writel((262 << 20) | (805 << 10) | (982 << 0),
			mixer_base + S5P_MXR_CM_COEFF_CR);
		break;

	case MIXER_RGB601_0_255:
		/* BT.601, full range */
		writel((1 << 30) | (132 << 20) | (258 << 10) | (50 << 0),
			mixer_base + S5P_MXR_CM_COEFF_Y);
		writel((949 << 20) | (876 << 10) | (225 << 0),
			mixer_base + S5P_MXR_CM_COEFF_CB);
		writel((225 << 20) | (836 << 10) | (988 << 0),
			mixer_base + S5P_MXR_CM_COEFF_CR);
		break;

	case MIXER_RGB709_16_235:
		/* BT.709, limited range */
		writel((0 << 30) | (109 << 20) | (366 << 10) | (36 << 0),
			mixer_base + S5P_MXR_CM_COEFF_Y);
		writel((964 << 20) | (822 << 10) | (216 << 0),
			mixer_base + S5P_MXR_CM_COEFF_CB);
		writel((262 << 20) | (787 << 10) | (1000 << 0),
			mixer_base + S5P_MXR_CM_COEFF_CR);
		break;

	case MIXER_RGB709_0_255:
		/* BT.709, full range */
		writel((1 << 30) | (94 << 20) | (314 << 10) | (32 << 0),
			mixer_base + S5P_MXR_CM_COEFF_Y);
		writel((972 << 20) | (851 << 10) | (225 << 0),
			mixer_base + S5P_MXR_CM_COEFF_CB);
		writel((225 << 20) | (820 << 10) | (1004 << 0),
			mixer_base + S5P_MXR_CM_COEFF_CR);
		break;

	default:
		tvout_err("invalid csc_type parameter = %d\n", csc_type);
		break;
	}
}
+
+void s5p_mixer_start(void)
+{
+ writel((readl(mixer_base + S5P_MXR_STATUS) | S5P_MXR_STATUS_RUN),
+ mixer_base + S5P_MXR_STATUS);
+}
+
/*
 * s5p_mixer_stop - clear the RUN bit and poll until the mixer reports
 * idle.
 *
 * NOTE(review): the poll below is unbounded and has no cpu_relax(); if
 * the mixer never reaches IDLE_MODE (e.g. its clock is already gated)
 * this spins forever — confirm callers guarantee the mixer clock is
 * running when this is invoked.
 */
void s5p_mixer_stop(void)
{
	u32 reg = readl(mixer_base + S5P_MXR_STATUS);

	reg &= ~S5P_MXR_STATUS_RUN;

	writel(reg, mixer_base + S5P_MXR_STATUS);

	/* busy-wait for the hardware to acknowledge the stop */
	do {
		reg = readl(mixer_base + S5P_MXR_STATUS);
	} while (!(reg & S5P_MXR_STATUS_IDLE_MODE));
}
+
+void s5p_mixer_set_underflow_int_enable(enum s5p_mixer_layer layer, bool en)
+{
+ u32 enable_mask = 0;
+
+ switch (layer) {
+ case MIXER_VIDEO_LAYER:
+ enable_mask = S5P_MXR_INT_EN_VP_ENABLE;
+ break;
+
+ case MIXER_GPR0_LAYER:
+ enable_mask = S5P_MXR_INT_EN_GRP0_ENABLE;
+ break;
+
+ case MIXER_GPR1_LAYER:
+ enable_mask = S5P_MXR_INT_EN_GRP1_ENABLE;
+ break;
+ }
+
+ if (en) {
+ writel((readl(mixer_base + S5P_MXR_INT_EN) | enable_mask),
+ mixer_base + S5P_MXR_INT_EN);
+ } else {
+ writel((readl(mixer_base + S5P_MXR_INT_EN) & ~enable_mask),
+ mixer_base + S5P_MXR_INT_EN);
+ }
+}
+
+void s5p_mixer_set_vsync_interrupt(bool en)
+{
+ if (en) {
+ writel(S5P_MXR_INT_STATUS_VSYNC_CLEARED, mixer_base +
+ S5P_MXR_INT_STATUS);
+ writel((readl(mixer_base + S5P_MXR_INT_EN) |
+ S5P_MXR_INT_EN_VSYNC_ENABLE),
+ mixer_base + S5P_MXR_INT_EN);
+ } else {
+ writel((readl(mixer_base + S5P_MXR_INT_EN) &
+ ~S5P_MXR_INT_EN_VSYNC_ENABLE),
+ mixer_base + S5P_MXR_INT_EN);
+ }
+
+ tvout_dbg("%s mixer VSYNC interrupt.\n", en? "Enable": "Disable");
+}
+
+void s5p_mixer_clear_pend_all(void)
+{
+ writel(S5P_MXR_INT_STATUS_INT_FIRED | S5P_MXR_INT_STATUS_VP_FIRED |
+ S5P_MXR_INT_STATUS_GRP0_FIRED | S5P_MXR_INT_STATUS_GRP1_FIRED,
+ mixer_base + S5P_MXR_INT_STATUS);
+}
+
/*
 * s5p_mixer_irq - mixer interrupt handler.
 *
 * Distinguishes FIFO-underrun interrupts (VP/GRP0/GRP1, logged and
 * acknowledged) from the VSYNC interrupt. On VSYNC it resolves which
 * video buffer the hardware is currently scanning out (by comparing the
 * VP source address against the s5ptv_vp_buff pool), rotates the copy
 * buffer index accordingly and wakes waiters on s5ptv_wq.
 *
 * Runs with lock_mixer held for the whole body; s5ptv_vp_buff, s5ptv_wq
 * and s5p_vp_ctrl_get_src_addr() are defined elsewhere in the driver.
 */
irqreturn_t s5p_mixer_irq(int irq, void *dev_id)
{
	bool v_i_f;
	bool g0_i_f;
	bool g1_i_f;
	bool mxr_i_f;
	u32 temp_reg = 0;
	unsigned long spin_flags;
	u32 top_y_addr, top_c_addr;
	int i = 0;
	unsigned int pre_vp_buff_idx;

	spin_lock_irqsave(&lock_mixer, spin_flags);

	/* snapshot the per-source "fired" flags from INT_STATUS */
	v_i_f = (readl(mixer_base + S5P_MXR_INT_STATUS)
		& S5P_MXR_INT_STATUS_VP_FIRED) ? true : false;
	g0_i_f = (readl(mixer_base + S5P_MXR_INT_STATUS)
		& S5P_MXR_INT_STATUS_GRP0_FIRED) ? true : false;
	g1_i_f = (readl(mixer_base + S5P_MXR_INT_STATUS)
		& S5P_MXR_INT_STATUS_GRP1_FIRED) ? true : false;
	mxr_i_f = (readl(mixer_base + S5P_MXR_INT_STATUS)
		& S5P_MXR_INT_STATUS_INT_FIRED) ? true : false;

	if (mxr_i_f) {
		temp_reg |= S5P_MXR_INT_STATUS_INT_FIRED;

		if (v_i_f) {
			temp_reg |= S5P_MXR_INT_STATUS_VP_FIRED;
			tvout_dbg("VP fifo under run!!\n");
		}

		if (g0_i_f) {
			temp_reg |= S5P_MXR_INT_STATUS_GRP0_FIRED;
			tvout_dbg("GRP0 fifo under run!!\n");
		}

		if (g1_i_f) {
			temp_reg |= S5P_MXR_INT_STATUS_GRP1_FIRED;
			tvout_dbg("GRP1 fifo under run!!\n");
		}

		if (!v_i_f && !g0_i_f && !g1_i_f) {
			/* no underrun -> this is the VSYNC interrupt */
			writel(S5P_MXR_INT_STATUS_VSYNC_CLEARED,
				mixer_base + S5P_MXR_INT_STATUS);
			s5p_vp_ctrl_get_src_addr(&top_y_addr, &top_c_addr);

			/* find the buffer the VP is reading from now */
			pre_vp_buff_idx = s5ptv_vp_buff.vp_access_buff_idx;
			for (i = 0; i < S5PTV_VP_BUFF_CNT; i++) {
				if (top_y_addr == s5ptv_vp_buff.vp_buffs[i].phy_base) {
					s5ptv_vp_buff.vp_access_buff_idx = i;
					break;
				}
			}

			/* recycle the previously scanned buffer into the
			 * copy list in place of the now-active one */
			for (i = 0; i < S5PTV_VP_BUFF_CNT - 1; i++) {
				if (s5ptv_vp_buff.copy_buff_idxs[i]
					== s5ptv_vp_buff.vp_access_buff_idx) {
					s5ptv_vp_buff.copy_buff_idxs[i] = pre_vp_buff_idx;
					break;
				}
			}
			wake_up(&s5ptv_wq);
		} else {
			/* acknowledge exactly the underrun bits we saw */
			writel(temp_reg, mixer_base + S5P_MXR_INT_STATUS);
		}
	}
	spin_unlock_irqrestore(&lock_mixer, spin_flags);

	return IRQ_HANDLED;
}
+
+void s5p_mixer_init(void __iomem *addr)
+{
+ mixer_base = addr;
+
+ spin_lock_init(&lock_mixer);
+}
diff --git a/drivers/media/video/samsung/tvout/hw_if/sdo.c b/drivers/media/video/samsung/tvout/hw_if/sdo.c
new file mode 100644
index 0000000..8d6b661
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/hw_if/sdo.c
@@ -0,0 +1,1122 @@
+/* linux/drivers/media/video/samsung/tvout/hw_if/sdo.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Hardware interface functions for SDO (Standard Definition Output)
+ * - SDO: Analog TV encoder + DAC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <mach/regs-clock.h>
+#include <mach/regs-sdo.h>
+
+#include "../s5p_tvout_common_lib.h"
+#include "hw_if.h"
+
+#undef tvout_dbg
+
+#ifdef CONFIG_SDO_DEBUG
+#define tvout_dbg(fmt, ...) \
+ printk(KERN_INFO "\t\t[SDO] %s(): " fmt, \
+ __func__, ##__VA_ARGS__)
+#else
+#define tvout_dbg(fmt, ...)
+#endif
+
+void __iomem *sdo_base;
+
/*
 * s5p_sdo_calc_wss_cgms_crc - compute the 6-bit CRC over the low 14 bits
 * of a WSS/CGMS word.
 * @value: 14-bit payload in the low bits
 *
 * Runs the payload LSB-first through a 6-stage LFSR seeded with all ones
 * (the WSS/CGMS-A CRC), then returns the payload masked to 14 bits with
 * the six CRC bits OR-ed into bits [5:0].
 */
static u32 s5p_sdo_calc_wss_cgms_crc(u32 value)
{
	u8 bit[14];
	u8 state[6];
	u8 fb;
	u32 result;
	u8 i;

	result = value;

	/* unpack the 14 payload bits, LSB first */
	for (i = 0; i < 14; i++)
		bit[i] = (u8)(result >> i) & 0x1;

	/* seed the shift register with all ones */
	for (i = 0; i < 6; i++)
		state[i] = 0x1;

	/* clock the payload through the LFSR */
	for (i = 0; i < 14; i++) {
		fb = state[0];
		state[0] = state[1];
		state[1] = state[2];
		state[2] = state[3];
		state[3] = state[4];
		state[4] = fb ^ bit[i] ^ state[5];
		state[5] = fb ^ bit[i];
	}

	/* merge: 14-bit payload plus CRC bits in [5:0] */
	result &= 0x3fff;

	for (i = 0; i < 6; i++)
		result |= ((u32)(state[i] & 0x1) << i);

	return result;
}
+
/*
 * s5p_sdo_set_antialias_filter_coeff_default - load the canned
 * anti-aliasing filter coefficient set matching the composite output
 * level and video-to-sync ratio.
 * @composite_level: 0 IRE or 7.5 IRE pedestal
 * @composite_ratio: 10:4 or 7:3 video-to-sync ratio
 *
 * Each branch programs the fixed Y/CB/CR filter tap registers for one of
 * the four (level, ratio) combinations. Returns 0 on success, -1 for an
 * unknown level or ratio.
 *
 * NOTE(review): the 0IRE/10:4 branch starts at S5P_SDO_Y3 and never
 * writes Y0-Y2, while the other three branches zero Y0-Y2 explicitly —
 * confirm whether that asymmetry is intentional or relies on reset
 * defaults.
 */
static int s5p_sdo_set_antialias_filter_coeff_default(
		enum s5p_sdo_level composite_level,
		enum s5p_sdo_vsync_ratio composite_ratio)
{
	tvout_dbg("%d, %d\n", composite_level, composite_ratio);

	switch (composite_level) {
	case SDO_LEVEL_0IRE:
		switch (composite_ratio) {
		case SDO_VTOS_RATIO_10_4:
			/* 0 IRE pedestal, 10:4 video-to-sync */
			writel(0x00000000, sdo_base + S5P_SDO_Y3);
			writel(0x00000000, sdo_base + S5P_SDO_Y4);
			writel(0x00000000, sdo_base + S5P_SDO_Y5);
			writel(0x00000000, sdo_base + S5P_SDO_Y6);
			writel(0x00000000, sdo_base + S5P_SDO_Y7);
			writel(0x00000000, sdo_base + S5P_SDO_Y8);
			writel(0x00000000, sdo_base + S5P_SDO_Y9);
			writel(0x00000000, sdo_base + S5P_SDO_Y10);
			writel(0x0000029a, sdo_base + S5P_SDO_Y11);
			writel(0x00000000, sdo_base + S5P_SDO_CB0);
			writel(0x00000000, sdo_base + S5P_SDO_CB1);
			writel(0x00000000, sdo_base + S5P_SDO_CB2);
			writel(0x00000000, sdo_base + S5P_SDO_CB3);
			writel(0x00000000, sdo_base + S5P_SDO_CB4);
			writel(0x00000001, sdo_base + S5P_SDO_CB5);
			writel(0x00000007, sdo_base + S5P_SDO_CB6);
			writel(0x00000015, sdo_base + S5P_SDO_CB7);
			writel(0x0000002b, sdo_base + S5P_SDO_CB8);
			writel(0x00000045, sdo_base + S5P_SDO_CB9);
			writel(0x00000059, sdo_base + S5P_SDO_CB10);
			writel(0x00000061, sdo_base + S5P_SDO_CB11);
			writel(0x00000000, sdo_base + S5P_SDO_CR1);
			writel(0x00000000, sdo_base + S5P_SDO_CR2);
			writel(0x00000000, sdo_base + S5P_SDO_CR3);
			writel(0x00000000, sdo_base + S5P_SDO_CR4);
			writel(0x00000002, sdo_base + S5P_SDO_CR5);
			writel(0x0000000a, sdo_base + S5P_SDO_CR6);
			writel(0x0000001e, sdo_base + S5P_SDO_CR7);
			writel(0x0000003d, sdo_base + S5P_SDO_CR8);
			writel(0x00000061, sdo_base + S5P_SDO_CR9);
			writel(0x0000007a, sdo_base + S5P_SDO_CR10);
			writel(0x0000008f, sdo_base + S5P_SDO_CR11);
			break;

		case SDO_VTOS_RATIO_7_3:
			/* 0 IRE pedestal, 7:3 video-to-sync */
			writel(0x00000000, sdo_base + S5P_SDO_Y0);
			writel(0x00000000, sdo_base + S5P_SDO_Y1);
			writel(0x00000000, sdo_base + S5P_SDO_Y2);
			writel(0x00000000, sdo_base + S5P_SDO_Y3);
			writel(0x00000000, sdo_base + S5P_SDO_Y4);
			writel(0x00000000, sdo_base + S5P_SDO_Y5);
			writel(0x00000000, sdo_base + S5P_SDO_Y6);
			writel(0x00000000, sdo_base + S5P_SDO_Y7);
			writel(0x00000000, sdo_base + S5P_SDO_Y8);
			writel(0x00000000, sdo_base + S5P_SDO_Y9);
			writel(0x00000000, sdo_base + S5P_SDO_Y10);
			writel(0x00000281, sdo_base + S5P_SDO_Y11);
			writel(0x00000000, sdo_base + S5P_SDO_CB0);
			writel(0x00000000, sdo_base + S5P_SDO_CB1);
			writel(0x00000000, sdo_base + S5P_SDO_CB2);
			writel(0x00000000, sdo_base + S5P_SDO_CB3);
			writel(0x00000000, sdo_base + S5P_SDO_CB4);
			writel(0x00000001, sdo_base + S5P_SDO_CB5);
			writel(0x00000007, sdo_base + S5P_SDO_CB6);
			writel(0x00000015, sdo_base + S5P_SDO_CB7);
			writel(0x0000002a, sdo_base + S5P_SDO_CB8);
			writel(0x00000044, sdo_base + S5P_SDO_CB9);
			writel(0x00000057, sdo_base + S5P_SDO_CB10);
			writel(0x0000005f, sdo_base + S5P_SDO_CB11);
			writel(0x00000000, sdo_base + S5P_SDO_CR1);
			writel(0x00000000, sdo_base + S5P_SDO_CR2);
			writel(0x00000000, sdo_base + S5P_SDO_CR3);
			writel(0x00000000, sdo_base + S5P_SDO_CR4);
			writel(0x00000002, sdo_base + S5P_SDO_CR5);
			writel(0x0000000a, sdo_base + S5P_SDO_CR6);
			writel(0x0000001d, sdo_base + S5P_SDO_CR7);
			writel(0x0000003c, sdo_base + S5P_SDO_CR8);
			writel(0x0000005f, sdo_base + S5P_SDO_CR9);
			writel(0x0000007b, sdo_base + S5P_SDO_CR10);
			writel(0x00000086, sdo_base + S5P_SDO_CR11);
			break;

		default:
			tvout_err("invalid composite_ratio parameter(%d)\n",
				composite_ratio);
			return -1;
		}

		break;

	case SDO_LEVEL_75IRE:
		switch (composite_ratio) {
		case SDO_VTOS_RATIO_10_4:
			/* 7.5 IRE pedestal, 10:4 video-to-sync */
			writel(0x00000000, sdo_base + S5P_SDO_Y0);
			writel(0x00000000, sdo_base + S5P_SDO_Y1);
			writel(0x00000000, sdo_base + S5P_SDO_Y2);
			writel(0x00000000, sdo_base + S5P_SDO_Y3);
			writel(0x00000000, sdo_base + S5P_SDO_Y4);
			writel(0x00000000, sdo_base + S5P_SDO_Y5);
			writel(0x00000000, sdo_base + S5P_SDO_Y6);
			writel(0x00000000, sdo_base + S5P_SDO_Y7);
			writel(0x00000000, sdo_base + S5P_SDO_Y8);
			writel(0x00000000, sdo_base + S5P_SDO_Y9);
			writel(0x00000000, sdo_base + S5P_SDO_Y10);
			writel(0x0000025d, sdo_base + S5P_SDO_Y11);
			writel(0x00000000, sdo_base + S5P_SDO_CB0);
			writel(0x00000000, sdo_base + S5P_SDO_CB1);
			writel(0x00000000, sdo_base + S5P_SDO_CB2);
			writel(0x00000000, sdo_base + S5P_SDO_CB3);
			writel(0x00000000, sdo_base + S5P_SDO_CB4);
			writel(0x00000001, sdo_base + S5P_SDO_CB5);
			writel(0x00000007, sdo_base + S5P_SDO_CB6);
			writel(0x00000014, sdo_base + S5P_SDO_CB7);
			writel(0x00000028, sdo_base + S5P_SDO_CB8);
			writel(0x0000003f, sdo_base + S5P_SDO_CB9);
			writel(0x00000052, sdo_base + S5P_SDO_CB10);
			writel(0x0000005a, sdo_base + S5P_SDO_CB11);
			writel(0x00000000, sdo_base + S5P_SDO_CR1);
			writel(0x00000000, sdo_base + S5P_SDO_CR2);
			writel(0x00000000, sdo_base + S5P_SDO_CR3);
			writel(0x00000000, sdo_base + S5P_SDO_CR4);
			writel(0x00000001, sdo_base + S5P_SDO_CR5);
			writel(0x00000009, sdo_base + S5P_SDO_CR6);
			writel(0x0000001c, sdo_base + S5P_SDO_CR7);
			writel(0x00000039, sdo_base + S5P_SDO_CR8);
			writel(0x0000005a, sdo_base + S5P_SDO_CR9);
			writel(0x00000074, sdo_base + S5P_SDO_CR10);
			writel(0x0000007e, sdo_base + S5P_SDO_CR11);
			break;

		case SDO_VTOS_RATIO_7_3:
			/* 7.5 IRE pedestal, 7:3 video-to-sync */
			writel(0x00000000, sdo_base + S5P_SDO_Y0);
			writel(0x00000000, sdo_base + S5P_SDO_Y1);
			writel(0x00000000, sdo_base + S5P_SDO_Y2);
			writel(0x00000000, sdo_base + S5P_SDO_Y3);
			writel(0x00000000, sdo_base + S5P_SDO_Y4);
			writel(0x00000000, sdo_base + S5P_SDO_Y5);
			writel(0x00000000, sdo_base + S5P_SDO_Y6);
			writel(0x00000000, sdo_base + S5P_SDO_Y7);
			writel(0x00000000, sdo_base + S5P_SDO_Y8);
			writel(0x00000000, sdo_base + S5P_SDO_Y9);
			writel(0x00000000, sdo_base + S5P_SDO_Y10);
			writel(0x00000251, sdo_base + S5P_SDO_Y11);
			writel(0x00000000, sdo_base + S5P_SDO_CB0);
			writel(0x00000000, sdo_base + S5P_SDO_CB1);
			writel(0x00000000, sdo_base + S5P_SDO_CB2);
			writel(0x00000000, sdo_base + S5P_SDO_CB3);
			writel(0x00000000, sdo_base + S5P_SDO_CB4);
			writel(0x00000001, sdo_base + S5P_SDO_CB5);
			writel(0x00000006, sdo_base + S5P_SDO_CB6);
			writel(0x00000013, sdo_base + S5P_SDO_CB7);
			writel(0x00000028, sdo_base + S5P_SDO_CB8);
			writel(0x0000003f, sdo_base + S5P_SDO_CB9);
			writel(0x00000051, sdo_base + S5P_SDO_CB10);
			writel(0x00000056, sdo_base + S5P_SDO_CB11);
			writel(0x00000000, sdo_base + S5P_SDO_CR1);
			writel(0x00000000, sdo_base + S5P_SDO_CR2);
			writel(0x00000000, sdo_base + S5P_SDO_CR3);
			writel(0x00000000, sdo_base + S5P_SDO_CR4);
			writel(0x00000002, sdo_base + S5P_SDO_CR5);
			writel(0x00000005, sdo_base + S5P_SDO_CR6);
			writel(0x00000018, sdo_base + S5P_SDO_CR7);
			writel(0x00000037, sdo_base + S5P_SDO_CR8);
			writel(0x0000005A, sdo_base + S5P_SDO_CR9);
			writel(0x00000076, sdo_base + S5P_SDO_CR10);
			writel(0x0000007e, sdo_base + S5P_SDO_CR11);
			break;

		default:
			tvout_err("invalid composite_ratio parameter(%d)\n",
				composite_ratio);
			return -1;
		}

		break;

	default:
		tvout_err("invalid composite_level parameter(%d)\n",
			composite_level);
		return -1;
	}

	return 0;
}
+
+
+int s5p_sdo_set_video_scale_cfg(
+ enum s5p_sdo_level composite_level,
+ enum s5p_sdo_vsync_ratio composite_ratio)
+{
+ u32 temp_reg = 0;
+
+ tvout_dbg("%d, %d\n", composite_level, composite_ratio);
+
+ switch (composite_level) {
+ case SDO_LEVEL_0IRE:
+ temp_reg |= S5P_SDO_COMPOSITE_LEVEL_SEL_0IRE;
+ break;
+
+ case SDO_LEVEL_75IRE:
+ temp_reg |= S5P_SDO_COMPOSITE_LEVEL_SEL_75IRE;
+ break;
+
+ default:
+ tvout_err("invalid composite_level parameter(%d)\n",
+ composite_ratio);
+ return -1;
+ }
+
+ switch (composite_ratio) {
+ case SDO_VTOS_RATIO_10_4:
+ temp_reg |= S5P_SDO_COMPOSITE_VTOS_RATIO_10_4;
+ break;
+
+ case SDO_VTOS_RATIO_7_3:
+ temp_reg |= S5P_SDO_COMPOSITE_VTOS_RATIO_7_3;
+ break;
+
+ default:
+ tvout_err("invalid composite_ratio parameter(%d)\n",
+ composite_ratio);
+ return -1;
+ }
+
+ writel(temp_reg, sdo_base + S5P_SDO_SCALE);
+
+ return 0;
+}
+
+int s5p_sdo_set_vbi(
+ bool wss_cvbs, enum s5p_sdo_closed_caption_type caption_cvbs)
+{
+ u32 temp_reg = 0;
+
+ tvout_dbg("%d, %d\n", wss_cvbs, caption_cvbs);
+
+ if (wss_cvbs)
+ temp_reg = S5P_SDO_CVBS_WSS_INS;
+ else
+ temp_reg = S5P_SDO_CVBS_NO_WSS;
+
+ switch (caption_cvbs) {
+ case SDO_NO_INS:
+ temp_reg |= S5P_SDO_CVBS_NO_CLOSED_CAPTION;
+ break;
+
+ case SDO_INS_1:
+ temp_reg |= S5P_SDO_CVBS_21H_CLOSED_CAPTION;
+ break;
+
+ case SDO_INS_2:
+ temp_reg |= S5P_SDO_CVBS_21H_284H_CLOSED_CAPTION;
+ break;
+
+ case SDO_INS_OTHERS:
+ temp_reg |= S5P_SDO_CVBS_USE_OTHERS;
+ break;
+
+ default:
+ tvout_err("invalid caption_cvbs parameter(%d)\n",
+ caption_cvbs);
+ return -1;
+ }
+
+
+ writel(temp_reg, sdo_base + S5P_SDO_VBI);
+
+ return 0;
+}
+
+void s5p_sdo_set_offset_gain(u32 offset, u32 gain)
+{
+ tvout_dbg("%d, %d\n", offset, gain);
+
+ writel(S5P_SDO_SCALE_CONV_OFFSET(offset) |
+ S5P_SDO_SCALE_CONV_GAIN(gain),
+ sdo_base + S5P_SDO_SCALE_CH0);
+}
+
+void s5p_sdo_set_delay(
+ u32 delay_y, u32 offset_video_start, u32 offset_video_end)
+{
+ tvout_dbg("%d, %d, %d\n", delay_y, offset_video_start,
+ offset_video_end);
+
+ writel(S5P_SDO_DELAY_YTOC(delay_y) |
+ S5P_SDO_ACTIVE_START_OFFSET(offset_video_start) |
+ S5P_SDO_ACTIVE_END_OFFSET(offset_video_end),
+ sdo_base + S5P_SDO_YCDELAY);
+}
+
+void s5p_sdo_set_schlock(bool color_sucarrier_pha_adj)
+{
+ tvout_dbg("%d\n", color_sucarrier_pha_adj);
+
+ if (color_sucarrier_pha_adj)
+ writel(S5P_SDO_COLOR_SC_PHASE_ADJ,
+ sdo_base + S5P_SDO_SCHLOCK);
+ else
+ writel(S5P_SDO_COLOR_SC_PHASE_NOADJ,
+ sdo_base + S5P_SDO_SCHLOCK);
+}
+
+void s5p_sdo_set_brightness_hue_saturation(
+ struct s5p_sdo_bright_hue_saturation bri_hue_sat)
+{
+ u32 temp_reg = 0;
+
+ tvout_dbg("%d, %d, %d, %d, %d, %d, %d, %d, %d\n",
+ bri_hue_sat.bright_hue_sat_adj, bri_hue_sat.gain_brightness,
+ bri_hue_sat.offset_brightness, bri_hue_sat.gain0_cb_hue_sat,
+ bri_hue_sat.gain1_cb_hue_sat, bri_hue_sat.gain0_cr_hue_sat,
+ bri_hue_sat.gain1_cr_hue_sat, bri_hue_sat.offset_cb_hue_sat,
+ bri_hue_sat.offset_cr_hue_sat);
+
+ temp_reg = readl(sdo_base + S5P_SDO_CCCON);
+
+ if (bri_hue_sat.bright_hue_sat_adj)
+ temp_reg &= ~S5P_SDO_COMPENSATION_BHS_ADJ_OFF;
+ else
+ temp_reg |= S5P_SDO_COMPENSATION_BHS_ADJ_OFF;
+
+ writel(temp_reg, sdo_base + S5P_SDO_CCCON);
+
+
+ writel(S5P_SDO_BRIGHTNESS_GAIN(bri_hue_sat.gain_brightness) |
+ S5P_SDO_BRIGHTNESS_OFFSET(bri_hue_sat.offset_brightness),
+ sdo_base + S5P_SDO_YSCALE);
+
+ writel(S5P_SDO_HS_CB_GAIN0(bri_hue_sat.gain0_cb_hue_sat) |
+ S5P_SDO_HS_CB_GAIN1(bri_hue_sat.gain1_cb_hue_sat),
+ sdo_base + S5P_SDO_CBSCALE);
+
+ writel(S5P_SDO_HS_CR_GAIN0(bri_hue_sat.gain0_cr_hue_sat) |
+ S5P_SDO_HS_CR_GAIN1(bri_hue_sat.gain1_cr_hue_sat),
+ sdo_base + S5P_SDO_CRSCALE);
+
+ writel(S5P_SDO_HS_CR_OFFSET(bri_hue_sat.offset_cr_hue_sat) |
+ S5P_SDO_HS_CB_OFFSET(bri_hue_sat.offset_cb_hue_sat),
+ sdo_base + S5P_SDO_CB_CR_OFFSET);
+}
+
+void s5p_sdo_set_cvbs_color_compensation(
+ struct s5p_sdo_cvbs_compensation cvbs_comp)
+{
+ u32 temp_reg = 0;
+
+ tvout_dbg("%d, %d, %d, %d, %d, %d\n",
+ cvbs_comp.cvbs_color_compen, cvbs_comp.y_lower_mid,
+ cvbs_comp.y_bottom, cvbs_comp.y_top,
+ cvbs_comp.y_upper_mid, cvbs_comp.radius);
+
+ temp_reg = readl(sdo_base + S5P_SDO_CCCON);
+
+ if (cvbs_comp.cvbs_color_compen)
+ temp_reg &= ~S5P_SDO_COMPENSATION_CVBS_COMP_OFF;
+ else
+ temp_reg |= S5P_SDO_COMPENSATION_CVBS_COMP_OFF;
+
+ writel(temp_reg, sdo_base + S5P_SDO_CCCON);
+
+
+ writel(S5P_SDO_Y_LOWER_MID_CVBS_CORN(cvbs_comp.y_lower_mid) |
+ S5P_SDO_Y_BOTTOM_CVBS_CORN(cvbs_comp.y_bottom),
+ sdo_base + S5P_SDO_CVBS_CC_Y1);
+
+ writel(S5P_SDO_Y_TOP_CVBS_CORN(cvbs_comp.y_top) |
+ S5P_SDO_Y_UPPER_MID_CVBS_CORN(cvbs_comp.y_upper_mid),
+ sdo_base + S5P_SDO_CVBS_CC_Y2);
+
+ writel(S5P_SDO_RADIUS_CVBS_CORN(cvbs_comp.radius),
+ sdo_base + S5P_SDO_CVBS_CC_C);
+}
+
+void s5p_sdo_set_component_porch(
+ u32 back_525, u32 front_525, u32 back_625, u32 front_625)
+{
+ tvout_dbg("%d, %d, %d, %d\n",
+ back_525, front_525, back_625, front_625);
+
+ writel(S5P_SDO_COMPONENT_525_BP(back_525) |
+ S5P_SDO_COMPONENT_525_FP(front_525),
+ sdo_base + S5P_SDO_CSC_525_PORCH);
+ writel(S5P_SDO_COMPONENT_625_BP(back_625) |
+ S5P_SDO_COMPONENT_625_FP(front_625),
+ sdo_base + S5P_SDO_CSC_625_PORCH);
+}
+
+void s5p_sdo_set_ch_xtalk_cancel_coef(u32 coeff2, u32 coeff1)
+{
+ tvout_dbg("%d, %d\n", coeff2, coeff1);
+
+ writel(S5P_SDO_XTALK_COEF02(coeff2) |
+ S5P_SDO_XTALK_COEF01(coeff1),
+ sdo_base + S5P_SDO_XTALK0);
+}
+
+void s5p_sdo_set_closed_caption(u32 display_cc, u32 non_display_cc)
+{
+ tvout_dbg("%d, %d\n", display_cc, non_display_cc);
+
+ writel(S5P_SDO_DISPLAY_CC_CAPTION(display_cc) |
+ S5P_SDO_NON_DISPLAY_CC_CAPTION(non_display_cc),
+ sdo_base + S5P_SDO_ARMCC);
+}
+
/*
 * s5p_sdo_set_wss525_data - build and write the 525-line (NTSC) WSS word:
 * copy permission, Macrovision/PSP mode, copy info, analog flag and
 * aspect ratio, plus the CGMS CRC.
 * @wss525: field values to encode
 *
 * Returns 0 on success, -1 for any out-of-range field.
 */
int s5p_sdo_set_wss525_data(struct s5p_sdo_525_data wss525)
{
	u32 temp_reg = 0;

	tvout_dbg("%d, %d, %d, %d\n",
		wss525.copy_permit, wss525.mv_psp,
		wss525.copy_info, wss525.display_ratio);

	switch (wss525.copy_permit) {
	case SDO_525_COPY_PERMIT:
		temp_reg = S5P_SDO_WORD2_WSS525_COPY_PERMIT;
		break;

	case SDO_525_ONECOPY_PERMIT:
		temp_reg = S5P_SDO_WORD2_WSS525_ONECOPY_PERMIT;
		break;

	case SDO_525_NOCOPY_PERMIT:
		temp_reg = S5P_SDO_WORD2_WSS525_NOCOPY_PERMIT;
		break;

	default:
		tvout_err("invalid copy_permit parameter(%d)\n",
			wss525.copy_permit);
		return -1;
	}

	switch (wss525.mv_psp) {
	case SDO_525_MV_PSP_OFF:
		temp_reg |= S5P_SDO_WORD2_WSS525_MV_PSP_OFF;
		break;

	case SDO_525_MV_PSP_ON_2LINE_BURST:
		temp_reg |= S5P_SDO_WORD2_WSS525_MV_PSP_ON_2LINE_BURST;
		break;

	case SDO_525_MV_PSP_ON_BURST_OFF:
		temp_reg |= S5P_SDO_WORD2_WSS525_MV_PSP_ON_BURST_OFF;
		break;

	case SDO_525_MV_PSP_ON_4LINE_BURST:
		temp_reg |= S5P_SDO_WORD2_WSS525_MV_PSP_ON_4LINE_BURST;
		break;

	default:
		tvout_err("invalid mv_psp parameter(%d)\n", wss525.mv_psp);
		return -1;
	}

	switch (wss525.copy_info) {
	case SDO_525_COPY_INFO:
		temp_reg |= S5P_SDO_WORD1_WSS525_COPY_INFO;
		break;

	case SDO_525_DEFAULT:
		temp_reg |= S5P_SDO_WORD1_WSS525_DEFAULT;
		break;

	default:
		tvout_err("invalid copy_info parameter(%d)\n",
			wss525.copy_info);
		return -1;
	}

	if (wss525.analog_on)
		temp_reg |= S5P_SDO_WORD2_WSS525_ANALOG_ON;
	else
		temp_reg |= S5P_SDO_WORD2_WSS525_ANALOG_OFF;

	/*
	 * NOTE(review): this switch matches display_ratio against the
	 * SDO_525_*COPY_PERMIT constants (copy-permission values) rather
	 * than aspect-ratio constants, even though it selects 4:3/16:9
	 * bits. It only works if the two enum sets share numeric values —
	 * verify against the s5p_sdo_525_data declaration.
	 */
	switch (wss525.display_ratio) {
	case SDO_525_COPY_PERMIT:
		temp_reg |= S5P_SDO_WORD0_WSS525_4_3_NORMAL;
		break;

	case SDO_525_ONECOPY_PERMIT:
		temp_reg |= S5P_SDO_WORD0_WSS525_16_9_ANAMORPIC;
		break;

	case SDO_525_NOCOPY_PERMIT:
		temp_reg |= S5P_SDO_WORD0_WSS525_4_3_LETTERBOX;
		break;

	default:
		tvout_err("invalid display_ratio parameter(%d)\n",
			wss525.display_ratio);
		return -1;
	}

	/* append the CGMS CRC computed over the payload bits */
	writel(temp_reg |
		S5P_SDO_CRC_WSS525(s5p_sdo_calc_wss_cgms_crc(temp_reg)),
		sdo_base + S5P_SDO_WSS525);

	return 0;
}
+
/*
 * s5p_sdo_set_wss625_data - build and write the 625-line (PAL) WSS word:
 * sound mode, copyright/copy-protection flags, subtitle and content
 * flags, color encoding, helper signal and aspect ratio.
 * @wss625: field values to encode
 *
 * Returns 0 on success, -1 for any out-of-range field.
 */
int s5p_sdo_set_wss625_data(struct s5p_sdo_625_data wss625)
{
	u32 temp_reg = 0;

	tvout_dbg("%d, %d, %d, %d, %d, %d, %d, %d, %d\n",
		wss625.surround_sound, wss625.copyright,
		wss625.copy_protection, wss625.text_subtitles,
		wss625.open_subtitles, wss625.camera_film,
		wss625.color_encoding, wss625.helper_signal,
		wss625.display_ratio);

	/* boolean flags: each picks one of two register bit patterns */
	if (wss625.surround_sound)
		temp_reg = S5P_SDO_WSS625_SURROUND_SOUND_ENABLE;
	else
		temp_reg = S5P_SDO_WSS625_SURROUND_SOUND_DISABLE;

	if (wss625.copyright)
		temp_reg |= S5P_SDO_WSS625_COPYRIGHT;
	else
		temp_reg |= S5P_SDO_WSS625_NO_COPYRIGHT;

	if (wss625.copy_protection)
		temp_reg |= S5P_SDO_WSS625_COPY_RESTRICTED;
	else
		temp_reg |= S5P_SDO_WSS625_COPY_NOT_RESTRICTED;

	if (wss625.text_subtitles)
		temp_reg |= S5P_SDO_WSS625_TELETEXT_SUBTITLES;
	else
		temp_reg |= S5P_SDO_WSS625_TELETEXT_NO_SUBTITLES;

	switch (wss625.open_subtitles) {
	case SDO_625_NO_OPEN_SUBTITLES:
		temp_reg |= S5P_SDO_WSS625_NO_OPEN_SUBTITLES;
		break;

	case SDO_625_INACT_OPEN_SUBTITLES:
		temp_reg |= S5P_SDO_WSS625_INACT_OPEN_SUBTITLES;
		break;

	case SDO_625_OUTACT_OPEN_SUBTITLES:
		temp_reg |= S5P_SDO_WSS625_OUTACT_OPEN_SUBTITLES;
		break;

	default:
		tvout_err("invalid open_subtitles parameter(%d)\n",
			wss625.open_subtitles);
		return -1;
	}

	switch (wss625.camera_film) {
	case SDO_625_CAMERA:
		temp_reg |= S5P_SDO_WSS625_CAMERA;
		break;

	case SDO_625_FILM:
		temp_reg |= S5P_SDO_WSS625_FILM;
		break;

	default:
		tvout_err("invalid camera_film parameter(%d)\n",
			wss625.camera_film);
		return -1;
	}

	switch (wss625.color_encoding) {
	case SDO_625_NORMAL_PAL:
		temp_reg |= S5P_SDO_WSS625_NORMAL_PAL;
		break;

	case SDO_625_MOTION_ADAPTIVE_COLORPLUS:
		temp_reg |= S5P_SDO_WSS625_MOTION_ADAPTIVE_COLORPLUS;
		break;

	default:
		tvout_err("invalid color_encoding parameter(%d)\n",
			wss625.color_encoding);
		return -1;
	}

	if (wss625.helper_signal)
		temp_reg |= S5P_SDO_WSS625_HELPER_SIG;
	else
		temp_reg |= S5P_SDO_WSS625_HELPER_NO_SIG;

	/* aspect-ratio group: one of the eight EN 300 294 codes */
	switch (wss625.display_ratio) {
	case SDO_625_4_3_FULL_576:
		temp_reg |= S5P_SDO_WSS625_4_3_FULL_576;
		break;

	case SDO_625_14_9_LETTERBOX_CENTER_504:
		temp_reg |= S5P_SDO_WSS625_14_9_LETTERBOX_CENTER_504;
		break;

	case SDO_625_14_9_LETTERBOX_TOP_504:
		temp_reg |= S5P_SDO_WSS625_14_9_LETTERBOX_TOP_504;
		break;

	case SDO_625_16_9_LETTERBOX_CENTER_430:
		temp_reg |= S5P_SDO_WSS625_16_9_LETTERBOX_CENTER_430;
		break;

	case SDO_625_16_9_LETTERBOX_TOP_430:
		temp_reg |= S5P_SDO_WSS625_16_9_LETTERBOX_TOP_430;
		break;

	case SDO_625_16_9_LETTERBOX_CENTER:
		temp_reg |= S5P_SDO_WSS625_16_9_LETTERBOX_CENTER;
		break;

	case SDO_625_14_9_FULL_CENTER_576:
		temp_reg |= S5P_SDO_WSS625_14_9_FULL_CENTER_576;
		break;

	case SDO_625_16_9_ANAMORPIC_576:
		temp_reg |= S5P_SDO_WSS625_16_9_ANAMORPIC_576;
		break;

	default:
		tvout_err("invalid display_ratio parameter(%d)\n",
			wss625.display_ratio);
		return -1;
	}

	/* 625-line WSS carries no CRC field, unlike the 525-line word */
	writel(temp_reg, sdo_base + S5P_SDO_WSS625);

	return 0;
}
+
/*
 * Build the 525-line (NTSC) CGMS-A data word from @cgmsa525, append the
 * CRC computed by s5p_sdo_calc_wss_cgms_crc(), and program it into the
 * S5P_SDO_CGMS525 register.
 *
 * Returns 0 on success, or -1 for any unrecognized field value (the
 * register is left untouched in that case).
 *
 * NOTE(review): the debug print below omits cgmsa525.analog_on even
 * though it is consumed further down.
 */
int s5p_sdo_set_cgmsa525_data(struct s5p_sdo_525_data cgmsa525)
{
	u32 temp_reg = 0;

	tvout_dbg("%d, %d, %d, %d\n",
		cgmsa525.copy_permit, cgmsa525.mv_psp,
		cgmsa525.copy_info, cgmsa525.display_ratio);

	switch (cgmsa525.copy_permit) {
	case SDO_525_COPY_PERMIT:
		temp_reg = S5P_SDO_WORD2_CGMS525_COPY_PERMIT;
		break;

	case SDO_525_ONECOPY_PERMIT:
		temp_reg = S5P_SDO_WORD2_CGMS525_ONECOPY_PERMIT;
		break;

	case SDO_525_NOCOPY_PERMIT:
		temp_reg = S5P_SDO_WORD2_CGMS525_NOCOPY_PERMIT;
		break;

	default:
		tvout_err("invalid copy_permit parameter(%d)\n",
			cgmsa525.copy_permit);
		return -1;
	}

	/* Macrovision pseudo-sync pulse / colorstripe setting */
	switch (cgmsa525.mv_psp) {
	case SDO_525_MV_PSP_OFF:
		temp_reg |= S5P_SDO_WORD2_CGMS525_MV_PSP_OFF;
		break;

	case SDO_525_MV_PSP_ON_2LINE_BURST:
		temp_reg |= S5P_SDO_WORD2_CGMS525_MV_PSP_ON_2LINE_BURST;
		break;

	case SDO_525_MV_PSP_ON_BURST_OFF:
		temp_reg |= S5P_SDO_WORD2_CGMS525_MV_PSP_ON_BURST_OFF;
		break;

	case SDO_525_MV_PSP_ON_4LINE_BURST:
		temp_reg |= S5P_SDO_WORD2_CGMS525_MV_PSP_ON_4LINE_BURST;
		break;

	default:
		tvout_err("invalid mv_psp parameter(%d)\n", cgmsa525.mv_psp);
		return -1;
	}

	switch (cgmsa525.copy_info) {
	case SDO_525_COPY_INFO:
		temp_reg |= S5P_SDO_WORD1_CGMS525_COPY_INFO;
		break;

	case SDO_525_DEFAULT:
		temp_reg |= S5P_SDO_WORD1_CGMS525_DEFAULT;
		break;

	default:
		tvout_err("invalid copy_info parameter(%d)\n",
			cgmsa525.copy_info);
		return -1;
	}

	if (cgmsa525.analog_on)
		temp_reg |= S5P_SDO_WORD2_CGMS525_ANALOG_ON;
	else
		temp_reg |= S5P_SDO_WORD2_CGMS525_ANALOG_OFF;

	/*
	 * NOTE(review): the aspect-ratio cases reuse the
	 * SDO_525_*COPY_PERMIT enum constants; presumably they alias the
	 * numeric codes 0/1/2 for normal / anamorphic / letterbox --
	 * confirm against the enum definition in the header.
	 */
	switch (cgmsa525.display_ratio) {
	case SDO_525_COPY_PERMIT:
		temp_reg |= S5P_SDO_WORD0_CGMS525_4_3_NORMAL;
		break;

	case SDO_525_ONECOPY_PERMIT:
		temp_reg |= S5P_SDO_WORD0_CGMS525_16_9_ANAMORPIC;
		break;

	case SDO_525_NOCOPY_PERMIT:
		temp_reg |= S5P_SDO_WORD0_CGMS525_4_3_LETTERBOX;
		break;

	default:
		tvout_err("invalid display_ratio parameter(%d)\n",
			cgmsa525.display_ratio);
		return -1;
	}

	writel(temp_reg | S5P_SDO_CRC_CGMS525(
		s5p_sdo_calc_wss_cgms_crc(temp_reg)),
		sdo_base + S5P_SDO_CGMS525);

	return 0;
}
+
+
/*
 * Build the 625-line (PAL) CGMS-A data word from @cgmsa625 and program
 * it into the S5P_SDO_CGMS625 register.  Structurally identical to
 * s5p_sdo_set_wss625_data() but targets the CGMS625 bit definitions and
 * register.
 *
 * Returns 0 on success, or -1 for any unrecognized field value (the
 * register is left untouched in that case).
 */
int s5p_sdo_set_cgmsa625_data(struct s5p_sdo_625_data cgmsa625)
{
	u32 temp_reg = 0;

	tvout_dbg("%d, %d, %d, %d, %d, %d, %d, %d, %d\n",
		cgmsa625.surround_sound, cgmsa625.copyright,
		cgmsa625.copy_protection, cgmsa625.text_subtitles,
		cgmsa625.open_subtitles, cgmsa625.camera_film,
		cgmsa625.color_encoding, cgmsa625.helper_signal,
		cgmsa625.display_ratio);

	/* two-state flags: each picks one of two bit patterns */
	if (cgmsa625.surround_sound)
		temp_reg = S5P_SDO_CGMS625_SURROUND_SOUND_ENABLE;
	else
		temp_reg = S5P_SDO_CGMS625_SURROUND_SOUND_DISABLE;

	if (cgmsa625.copyright)
		temp_reg |= S5P_SDO_CGMS625_COPYRIGHT;
	else
		temp_reg |= S5P_SDO_CGMS625_NO_COPYRIGHT;

	if (cgmsa625.copy_protection)
		temp_reg |= S5P_SDO_CGMS625_COPY_RESTRICTED;
	else
		temp_reg |= S5P_SDO_CGMS625_COPY_NOT_RESTRICTED;

	if (cgmsa625.text_subtitles)
		temp_reg |= S5P_SDO_CGMS625_TELETEXT_SUBTITLES;
	else
		temp_reg |= S5P_SDO_CGMS625_TELETEXT_NO_SUBTITLES;

	/* multi-state fields: any value outside the known codes is rejected */
	switch (cgmsa625.open_subtitles) {
	case SDO_625_NO_OPEN_SUBTITLES:
		temp_reg |= S5P_SDO_CGMS625_NO_OPEN_SUBTITLES;
		break;

	case SDO_625_INACT_OPEN_SUBTITLES:
		temp_reg |= S5P_SDO_CGMS625_INACT_OPEN_SUBTITLES;
		break;

	case SDO_625_OUTACT_OPEN_SUBTITLES:
		temp_reg |= S5P_SDO_CGMS625_OUTACT_OPEN_SUBTITLES;
		break;

	default:
		tvout_err("invalid open_subtitles parameter(%d)\n",
			cgmsa625.open_subtitles);
		return -1;
	}

	switch (cgmsa625.camera_film) {
	case SDO_625_CAMERA:
		temp_reg |= S5P_SDO_CGMS625_CAMERA;
		break;

	case SDO_625_FILM:
		temp_reg |= S5P_SDO_CGMS625_FILM;
		break;

	default:
		tvout_err("invalid camera_film parameter(%d)\n",
			cgmsa625.camera_film);
		return -1;
	}

	switch (cgmsa625.color_encoding) {
	case SDO_625_NORMAL_PAL:
		temp_reg |= S5P_SDO_CGMS625_NORMAL_PAL;
		break;

	case SDO_625_MOTION_ADAPTIVE_COLORPLUS:
		temp_reg |= S5P_SDO_CGMS625_MOTION_ADAPTIVE_COLORPLUS;
		break;

	default:
		tvout_err("invalid color_encoding parameter(%d)\n",
			cgmsa625.color_encoding);
		return -1;
	}

	if (cgmsa625.helper_signal)
		temp_reg |= S5P_SDO_CGMS625_HELPER_SIG;
	else
		temp_reg |= S5P_SDO_CGMS625_HELPER_NO_SIG;

	switch (cgmsa625.display_ratio) {
	case SDO_625_4_3_FULL_576:
		temp_reg |= S5P_SDO_CGMS625_4_3_FULL_576;
		break;

	case SDO_625_14_9_LETTERBOX_CENTER_504:
		temp_reg |= S5P_SDO_CGMS625_14_9_LETTERBOX_CENTER_504;
		break;

	case SDO_625_14_9_LETTERBOX_TOP_504:
		temp_reg |= S5P_SDO_CGMS625_14_9_LETTERBOX_TOP_504;
		break;

	case SDO_625_16_9_LETTERBOX_CENTER_430:
		temp_reg |= S5P_SDO_CGMS625_16_9_LETTERBOX_CENTER_430;
		break;

	case SDO_625_16_9_LETTERBOX_TOP_430:
		temp_reg |= S5P_SDO_CGMS625_16_9_LETTERBOX_TOP_430;
		break;

	case SDO_625_16_9_LETTERBOX_CENTER:
		temp_reg |= S5P_SDO_CGMS625_16_9_LETTERBOX_CENTER;
		break;

	case SDO_625_14_9_FULL_CENTER_576:
		temp_reg |= S5P_SDO_CGMS625_14_9_FULL_CENTER_576;
		break;

	case SDO_625_16_9_ANAMORPIC_576:
		temp_reg |= S5P_SDO_CGMS625_16_9_ANAMORPIC_576;
		break;

	default:
		tvout_err("invalid display_ratio parameter(%d)\n",
			cgmsa625.display_ratio);
		return -1;
	}

	writel(temp_reg, sdo_base + S5P_SDO_CGMS625);

	return 0;
}
+
/*
 * Configure the SDO encoder for a TV standard (@disp_mode) and a DAC
 * output routing (@order), then write the result to S5P_SDO_CONFIG.
 *
 * For each standard this also programs the video scale configuration
 * and the matching default anti-alias filter coefficients.  Output is
 * always composite + interlaced.
 *
 * Returns 0 on success, -1 for an unknown standard or routing (the
 * config register is not written in that case, but the scale/filter
 * helpers may already have run for a valid @disp_mode).
 */
int s5p_sdo_set_display_mode(
	enum s5p_tvout_disp_mode disp_mode, enum s5p_sdo_order order)
{
	u32 temp_reg = 0;

	tvout_dbg("%d, %d\n", disp_mode, order);

	switch (disp_mode) {
	case TVOUT_NTSC_M:
		temp_reg |= S5P_SDO_NTSC_M;
		s5p_sdo_set_video_scale_cfg(
			SDO_LEVEL_75IRE,
			SDO_VTOS_RATIO_10_4);

		s5p_sdo_set_antialias_filter_coeff_default(
			SDO_LEVEL_75IRE,
			SDO_VTOS_RATIO_10_4);
		break;

	case TVOUT_PAL_BDGHI:
		temp_reg |= S5P_SDO_PAL_BGHID;
		s5p_sdo_set_video_scale_cfg(
			SDO_LEVEL_0IRE,
			SDO_VTOS_RATIO_7_3);

		s5p_sdo_set_antialias_filter_coeff_default(
			SDO_LEVEL_0IRE,
			SDO_VTOS_RATIO_7_3);
		break;

	case TVOUT_PAL_M:
		temp_reg |= S5P_SDO_PAL_M;
		s5p_sdo_set_video_scale_cfg(
			SDO_LEVEL_0IRE,
			SDO_VTOS_RATIO_7_3);

		s5p_sdo_set_antialias_filter_coeff_default(
			SDO_LEVEL_0IRE,
			SDO_VTOS_RATIO_7_3);
		break;

	case TVOUT_PAL_N:
		temp_reg |= S5P_SDO_PAL_N;
		s5p_sdo_set_video_scale_cfg(
			SDO_LEVEL_0IRE,
			SDO_VTOS_RATIO_7_3);

		/*
		 * NOTE(review): PAL-N pairs a 0IRE scale with 75IRE/10:4
		 * anti-alias coefficients, unlike every other PAL case
		 * here -- confirm this asymmetry is intentional.
		 */
		s5p_sdo_set_antialias_filter_coeff_default(
			SDO_LEVEL_75IRE,
			SDO_VTOS_RATIO_10_4);
		break;

	case TVOUT_PAL_NC:
		temp_reg |= S5P_SDO_PAL_NC;
		s5p_sdo_set_video_scale_cfg(
			SDO_LEVEL_0IRE,
			SDO_VTOS_RATIO_7_3);

		s5p_sdo_set_antialias_filter_coeff_default(
			SDO_LEVEL_0IRE,
			SDO_VTOS_RATIO_7_3);
		break;

	case TVOUT_PAL_60:
		temp_reg |= S5P_SDO_PAL_60;
		s5p_sdo_set_video_scale_cfg(
			SDO_LEVEL_0IRE,
			SDO_VTOS_RATIO_7_3);
		s5p_sdo_set_antialias_filter_coeff_default(
			SDO_LEVEL_0IRE,
			SDO_VTOS_RATIO_7_3);
		break;

	case TVOUT_NTSC_443:
		temp_reg |= S5P_SDO_NTSC_443;
		s5p_sdo_set_video_scale_cfg(
			SDO_LEVEL_75IRE,
			SDO_VTOS_RATIO_10_4);
		s5p_sdo_set_antialias_filter_coeff_default(
			SDO_LEVEL_75IRE,
			SDO_VTOS_RATIO_10_4);
		break;

	default:
		tvout_err("invalid disp_mode parameter(%d)\n", disp_mode);
		return -1;
	}

	temp_reg |= S5P_SDO_COMPOSITE | S5P_SDO_INTERLACED;

	/* map the requested signal order onto the three DAC outputs */
	switch (order) {

	case SDO_O_ORDER_COMPOSITE_CVBS_Y_C:
		temp_reg |= S5P_SDO_DAC2_CVBS | S5P_SDO_DAC1_Y |
			S5P_SDO_DAC0_C;
		break;

	case SDO_O_ORDER_COMPOSITE_CVBS_C_Y:
		temp_reg |= S5P_SDO_DAC2_CVBS | S5P_SDO_DAC1_C |
			S5P_SDO_DAC0_Y;
		break;

	case SDO_O_ORDER_COMPOSITE_Y_C_CVBS:
		temp_reg |= S5P_SDO_DAC2_Y | S5P_SDO_DAC1_C |
			S5P_SDO_DAC0_CVBS;
		break;

	case SDO_O_ORDER_COMPOSITE_Y_CVBS_C:
		temp_reg |= S5P_SDO_DAC2_Y | S5P_SDO_DAC1_CVBS |
			S5P_SDO_DAC0_C;
		break;

	case SDO_O_ORDER_COMPOSITE_C_CVBS_Y:
		temp_reg |= S5P_SDO_DAC2_C | S5P_SDO_DAC1_CVBS |
			S5P_SDO_DAC0_Y;
		break;

	case SDO_O_ORDER_COMPOSITE_C_Y_CVBS:
		temp_reg |= S5P_SDO_DAC2_C | S5P_SDO_DAC1_Y |
			S5P_SDO_DAC0_CVBS;
		break;

	default:
		tvout_err("invalid order parameter(%d)\n", order);
		return -1;
	}

	writel(temp_reg, sdo_base + S5P_SDO_CONFIG);

	return 0;
}
+
+void s5p_sdo_clock_on(bool on)
+{
+ tvout_dbg("%d\n", on);
+
+ if (on)
+ writel(S5P_SDO_TVOUT_CLOCK_ON, sdo_base + S5P_SDO_CLKCON);
+ else {
+ mdelay(100);
+
+ writel(S5P_SDO_TVOUT_CLOCK_OFF, sdo_base + S5P_SDO_CLKCON);
+ }
+}
+
+void s5p_sdo_dac_on(bool on)
+{
+ tvout_dbg("%d\n", on);
+
+ if (on) {
+ writel(S5P_SDO_POWER_ON_DAC, sdo_base + S5P_SDO_DAC);
+
+ writel(S5P_DAC_ENABLE, S5P_DAC_CONTROL);
+ } else {
+ writel(S5P_DAC_DISABLE, S5P_DAC_CONTROL);
+
+ writel(S5P_SDO_POWER_DOWN_DAC, sdo_base + S5P_SDO_DAC);
+ }
+}
+
+void s5p_sdo_sw_reset(bool active)
+{
+ tvout_dbg("%d\n", active);
+
+ if (active)
+ writel(readl(sdo_base + S5P_SDO_CLKCON) |
+ S5P_SDO_TVOUT_SW_RESET,
+ sdo_base + S5P_SDO_CLKCON);
+ else
+ writel(readl(sdo_base + S5P_SDO_CLKCON) &
+ ~S5P_SDO_TVOUT_SW_RESET,
+ sdo_base + S5P_SDO_CLKCON);
+}
+
+void s5p_sdo_set_interrupt_enable(bool vsync_intc_en)
+{
+ tvout_dbg("%d\n", vsync_intc_en);
+
+ if (vsync_intc_en)
+ writel(readl(sdo_base + S5P_SDO_IRQMASK) &
+ ~S5P_SDO_VSYNC_IRQ_DISABLE,
+ sdo_base + S5P_SDO_IRQMASK);
+ else
+ writel(readl(sdo_base + S5P_SDO_IRQMASK) |
+ S5P_SDO_VSYNC_IRQ_DISABLE,
+ sdo_base + S5P_SDO_IRQMASK);
+}
+
+void s5p_sdo_clear_interrupt_pending(void)
+{
+ writel(readl(sdo_base + S5P_SDO_IRQ) | S5P_SDO_VSYNC_IRQ_PEND,
+ sdo_base + S5P_SDO_IRQ);
+}
+
/*
 * Record the ioremapped SDO register base; must be called before any
 * other s5p_sdo_* function touches the hardware.
 */
void s5p_sdo_init(void __iomem *addr)
{
	sdo_base = addr;
}
diff --git a/drivers/media/video/samsung/tvout/hw_if/vp.c b/drivers/media/video/samsung/tvout/hw_if/vp.c
new file mode 100644
index 0000000..71d137f
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/hw_if/vp.c
@@ -0,0 +1,747 @@
+/* linux/drivers/media/video/samsung/tvout/hw_if/vp.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Hardware interface functions for video processor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <mach/regs-vp.h>
+
+#include "../s5p_tvout_common_lib.h"
+#include "hw_if.h"
+
+#undef tvout_dbg
+
+#ifdef CONFIG_VP_DEBUG
+#define tvout_dbg(fmt, ...) \
+ printk(KERN_INFO "\t[VP] %s(): " fmt, \
+ __func__, ##__VA_ARGS__)
+#else
+#define tvout_dbg(fmt, ...)
+#endif
+
+/*
+ * Area for definitions to be used in only this file.
+ * This area can include #define, enum and struct defintition.
+ */
+#define H_RATIO(s_w, d_w) (((s_w) << 16) / (d_w))
+#define V_RATIO(s_h, d_h, ipc_2d) (((s_h) << ((ipc_2d) ? 17 : 16)) / (d_h))
+
+enum s5p_vp_poly_coeff {
+ VP_POLY8_Y0_LL = 0,
+ VP_POLY8_Y0_LH,
+ VP_POLY8_Y0_HL,
+ VP_POLY8_Y0_HH,
+ VP_POLY8_Y1_LL,
+ VP_POLY8_Y1_LH,
+ VP_POLY8_Y1_HL,
+ VP_POLY8_Y1_HH,
+ VP_POLY8_Y2_LL,
+ VP_POLY8_Y2_LH,
+ VP_POLY8_Y2_HL,
+ VP_POLY8_Y2_HH,
+ VP_POLY8_Y3_LL,
+ VP_POLY8_Y3_LH,
+ VP_POLY8_Y3_HL,
+ VP_POLY8_Y3_HH,
+ VP_POLY4_Y0_LL = 32,
+ VP_POLY4_Y0_LH,
+ VP_POLY4_Y0_HL,
+ VP_POLY4_Y0_HH,
+ VP_POLY4_Y1_LL,
+ VP_POLY4_Y1_LH,
+ VP_POLY4_Y1_HL,
+ VP_POLY4_Y1_HH,
+ VP_POLY4_Y2_LL,
+ VP_POLY4_Y2_LH,
+ VP_POLY4_Y2_HL,
+ VP_POLY4_Y2_HH,
+ VP_POLY4_Y3_LL,
+ VP_POLY4_Y3_LH,
+ VP_POLY4_Y3_HL,
+ VP_POLY4_Y3_HH,
+ VP_POLY4_C0_LL,
+ VP_POLY4_C0_LH,
+ VP_POLY4_C0_HL,
+ VP_POLY4_C0_HH,
+ VP_POLY4_C1_LL,
+ VP_POLY4_C1_LH,
+ VP_POLY4_C1_HL,
+ VP_POLY4_C1_HH
+};
+
+enum s5p_vp_filter_h_pp {
+ VP_PP_H_NORMAL,
+ VP_PP_H_8_9,
+ VP_PP_H_1_2,
+ VP_PP_H_1_3,
+ VP_PP_H_1_4
+};
+
+enum s5p_vp_filter_v_pp {
+ VP_PP_V_NORMAL,
+ VP_PP_V_5_6,
+ VP_PP_V_3_4,
+ VP_PP_V_1_2,
+ VP_PP_V_1_3,
+ VP_PP_V_1_4
+};
+
+/*
+ * Area for global variables to be used in only this file.
+ */
+
+static void __iomem *vp_base;
+
+/* Horizontal Y 8tap */
+const signed char g_s_vp8tap_coef_y_h[] = {
+ /* VP_PP_H_NORMAL */
+ 0, 0, 0, 0, 127, 0, 0, 0,
+ 0, 1, -2, 8, 126, -6, 2, -1,
+ 0, 1, -5, 16, 125, -12, 4, -1,
+ 0, 2, -8, 25, 121, -16, 5, -1,
+ -1, 3, -10, 35, 114, -18, 6, -1,
+ -1, 4, -13, 46, 107, -20, 6, -1,
+ -1, 5, -16, 57, 99, -21, 6, -1,
+ -1, 5, -18, 68, 89, -20, 6, -1,
+ -1, 6, -20, 79, 79, -20, 6, -1,
+ -1, 6, -20, 89, 68, -18, 5, -1,
+ -1, 6, -21, 99, 57, -16, 5, -1,
+ -1, 6, -20, 107, 46, -13, 4, -1,
+ -1, 6, -18, 114, 35, -10, 3, -1,
+ -1, 5, -16, 121, 25, -8, 2, 0,
+ -1, 4, -12, 125, 16, -5, 1, 0,
+ -1, 2, -6, 126, 8, -2, 1, 0,
+
+ /* VP_PP_H_8_9 */
+ 0, 3, -7, 12, 112, 12, -7, 3,
+ -1, 3, -9, 19, 113, 6, -5, 2,
+ -1, 3, -11, 27, 111, 0, -3, 2,
+ -1, 4, -13, 35, 108, -5, -1, 1,
+ -1, 4, -14, 43, 104, -9, 0, 1,
+ -1, 5, -16, 52, 99, -12, 1, 0,
+ -1, 5, -17, 61, 92, -14, 2, 0,
+ 0, 4, -17, 69, 85, -16, 3, 0,
+ 0, 4, -17, 77, 77, -17, 4, 0,
+ 0, 3, -16, 85, 69, -17, 4, 0,
+ 0, 2, -14, 92, 61, -17, 5, -1,
+ 0, 1, -12, 99, 52, -16, 5, -1,
+ 1, 0, -9, 104, 43, -14, 4, -1,
+ 1, -1, -5, 108, 35, -13, 4, -1,
+ 2, -3, 0, 111, 27, -11, 3, -1,
+ 2, -5, 6, 113, 19, -9, 3, -1,
+
+ /* VP_PP_H_1_2 */
+ 0, -3, 0, 35, 64, 35, 0, -3,
+ 0, -3, 1, 38, 64, 32, -1, -3,
+ 0, -3, 2, 41, 63, 29, -2, -2,
+ 0, -4, 4, 43, 63, 27, -3, -2,
+ 0, -4, 5, 46, 62, 24, -3, -2,
+ 0, -4, 7, 49, 60, 21, -3, -2,
+ -1, -4, 9, 51, 59, 19, -4, -1,
+ -1, -4, 12, 53, 57, 16, -4, -1,
+ -1, -4, 14, 55, 55, 14, -4, -1,
+ -1, -4, 16, 57, 53, 12, -4, -1,
+ -1, -4, 19, 59, 51, 9, -4, -1,
+ -2, -3, 21, 60, 49, 7, -4, 0,
+ -2, -3, 24, 62, 46, 5, -4, 0,
+ -2, -3, 27, 63, 43, 4, -4, 0,
+ -2, -2, 29, 63, 41, 2, -3, 0,
+ -3, -1, 32, 64, 38, 1, -3, 0,
+
+ /* VP_PP_H_1_3 */
+ 0, 0, 10, 32, 44, 32, 10, 0,
+ -1, 0, 11, 33, 45, 31, 9, 0,
+ -1, 0, 12, 35, 45, 29, 8, 0,
+ -1, 1, 13, 36, 44, 28, 7, 0,
+ -1, 1, 15, 37, 44, 26, 6, 0,
+ -1, 2, 16, 38, 43, 25, 5, 0,
+ -1, 2, 18, 39, 43, 23, 5, -1,
+ -1, 3, 19, 40, 42, 22, 4, -1,
+ -1, 3, 21, 41, 41, 21, 3, -1,
+ -1, 4, 22, 42, 40, 19, 3, -1,
+ -1, 5, 23, 43, 39, 18, 2, -1,
+ 0, 5, 25, 43, 38, 16, 2, -1,
+ 0, 6, 26, 44, 37, 15, 1, -1,
+ 0, 7, 28, 44, 36, 13, 1, -1,
+ 0, 8, 29, 45, 35, 12, 0, -1,
+ 0, 9, 31, 45, 33, 11, 0, -1,
+
+ /* VP_PP_H_1_4 */
+ 0, 2, 13, 30, 38, 30, 13, 2,
+ 0, 3, 14, 30, 38, 29, 12, 2,
+ 0, 3, 15, 31, 38, 28, 11, 2,
+ 0, 4, 16, 32, 38, 27, 10, 1,
+ 0, 4, 17, 33, 37, 26, 10, 1,
+ 0, 5, 18, 34, 37, 24, 9, 1,
+ 0, 5, 19, 34, 37, 24, 8, 1,
+ 1, 6, 20, 35, 36, 22, 7, 1,
+ 1, 6, 21, 36, 36, 21, 6, 1,
+ 1, 7, 22, 36, 35, 20, 6, 1,
+ 1, 8, 24, 37, 34, 19, 5, 0,
+ 1, 9, 24, 37, 34, 18, 5, 0,
+ 1, 10, 26, 37, 33, 17, 4, 0,
+ 1, 10, 27, 38, 32, 16, 4, 0,
+ 2, 11, 28, 38, 31, 15, 3, 0,
+ 2, 12, 29, 38, 30, 14, 3, 0
+};
+
+/* Horizontal C 4tap */
+const signed char g_s_vp4tap_coef_c_h[] = {
+ /* VP_PP_H_NORMAL */
+ 0, 0, 128, 0, 0, 5, 126, -3,
+ -1, 11, 124, -6, -1, 19, 118, -8,
+ -2, 27, 111, -8, -3, 37, 102, -8,
+ -4, 48, 92, -8, -5, 59, 81, -7,
+ -6, 70, 70, -6, -7, 81, 59, -5,
+ -8, 92, 48, -4, -8, 102, 37, -3,
+ -8, 111, 27, -2, -8, 118, 19, -1,
+ -6, 124, 11, -1, -3, 126, 5, 0,
+
+ /* VP_PP_H_8_9 */
+ 0, 8, 112, 8, -1, 13, 113, 3,
+ -2, 19, 111, 0, -2, 26, 107, -3,
+ -3, 34, 101, -4, -3, 42, 94, -5,
+ -4, 51, 86, -5, -5, 60, 78, -5,
+ -5, 69, 69, -5, -5, 78, 60, -5,
+ -5, 86, 51, -4, -5, 94, 42, -3,
+ -4, 101, 34, -3, -3, 107, 26, -2,
+ 0, 111, 19, -2, 3, 113, 13, -1,
+
+ /* VP_PP_H_1_2 */
+ 0, 26, 76, 26, 0, 30, 76, 22,
+ 0, 34, 75, 19, 1, 38, 73, 16,
+ 1, 43, 71, 13, 2, 47, 69, 10,
+ 3, 51, 66, 8, 4, 55, 63, 6,
+ 5, 59, 59, 5, 6, 63, 55, 4,
+ 8, 66, 51, 3, 10, 69, 47, 2,
+ 13, 71, 43, 1, 16, 73, 38, 1,
+ 19, 75, 34, 0, 22, 76, 30, 0,
+
+ /* VP_PP_H_1_3 */
+ 0, 30, 68, 30, 2, 33, 66, 27,
+ 3, 36, 66, 23, 3, 39, 65, 21,
+ 4, 43, 63, 18, 5, 46, 62, 15,
+ 6, 49, 60, 13, 8, 52, 57, 11,
+ 9, 55, 55, 9, 11, 57, 52, 8,
+ 13, 60, 49, 6, 15, 62, 46, 5,
+ 18, 63, 43, 4, 21, 65, 39, 3,
+ 23, 66, 36, 3, 27, 66, 33, 2,
+
+ /* VP_PP_H_1_4 */
+ 0, 31, 66, 31, 3, 34, 63, 28,
+ 4, 37, 62, 25, 4, 40, 62, 22,
+ 5, 43, 61, 19, 6, 46, 59, 17,
+ 7, 48, 58, 15, 9, 51, 55, 13,
+ 11, 53, 53, 11, 13, 55, 51, 9,
+ 15, 58, 48, 7, 17, 59, 46, 6,
+ 19, 61, 43, 5, 22, 62, 40, 4,
+ 25, 62, 37, 4, 28, 63, 34, 3,
+};
+
+
+/* Vertical Y 4tap */
+const signed char g_s_vp4tap_coef_y_v[] = {
+ /* VP_PP_V_NORMAL */
+ 0, 0, 127, 0, 0, 5, 126, -3,
+ -1, 11, 124, -6, -1, 19, 118, -8,
+ -2, 27, 111, -8, -3, 37, 102, -8,
+ -4, 48, 92, -8, -5, 59, 81, -7,
+ -6, 70, 70, -6, -7, 81, 59, -5,
+ -8, 92, 48, -4, -8, 102, 37, -3,
+ -8, 111, 27, -2, -8, 118, 19, -1,
+ -6, 124, 11, -1, -3, 126, 5, 0,
+
+ /* VP_PP_V_5_6 */
+ 0, 11, 106, 11, -2, 16, 107, 7,
+ -2, 22, 105, 3, -2, 29, 101, 0,
+ -3, 36, 96, -1, -3, 44, 90, -3,
+ -4, 52, 84, -4, -4, 60, 76, -4,
+ -4, 68, 68, -4, -4, 76, 60, -4,
+ -4, 84, 52, -4, -3, 90, 44, -3,
+ -1, 96, 36, -3, 0, 101, 29, -2,
+ 3, 105, 22, -2, 7, 107, 16, -2,
+
+ /* VP_PP_V_3_4 */
+ 0, 15, 98, 15, -2, 21, 97, 12,
+ -2, 26, 96, 8, -2, 32, 93, 5,
+ -2, 39, 89, 2, -2, 46, 84, 0,
+ -3, 53, 79, -1, -2, 59, 73, -2,
+ -2, 66, 66, -2, -2, 73, 59, -2,
+ -1, 79, 53, -3, 0, 84, 46, -2,
+ 2, 89, 39, -2, 5, 93, 32, -2,
+ 8, 96, 26, -2, 12, 97, 21, -2,
+
+ /* VP_PP_V_1_2 */
+ 0, 26, 76, 26, 0, 30, 76, 22,
+ 0, 34, 75, 19, 1, 38, 73, 16,
+ 1, 43, 71, 13, 2, 47, 69, 10,
+ 3, 51, 66, 8, 4, 55, 63, 6,
+ 5, 59, 59, 5, 6, 63, 55, 4,
+ 8, 66, 51, 3, 10, 69, 47, 2,
+ 13, 71, 43, 1, 16, 73, 38, 1,
+ 19, 75, 34, 0, 22, 76, 30, 0,
+
+ /* VP_PP_V_1_3 */
+ 0, 30, 68, 30, 2, 33, 66, 27,
+ 3, 36, 66, 23, 3, 39, 65, 21,
+ 4, 43, 63, 18, 5, 46, 62, 15,
+ 6, 49, 60, 13, 8, 52, 57, 11,
+ 9, 55, 55, 9, 11, 57, 52, 8,
+ 13, 60, 49, 6, 15, 62, 46, 5,
+ 18, 63, 43, 4, 21, 65, 39, 3,
+ 23, 66, 36, 3, 27, 66, 33, 2,
+
+ /* VP_PP_V_1_4 */
+ 0, 31, 66, 31, 3, 34, 63, 28,
+ 4, 37, 62, 25, 4, 40, 62, 22,
+ 5, 43, 61, 19, 6, 46, 59, 17,
+ 7, 48, 58, 15, 9, 51, 55, 13,
+ 11, 53, 53, 11, 13, 55, 51, 9,
+ 15, 58, 48, 7, 17, 59, 46, 6,
+ 19, 61, 43, 5, 22, 62, 40, 4,
+ 25, 62, 37, 4, 28, 63, 34, 3
+};
+
+/*
+ * Area for functions to be used in only this file.
+ * Functions of this area are defined by static
+ */
/*
 * Write one packed polyphase-filter coefficient word (four signed
 * 8-bit taps, @ch0 in the most significant byte) to the register
 * selected by @poly_coeff.
 *
 * Valid indices are VP_POLY8_Y0_LL..VP_POLY8_Y3_HH and
 * VP_POLY4_Y0_LL..VP_POLY4_C1_HH; the numeric gap between the two
 * ranges is rejected.  Coefficient registers sit 4 bytes apart
 * starting at S5P_VP_POLY8_Y0_LL.
 *
 * Returns 0 on success, -1 for an out-of-range index.
 */
static int s5p_vp_set_poly_filter_coef(
	enum s5p_vp_poly_coeff poly_coeff,
	signed char ch0, signed char ch1,
	signed char ch2, signed char ch3)
{
	if (poly_coeff > VP_POLY4_C1_HH || poly_coeff < VP_POLY8_Y0_LL ||
		(poly_coeff > VP_POLY8_Y3_HH && poly_coeff < VP_POLY4_Y0_LL)) {
		tvout_err("invaild poly_coeff parameter\n");

		return -1;
	}

	/* pack the four taps into one 32-bit word, ch0 highest byte */
	writel((((0xff & ch0) << 24) | ((0xff & ch1) << 16) |
		((0xff & ch2) << 8) | (0xff & ch3)),
		vp_base + S5P_VP_POLY8_Y0_LL + poly_coeff * 4);

	return 0;
}
+
+/*
+ * Area for functions to be used by other files.
+ * Functions of this area must be defined in header file.
+ */
/*
 * Select and load the default polyphase filter coefficient sets for
 * the given scaling factors: the horizontal 8-tap luma set, the
 * horizontal 4-tap chroma set and the vertical 4-tap luma set.
 *
 * The filter bank is chosen from the fixed-point (16.16) scale ratios;
 * coefficients are then transposed out of the g_s_vp* tables into the
 * hardware registers via s5p_vp_set_poly_filter_coef().
 */
void s5p_vp_set_poly_filter_coef_default(
	u32 src_width, u32 src_height,
	u32 dst_width, u32 dst_height, bool ipc_2d)
{
	enum s5p_vp_filter_h_pp e_h_filter;
	enum s5p_vp_filter_v_pp e_v_filter;
	u8 *poly_flt_coeff;
	int i, j;

	u32 h_ratio = H_RATIO(src_width, dst_width);
	u32 v_ratio = V_RATIO(src_height, dst_height, ipc_2d);

	/*
	 * For the real interlace mode, the vertical ratio should be
	 * used after divided by 2. Because in the interlace mode, all
	 * the VP output is used for SDOUT display and it should be the
	 * same as one field of the progressive mode. Therefore the same
	 * filter coefficients should be used for the same the final
	 * output video. When half of the interlace V_RATIO is same as
	 * the progressive V_RATIO, the final output video scale is same.
	 */

	if (h_ratio <= (0x1 << 16)) /* 720->720 or zoom in */
		e_h_filter = VP_PP_H_NORMAL;
	else if (h_ratio <= (0x9 << 13)) /* 720->640 */
		e_h_filter = VP_PP_H_8_9;
	else if (h_ratio <= (0x1 << 17)) /* 2->1 */
		e_h_filter = VP_PP_H_1_2;
	else if (h_ratio <= (0x3 << 16)) /* 3->1 */
		e_h_filter = VP_PP_H_1_3;
	else
		e_h_filter = VP_PP_H_1_4; /* 4->1 */

	/* Vertical Y 4tap */

	/*
	 * NOTE(review): the 3_4 branch fires at v_ratio <= 1.25
	 * (0x5 << 14 in 16.16) and the 5_6 branch at <= 1.5 (0x3 << 15),
	 * so a mild 6->5 downscale (ratio 1.2) picks the 3_4 filter
	 * while a 4->3 downscale (ratio 1.33) picks the 5_6 filter.
	 * The branch labels suggest these two assignments may be
	 * swapped -- confirm against the intended coefficient mapping.
	 */
	if (v_ratio <= (0x1 << 16)) /* 720->720 or zoom in*/
		e_v_filter = VP_PP_V_NORMAL;
	else if (v_ratio <= (0x5 << 14)) /* 4->3*/
		e_v_filter = VP_PP_V_3_4;
	else if (v_ratio <= (0x3 << 15)) /*6->5*/
		e_v_filter = VP_PP_V_5_6;
	else if (v_ratio <= (0x1 << 17)) /* 2->1*/
		e_v_filter = VP_PP_V_1_2;
	else if (v_ratio <= (0x3 << 16)) /* 3->1*/
		e_v_filter = VP_PP_V_1_3;
	else
		e_v_filter = VP_PP_V_1_4;

	/* horizontal luma: 16 phases x 8 taps per filter bank */
	poly_flt_coeff = (u8 *)(g_s_vp8tap_coef_y_h + e_h_filter * 16 * 8);

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			s5p_vp_set_poly_filter_coef(
				VP_POLY8_Y0_LL + (i*4) + j,
				*(poly_flt_coeff + 4*j*8 + (7 - i)),
				*(poly_flt_coeff + (4*j + 1)*8 + (7 - i)),
				*(poly_flt_coeff + (4*j + 2)*8 + (7 - i)),
				*(poly_flt_coeff + (4*j + 3)*8 + (7 - i)));
		}
	}

	/* horizontal chroma: 16 phases x 4 taps per filter bank */
	poly_flt_coeff = (u8 *)(g_s_vp4tap_coef_c_h + e_h_filter * 16 * 4);

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 4; j++) {
			s5p_vp_set_poly_filter_coef(
				VP_POLY4_C0_LL + (i*4) + j,
				*(poly_flt_coeff + 4*j*4 + (3 - i)),
				*(poly_flt_coeff + (4*j + 1)*4 + (3 - i)),
				*(poly_flt_coeff + (4*j + 2)*4 + (3 - i)),
				*(poly_flt_coeff + (4*j + 3)*4 + (3 - i)));
		}
	}

	/* vertical luma: 16 phases x 4 taps per filter bank */
	poly_flt_coeff = (u8 *)(g_s_vp4tap_coef_y_v + e_v_filter * 16 * 4);

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			s5p_vp_set_poly_filter_coef(
				VP_POLY4_Y0_LL + (i*4) + j,
				*(poly_flt_coeff + 4*j*4 + (3 - i)),
				*(poly_flt_coeff + (4*j + 1)*4 + (3 - i)),
				*(poly_flt_coeff + (4*j + 2)*4 + (3 - i)),
				*(poly_flt_coeff + (4*j + 3)*4 + (3 - i)));
		}
	}
}
+
+void s5p_vp_set_field_id(enum s5p_vp_field mode)
+{
+ writel((mode == VP_TOP_FIELD) ? VP_TOP_FIELD : VP_BOTTOM_FIELD,
+ vp_base + S5P_VP_FIELD_ID);
+}
+
+int s5p_vp_set_top_field_address(u32 top_y_addr, u32 top_c_addr)
+{
+ if (S5P_VP_PTR_ILLEGAL(top_y_addr) || S5P_VP_PTR_ILLEGAL(top_c_addr)) {
+ tvout_err("address is not double word align = 0x%x, 0x%x\n",
+ top_y_addr, top_c_addr);
+
+ return -1;
+ }
+
+ writel(top_y_addr, vp_base + S5P_VP_TOP_Y_PTR);
+ writel(top_c_addr, vp_base + S5P_VP_TOP_C_PTR);
+
+ return 0;
+}
+
+int s5p_vp_get_top_field_address(u32* top_y_addr, u32* top_c_addr)
+{
+ *top_y_addr = readl(vp_base + S5P_VP_TOP_Y_PTR);
+ *top_c_addr = readl(vp_base + S5P_VP_TOP_C_PTR);
+
+ return 0;
+}
+
+int s5p_vp_set_bottom_field_address(
+ u32 bottom_y_addr, u32 bottom_c_addr)
+{
+ if (S5P_VP_PTR_ILLEGAL(bottom_y_addr) ||
+ S5P_VP_PTR_ILLEGAL(bottom_c_addr)) {
+ tvout_err("address is not double word align = 0x%x, 0x%x\n",
+ bottom_y_addr, bottom_c_addr);
+
+ return -1;
+ }
+
+ writel(bottom_y_addr, vp_base + S5P_VP_BOT_Y_PTR);
+ writel(bottom_c_addr, vp_base + S5P_VP_BOT_C_PTR);
+
+ return 0;
+}
+
/*
 * Program the full source image size for the luma and chroma planes.
 * The chroma plane is set to half the luma height, consistent with the
 * 4:2:0 NV12/NV21 formats programmed by s5p_vp_set_op_mode().
 *
 * Returns 0 on success, -1 if either dimension fails the hardware
 * alignment check (nothing written in that case).
 */
int s5p_vp_set_img_size(u32 img_width, u32 img_height)
{
	if (S5P_VP_IMG_SIZE_ILLEGAL(img_width) ||
		S5P_VP_IMG_SIZE_ILLEGAL(img_height)) {
		tvout_err("full image size is not double word align ="
			"%d, %d\n", img_width, img_height);

		return -1;
	}

	writel(S5P_VP_IMG_HSIZE(img_width) | S5P_VP_IMG_VSIZE(img_height),
		vp_base + S5P_VP_IMG_SIZE_Y);
	writel(S5P_VP_IMG_HSIZE(img_width) | S5P_VP_IMG_VSIZE(img_height / 2),
		vp_base + S5P_VP_IMG_SIZE_C);

	return 0;
}
+
/*
 * Program the source crop origin.  The horizontal position carries an
 * additional fractional step value used by the scaler.
 */
void s5p_vp_set_src_position(
	u32 src_off_x, u32 src_x_fract_step, u32 src_off_y)
{
	writel(S5P_VP_SRC_H_POSITION_VAL(src_off_x) |
		S5P_VP_SRC_X_FRACT_STEP(src_x_fract_step),
		vp_base + S5P_VP_SRC_H_POSITION);
	writel(S5P_VP_SRC_V_POSITION_VAL(src_off_y),
		vp_base + S5P_VP_SRC_V_POSITION);
}
+
/* Program the destination (output) origin of the scaled video. */
void s5p_vp_set_dest_position(u32 dst_off_x, u32 dst_off_y)
{
	writel(S5P_VP_DST_H_POSITION_VAL(dst_off_x),
		vp_base + S5P_VP_DST_H_POSITION);
	writel(S5P_VP_DST_V_POSITION_VAL(dst_off_y),
		vp_base + S5P_VP_DST_V_POSITION);
}
+
+void s5p_vp_set_src_dest_size(
+ u32 src_width, u32 src_height,
+ u32 dst_width, u32 dst_height, bool ipc_2d)
+{
+ u32 h_ratio = H_RATIO(src_width, dst_width);
+ u32 v_ratio = V_RATIO(src_height, dst_height, ipc_2d);
+
+ writel(S5P_VP_SRC_WIDTH_VAL(src_width), vp_base + S5P_VP_SRC_WIDTH);
+ writel(S5P_VP_SRC_HEIGHT_VAL(src_height), vp_base + S5P_VP_SRC_HEIGHT);
+ writel(S5P_VP_DST_WIDTH_VAL(dst_width), vp_base + S5P_VP_DST_WIDTH);
+ writel(S5P_VP_DST_HEIGHT_VAL(dst_height), vp_base + S5P_VP_DST_HEIGHT);
+ writel(S5P_VP_H_RATIO_VAL(h_ratio), vp_base + S5P_VP_H_RATIO);
+ writel(S5P_VP_V_RATIO_VAL(v_ratio), vp_base + S5P_VP_V_RATIO);
+
+ writel((ipc_2d) ?
+ (readl(vp_base + S5P_VP_MODE) | S5P_VP_MODE_2D_IPC_ENABLE) :
+ (readl(vp_base + S5P_VP_MODE) & ~S5P_VP_MODE_2D_IPC_ENABLE),
+ vp_base + S5P_VP_MODE);
+}
+
+void s5p_vp_set_op_mode(
+ bool line_skip,
+ enum s5p_vp_mem_type mem_type,
+ enum s5p_vp_mem_mode mem_mode,
+ enum s5p_vp_chroma_expansion chroma_exp,
+ bool auto_toggling)
+{
+ u32 temp_reg;
+
+ temp_reg = (mem_type) ?
+ S5P_VP_MODE_IMG_TYPE_YUV420_NV21 :
+ S5P_VP_MODE_IMG_TYPE_YUV420_NV12;
+ temp_reg |= (line_skip) ?
+ S5P_VP_MODE_LINE_SKIP_ON : S5P_VP_MODE_LINE_SKIP_OFF;
+ temp_reg |= (mem_mode == VP_2D_TILE_MODE) ?
+ S5P_VP_MODE_MEM_MODE_2D_TILE :
+ S5P_VP_MODE_MEM_MODE_LINEAR;
+ temp_reg |= (chroma_exp == VP_C_TOP_BOTTOM) ?
+ S5P_VP_MODE_CROMA_EXP_C_TOPBOTTOM_PTR :
+ S5P_VP_MODE_CROMA_EXP_C_TOP_PTR;
+ temp_reg |= (auto_toggling) ?
+ S5P_VP_MODE_FIELD_ID_AUTO_TOGGLING :
+ S5P_VP_MODE_FIELD_ID_MAN_TOGGLING;
+
+ writel(temp_reg, vp_base + S5P_VP_MODE);
+}
+
/* Program the VP pixel-rate control register. */
void s5p_vp_set_pixel_rate_control(enum s5p_vp_pxl_rate rate)
{
	writel(S5P_VP_PEL_RATE_CTRL(rate), vp_base + S5P_VP_PER_RATE_CTRL);
}
+
/* Select the memory byte-order the VP uses when fetching video data. */
void s5p_vp_set_endian(enum s5p_tvout_endian endian)
{
	writel(endian, vp_base + S5P_VP_ENDIAN_MODE);
}
+
+void s5p_vp_set_bypass_post_process(bool bypass)
+{
+ writel((bypass) ? S5P_VP_BY_PASS_ENABLE : S5P_VP_BY_PASS_DISABLE,
+ vp_base + S5P_PP_BYPASS);
+}
+
/* Program the post-processor saturation level. */
void s5p_vp_set_saturation(u32 sat)
{
	writel(S5P_VP_SATURATION(sat), vp_base + S5P_PP_SATURATION);
}
+
/*
 * Program the sharpness filter: @th_h_noise is the horizontal noise
 * threshold, @sharpness the filter strength.
 */
void s5p_vp_set_sharpness(
	u32 th_h_noise, enum s5p_vp_sharpness_control sharpness)
{
	writel(S5P_VP_TH_HNOISE(th_h_noise) | S5P_VP_SHARPNESS(sharpness),
		vp_base + S5P_PP_SHARPNESS);
}
+
/*
 * Apply one brightness (intercept @b) / contrast (slope @c) pair to
 * all eight line-equation registers, i.e. a single global setting.
 * The EQ registers are contiguous, 4 bytes apart from S5P_PP_LINE_EQ0.
 */
void s5p_vp_set_brightness_contrast(u16 b, u8 c)
{
	int i;

	for (i = 0; i < 8; i++)
		writel(S5P_VP_LINE_INTC(b) | S5P_VP_LINE_SLOPE(c),
			vp_base + S5P_PP_LINE_EQ0 + i*4);
}
+
/* Program the global brightness offset applied by the post-processor. */
void s5p_vp_set_brightness_offset(u32 offset)
{
	writel(S5P_VP_BRIGHT_OFFSET(offset), vp_base + S5P_PP_BRIGHT_OFFSET);
}
+
+int s5p_vp_set_brightness_contrast_control(
+ enum s5p_vp_line_eq eq_num, u32 intc, u32 slope)
+{
+ if (eq_num > VP_LINE_EQ_7 || eq_num < VP_LINE_EQ_0) {
+ tvout_err("invaild eq_num parameter\n");
+
+ return -1;
+ }
+
+ writel(S5P_VP_LINE_INTC(intc) | S5P_VP_LINE_SLOPE(slope),
+ vp_base + S5P_PP_LINE_EQ0 + eq_num*4);
+
+ return 0;
+}
+
+void s5p_vp_set_csc_control(bool sub_y_offset_en, bool csc_en)
+{
+ u32 temp_reg;
+
+ temp_reg = (sub_y_offset_en) ? S5P_VP_SUB_Y_OFFSET_ENABLE :
+ S5P_VP_SUB_Y_OFFSET_DISABLE;
+ temp_reg |= (csc_en) ? S5P_VP_CSC_ENABLE : S5P_VP_CSC_DISABLE;
+
+ writel(temp_reg, vp_base + S5P_PP_CSC_EN);
+}
+
/*
 * Write one CSC matrix coefficient.  @csc_coeff selects the register
 * (Y2Y..CR2CR, 4 bytes apart from S5P_PP_CSC_Y2Y_COEF); out-of-range
 * indices are rejected with -1, success returns 0.
 */
int s5p_vp_set_csc_coef(enum s5p_vp_csc_coeff csc_coeff, u32 coeff)
{
	if (csc_coeff > VP_CSC_CR2CR_COEF ||
		csc_coeff < VP_CSC_Y2Y_COEF) {
		tvout_err("invaild csc_coeff parameter\n");

		return -1;
	}

	writel(S5P_PP_CSC_COEF(coeff),
		vp_base + S5P_PP_CSC_Y2Y_COEF + csc_coeff*4);

	return 0;
}
+
/*
 * Load a complete default CSC coefficient set: BT.601 -> BT.709 for
 * VP_CSC_SD_HD, BT.709 -> BT.601 for VP_CSC_HD_SD (per the macro
 * names).  Returns 0 on success, -1 for an unknown @csc_type.
 */
int s5p_vp_set_csc_coef_default(enum s5p_vp_csc_type csc_type)
{
	switch (csc_type) {
	case VP_CSC_SD_HD:
		writel(S5P_PP_Y2Y_COEF_601_TO_709,
			vp_base + S5P_PP_CSC_Y2Y_COEF);
		writel(S5P_PP_CB2Y_COEF_601_TO_709,
			vp_base + S5P_PP_CSC_CB2Y_COEF);
		writel(S5P_PP_CR2Y_COEF_601_TO_709,
			vp_base + S5P_PP_CSC_CR2Y_COEF);
		writel(S5P_PP_Y2CB_COEF_601_TO_709,
			vp_base + S5P_PP_CSC_Y2CB_COEF);
		writel(S5P_PP_CB2CB_COEF_601_TO_709,
			vp_base + S5P_PP_CSC_CB2CB_COEF);
		writel(S5P_PP_CR2CB_COEF_601_TO_709,
			vp_base + S5P_PP_CSC_CR2CB_COEF);
		writel(S5P_PP_Y2CR_COEF_601_TO_709,
			vp_base + S5P_PP_CSC_Y2CR_COEF);
		writel(S5P_PP_CB2CR_COEF_601_TO_709,
			vp_base + S5P_PP_CSC_CB2CR_COEF);
		writel(S5P_PP_CR2CR_COEF_601_TO_709,
			vp_base + S5P_PP_CSC_CR2CR_COEF);
		break;

	case VP_CSC_HD_SD:
		writel(S5P_PP_Y2Y_COEF_709_TO_601,
			vp_base + S5P_PP_CSC_Y2Y_COEF);
		writel(S5P_PP_CB2Y_COEF_709_TO_601,
			vp_base + S5P_PP_CSC_CB2Y_COEF);
		writel(S5P_PP_CR2Y_COEF_709_TO_601,
			vp_base + S5P_PP_CSC_CR2Y_COEF);
		writel(S5P_PP_Y2CB_COEF_709_TO_601,
			vp_base + S5P_PP_CSC_Y2CB_COEF);
		writel(S5P_PP_CB2CB_COEF_709_TO_601,
			vp_base + S5P_PP_CSC_CB2CB_COEF);
		writel(S5P_PP_CR2CB_COEF_709_TO_601,
			vp_base + S5P_PP_CSC_CR2CB_COEF);
		writel(S5P_PP_Y2CR_COEF_709_TO_601,
			vp_base + S5P_PP_CSC_Y2CR_COEF);
		writel(S5P_PP_CB2CR_COEF_709_TO_601,
			vp_base + S5P_PP_CSC_CB2CR_COEF);
		writel(S5P_PP_CR2CR_COEF_709_TO_601,
			vp_base + S5P_PP_CSC_CR2CR_COEF);
		break;

	default:
		tvout_err("invalid csc_type parameter = %d\n", csc_type);
		return -1;
	}

	return 0;
}
+
/*
 * Request that the shadowed VP registers be committed by setting the
 * shadow-update enable bit.  Always returns 0.
 */
int s5p_vp_update(void)
{
	writel(readl(vp_base + S5P_VP_SHADOW_UPDATE) |
		S5P_VP_SHADOW_UPDATE_ENABLE,
		vp_base + S5P_VP_SHADOW_UPDATE);

	return 0;
}
+
+int s5p_vp_get_update_status(void)
+{
+ if (readl(vp_base + S5P_VP_SHADOW_UPDATE) & S5P_VP_SHADOW_UPDATE_ENABLE)
+ return 0;
+ else
+ return -1;
+}
+
/*
 * Trigger a VP software reset and poll (10 ms granularity) until the
 * hardware clears the in-progress bit.
 * NOTE(review): there is no timeout; a wedged VP block would spin here
 * forever.
 */
void s5p_vp_sw_reset(void)
{
	writel((readl(vp_base + S5P_VP_SRESET) | S5P_VP_SRESET_PROCESSING),
		vp_base + S5P_VP_SRESET);

	while (readl(vp_base + S5P_VP_SRESET) & S5P_VP_SRESET_PROCESSING)
		msleep(10);
}
+
/*
 * Enable the video processor and latch the configuration via a shadow
 * register update.  Always returns 0.
 */
int s5p_vp_start(void)
{
	writel(S5P_VP_ENABLE_ON, vp_base + S5P_VP_ENABLE);

	s5p_vp_update();

	return 0;
}
+
/*
 * Clear the VP enable bit and busy-poll the enable register until the
 * hardware reports the expected state.  Always returns 0.
 *
 * NOTE(review): the loop spins while the OPERATING bit is *clear* and
 * has no timeout.  Stopping would normally wait for a busy bit to
 * drop, not to assert -- confirm the polarity of
 * S5P_VP_ENABLE_OPERATING against the datasheet.
 */
int s5p_vp_stop(void)
{
	u32 val;

	val = readl(vp_base + S5P_VP_ENABLE);
	val &= ~S5P_VP_ENABLE_ON;
	writel(val, vp_base + S5P_VP_ENABLE);

	do {
		val = readl(vp_base + S5P_VP_ENABLE);
	} while (!(val & S5P_VP_ENABLE_OPERATING));

	return 0;
}
+
/*
 * Record the ioremapped VP register base; must be called before any
 * other s5p_vp_* function touches the hardware.
 */
void s5p_vp_init(void __iomem *addr)
{
	vp_base = addr;
}
diff --git a/drivers/media/video/samsung/tvout/s5p_mixer_ctrl.c b/drivers/media/video/samsung/tvout/s5p_mixer_ctrl.c
new file mode 100644
index 0000000..a0169cb
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_mixer_ctrl.c
@@ -0,0 +1,1146 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_mixer_ctrl.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Functions of mixer ctrl class for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+#include <plat/clock.h>
+
+#include "hw_if/hw_if.h"
+#include "s5p_tvout_ctrl.h"
+
+/* Indices into s5p_mixer_ctrl_private.clk[]. */
+enum {
+	ACLK = 0,	/* "mixer" gate clock */
+	MUX,		/* "sclk_mixer" source-mux clock */
+	NO_OF_CLK
+};
+
+/* Background color of one mixer background plane, as Y/Cb/Cr components. */
+struct s5p_bg_color {
+	u32 color_y;
+	u32 color_cb;
+	u32 color_cr;
+};
+
+/* Cached software state of the mixer video layer. */
+struct s5p_mixer_video_layer_info {
+	bool layer_blend;	/* layer-alpha blending enabled */
+	u32 alpha;		/* layer alpha value */
+	u32 priority;		/* z-order priority (0-15) */
+	u32 y_min;		/* video limiter luma/chroma clamp range */
+	u32 y_max;
+	u32 c_min;
+	u32 c_max;
+
+	bool use_video_layer;	/* layer currently enabled by a client */
+};
+
+/* Cached software state of one graphic (RGB framebuffer) mixer layer. */
+struct s5p_mixer_grp_layer_info {
+	bool pixel_blend;	/* per-pixel alpha blending enabled */
+	bool layer_blend;	/* layer-alpha blending enabled */
+	u32 alpha;		/* layer alpha value */
+
+	bool chroma_enable;	/* chroma-key transparency enabled */
+	u32 chroma_key;		/* chroma-key color value */
+
+	bool pre_mul_mode;	/* source is alpha-premultiplied */
+
+	u32 src_x;		/* source window position inside the fb */
+	u32 src_y;
+	u32 dst_x;		/* destination position on screen */
+	u32 dst_y;
+	u32 width;		/* window size */
+	u32 height;
+	dma_addr_t fb_addr;	/* physical framebuffer base address */
+
+	bool use_grp_layer;	/* layer currently enabled by a client */
+
+	u32 priority;		/* z-order priority (0-15) */
+	enum s5p_mixer_color_fmt format;	/* RGB pixel format */
+
+	enum s5ptvfb_ver_scaling_t ver;	/* 1x/2x vertical scaling */
+	enum s5ptvfb_hor_scaling_t hor;	/* 1x/2x horizontal scaling */
+};
+
+/* All driver-private mixer state: resources plus cached layer settings. */
+struct s5p_mixer_ctrl_private_data {
+	char *pow_name;		/* power-domain name (unused so far here) */
+	struct s5p_tvout_clk_info clk[NO_OF_CLK];
+	struct irq_info irq;
+	struct reg_mem_info reg_mem;	/* register resource and iomap base */
+
+	enum s5p_mixer_burst_mode burst;	/* DMA burst length */
+	enum s5p_tvout_endian endian;
+	struct s5p_bg_color bg_color[3];	/* background plane colors */
+
+	struct s5p_mixer_video_layer_info v_layer;
+	struct s5p_mixer_grp_layer_info layer[S5PTV_FB_CNT];
+
+	bool running;		/* clocks on and hardware programmed */
+	bool vsync_interrupt_enable;	/* requested vsync irq state */
+};
+
+/*
+ * Singleton mixer state. Layer defaults: opaque (alpha 0xff), no
+ * blending/chroma-key, priority 10, RGB8888, 1x scaling; the background
+ * planes default to black in YCbCr (16/128/128).
+ */
+static struct s5p_mixer_ctrl_private_data s5p_mixer_ctrl_private = {
+	.pow_name = "mixer_pd",
+	.clk[ACLK] = {
+		.name = "mixer",
+		.ptr = NULL
+	},
+	.clk[MUX] = {
+		.name = "sclk_mixer",
+		.ptr = NULL
+	},
+	.irq = {
+		.name = "s5p-mixer",
+		.handler = s5p_mixer_irq,
+		.no = -1
+	},
+	.reg_mem = {
+		.name = "s5p-mixer",
+		.res = NULL,
+		.base = NULL
+	},
+
+	.burst = MIXER_BURST_16,
+	.endian = TVOUT_LITTLE_ENDIAN,
+	.bg_color[0].color_y = 16,
+	.bg_color[0].color_cb = 128,
+	.bg_color[0].color_cr = 128,
+	.bg_color[1].color_y = 16,
+	.bg_color[1].color_cb = 128,
+	.bg_color[1].color_cr = 128,
+	.bg_color[2].color_y = 16,
+	.bg_color[2].color_cb = 128,
+	.bg_color[2].color_cr = 128,
+
+	.v_layer = {
+		.layer_blend = false,
+		.alpha = 0xff,
+		.priority = 10,
+		.y_min = 0x10,
+		.y_max = 0xeb,
+		.c_min = 0x10,
+		.c_max = 0xf0,
+	},
+	.layer[MIXER_GPR0_LAYER] = {
+		.pixel_blend = false,
+		.layer_blend = false,
+		.alpha = 0xff,
+		.chroma_enable = false,
+		.chroma_key = 0x0,
+		.pre_mul_mode = false,
+		.src_x = 0,
+		.src_y = 0,
+		.dst_x = 0,
+		.dst_y = 0,
+		.width = 0,
+		.height = 0,
+		.priority = 10,
+		.format = MIXER_RGB8888,
+		.ver = VERTICAL_X1,
+		.hor = HORIZONTAL_X1
+	},
+	.layer[MIXER_GPR1_LAYER] = {
+		.pixel_blend = false,
+		.layer_blend = false,
+		.alpha = 0xff,
+		.chroma_enable = false,
+		.chroma_key = 0x0,
+		.pre_mul_mode = false,
+		.src_x = 0,
+		.src_y = 0,
+		.dst_x = 0,
+		.dst_y = 0,
+		.width = 0,
+		.height = 0,
+		.priority = 10,
+		.format = MIXER_RGB8888,
+		.ver = VERTICAL_X1,
+		.hor = HORIZONTAL_X1
+	},
+
+	.running = false,
+	.vsync_interrupt_enable = false,
+};
+
+/*
+ * Push every cached setting of @layer into the mixer hardware. For
+ * graphic layers this programs premultiply, chroma key, position,
+ * scaling, base address, format and pixel blending; all layer kinds then
+ * get blend/alpha/priority. Returns 0 on success, -1 on invalid layer.
+ */
+static int s5p_mixer_ctrl_set_reg(enum s5p_mixer_layer layer)
+{
+	bool layer_blend;
+	u32 alpha;
+	u32 priority;
+	struct s5ptvfb_user_scaling scaling;
+
+	switch (layer) {
+	case MIXER_VIDEO_LAYER:
+		layer_blend = s5p_mixer_ctrl_private.v_layer.layer_blend;
+		alpha = s5p_mixer_ctrl_private.v_layer.alpha;
+		priority = s5p_mixer_ctrl_private.v_layer.priority;
+		break;
+	case MIXER_GPR0_LAYER:
+	case MIXER_GPR1_LAYER:
+		layer_blend = s5p_mixer_ctrl_private.layer[layer].layer_blend;
+		alpha = s5p_mixer_ctrl_private.layer[layer].alpha;
+		priority = s5p_mixer_ctrl_private.layer[layer].priority;
+
+		s5p_mixer_set_pre_mul_mode(layer,
+			s5p_mixer_ctrl_private.layer[layer].pre_mul_mode);
+		s5p_mixer_set_chroma_key(layer,
+			s5p_mixer_ctrl_private.layer[layer].chroma_enable,
+			s5p_mixer_ctrl_private.layer[layer].chroma_key);
+		s5p_mixer_set_grp_layer_dst_pos(layer,
+			s5p_mixer_ctrl_private.layer[layer].dst_x,
+			s5p_mixer_ctrl_private.layer[layer].dst_y);
+
+		scaling.ver = s5p_mixer_ctrl_private.layer[layer].ver;
+		scaling.hor = s5p_mixer_ctrl_private.layer[layer].hor;
+		s5p_mixer_scaling(layer, scaling);
+		s5p_mixer_set_grp_base_address(layer,
+			s5p_mixer_ctrl_private.layer[layer].fb_addr);
+
+		s5p_mixer_set_color_format(layer,
+			s5p_mixer_ctrl_private.layer[layer].format);
+
+		/* NOTE(review): width is passed twice (span and width?) -
+		 * same pattern as s5p_mixer_ctrl_set_src_win_pos(); confirm
+		 * against the s5p_mixer_set_grp_layer_src_pos() prototype. */
+		s5p_mixer_set_grp_layer_src_pos(layer,
+			s5p_mixer_ctrl_private.layer[layer].src_x,
+			s5p_mixer_ctrl_private.layer[layer].src_y,
+			s5p_mixer_ctrl_private.layer[layer].width,
+			s5p_mixer_ctrl_private.layer[layer].width,
+			s5p_mixer_ctrl_private.layer[layer].height);
+
+		s5p_mixer_set_pixel_blend(layer,
+			s5p_mixer_ctrl_private.layer[layer].pixel_blend);
+		break;
+	default:
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	s5p_mixer_set_layer_blend(layer, layer_blend);
+	s5p_mixer_set_alpha(layer, alpha);
+	s5p_mixer_set_priority(layer, priority);
+
+	return 0;
+}
+
+/*
+ * Gate the mixer clocks on/off (mux clock, runtime PM on Exynos4, and
+ * the bus clock) and update the hw_if register base accordingly, so
+ * register accessors cannot touch a gated block.
+ */
+static void s5p_mixer_ctrl_clock(bool on)
+{
+	/* power control function is not implemented yet */
+	if (on) {
+		clk_enable(s5p_mixer_ctrl_private.clk[MUX].ptr);
+#ifdef CONFIG_ARCH_EXYNOS4
+		s5p_tvout_pm_runtime_get();
+#endif
+
+		clk_enable(s5p_mixer_ctrl_private.clk[ACLK].ptr);
+
+		/* Restore mixer_base address */
+		s5p_mixer_init(s5p_mixer_ctrl_private.reg_mem.base);
+	} else {
+		clk_disable(s5p_mixer_ctrl_private.clk[ACLK].ptr);
+
+#ifdef CONFIG_ARCH_EXYNOS4
+		s5p_tvout_pm_runtime_put();
+#endif
+
+		clk_disable(s5p_mixer_ctrl_private.clk[MUX].ptr);
+
+		/* Set mixer_base address to NULL */
+		s5p_mixer_init(NULL);
+	}
+}
+
+/* Cache the physical framebuffer base of @layer (no hardware access). */
+void s5p_mixer_ctrl_init_fb_addr_phy(enum s5p_mixer_layer layer,
+	dma_addr_t fb_addr)
+{
+	s5p_mixer_ctrl_private.layer[layer].fb_addr = fb_addr;
+}
+
+/*
+ * Re-program a graphic layer from its cached settings. Does nothing
+ * while the driver is suspended or the mixer is not running.
+ */
+void s5p_mixer_ctrl_init_grp_layer(enum s5p_mixer_layer layer)
+{
+	struct s5ptvfb_user_scaling scaling;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return;
+	} else
+#endif
+	{
+		if (s5p_mixer_ctrl_private.running) {
+			s5p_mixer_set_priority(layer,
+				s5p_mixer_ctrl_private.layer[layer].priority);
+			s5p_mixer_set_pre_mul_mode(layer,
+				s5p_mixer_ctrl_private.layer[layer].pre_mul_mode);
+			s5p_mixer_set_chroma_key(layer,
+				s5p_mixer_ctrl_private.layer[layer].chroma_enable,
+				s5p_mixer_ctrl_private.layer[layer].chroma_key);
+			s5p_mixer_set_layer_blend(layer,
+				s5p_mixer_ctrl_private.layer[layer].layer_blend);
+			s5p_mixer_set_alpha(layer,
+				s5p_mixer_ctrl_private.layer[layer].alpha);
+			s5p_mixer_set_grp_layer_dst_pos(layer,
+				s5p_mixer_ctrl_private.layer[layer].dst_x,
+				s5p_mixer_ctrl_private.layer[layer].dst_y);
+
+			scaling.ver = s5p_mixer_ctrl_private.layer[layer].ver;
+			scaling.hor = s5p_mixer_ctrl_private.layer[layer].hor;
+			s5p_mixer_scaling(layer, scaling);
+			s5p_mixer_set_grp_base_address(layer,
+				s5p_mixer_ctrl_private.layer[layer].fb_addr);
+		}
+	}
+}
+
+/*
+ * Map a framebuffer bpp/alpha-length pair to a mixer RGB format, cache
+ * it, and (when the mixer is running and not suspended) program it.
+ * Supported: 16 bpp (RGB565/1555/4444 by @trans_len) and 32 bpp
+ * (RGB8888). Returns 0 on success, -1 on an invalid layer or bpp.
+ */
+int s5p_mixer_ctrl_set_pixel_format(enum s5p_mixer_layer layer, u32 bpp, u32 trans_len)
+{
+	enum s5p_mixer_color_fmt format;
+
+	/*
+	 * Only graphic layers have an RGB pixel format; validate before
+	 * indexing layer[] (every sibling setter performs this check, and
+	 * an unchecked enum value would index out of bounds).
+	 */
+	if ((layer != MIXER_GPR0_LAYER) && (layer != MIXER_GPR1_LAYER)) {
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	switch (bpp) {
+	case 16:
+		if (trans_len == 1)
+			format = MIXER_RGB1555;
+		else if (trans_len == 4)
+			format = MIXER_RGB4444;
+		else
+			format = MIXER_RGB565;
+		break;
+	case 32:
+		format = MIXER_RGB8888;
+		break;
+	default:
+		tvout_err("invalid bits per pixel\n");
+		return -1;
+	}
+
+	s5p_mixer_ctrl_private.layer[layer].format = format;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return 0;
+	} else
+#endif
+	{
+		if (s5p_mixer_ctrl_private.running)
+			s5p_mixer_set_color_format(layer, format);
+
+	}
+
+	return 0;
+}
+
+/*
+ * Mark @layer in use; when the mixer is running (and not suspended),
+ * program its cached settings and unhide it.
+ * Returns 0 on success, -1 on an invalid layer.
+ */
+int s5p_mixer_ctrl_enable_layer(enum s5p_mixer_layer layer)
+{
+	switch (layer) {
+	case MIXER_VIDEO_LAYER:
+		s5p_mixer_ctrl_private.v_layer.use_video_layer = true;
+		break;
+	case MIXER_GPR0_LAYER:
+	case MIXER_GPR1_LAYER:
+		s5p_mixer_ctrl_private.layer[layer].use_grp_layer = true;
+		break;
+	default:
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return 0;
+	}
+#endif
+
+	if (s5p_mixer_ctrl_private.running) {
+		s5p_mixer_ctrl_set_reg(layer);
+
+		s5p_mixer_set_show(layer, true);
+	}
+
+	return 0;
+}
+
+/*
+ * Mark @layer unused; when the mixer is running (and not suspended),
+ * hide it in the hardware. Returns 0 on success, -1 on invalid layer.
+ *
+ * The previous snapshot of use_video_layer/use_grp_layer into local
+ * variables was dead code and has been removed.
+ */
+int s5p_mixer_ctrl_disable_layer(enum s5p_mixer_layer layer)
+{
+	switch (layer) {
+	case MIXER_VIDEO_LAYER:
+		s5p_mixer_ctrl_private.v_layer.use_video_layer = false;
+		break;
+	case MIXER_GPR0_LAYER:
+	case MIXER_GPR1_LAYER:
+		s5p_mixer_ctrl_private.layer[layer].use_grp_layer = false;
+		break;
+	default:
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return 0;
+	}
+#endif
+
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_set_show(layer, false);
+
+	return 0;
+}
+
+/*
+ * Cache the z-order priority (0-15) of @layer and, when the mixer is
+ * running, program it into the hardware.
+ * Returns 0 on success, -1 on an out-of-range priority or bad layer.
+ */
+int s5p_mixer_ctrl_set_priority(enum s5p_mixer_layer layer, u32 prio)
+{
+	if (prio > 15) {
+		tvout_err("layer priority range : 0 - 15\n");
+		return -1;
+	}
+
+	if (layer == MIXER_VIDEO_LAYER) {
+		s5p_mixer_ctrl_private.v_layer.priority = prio;
+	} else if (layer == MIXER_GPR0_LAYER || layer == MIXER_GPR1_LAYER) {
+		s5p_mixer_ctrl_private.layer[layer].priority = prio;
+	} else {
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_set_priority(layer, prio);
+
+	return 0;
+}
+
+/*
+ * Clamp and cache the on-screen destination position of a graphic
+ * layer. The offset is pulled back so a (w x h) window stays inside the
+ * active area (w_t x h_t) of the current TV standard.
+ * Returns 0 on success, -1 on an invalid layer.
+ */
+int s5p_mixer_ctrl_set_dst_win_pos(enum s5p_mixer_layer layer,
+	int dst_x, int dst_y, u32 w, u32 h)
+{
+	u32 w_t, h_t;
+	enum s5p_tvout_disp_mode std;
+	enum s5p_tvout_o_mode inf;
+
+	if ((layer != MIXER_GPR0_LAYER) && (layer != MIXER_GPR1_LAYER)) {
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	s5p_tvif_ctrl_get_std_if(&std, &inf);
+	tvout_dbg("standard no = %d, output mode no = %d\n", std, inf);
+
+	/*
+	 * When tvout resolution was overscanned, there is no
+	 * adjust method in H/W. So, framebuffer should be resized.
+	 * In this case - TV w/h is greater than FB w/h, grp layer's
+	 * dst offset must be changed to fix tv screen.
+	 */
+
+	switch (std) {
+	case TVOUT_NTSC_M:
+	case TVOUT_480P_60_16_9:
+	case TVOUT_480P_60_4_3:
+	case TVOUT_480P_59:
+		w_t = 720;
+		h_t = 480;
+		break;
+
+	case TVOUT_576P_50_16_9:
+	case TVOUT_576P_50_4_3:
+		w_t = 720;
+		h_t = 576;
+		break;
+
+	case TVOUT_720P_60:
+	case TVOUT_720P_59:
+	case TVOUT_720P_50:
+		w_t = 1280;
+		h_t = 720;
+		break;
+
+	case TVOUT_1080I_60:
+	case TVOUT_1080I_59:
+	case TVOUT_1080I_50:
+	case TVOUT_1080P_60:
+	case TVOUT_1080P_59:
+	case TVOUT_1080P_50:
+	case TVOUT_1080P_30:
+		w_t = 1920;
+		h_t = 1080;
+		break;
+
+#ifdef CONFIG_HDMI_14A_3D
+	case TVOUT_720P_60_SBS_HALF:
+	case TVOUT_720P_59_SBS_HALF:
+	case TVOUT_720P_50_TB:
+		w_t = 1280;
+		h_t = 720;
+		break;
+
+	case TVOUT_1080P_24_TB:
+	case TVOUT_1080P_23_TB:
+		w_t = 1920;
+		h_t = 1080;
+		break;
+
+#endif
+	default:
+		w_t = 0;
+		h_t = 0;
+		break;
+	}
+
+	if (dst_x < 0)
+		dst_x = 0;
+
+	if (dst_y < 0)
+		dst_y = 0;
+
+	if (dst_x + w > w_t)
+		dst_x = w_t - w;
+
+	if (dst_y + h > h_t)
+		dst_y = h_t - h;
+
+	/*
+	 * If the window is larger than the screen (w > w_t or h > h_t),
+	 * the pull-back above goes negative; re-clamp so the u32 cast
+	 * below cannot wrap to a huge offset.
+	 */
+	if (dst_x < 0)
+		dst_x = 0;
+
+	if (dst_y < 0)
+		dst_y = 0;
+
+	tvout_dbg("destination coordinates : x = %d, y = %d\n",
+		dst_x, dst_y);
+	tvout_dbg("output device screen size : width = %d, height = %d\n",
+		w_t, h_t);
+
+	s5p_mixer_ctrl_private.layer[layer].dst_x = (u32)dst_x;
+	s5p_mixer_ctrl_private.layer[layer].dst_y = (u32)dst_y;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return 0;
+	}
+#endif
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_set_grp_layer_dst_pos(layer, (u32)dst_x, (u32)dst_y);
+
+	return 0;
+}
+
+/*
+ * Cache the source window (offset and size) of a graphic layer and,
+ * when the mixer is running and not suspended, program it.
+ * Returns 0 on success, -1 on an invalid layer.
+ */
+int s5p_mixer_ctrl_set_src_win_pos(enum s5p_mixer_layer layer,
+	u32 src_x, u32 src_y, u32 w, u32 h)
+{
+	if ((layer != MIXER_GPR0_LAYER) && (layer != MIXER_GPR1_LAYER)) {
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	tvout_dbg("source coordinates : x = %d, y = %d\n", src_x, src_y);
+	tvout_dbg("source size : width = %d, height = %d\n", w, h);
+
+	s5p_mixer_ctrl_private.layer[layer].src_x = src_x;
+	s5p_mixer_ctrl_private.layer[layer].src_y = src_y;
+	s5p_mixer_ctrl_private.layer[layer].width = w;
+	s5p_mixer_ctrl_private.layer[layer].height = h;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return 0;
+	} else
+#endif
+	{
+		/* NOTE(review): w is passed as both the 4th and 5th argument
+		 * (span and width?), matching s5p_mixer_ctrl_set_reg();
+		 * confirm against s5p_mixer_set_grp_layer_src_pos(). */
+		if (s5p_mixer_ctrl_private.running)
+			s5p_mixer_set_grp_layer_src_pos(layer, src_x, src_y, w, w, h);
+	}
+
+	return 0;
+}
+
+/*
+ * Cache the DMA base address of a graphic layer's framebuffer and, when
+ * the mixer is running and not suspended, program it.
+ * Returns 0 on success, -1 on an invalid layer.
+ */
+int s5p_mixer_ctrl_set_buffer_address(enum s5p_mixer_layer layer,
+	dma_addr_t start_addr)
+{
+	if ((layer != MIXER_GPR0_LAYER) && (layer != MIXER_GPR1_LAYER)) {
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	/* NOTE(review): %x assumes a 32-bit dma_addr_t - fine on this
+	 * platform, but would warn/truncate with LPAE; confirm. */
+	tvout_dbg("TV frame buffer base address = 0x%x\n", start_addr);
+
+	s5p_mixer_ctrl_private.layer[layer].fb_addr = start_addr;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return 0;
+	}
+#endif
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_set_grp_base_address(layer, start_addr);
+
+	return 0;
+}
+
+/*
+ * Cache a graphic layer's chroma-key enable/value and, when the mixer is
+ * running and not suspended, program the hardware.
+ * Returns 0 on success, -1 on an invalid layer.
+ */
+int s5p_mixer_ctrl_set_chroma_key(enum s5p_mixer_layer layer,
+	struct s5ptvfb_chroma chroma)
+{
+	bool on;
+
+	if ((layer != MIXER_GPR0_LAYER) && (layer != MIXER_GPR1_LAYER)) {
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	on = chroma.enabled ? true : false;
+
+	s5p_mixer_ctrl_private.layer[layer].chroma_enable = on;
+	s5p_mixer_ctrl_private.layer[layer].chroma_key = chroma.key;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return 0;
+	}
+#endif
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_set_chroma_key(layer, on, chroma.key);
+
+	return 0;
+}
+
+/*
+ * Cache the layer alpha of @layer and, when the mixer is running,
+ * program it. Returns 0 on success, -1 on an invalid layer.
+ */
+int s5p_mixer_ctrl_set_alpha(enum s5p_mixer_layer layer, u32 alpha)
+{
+	switch (layer) {
+	case MIXER_VIDEO_LAYER:
+		s5p_mixer_ctrl_private.v_layer.alpha = alpha;
+		break;
+	case MIXER_GPR0_LAYER:
+	case MIXER_GPR1_LAYER:
+		s5p_mixer_ctrl_private.layer[layer].alpha = alpha;
+		break;
+	default:
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	tvout_dbg("alpha value = 0x%x\n", alpha);
+
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_set_alpha(layer, alpha);
+
+	return 0;
+}
+
+/*
+ * Select the blending mode of @layer: per-pixel alpha (graphic layers
+ * only), layer alpha, or none. Caches the flags and, when the mixer is
+ * running, programs the hardware. Returns 0 on success, -1 on an
+ * invalid layer/mode combination.
+ */
+int s5p_mixer_ctrl_set_blend_mode(enum s5p_mixer_layer layer,
+	enum s5ptvfb_alpha_t mode)
+{
+	if ((layer != MIXER_VIDEO_LAYER) && (layer != MIXER_GPR0_LAYER) &&
+		(layer != MIXER_GPR1_LAYER)) {
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	if ((layer == MIXER_VIDEO_LAYER) && (mode == PIXEL_BLENDING)) {
+		tvout_err("video layer doesn't support pixel blending\n");
+		return -1;
+	}
+
+	switch (mode) {
+	case PIXEL_BLENDING:
+		/* layer is guaranteed to be a graphic layer here (the video
+		 * layer was rejected above), so layer[] indexing is safe. */
+		tvout_dbg("pixel blending\n");
+		s5p_mixer_ctrl_private.layer[layer].pixel_blend = true;
+
+		if (s5p_mixer_ctrl_private.running)
+			s5p_mixer_set_pixel_blend(layer, true);
+		break;
+
+	case LAYER_BLENDING:
+		tvout_dbg("layer blending\n");
+		if (layer == MIXER_VIDEO_LAYER)
+			s5p_mixer_ctrl_private.v_layer.layer_blend = true;
+		else /* graphic layer */
+			s5p_mixer_ctrl_private.layer[layer].layer_blend = true;
+
+		if (s5p_mixer_ctrl_private.running)
+			s5p_mixer_set_layer_blend(layer, true);
+		break;
+
+	case NONE_BLENDING:
+		tvout_dbg("alpha blending off\n");
+		if (layer == MIXER_VIDEO_LAYER) {
+			s5p_mixer_ctrl_private.v_layer.layer_blend = false;
+
+			if (s5p_mixer_ctrl_private.running)
+				s5p_mixer_set_layer_blend(layer, false);
+		} else { /* graphic layer */
+			s5p_mixer_ctrl_private.layer[layer].pixel_blend = false;
+			s5p_mixer_ctrl_private.layer[layer].layer_blend = false;
+
+			if (s5p_mixer_ctrl_private.running) {
+				s5p_mixer_set_layer_blend(layer, false);
+				s5p_mixer_set_pixel_blend(layer, false);
+			}
+		}
+		break;
+
+	default:
+		tvout_err("invalid blending mode\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Graphic-layer-only variant of s5p_mixer_ctrl_set_blend_mode() that
+ * also takes the alpha value for LAYER_BLENDING and checks the early-
+ * suspend state before touching hardware.
+ * Returns 0 on success, -1 on an invalid layer or mode.
+ */
+int s5p_mixer_ctrl_set_alpha_blending(enum s5p_mixer_layer layer,
+	enum s5ptvfb_alpha_t blend_mode, unsigned int alpha)
+{
+	if ((layer != MIXER_GPR0_LAYER) && (layer != MIXER_GPR1_LAYER)) {
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	switch (blend_mode) {
+	case PIXEL_BLENDING:
+		tvout_dbg("pixel blending\n");
+		s5p_mixer_ctrl_private.layer[layer].pixel_blend = true;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (suspend_status) {
+			tvout_dbg("driver is suspend_status\n");
+			return 0;
+		}
+#endif
+		if (s5p_mixer_ctrl_private.running)
+			s5p_mixer_set_pixel_blend(layer, true);
+		break;
+
+	case LAYER_BLENDING:
+		tvout_dbg("layer blending : alpha value = 0x%x\n", alpha);
+		s5p_mixer_ctrl_private.layer[layer].layer_blend = true;
+		s5p_mixer_ctrl_private.layer[layer].alpha = alpha;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (suspend_status) {
+			tvout_dbg("driver is suspend_status\n");
+			return 0;
+		}
+#endif
+		if (s5p_mixer_ctrl_private.running) {
+			s5p_mixer_set_layer_blend(layer, true);
+			s5p_mixer_set_alpha(layer, alpha);
+		}
+		break;
+
+	case NONE_BLENDING:
+		tvout_dbg("alpha blending off\n");
+		s5p_mixer_ctrl_private.layer[layer].pixel_blend = false;
+		s5p_mixer_ctrl_private.layer[layer].layer_blend = false;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (suspend_status) {
+			tvout_dbg("driver is suspend_status\n");
+			return 0;
+		}
+#endif
+		if (s5p_mixer_ctrl_private.running) {
+			s5p_mixer_set_pixel_blend(layer, false);
+			s5p_mixer_set_layer_blend(layer, false);
+		}
+		break;
+
+	default:
+		tvout_err("invalid blending mode\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Validate and cache the 1x/2x scaling factors of a graphic layer;
+ * program the hardware when the mixer is running and not suspended.
+ * Returns 0 on success, -1 on an invalid layer or scaling factor.
+ */
+int s5p_mixer_ctrl_scaling(enum s5p_mixer_layer layer,
+	struct s5ptvfb_user_scaling scaling)
+{
+	if ((layer != MIXER_GPR0_LAYER) && (layer != MIXER_GPR1_LAYER)) {
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	switch (scaling.ver) {
+	case VERTICAL_X1:
+	case VERTICAL_X2:
+		break;
+	default:
+		tvout_err("invalid vertical size\n");
+		return -1;
+	}
+
+	switch (scaling.hor) {
+	case HORIZONTAL_X1:
+	case HORIZONTAL_X2:
+		break;
+	default:
+		tvout_err("invalid horizontal size\n");
+		return -1;
+	}
+
+	s5p_mixer_ctrl_private.layer[layer].ver = scaling.ver;
+	s5p_mixer_ctrl_private.layer[layer].hor = scaling.hor;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return 0;
+	}
+#endif
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_scaling(layer, scaling);
+
+	return 0;
+}
+
+/*
+ * Reparent the sclk_mixer mux onto @ptr.
+ * Returns 0 on success, -1 if clk_set_parent() fails.
+ */
+int s5p_mixer_ctrl_mux_clk(struct clk *ptr)
+{
+	if (clk_set_parent(s5p_mixer_ctrl_private.clk[MUX].ptr, ptr)) {
+		tvout_err("unable to set parent %s of clock %s.\n",
+			ptr->name, s5p_mixer_ctrl_private.clk[MUX].ptr->name);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable/disable the underflow interrupt of all three mixer layers.
+ * Has no effect while the mixer is not running.
+ */
+void s5p_mixer_ctrl_set_int_enable(bool en)
+{
+	static const enum s5p_mixer_layer all_layers[] = {
+		MIXER_VIDEO_LAYER, MIXER_GPR0_LAYER, MIXER_GPR1_LAYER
+	};
+	int i;
+
+	tvout_dbg("mixer layers' underflow interrupts are %s, running %d\n",
+			en ? "enabled" : "disabled",
+			s5p_mixer_ctrl_private.running);
+
+	if (!s5p_mixer_ctrl_private.running)
+		return;
+
+	for (i = 0; i < 3; i++)
+		s5p_mixer_set_underflow_int_enable(all_layers[i], en);
+}
+
+/*
+ * Cache the requested vsync-interrupt state and apply it immediately
+ * when the mixer is running (it is re-applied on the next start too).
+ */
+void s5p_mixer_ctrl_set_vsync_interrupt(bool en)
+{
+	s5p_mixer_ctrl_private.vsync_interrupt_enable = en;
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_set_vsync_interrupt(en);
+}
+
+/*
+ * Return the cached vsync-interrupt enable state.
+ * ((void) replaces the old-style empty parameter list, which declared
+ * the function with unspecified parameters instead of none.)
+ */
+bool s5p_mixer_ctrl_get_vsync_interrupt(void)
+{
+	return s5p_mixer_ctrl_private.vsync_interrupt_enable;
+}
+
+/* Clear all pending mixer interrupt flags (only while running). */
+void s5p_mixer_ctrl_clear_pend_all(void)
+{
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_clear_pend_all();
+}
+
+/*
+ * Stop the mixer: disable the vsync interrupt, reset the video-
+ * processor buffer rotation indices to their initial layout, halt the
+ * hardware and gate the clocks. While suspended only the software
+ * running flag is cleared (clocks were already gated by tvif).
+ */
+void s5p_mixer_ctrl_stop(void)
+{
+	int i;
+
+	tvout_dbg("running(%d)\n", s5p_mixer_ctrl_private.running);
+	if (s5p_mixer_ctrl_private.running) {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (suspend_status) {
+			tvout_dbg("driver is suspend_status\n");
+		} else
+#endif
+		{
+			s5p_mixer_set_vsync_interrupt(false);
+
+			/* first N-1 buffers are copy targets, the last one
+			 * is the buffer the VP is scanning out */
+			for (i = 0; i < S5PTV_VP_BUFF_CNT -1; i++)
+				s5ptv_vp_buff.copy_buff_idxs[i] = i;
+
+			s5ptv_vp_buff.curr_copy_idx = 0;
+			s5ptv_vp_buff.vp_access_buff_idx = S5PTV_VP_BUFF_CNT - 1;
+
+			s5p_mixer_stop();
+			s5p_mixer_ctrl_clock(0);
+		}
+		s5p_mixer_ctrl_private.running = false;
+	}
+}
+
+/* Re-start mixer streaming without reconfiguration (only if running). */
+void s5p_mixer_ctrl_internal_start(void)
+{
+	tvout_dbg("running(%d)\n", s5p_mixer_ctrl_private.running);
+	if (s5p_mixer_ctrl_private.running)
+		s5p_mixer_start();
+}
+
+/*
+ * Configure and start the mixer for display standard @disp on output
+ * path @out: reparent sclk_mixer to the DAC or HDMI-PHY clock, gate the
+ * clocks on, pick the RGB<->YCbCr CSC ranges per standard and quantization
+ * mode, program burst/endian, video limiter and background colors, then
+ * re-show every layer that is marked in use and enable streaming.
+ * Returns 0 on success, -1 on an invalid output mode or clock failure.
+ */
+int s5p_mixer_ctrl_start(
+	enum s5p_tvout_disp_mode disp, enum s5p_tvout_o_mode out)
+{
+	int i;
+
+	int csc = MIXER_RGB601_16_235;
+	int csc_for_coeff = MIXER_RGB601_0_255;
+	enum s5p_mixer_burst_mode burst = s5p_mixer_ctrl_private.burst;
+	enum s5p_tvout_endian endian = s5p_mixer_ctrl_private.endian;
+	struct clk *sclk_mixer = s5p_mixer_ctrl_private.clk[MUX].ptr;
+	bool mixer_video_limiter = true;
+
+	/*
+	 * Getting mega struct member variable will be replaced another tvout
+	 * interface
+	 */
+	struct s5p_tvout_status *st = &s5ptv_status;
+
+	tvout_dbg("running(%d)\n", s5p_mixer_ctrl_private.running);
+
+	switch (out) {
+	case TVOUT_COMPOSITE:
+		if (clk_set_parent(sclk_mixer, st->sclk_dac)) {
+			tvout_err("unable to set parent %s of clock %s.\n",
+				st->sclk_dac->name, sclk_mixer->name);
+			return -1;
+		}
+
+		if (!s5p_mixer_ctrl_private.running) {
+			s5p_mixer_ctrl_clock(true);
+			s5p_mixer_ctrl_private.running = true;
+		}
+
+		csc = MIXER_RGB601_0_255;
+		csc_for_coeff = MIXER_RGB601_0_255;
+		break;
+
+	case TVOUT_HDMI_RGB:
+	case TVOUT_HDMI:
+	case TVOUT_DVI:
+		if (clk_set_parent(sclk_mixer, st->sclk_hdmi)) {
+			tvout_err("unable to set parent %s of clock %s.\n",
+				st->sclk_hdmi->name, sclk_mixer->name);
+			return -1;
+		}
+
+		if (clk_set_parent(st->sclk_hdmi, st->sclk_hdmiphy)) {
+			tvout_err("unable to set parent %s of clock %s.\n",
+				st->sclk_hdmiphy->name, st->sclk_hdmi->name);
+			return -1;
+		}
+
+		if (!s5p_mixer_ctrl_private.running) {
+			s5p_mixer_ctrl_clock(true);
+			s5p_mixer_ctrl_private.running = true;
+		}
+
+		/* SD standards use BT.601 coefficients, HD ones BT.709;
+		 * the range depends on the quantization mode and on RGB out */
+		switch (disp) {
+
+		case TVOUT_480P_60_4_3:
+			if (s5p_tvif_get_q_range() || out == TVOUT_HDMI_RGB)
+				csc = MIXER_RGB601_0_255;
+			else
+				csc = MIXER_RGB601_16_235;
+			csc_for_coeff = MIXER_RGB601_0_255;
+			break;
+		case TVOUT_480P_60_16_9:
+		case TVOUT_480P_59:
+		case TVOUT_576P_50_16_9:
+		case TVOUT_576P_50_4_3:
+			if (s5p_tvif_get_q_range() && out != TVOUT_HDMI_RGB)
+				csc = MIXER_RGB601_0_255;
+			else
+				csc = MIXER_RGB601_16_235;
+			csc_for_coeff = MIXER_RGB601_0_255;
+			break;
+		case TVOUT_720P_60:
+		case TVOUT_720P_50:
+		case TVOUT_720P_59:
+		case TVOUT_1080I_60:
+		case TVOUT_1080I_59:
+		case TVOUT_1080I_50:
+		case TVOUT_1080P_60:
+		case TVOUT_1080P_30:
+		case TVOUT_1080P_59:
+		case TVOUT_1080P_50:
+			if (!s5p_tvif_get_q_range() || out == TVOUT_HDMI_RGB)
+				csc = MIXER_RGB709_16_235;
+			else
+				csc = MIXER_RGB709_0_255;
+			csc_for_coeff = MIXER_RGB709_0_255;
+			break;
+#ifdef CONFIG_HDMI_14A_3D
+		case TVOUT_720P_60_SBS_HALF:
+		case TVOUT_720P_59_SBS_HALF:
+		case TVOUT_720P_50_TB:
+		case TVOUT_1080P_24_TB:
+		case TVOUT_1080P_23_TB:
+			if (!s5p_tvif_get_q_range() || out == TVOUT_HDMI_RGB)
+				csc = MIXER_RGB709_16_235;
+			else
+				csc = MIXER_RGB709_0_255;
+			csc_for_coeff = MIXER_RGB709_0_255;
+			break;
+
+#endif
+		default:
+			break;
+		}
+		break;
+
+	default:
+		tvout_err("invalid tvout output mode = %d\n", out);
+		return -1;
+	}
+
+	tvout_dbg("%s burst mode\n", burst ? "16" : "8");
+	tvout_dbg("%s endian\n", endian ? "big" : "little");
+
+	if ((burst != MIXER_BURST_8) && (burst != MIXER_BURST_16)) {
+		tvout_err("invalid burst mode\n");
+		return -1;
+	}
+
+	if ((endian != TVOUT_BIG_ENDIAN) && (endian != TVOUT_LITTLE_ENDIAN)) {
+		tvout_err("invalid endian\n");
+		return -1;
+	}
+
+	s5p_mixer_init_status_reg(burst, endian);
+
+	tvout_dbg("tvout standard = 0x%X, output mode = %d\n", disp, out);
+	/* error handling will be implemented */
+	/* NOTE(review): KERN_INFO inside tvout_dbg() looks misplaced - every
+	 * other call site passes a bare format string; confirm against the
+	 * tvout_dbg() macro definition. */
+	tvout_dbg(KERN_INFO "Color range mode set : %d\n",
+		s5p_tvif_get_q_range());
+	s5p_mixer_init_csc_coef_default(csc_for_coeff);
+	s5p_mixer_init_display_mode(disp, out, csc);
+
+	if (!s5p_tvif_get_q_range() || out == TVOUT_HDMI_RGB)
+		mixer_video_limiter = true;
+	else
+		mixer_video_limiter = false;
+
+	s5p_mixer_set_video_limiter(s5p_mixer_ctrl_private.v_layer.y_min,
+		s5p_mixer_ctrl_private.v_layer.y_max,
+		s5p_mixer_ctrl_private.v_layer.c_min,
+		s5p_mixer_ctrl_private.v_layer.c_max,
+		mixer_video_limiter);
+
+	for (i = MIXER_BG_COLOR_0; i <= MIXER_BG_COLOR_2; i++) {
+		s5p_mixer_set_bg_color(i,
+			s5p_mixer_ctrl_private.bg_color[i].color_y,
+			s5p_mixer_ctrl_private.bg_color[i].color_cb,
+			s5p_mixer_ctrl_private.bg_color[i].color_cr);
+	}
+
+	if (s5p_mixer_ctrl_private.v_layer.use_video_layer) {
+		s5p_mixer_ctrl_set_reg(MIXER_VIDEO_LAYER);
+		s5p_mixer_set_show(MIXER_VIDEO_LAYER, true);
+	}
+	if (s5p_mixer_ctrl_private.layer[MIXER_GPR0_LAYER].use_grp_layer) {
+		s5p_mixer_ctrl_set_reg(MIXER_GPR0_LAYER);
+		s5p_mixer_set_show(MIXER_GPR0_LAYER, true);
+	}
+	if (s5p_mixer_ctrl_private.layer[MIXER_GPR1_LAYER].use_grp_layer) {
+		s5p_mixer_ctrl_set_reg(MIXER_GPR1_LAYER);
+		s5p_mixer_set_show(MIXER_GPR1_LAYER, true);
+	}
+
+	s5p_mixer_start();
+	if (s5p_mixer_ctrl_private.vsync_interrupt_enable)
+		s5p_mixer_set_vsync_interrupt(true);
+
+	return 0;
+}
+
+wait_queue_head_t s5ptv_wq;
+
+/*
+ * Probe-time setup: map the mixer register resource, grab the two
+ * clocks, look up the interrupt, initialize the hw_if register base and
+ * the vsync wait queue, and install the irq handler.
+ * Returns 0 on success or a negative errno; on failure every resource
+ * acquired so far is released (previously the clk_get() references were
+ * leaked on the error paths, and a failing slot kept an ERR_PTR).
+ */
+int s5p_mixer_ctrl_constructor(struct platform_device *pdev)
+{
+	int ret = 0, i;
+
+	ret = s5p_tvout_map_resource_mem(
+		pdev,
+		s5p_mixer_ctrl_private.reg_mem.name,
+		&(s5p_mixer_ctrl_private.reg_mem.base),
+		&(s5p_mixer_ctrl_private.reg_mem.res));
+
+	if (ret)
+		goto err_on_res;
+
+	for (i = ACLK; i < NO_OF_CLK; i++) {
+		s5p_mixer_ctrl_private.clk[i].ptr =
+			clk_get(&pdev->dev, s5p_mixer_ctrl_private.clk[i].name);
+
+		if (IS_ERR(s5p_mixer_ctrl_private.clk[i].ptr)) {
+			printk(KERN_ERR "Failed to find clock %s\n",
+				s5p_mixer_ctrl_private.clk[i].name);
+			/* don't leave an ERR_PTR behind for the destructor */
+			s5p_mixer_ctrl_private.clk[i].ptr = NULL;
+			ret = -ENOENT;
+			goto err_on_clk;
+		}
+	}
+
+	s5p_mixer_ctrl_private.irq.no =
+		platform_get_irq_byname(pdev, s5p_mixer_ctrl_private.irq.name);
+
+	if (s5p_mixer_ctrl_private.irq.no < 0) {
+		tvout_err("Failed to call platform_get_irq_byname() for %s\n",
+			s5p_mixer_ctrl_private.irq.name);
+		ret = s5p_mixer_ctrl_private.irq.no;
+		goto err_on_irq;
+	}
+
+	/* Initializing wait queue for mixer vsync interrupt */
+	init_waitqueue_head(&s5ptv_wq);
+
+	s5p_mixer_init(s5p_mixer_ctrl_private.reg_mem.base);
+
+	ret = request_irq(
+		s5p_mixer_ctrl_private.irq.no,
+		s5p_mixer_ctrl_private.irq.handler,
+		IRQF_DISABLED,
+		s5p_mixer_ctrl_private.irq.name,
+		NULL);
+	if (ret) {
+		tvout_err("Failed to call request_irq() for %s\n",
+			s5p_mixer_ctrl_private.irq.name);
+		goto err_on_irq;
+	}
+
+	return 0;
+
+err_on_irq:
+err_on_clk:
+	/* release every clock obtained before the failure; i is either the
+	 * failing index or NO_OF_CLK when the loop completed */
+	while (--i >= 0) {
+		clk_put(s5p_mixer_ctrl_private.clk[i].ptr);
+		s5p_mixer_ctrl_private.clk[i].ptr = NULL;
+	}
+
+	iounmap(s5p_mixer_ctrl_private.reg_mem.base);
+	release_resource(s5p_mixer_ctrl_private.reg_mem.res);
+	kfree(s5p_mixer_ctrl_private.reg_mem.res);
+
+err_on_res:
+	return ret;
+}
+
+/*
+ * Remove-time teardown: free the irq, unmap the register resource and
+ * release the clocks.
+ *
+ * NOTE(review): clk_disable() here may be unbalanced if the mixer was
+ * already stopped (s5p_mixer_ctrl_clock(0) disables too), and
+ * s5p_mixer_init(NULL) is re-invoked once per loop iteration - confirm
+ * both are intentional.
+ */
+void s5p_mixer_ctrl_destructor(void)
+{
+	int i;
+	int irq_no = s5p_mixer_ctrl_private.irq.no;
+
+	if (irq_no >= 0)
+		free_irq(irq_no, NULL);
+
+	s5p_tvout_unmap_resource_mem(
+		s5p_mixer_ctrl_private.reg_mem.base,
+		s5p_mixer_ctrl_private.reg_mem.res);
+
+	for (i = ACLK; i < NO_OF_CLK; i++) {
+		if (s5p_mixer_ctrl_private.clk[i].ptr) {
+			clk_disable(s5p_mixer_ctrl_private.clk[i].ptr);
+			clk_put(s5p_mixer_ctrl_private.clk[i].ptr);
+			s5p_mixer_init(NULL);
+		}
+	}
+}
+
+bool pm_running;
+
+/* Suspend hook - intentionally empty: clock gating is done by tvif. */
+void s5p_mixer_ctrl_suspend(void)
+{
+	tvout_dbg("running(%d)\n", s5p_mixer_ctrl_private.running);
+	/* Mixer clock will be gated by tvif_ctrl */
+}
+
+/* Resume hook - intentionally empty: clock ungating is done by tvif. */
+void s5p_mixer_ctrl_resume(void)
+{
+	tvout_dbg("running(%d)\n", s5p_mixer_ctrl_private.running);
+	/* Mixer clock will be gated by tvif_ctrl */
+}
diff --git a/drivers/media/video/samsung/tvout/s5p_tvif_ctrl.c b/drivers/media/video/samsung/tvout/s5p_tvif_ctrl.c
new file mode 100644
index 0000000..93cb640
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvif_ctrl.c
@@ -0,0 +1,2952 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvif_ctrl.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Tvout ctrl class for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*****************************************************************************
+ * This file includes functions for ctrl classes of TVOUT driver.
+ * There are 3 ctrl classes. (tvif, hdmi, sdo)
+ * - tvif ctrl class: controls hdmi and sdo ctrl class.
+ * - hdmi ctrl class: controls hdmi hardware by using hw_if/hdmi.c
+ * - sdo ctrl class: controls sdo hardware by using hw_if/sdo.c
+ *
+ * +-----------------+
+ * | tvif ctrl class |
+ * +-----------------+
+ * | |
+ * +----------+ +----------+ ctrl class layer
+ * | |
+ * V V
+ * +-----------------+ +-----------------+
+ * | sdo ctrl class | | hdmi ctrl class |
+ * +-----------------+ +-----------------+
+ * | |
+ * ---------------+-------------------------+------------------------------
+ * V V
+ * +-----------------+ +-----------------+
+ * | hw_if/sdo.c | | hw_if/hdmi.c | hw_if layer
+ * +-----------------+ +-----------------+
+ * | |
+ * ---------------+-------------------------+------------------------------
+ * V V
+ * +-----------------+ +-----------------+
+ * | sdo hardware | | hdmi hardware | Hardware
+ * +-----------------+ +-----------------+
+ *
+ ****************************************************************************/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include <plat/clock.h>
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+#include <mach/dev.h>
+#endif
+
+#include "s5p_tvout_common_lib.h"
+#include "hw_if/hw_if.h"
+#include "s5p_tvout_ctrl.h"
+
+#ifdef CONFIG_HDMI_14A_3D
+static struct s5p_hdmi_v_format s5p_hdmi_v_fmt[] = {
+ [v720x480p_60Hz] = {
+ .frame = {
+ .vH_Line = 0x035a,
+ .vV_Line = 0x020d,
+ .vH_SYNC_START = 0x000e,
+ .vH_SYNC_END = 0x004c,
+ .vV1_Blank = 0x002d,
+ .vV2_Blank = 0x020d,
+ .vHBlank = 0x008a,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x9,
+ .vVSYNC_LINE_BEF_2 = 0x000f,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 1,
+ .Vsync_polarity = 1,
+ .interlaced = 0,
+ .vAVI_VIC = 2,
+ .vAVI_VIC_16_9 = 3,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_27_027,
+ },
+ .tg_H_FSZ = 0x35a,
+ .tg_HACT_ST = 0x8a,
+ .tg_HACT_SZ = 0x2d0,
+ .tg_V_FSZ = 0x20d,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x2d,
+ .tg_VACT_SZ = 0x1e0,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1280x720p_60Hz] = {
+ .frame = {
+ .vH_Line = 0x0672,
+ .vV_Line = 0x02ee,
+ .vH_SYNC_START = 0x006c,
+ .vH_SYNC_END = 0x0094,
+ .vV1_Blank = 0x001e,
+ .vV2_Blank = 0x02ee,
+ .vHBlank = 0x0172,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x5,
+ .vVSYNC_LINE_BEF_2 = 0x000a,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 4,
+ .vAVI_VIC_16_9 = 4,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .tg_H_FSZ = 0x672,
+ .tg_HACT_ST = 0x172,
+ .tg_HACT_SZ = 0x500,
+ .tg_V_FSZ = 0x2ee,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x1e,
+ .tg_VACT_SZ = 0x2d0,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080i_60Hz] = {
+ .frame = {
+ .vH_Line = 0x0898,
+ .vV_Line = 0x0465,
+ .vH_SYNC_START = 0x0056,
+ .vH_SYNC_END = 0x0082,
+ .vV1_Blank = 0x0016,
+ .vV2_Blank = 0x0232,
+ .vHBlank = 0x0118,
+ .VBLANK_F0 = 0x0249,
+ .VBLANK_F1 = 0x0465,
+ .vVSYNC_LINE_BEF_1 = 0x2,
+ .vVSYNC_LINE_BEF_2 = 0x0007,
+ .vVSYNC_LINE_AFT_1 = 0x0234,
+ .vVSYNC_LINE_AFT_2 = 0x0239,
+ .vVSYNC_LINE_AFT_PXL_1 = 0x04a4,
+ .vVSYNC_LINE_AFT_PXL_2 = 0x04a4,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 1,
+ .vAVI_VIC = 5,
+ .vAVI_VIC_16_9 = 5,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .tg_H_FSZ = 0x898,
+ .tg_HACT_ST = 0x118,
+ .tg_HACT_SZ = 0x780,
+ .tg_V_FSZ = 0x465,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x16,
+ .tg_VACT_SZ = 0x21c,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x249,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x233,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_60Hz] = {
+ .frame = {
+ .vH_Line = 0x0898,
+ .vV_Line = 0x0465,
+ .vH_SYNC_START = 0x0056,
+ .vH_SYNC_END = 0x0082,
+ .vV1_Blank = 0x002d,
+ .vV2_Blank = 0x0465,
+ .vHBlank = 0x0118,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x4,
+ .vVSYNC_LINE_BEF_2 = 0x0009,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 16,
+ .vAVI_VIC_16_9 = 16,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_148_500,
+ },
+ .tg_H_FSZ = 0x898,
+ .tg_HACT_ST = 0x118,
+ .tg_HACT_SZ = 0x780,
+ .tg_V_FSZ = 0x465,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x2d,
+ .tg_VACT_SZ = 0x438,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v720x576p_50Hz] = {
+ .frame = {
+ .vH_Line = 0x0360,
+ .vV_Line = 0x0271,
+ .vH_SYNC_START = 0x000a,
+ .vH_SYNC_END = 0x004a,
+ .vV1_Blank = 0x0031,
+ .vV2_Blank = 0x0271,
+ .vHBlank = 0x0090,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x5,
+ .vVSYNC_LINE_BEF_2 = 0x000a,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 1,
+ .Vsync_polarity = 1,
+ .interlaced = 0,
+ .vAVI_VIC = 17,
+ .vAVI_VIC_16_9 = 18,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_27,
+ },
+ .tg_H_FSZ = 0x360,
+ .tg_HACT_ST = 0x90,
+ .tg_HACT_SZ = 0x2d0,
+ .tg_V_FSZ = 0x271,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x31,
+ .tg_VACT_SZ = 0x240,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1280x720p_50Hz] = {
+ .frame = {
+ .vH_Line = 0x07BC,
+ .vV_Line = 0x02EE,
+ .vH_SYNC_START = 0x01b6,
+ .vH_SYNC_END = 0x01de,
+ .vV1_Blank = 0x001E,
+ .vV2_Blank = 0x02EE,
+ .vHBlank = 0x02BC,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x5,
+ .vVSYNC_LINE_BEF_2 = 0x000a,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 19,
+ .vAVI_VIC_16_9 = 19,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .tg_H_FSZ = 0x7bc,
+ .tg_HACT_ST = 0x2bc,
+ .tg_HACT_SZ = 0x500,
+ .tg_V_FSZ = 0x2ee,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x1e,
+ .tg_VACT_SZ = 0x2d0,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080i_50Hz] = {
+ .frame = {
+ .vH_Line = 0x0A50,
+ .vV_Line = 0x0465,
+ .vH_SYNC_START = 0x020e,
+ .vH_SYNC_END = 0x023a,
+ .vV1_Blank = 0x0016,
+ .vV2_Blank = 0x0232,
+ .vHBlank = 0x02D0,
+ .VBLANK_F0 = 0x0249,
+ .VBLANK_F1 = 0x0465,
+ .vVSYNC_LINE_BEF_1 = 0x2,
+ .vVSYNC_LINE_BEF_2 = 0x0007,
+ .vVSYNC_LINE_AFT_1 = 0x0234,
+ .vVSYNC_LINE_AFT_2 = 0x0239,
+ .vVSYNC_LINE_AFT_PXL_1 = 0x0738,
+ .vVSYNC_LINE_AFT_PXL_2 = 0x0738,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 1,
+ .vAVI_VIC = 20,
+ .vAVI_VIC_16_9 = 20,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .tg_H_FSZ = 0xa50,
+ .tg_HACT_ST = 0x2d0,
+ .tg_HACT_SZ = 0x780,
+ .tg_V_FSZ = 0x465,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x16,
+ .tg_VACT_SZ = 0x21c,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x249,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x233,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_50Hz] = {
+ .frame = {
+ .vH_Line = 0x0A50,
+ .vV_Line = 0x0465,
+ .vH_SYNC_START = 0x020e,
+ .vH_SYNC_END = 0x023a,
+ .vV1_Blank = 0x002D,
+ .vV2_Blank = 0x0465,
+ .vHBlank = 0x02D0,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x4,
+ .vVSYNC_LINE_BEF_2 = 0x0009,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 31,
+ .vAVI_VIC_16_9 = 31,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_148_500,
+ },
+ .tg_H_FSZ = 0xa50,
+ .tg_HACT_ST = 0x2d0,
+ .tg_HACT_SZ = 0x780,
+ .tg_V_FSZ = 0x465,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x2d,
+ .tg_VACT_SZ = 0x438,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_30Hz] = {
+ .frame = {
+ .vH_Line = 0x0898,
+ .vV_Line = 0x0465,
+ .vH_SYNC_START = 0x056,
+ .vH_SYNC_END = 0x082,
+ .vV1_Blank = 0x002D,
+ .vV2_Blank = 0x0465,
+ .vHBlank = 0x0118,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x4,
+ .vVSYNC_LINE_BEF_2 = 0x0009,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 34,
+ .vAVI_VIC_16_9 = 34,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_176,
+ },
+ .tg_H_FSZ = 0x898,
+ .tg_HACT_ST = 0x118,
+ .tg_HACT_SZ = 0x780,
+ .tg_V_FSZ = 0x465,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x2d,
+ .tg_VACT_SZ = 0x438,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v720x480p_59Hz] = {
+ .frame = {
+ .vH_Line = 0x035a,
+ .vV_Line = 0x020d,
+ .vH_SYNC_START = 0x000e,
+ .vH_SYNC_END = 0x004c,
+ .vV1_Blank = 0x002D,
+ .vV2_Blank = 0x020d,
+ .vHBlank = 0x008a,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x9,
+ .vVSYNC_LINE_BEF_2 = 0x000f,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 1,
+ .Vsync_polarity = 1,
+ .interlaced = 0,
+ .vAVI_VIC = 2,
+ .vAVI_VIC_16_9 = 3,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_27,
+ },
+ .tg_H_FSZ = 0x35a,
+ .tg_HACT_ST = 0x8a,
+ .tg_HACT_SZ = 0x2d0,
+ .tg_V_FSZ = 0x20d,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x2d,
+ .tg_VACT_SZ = 0x1e0,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1280x720p_59Hz] = {
+ .frame = {
+ .vH_Line = 0x0672,
+ .vV_Line = 0x02ee,
+ .vH_SYNC_START = 0x006c,
+ .vH_SYNC_END = 0x0094,
+ .vV1_Blank = 0x001e,
+ .vV2_Blank = 0x02ee,
+ .vHBlank = 0x0172,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x5,
+ .vVSYNC_LINE_BEF_2 = 0x000a,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 4,
+ .vAVI_VIC_16_9 = 4,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_176,
+ },
+ .tg_H_FSZ = 0x672,
+ .tg_HACT_ST = 0x172,
+ .tg_HACT_SZ = 0x500,
+ .tg_V_FSZ = 0x2ee,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x1e,
+ .tg_VACT_SZ = 0x2d0,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080i_59Hz] = {
+ .frame = {
+ .vH_Line = 0x0898,
+ .vV_Line = 0x0465,
+ .vH_SYNC_START = 0x0056,
+ .vH_SYNC_END = 0x0082,
+ .vV1_Blank = 0x0016,
+ .vV2_Blank = 0x0232,
+ .vHBlank = 0x0118,
+ .VBLANK_F0 = 0x0249,
+ .VBLANK_F1 = 0x0465,
+ .vVSYNC_LINE_BEF_1 = 0x2,
+ .vVSYNC_LINE_BEF_2 = 0x0007,
+ .vVSYNC_LINE_AFT_1 = 0x0234,
+ .vVSYNC_LINE_AFT_2 = 0x0239,
+ .vVSYNC_LINE_AFT_PXL_1 = 0x04a4,
+ .vVSYNC_LINE_AFT_PXL_2 = 0x04a4,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 1,
+ .vAVI_VIC = 5,
+ .vAVI_VIC_16_9 = 5,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_176,
+ },
+ .tg_H_FSZ = 0x898,
+ .tg_HACT_ST = 0x118,
+ .tg_HACT_SZ = 0x780,
+ .tg_V_FSZ = 0x465,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x16,
+ .tg_VACT_SZ = 0x21c,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x249,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x233,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_59Hz] = {
+ .frame = {
+ .vH_Line = 0x0898,
+ .vV_Line = 0x0465,
+ .vH_SYNC_START = 0x0056,
+ .vH_SYNC_END = 0x0082,
+ .vV1_Blank = 0x002d,
+ .vV2_Blank = 0x0465,
+ .vHBlank = 0x0118,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x4,
+ .vVSYNC_LINE_BEF_2 = 0x0009,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 16,
+ .vAVI_VIC_16_9 = 16,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_148_352,
+ },
+ .tg_H_FSZ = 0x898,
+ .tg_HACT_ST = 0x118,
+ .tg_HACT_SZ = 0x780,
+ .tg_V_FSZ = 0x465,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x2d,
+ .tg_VACT_SZ = 0x438,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1280x720p_60Hz_SBS_HALF] = {
+ .frame = {
+ .vH_Line = 0x0672,
+ .vV_Line = 0x02ee,
+ .vH_SYNC_START = 0x006c,
+ .vH_SYNC_END = 0x0094,
+ .vV1_Blank = 0x001e,
+ .vV2_Blank = 0x02ee,
+ .vHBlank = 0x0172,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x5,
+ .vVSYNC_LINE_BEF_2 = 0x000a,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 4,
+ .vAVI_VIC_16_9 = 4,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .tg_H_FSZ = 0x672,
+ .tg_HACT_ST = 0x172,
+ .tg_HACT_SZ = 0x500,
+ .tg_V_FSZ = 0x2ee,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x1e,
+ .tg_VACT_SZ = 0x2d0,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x30c,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1280x720p_59Hz_SBS_HALF] = {
+ .frame = {
+ .vH_Line = 0x0672,
+ .vV_Line = 0x02ee,
+ .vH_SYNC_START = 0x006c,
+ .vH_SYNC_END = 0x0094,
+ .vV1_Blank = 0x001e,
+ .vV2_Blank = 0x02ee,
+ .vHBlank = 0x0172,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x5,
+ .vVSYNC_LINE_BEF_2 = 0x000a,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 4,
+ .vAVI_VIC_16_9 = 4,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .tg_H_FSZ = 0x672,
+ .tg_HACT_ST = 0x172,
+ .tg_HACT_SZ = 0x500,
+ .tg_V_FSZ = 0x2ee,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x1e,
+ .tg_VACT_SZ = 0x2d0,
+ .tg_FIELD_CHG = 0x0,
+ .tg_VACT_ST2 = 0x30c,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1280x720p_50Hz_TB] = {
+ .frame = {
+ .vH_Line = 0x07bc,
+ .vV_Line = 0x02ee,
+ .vH_SYNC_START = 0x01b6,
+ .vH_SYNC_END = 0x01de,
+ .vV1_Blank = 0x001e,
+ .vV2_Blank = 0x02ee,
+ .vHBlank = 0x02bc,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x5,
+ .vVSYNC_LINE_BEF_2 = 0x000a,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 19,
+ .vAVI_VIC_16_9 = 19,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .tg_H_FSZ = 0x7bc,
+ .tg_HACT_ST = 0x2bc,
+ .tg_HACT_SZ = 0x500,
+ .tg_V_FSZ = 0x2ee,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x1e,
+ .tg_VACT_SZ = 0x2d0,
+ .tg_FIELD_CHG = 0x0,
+ .tg_VACT_ST2 = 0x30c,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_24Hz_TB] = {
+ .frame = {
+ .vH_Line = 0x0abe,
+ .vV_Line = 0x0465,
+ .vH_SYNC_START = 0x027c,
+ .vH_SYNC_END = 0x02a8,
+ .vV1_Blank = 0x002d,
+ .vV2_Blank = 0x0465,
+ .vHBlank = 0x033e,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x4,
+ .vVSYNC_LINE_BEF_2 = 0x0009,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 32,
+ .vAVI_VIC_16_9 = 32,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .tg_H_FSZ = 0xabe,
+ .tg_HACT_ST = 0x33e,
+ .tg_HACT_SZ = 0x780,
+ .tg_V_FSZ = 0x465,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x2d,
+ .tg_VACT_SZ = 0x438,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_23Hz_TB] = {
+ .frame = {
+ .vH_Line = 0x0abe,
+ .vV_Line = 0x0465,
+ .vH_SYNC_START = 0x027c,
+ .vH_SYNC_END = 0x02a8,
+ .vV1_Blank = 0x002d,
+ .vV2_Blank = 0x0465,
+ .vHBlank = 0x033e,
+ .VBLANK_F0 = 0xffff,
+ .VBLANK_F1 = 0xffff,
+ .vVSYNC_LINE_BEF_1 = 0x4,
+ .vVSYNC_LINE_BEF_2 = 0x0009,
+ .vVSYNC_LINE_AFT_1 = 0xffff,
+ .vVSYNC_LINE_AFT_2 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_1 = 0xffff,
+ .vVSYNC_LINE_AFT_PXL_2 = 0xffff,
+ .vVACT_SPACE_1 = 0xffff,
+ .vVACT_SPACE_2 = 0xffff,
+ .Hsync_polarity = 0,
+ .Vsync_polarity = 0,
+ .interlaced = 0,
+ .vAVI_VIC = 32,
+ .vAVI_VIC_16_9 = 32,
+ .repetition = 0,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .tg_H_FSZ = 0xabe,
+ .tg_HACT_ST = 0x33e,
+ .tg_HACT_SZ = 0x780,
+ .tg_V_FSZ = 0x465,
+ .tg_VSYNC = 0x1,
+ .tg_VSYNC2 = 0x233,
+ .tg_VACT_ST = 0x2d,
+ .tg_VACT_SZ = 0x438,
+ .tg_FIELD_CHG = 0x233,
+ .tg_VACT_ST2 = 0x248,
+ .tg_VACT_ST3 = 0x0,
+ .tg_VACT_ST4 = 0x0,
+ .tg_VSYNC_TOP_HDMI = 0x1,
+ .tg_VSYNC_BOT_HDMI = 0x1,
+ .tg_FIELD_TOP_HDMI = 0x1,
+ .tg_FIELD_BOT_HDMI = 0x233,
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+};
+#else
+static struct s5p_hdmi_v_format s5p_hdmi_v_fmt[] = {
+ [v720x480p_60Hz] = {
+ .frame = {
+ .vic = 2,
+ .vic_16_9 = 3,
+ .repetition = 0,
+ .polarity = 1,
+ .i_p = 0,
+ .h_active = 720,
+ .v_active = 480,
+ .h_total = 858,
+ .h_blank = 138,
+ .v_total = 525,
+ .v_blank = 45,
+ .pixel_clock = ePHY_FREQ_27_027,
+ },
+ .h_sync = {
+ .begin = 0xe,
+ .end = 0x4c,
+ },
+ .v_sync_top = {
+ .begin = 0x9,
+ .end = 0xf,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1280x720p_60Hz] = {
+ .frame = {
+ .vic = 4,
+ .vic_16_9 = 4,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 0,
+ .h_active = 1280,
+ .v_active = 720,
+ .h_total = 1650,
+ .h_blank = 370,
+ .v_total = 750,
+ .v_blank = 30,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .h_sync = {
+ .begin = 0x6c,
+ .end = 0x94,
+ },
+ .v_sync_top = {
+ .begin = 0x5,
+ .end = 0xa,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080i_60Hz] = {
+ .frame = {
+ .vic = 5,
+ .vic_16_9 = 5,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 1,
+ .h_active = 1920,
+ .v_active = 540,
+ .h_total = 2200,
+ .h_blank = 280,
+ .v_total = 1125,
+ .v_blank = 22,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .h_sync = {
+ .begin = 0x56,
+ .end = 0x82,
+ },
+ .v_sync_top = {
+ .begin = 0x2,
+ .end = 0x7,
+ },
+ .v_sync_bottom = {
+ .begin = 0x234,
+ .end = 0x239,
+ },
+ .v_sync_h_pos = {
+ .begin = 0x4a4,
+ .end = 0x4a4,
+ },
+ .v_blank_f = {
+ .begin = 0x249,
+ .end = 0x465,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_60Hz] = {
+ .frame = {
+ .vic = 16,
+ .vic_16_9 = 16,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 0,
+ .h_active = 1920,
+ .v_active = 1080,
+ .h_total = 2200,
+ .h_blank = 280,
+ .v_total = 1125,
+ .v_blank = 45,
+ .pixel_clock = ePHY_FREQ_148_500,
+ },
+ .h_sync = {
+ .begin = 0x56,
+ .end = 0x82,
+ },
+ .v_sync_top = {
+ .begin = 0x4,
+ .end = 0x9,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v720x576p_50Hz] = {
+ .frame = {
+ .vic = 17,
+ .vic_16_9 = 18,
+ .repetition = 0,
+ .polarity = 1,
+ .i_p = 0,
+ .h_active = 720,
+ .v_active = 576,
+ .h_total = 864,
+ .h_blank = 144,
+ .v_total = 625,
+ .v_blank = 49,
+ .pixel_clock = ePHY_FREQ_27,
+ },
+ .h_sync = {
+ .begin = 0xa,
+ .end = 0x4a,
+ },
+ .v_sync_top = {
+ .begin = 0x5,
+ .end = 0xa,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1280x720p_50Hz] = {
+ .frame = {
+ .vic = 19,
+ .vic_16_9 = 19,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 0,
+ .h_active = 1280,
+ .v_active = 720,
+ .h_total = 1980,
+ .h_blank = 700,
+ .v_total = 750,
+ .v_blank = 30,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .h_sync = {
+ .begin = 0x1b6,
+ .end = 0x1de,
+ },
+ .v_sync_top = {
+ .begin = 0x5,
+ .end = 0xa,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080i_50Hz] = {
+ .frame = {
+ .vic = 20,
+ .vic_16_9 = 20,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 1,
+ .h_active = 1920,
+ .v_active = 540,
+ .h_total = 2640,
+ .h_blank = 720,
+ .v_total = 1125,
+ .v_blank = 22,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .h_sync = {
+ .begin = 0x20e,
+ .end = 0x23a,
+ },
+ .v_sync_top = {
+ .begin = 0x2,
+ .end = 0x7,
+ },
+ .v_sync_bottom = {
+ .begin = 0x234,
+ .end = 0x239,
+ },
+ .v_sync_h_pos = {
+ .begin = 0x738,
+ .end = 0x738,
+ },
+ .v_blank_f = {
+ .begin = 0x249,
+ .end = 0x465,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_50Hz] = {
+ .frame = {
+ .vic = 31,
+ .vic_16_9 = 31,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 0,
+ .h_active = 1920,
+ .v_active = 1080,
+ .h_total = 2640,
+ .h_blank = 720,
+ .v_total = 1125,
+ .v_blank = 45,
+ .pixel_clock = ePHY_FREQ_148_500,
+ },
+ .h_sync = {
+ .begin = 0x20e,
+ .end = 0x23a,
+ },
+ .v_sync_top = {
+ .begin = 0x4,
+ .end = 0x9,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_30Hz] = {
+ .frame = {
+ .vic = 34,
+ .vic_16_9 = 34,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 0,
+ .h_active = 1920,
+ .v_active = 1080,
+ .h_total = 2200,
+ .h_blank = 280,
+ .v_total = 1125,
+ .v_blank = 45,
+ .pixel_clock = ePHY_FREQ_74_250,
+ },
+ .h_sync = {
+ .begin = 0x56,
+ .end = 0x82,
+ },
+ .v_sync_top = {
+ .begin = 0x4,
+ .end = 0x9,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v720x480p_59Hz] = {
+ .frame = {
+ .vic = 2,
+ .vic_16_9 = 3,
+ .repetition = 0,
+ .polarity = 1,
+ .i_p = 0,
+ .h_active = 720,
+ .v_active = 480,
+ .h_total = 858,
+ .h_blank = 138,
+ .v_total = 525,
+ .v_blank = 45,
+ .pixel_clock = ePHY_FREQ_27,
+ },
+ .h_sync = {
+ .begin = 0xe,
+ .end = 0x4c,
+ },
+ .v_sync_top = {
+ .begin = 0x9,
+ .end = 0xf,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1280x720p_59Hz] = {
+ .frame = {
+ .vic = 4,
+ .vic_16_9 = 4,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 0,
+ .h_active = 1280,
+ .v_active = 720,
+ .h_total = 1650,
+ .h_blank = 370,
+ .v_total = 750,
+ .v_blank = 30,
+ .pixel_clock = ePHY_FREQ_74_176,
+ },
+ .h_sync = {
+ .begin = 0x6c,
+ .end = 0x94,
+ },
+ .v_sync_top = {
+ .begin = 0x5,
+ .end = 0xa,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080i_59Hz] = {
+ .frame = {
+ .vic = 5,
+ .vic_16_9 = 5,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 1,
+ .h_active = 1920,
+ .v_active = 540,
+ .h_total = 2200,
+ .h_blank = 280,
+ .v_total = 1125,
+ .v_blank = 22,
+ .pixel_clock = ePHY_FREQ_74_176,
+ },
+ .h_sync = {
+ .begin = 0x56,
+ .end = 0x82,
+ },
+ .v_sync_top = {
+ .begin = 0x2,
+ .end = 0x7,
+ },
+ .v_sync_bottom = {
+ .begin = 0x234,
+ .end = 0x239,
+ },
+ .v_sync_h_pos = {
+ .begin = 0x4a4,
+ .end = 0x4a4,
+ },
+ .v_blank_f = {
+ .begin = 0x249,
+ .end = 0x465,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+
+ [v1920x1080p_59Hz] = {
+ .frame = {
+ .vic = 16,
+ .vic_16_9 = 16,
+ .repetition = 0,
+ .polarity = 0,
+ .i_p = 0,
+ .h_active = 1920,
+ .v_active = 1080,
+ .h_total = 2200,
+ .h_blank = 280,
+ .v_total = 1125,
+ .v_blank = 45,
+ .pixel_clock = ePHY_FREQ_148_352,
+ },
+ .h_sync = {
+ .begin = 0x56,
+ .end = 0x82,
+ },
+ .v_sync_top = {
+ .begin = 0x4,
+ .end = 0x9,
+ },
+ .v_sync_bottom = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_sync_h_pos = {
+ .begin = 0,
+ .end = 0,
+ },
+ .v_blank_f = {
+ .begin = 0,
+ .end = 0,
+ },
+ .mhl_hsync = 0xf,
+ .mhl_vsync = 0x1,
+ },
+};
+#endif
+
+static struct s5p_hdmi_o_params s5p_hdmi_output[] = {
+ {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00},
+ }, {
+ {0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04},
+ {0x40, 0x00, 0x02, 0x40, 0x00},
+ }, {
+ {0x02, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x04},
+ {0x00, 0x00, 0x02, 0x20, 0x00},
+ }, {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x22, 0x01, 0x20, 0x01},
+ },
+};
+
+static struct s5p_hdmi_ctrl_private_data s5p_hdmi_ctrl_private = {
+ .vendor = "SAMSUNG",
+ .product = "S5PC210",
+
+ .blue_screen = {
+ .enable = false,
+#ifdef CONFIG_HDMI_14A_3D
+ .b = 0,
+ .g = 0,
+ .r = 0,
+#else
+ .cb_b = 0,
+ .y_g = 0,
+ .cr_r = 0,
+#endif
+ },
+
+ .video = {
+ .color_r = {
+ .y_min = 0x10,
+ .y_max = 0xeb,
+ .c_min = 0x10,
+ .c_max = 0xf0,
+ },
+ .depth = HDMI_CD_24,
+ .q_range = HDMI_Q_LIMITED_RANGE,
+ },
+
+ .packet = {
+ .vsi_info = {0x81, 0x1, 27},
+ .avi_info = {0x82, 0x2, 13},
+ .spd_info = {0x83, 0x1, 27},
+ .aui_info = {0x84, 0x1, 0x0a},
+ .mpg_info = {0x85, 0x1, 5},
+ },
+
+ .tg = {
+ .correction_en = false,
+ .bt656_en = false,
+ },
+
+ .hdcp_en = false,
+
+ .audio = {
+ .type = HDMI_60958_AUDIO,
+ .bit = 16,
+ .freq = 44100,
+ /* Support audio 5.1Ch */
+#if defined(CONFIG_VIDEO_TVOUT_2CH_AUDIO)
+ .channel = 2,
+#else
+ .channel = 6,
+#endif
+ },
+
+ .av_mute = false,
+ .running = false,
+
+ .pow_name = "hdmi_pd",
+
+ .clk[HDMI_PCLK] = {
+ .name = "hdmi",
+ .ptr = NULL
+ },
+
+ .clk[HDMI_MUX] = {
+ .name = "sclk_hdmi",
+ .ptr = NULL
+ },
+
+ .reg_mem[HDMI] = {
+ .name = "s5p-hdmi",
+ .res = NULL,
+ .base = NULL
+ },
+
+ .reg_mem[HDMI_PHY] = {
+ .name = "s5p-i2c-hdmi-phy",
+ .res = NULL,
+ .base = NULL
+ },
+
+ .irq = {
+ .name = "s5p-hdmi",
+ .handler = s5p_hdmi_irq,
+ .no = -1
+ }
+
+};
+
+static struct s5p_tvif_ctrl_private_data s5p_tvif_ctrl_private = {
+ .curr_std = TVOUT_INIT_DISP_VALUE,
+ .curr_if = TVOUT_INIT_O_VALUE,
+
+ .running = false
+};
+
+#ifdef CONFIG_ANALOG_TVENC
+struct s5p_sdo_ctrl_private_data {
+ struct s5p_sdo_vscale_cfg video_scale_cfg;
+ struct s5p_sdo_vbi vbi;
+ struct s5p_sdo_offset_gain offset_gain;
+ struct s5p_sdo_delay delay;
+ struct s5p_sdo_bright_hue_saturation bri_hue_sat;
+ struct s5p_sdo_cvbs_compensation cvbs_compen;
+ struct s5p_sdo_component_porch compo_porch;
+ struct s5p_sdo_ch_xtalk_cancellat_coeff xtalk_cc;
+ struct s5p_sdo_closed_caption closed_cap;
+ struct s5p_sdo_525_data wss_525;
+ struct s5p_sdo_625_data wss_625;
+ struct s5p_sdo_525_data cgms_525;
+ struct s5p_sdo_625_data cgms_625;
+
+ bool color_sub_carrier_phase_adj;
+
+ bool running;
+
+ struct s5p_tvout_clk_info clk[SDO_NO_OF_CLK];
+ char *pow_name;
+ struct reg_mem_info reg_mem;
+};
+
+/*
+ * Power-on defaults for the SDO controller: clocks/registers unmapped,
+ * encoder stopped, and nominal analog video values (gain 0x800, video
+ * start offset 0xfa, NTSC/PAL porch timings, copy protection off).
+ */
+static struct s5p_sdo_ctrl_private_data s5p_sdo_ctrl_private = {
+	.clk[SDO_PCLK] = {
+		.name			= "tvenc",
+		.ptr			= NULL
+	},
+	.clk[SDO_MUX] = {
+		.name			= "sclk_dac",
+		.ptr			= NULL
+	},
+	.pow_name			= "tv_enc_pd",
+	.reg_mem = {
+		.name			= "s5p-sdo",
+		.res			= NULL,
+		.base			= NULL
+	},
+
+	.running			= false,
+
+	.color_sub_carrier_phase_adj	= false,
+
+	.vbi = {
+		.wss_cvbs		= true,
+		.caption_cvbs		= SDO_INS_OTHERS
+	},
+
+	.offset_gain = {
+		.offset			= 0,
+		.gain			= 0x800
+	},
+
+	.delay = {
+		.delay_y		= 0x00,
+		.offset_video_start	= 0xfa,
+		.offset_video_end	= 0x00
+	},
+
+	.bri_hue_sat = {
+		.bright_hue_sat_adj	= false,
+		.gain_brightness	= 0x80,
+		.offset_brightness	= 0x00,
+		.gain0_cb_hue_sat	= 0x00,
+		.gain1_cb_hue_sat	= 0x00,
+		.gain0_cr_hue_sat	= 0x00,
+		.gain1_cr_hue_sat	= 0x00,
+		.offset_cb_hue_sat	= 0x00,
+		.offset_cr_hue_sat	= 0x00
+	},
+
+	.cvbs_compen = {
+		.cvbs_color_compen	= false,
+		.y_lower_mid		= 0x200,
+		.y_bottom		= 0x000,
+		.y_top			= 0x3ff,
+		.y_upper_mid		= 0x200,
+		.radius			= 0x1ff
+	},
+
+	/* component porch timings: *_525 = NTSC-style, *_625 = PAL-style */
+	.compo_porch = {
+		.back_525		= 0x8a,
+		.front_525		= 0x359,
+		.back_625		= 0x96,
+		.front_625		= 0x35c
+	},
+
+	.xtalk_cc = {
+		.coeff2			= 0,
+		.coeff1			= 0
+	},
+
+	.closed_cap = {
+		.display_cc		= 0,
+		.nondisplay_cc		= 0
+	},
+
+	.wss_525 = {
+		.copy_permit		= SDO_525_COPY_PERMIT,
+		.mv_psp			= SDO_525_MV_PSP_OFF,
+		.copy_info		= SDO_525_COPY_INFO,
+		.analog_on		= false,
+		.display_ratio		= SDO_525_4_3_NORMAL
+	},
+
+	.wss_625 = {
+		.surround_sound		= false,
+		.copyright		= false,
+		.copy_protection	= false,
+		.text_subtitles		= false,
+		.open_subtitles		= SDO_625_NO_OPEN_SUBTITLES,
+		.camera_film		= SDO_625_CAMERA,
+		.color_encoding		= SDO_625_NORMAL_PAL,
+		.helper_signal		= false,
+		.display_ratio		= SDO_625_4_3_FULL_576
+	},
+
+	.cgms_525 = {
+		.copy_permit		= SDO_525_COPY_PERMIT,
+		.mv_psp			= SDO_525_MV_PSP_OFF,
+		.copy_info		= SDO_525_COPY_INFO,
+		.analog_on		= false,
+		.display_ratio		= SDO_525_4_3_NORMAL
+	},
+
+	.cgms_625 = {
+		.surround_sound		= false,
+		.copyright		= false,
+		.copy_protection	= false,
+		.text_subtitles		= false,
+		.open_subtitles		= SDO_625_NO_OPEN_SUBTITLES,
+		.camera_film		= SDO_625_CAMERA,
+		.color_encoding		= SDO_625_NORMAL_PAL,
+		.helper_signal		= false,
+		.display_ratio		= SDO_625_4_3_FULL_576
+	},
+};
+#endif
+
+/****************************************
+ * Functions for sdo ctrl class
+ ***************************************/
+#ifdef CONFIG_ANALOG_TVENC
+
+/* Placeholder: all SDO defaults come from the static initializer above. */
+static void s5p_sdo_ctrl_init_private(void)
+{
+}
+
+/*
+ * Program the full SDO register set for @disp_mode from the cached private
+ * configuration, then enable the encoder clock and DAC.
+ *
+ * Returns 0 on success, -1 if any configuration helper rejects its input.
+ * The register write order (reset -> display mode -> scaling/VBI/gain ->
+ * timing -> WSS/CGMS -> irq -> clock/DAC on) follows the hardware bring-up
+ * sequence and should not be reordered.
+ */
+static int s5p_sdo_ctrl_set_reg(enum s5p_tvout_disp_mode disp_mode)
+{
+	struct s5p_sdo_ctrl_private_data *private = &s5p_sdo_ctrl_private;
+
+	s5p_sdo_sw_reset(1);
+
+	if (s5p_sdo_set_display_mode(disp_mode, SDO_O_ORDER_COMPOSITE_Y_C_CVBS))
+		return -1;
+
+	if (s5p_sdo_set_video_scale_cfg(
+		private->video_scale_cfg.composite_level,
+		private->video_scale_cfg.composite_ratio))
+		return -1;
+
+	if (s5p_sdo_set_vbi(
+		private->vbi.wss_cvbs, private->vbi.caption_cvbs))
+		return -1;
+
+	s5p_sdo_set_offset_gain(
+		private->offset_gain.offset, private->offset_gain.gain);
+
+	s5p_sdo_set_delay(
+		private->delay.delay_y,
+		private->delay.offset_video_start,
+		private->delay.offset_video_end);
+
+	s5p_sdo_set_schlock(private->color_sub_carrier_phase_adj);
+
+	s5p_sdo_set_brightness_hue_saturation(private->bri_hue_sat);
+
+	s5p_sdo_set_cvbs_color_compensation(private->cvbs_compen);
+
+	s5p_sdo_set_component_porch(
+		private->compo_porch.back_525,
+		private->compo_porch.front_525,
+		private->compo_porch.back_625,
+		private->compo_porch.front_625);
+
+	s5p_sdo_set_ch_xtalk_cancel_coef(
+		private->xtalk_cc.coeff2, private->xtalk_cc.coeff1);
+
+	s5p_sdo_set_closed_caption(
+		private->closed_cap.display_cc,
+		private->closed_cap.nondisplay_cc);
+
+	if (s5p_sdo_set_wss525_data(private->wss_525))
+		return -1;
+
+	if (s5p_sdo_set_wss625_data(private->wss_625))
+		return -1;
+
+	if (s5p_sdo_set_cgmsa525_data(private->cgms_525))
+		return -1;
+
+	if (s5p_sdo_set_cgmsa625_data(private->cgms_625))
+		return -1;
+
+	/* run with SDO interrupts masked; clear anything already pending */
+	s5p_sdo_set_interrupt_enable(0);
+
+	s5p_sdo_clear_interrupt_pending();
+
+	s5p_sdo_clock_on(1);
+	s5p_sdo_dac_on(1);
+
+	return 0;
+}
+
+/* Gate the encoder clock and power down the video DAC. */
+static void s5p_sdo_ctrl_internal_stop(void)
+{
+	s5p_sdo_clock_on(0);
+	s5p_sdo_dac_on(0);
+}
+
+/*
+ * Enable/disable the SDO clock path.  On enable: mux clock, runtime-PM get,
+ * re-seed the register base into the low-level SDO layer, then the pclk.
+ * On disable the same steps are undone in reverse and the base is cleared
+ * so stray register accesses fault visibly.
+ *
+ * NOTE(review): the trailing mdelay(50) busy-waits on both paths —
+ * presumably a settle time for the power domain; confirm before changing.
+ */
+static void s5p_sdo_ctrl_clock(bool on)
+{
+	if (on) {
+		clk_enable(s5p_sdo_ctrl_private.clk[SDO_MUX].ptr);
+
+#ifdef CONFIG_ARCH_EXYNOS4
+		s5p_tvout_pm_runtime_get();
+#endif
+		// Restore sdo_base address
+		s5p_sdo_init(s5p_sdo_ctrl_private.reg_mem.base);
+
+		clk_enable(s5p_sdo_ctrl_private.clk[SDO_PCLK].ptr);
+	} else {
+		clk_disable(s5p_sdo_ctrl_private.clk[SDO_PCLK].ptr);
+
+#ifdef CONFIG_ARCH_EXYNOS4
+		s5p_tvout_pm_runtime_put();
+#endif
+
+		clk_disable(s5p_sdo_ctrl_private.clk[SDO_MUX].ptr);
+
+		// Set sdo_base address to NULL
+		s5p_sdo_init(NULL);
+	}
+
+	mdelay(50);
+}
+
+#ifdef CONFIG_ANALOG_TVENC
+#ifndef CONFIG_VPLL_USE_FOR_TVENC
+static void s5p_tvenc_src_to_hdmiphy_on(void);
+static void s5p_tvenc_src_to_hdmiphy_off(void);
+#endif
+#endif
+
+/*
+ * Stop the analog TV encoder if running: DAC/clock off, clock path down,
+ * and (when the TV encoder borrows the HDMI PHY clock) release the PHY.
+ * NOTE(review): the inner CONFIG_ANALOG_TVENC guard is redundant — this
+ * whole region is already inside #ifdef CONFIG_ANALOG_TVENC.
+ */
+void s5p_sdo_ctrl_stop(void)
+{
+	if (s5p_sdo_ctrl_private.running) {
+		s5p_sdo_ctrl_internal_stop();
+		s5p_sdo_ctrl_clock(0);
+
+#ifdef CONFIG_ANALOG_TVENC
+#ifndef CONFIG_VPLL_USE_FOR_TVENC
+		s5p_tvenc_src_to_hdmiphy_off();
+#endif
+#endif
+
+		s5p_sdo_ctrl_private.running = false;
+	}
+}
+
+/*
+ * Start analog output in @disp_mode.  Chooses the composite level/ratio
+ * for NTSC (75 IRE, 10:4) vs PAL (0 IRE, 7:3) families, powers the clock
+ * path on first start (restarting only re-programs registers), then writes
+ * the full register set.  Returns 0 on success, -1 on an unsupported mode
+ * or a register-programming failure.
+ */
+int s5p_sdo_ctrl_start(enum s5p_tvout_disp_mode disp_mode)
+{
+	struct s5p_sdo_ctrl_private_data *sdo_private = &s5p_sdo_ctrl_private;
+
+	switch (disp_mode) {
+	case TVOUT_NTSC_M:
+	case TVOUT_NTSC_443:
+		sdo_private->video_scale_cfg.composite_level =
+			SDO_LEVEL_75IRE;
+		sdo_private->video_scale_cfg.composite_ratio =
+			SDO_VTOS_RATIO_10_4;
+		break;
+
+	case TVOUT_PAL_BDGHI:
+	case TVOUT_PAL_M:
+	case TVOUT_PAL_N:
+	case TVOUT_PAL_NC:
+	case TVOUT_PAL_60:
+		sdo_private->video_scale_cfg.composite_level =
+			SDO_LEVEL_0IRE;
+		sdo_private->video_scale_cfg.composite_ratio =
+			SDO_VTOS_RATIO_7_3;
+		break;
+
+	default:
+		tvout_err("invalid disp_mode(%d) for SDO\n",
+			disp_mode);
+		goto err_on_s5p_sdo_start;
+	}
+
+	/* already running: just quiesce before re-programming; otherwise
+	 * bring up clocks (and the borrowed HDMI PHY source) first */
+	if (sdo_private->running)
+		s5p_sdo_ctrl_internal_stop();
+	else {
+		s5p_sdo_ctrl_clock(1);
+
+#ifdef CONFIG_ANALOG_TVENC
+#ifndef CONFIG_VPLL_USE_FOR_TVENC
+		s5p_tvenc_src_to_hdmiphy_on();
+#endif
+#endif
+
+		sdo_private->running = true;
+	}
+
+	if (s5p_sdo_ctrl_set_reg(disp_mode))
+		goto err_on_s5p_sdo_start;
+
+	return 0;
+
+err_on_s5p_sdo_start:
+	return -1;
+}
+
+/*
+ * Probe-time setup for the SDO controller: map the "s5p-sdo" register
+ * region, look up both clocks, and seed the register base into the
+ * low-level layer.  On clock failure the clocks acquired so far are put
+ * and the region unmapped.  Returns 0 or a negative errno.
+ */
+int s5p_sdo_ctrl_constructor(struct platform_device *pdev)
+{
+	int ret;
+	int i, j;
+
+	ret = s5p_tvout_map_resource_mem(
+		pdev,
+		s5p_sdo_ctrl_private.reg_mem.name,
+		&(s5p_sdo_ctrl_private.reg_mem.base),
+		&(s5p_sdo_ctrl_private.reg_mem.res));
+
+	if (ret)
+		goto err_on_res;
+
+	for (i = SDO_PCLK; i < SDO_NO_OF_CLK; i++) {
+		s5p_sdo_ctrl_private.clk[i].ptr =
+			clk_get(&pdev->dev, s5p_sdo_ctrl_private.clk[i].name);
+
+		if (IS_ERR(s5p_sdo_ctrl_private.clk[i].ptr)) {
+			tvout_err("Failed to find clock %s\n",
+				s5p_sdo_ctrl_private.clk[i].name);
+			ret = -ENOENT;
+			goto err_on_clk;
+		}
+	}
+
+	s5p_sdo_ctrl_init_private();
+	s5p_sdo_init(s5p_sdo_ctrl_private.reg_mem.base);
+
+	return 0;
+
+err_on_clk:
+	/* release only the clocks acquired before the failure at index i */
+	for (j = 0; j < i; j++)
+		clk_put(s5p_sdo_ctrl_private.clk[j].ptr);
+
+	s5p_tvout_unmap_resource_mem(
+		s5p_sdo_ctrl_private.reg_mem.base,
+		s5p_sdo_ctrl_private.reg_mem.res);
+
+err_on_res:
+	return ret;
+}
+
+/*
+ * Remove-time teardown: unmap registers, disable (if still running) and
+ * put every clock, and clear the low-level register base.
+ */
+void s5p_sdo_ctrl_destructor(void)
+{
+	int i;
+
+	s5p_tvout_unmap_resource_mem(
+		s5p_sdo_ctrl_private.reg_mem.base,
+		s5p_sdo_ctrl_private.reg_mem.res);
+
+	for (i = SDO_PCLK; i < SDO_NO_OF_CLK; i++)
+		if (s5p_sdo_ctrl_private.clk[i].ptr) {
+			/* balance the clk_enable done while running */
+			if (s5p_sdo_ctrl_private.running)
+				clk_disable(s5p_sdo_ctrl_private.clk[i].ptr);
+			clk_put(s5p_sdo_ctrl_private.clk[i].ptr);
+		}
+	s5p_sdo_init(NULL);
+}
+#endif
+
+
+
+
+/****************************************
+ * Functions for hdmi ctrl class
+ ***************************************/
+
+/*
+ * Map a TV-out display mode to the HDMI video mode and, as a side effect,
+ * set the aspect ratio and colorimetry in the shared video descriptor
+ * (BT.601 for SD modes, BT.709 for HD, 4:3 only for the *_4_3 modes).
+ *
+ * Unsupported modes fall back to 720x480p/60.  Fix: the error message used
+ * to print 'mode' after the fallback assignment, so it always reported the
+ * fallback value instead of the rejected input; it now prints 'disp'.
+ */
+static enum s5p_hdmi_v_mode s5p_hdmi_check_v_fmt(enum s5p_tvout_disp_mode disp)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+	struct s5p_hdmi_video *video = &ctrl->video;
+	enum s5p_hdmi_v_mode mode;
+
+	video->aspect = HDMI_PIC_RATIO_16_9;
+	video->colorimetry = HDMI_CLRIMETRY_601;
+
+	switch (disp) {
+	case TVOUT_480P_60_16_9:
+		mode = v720x480p_60Hz;
+		break;
+
+	case TVOUT_480P_60_4_3:
+		mode = v720x480p_60Hz;
+		video->aspect = HDMI_PIC_RATIO_4_3;
+		break;
+
+	case TVOUT_480P_59:
+		mode = v720x480p_59Hz;
+		break;
+
+	case TVOUT_576P_50_16_9:
+		mode = v720x576p_50Hz;
+		break;
+
+	case TVOUT_576P_50_4_3:
+		mode = v720x576p_50Hz;
+		video->aspect = HDMI_PIC_RATIO_4_3;
+		break;
+
+	case TVOUT_720P_60:
+		mode = v1280x720p_60Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+
+	case TVOUT_720P_59:
+		mode = v1280x720p_59Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+
+	case TVOUT_720P_50:
+		mode = v1280x720p_50Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+
+	case TVOUT_1080P_30:
+		mode = v1920x1080p_30Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+
+	case TVOUT_1080P_60:
+		mode = v1920x1080p_60Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+
+	case TVOUT_1080P_59:
+		mode = v1920x1080p_59Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+
+	case TVOUT_1080P_50:
+		mode = v1920x1080p_50Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+
+	case TVOUT_1080I_60:
+		mode = v1920x1080i_60Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+
+	case TVOUT_1080I_59:
+		mode = v1920x1080i_59Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+
+	case TVOUT_1080I_50:
+		mode = v1920x1080i_50Hz;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+#ifdef CONFIG_HDMI_14A_3D
+	case TVOUT_720P_60_SBS_HALF:
+		mode = v1280x720p_60Hz_SBS_HALF;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+	case TVOUT_720P_59_SBS_HALF:
+		mode = v1280x720p_59Hz_SBS_HALF;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+	case TVOUT_720P_50_TB:
+		mode = v1280x720p_50Hz_TB;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+	case TVOUT_1080P_24_TB:
+		mode = v1920x1080p_24Hz_TB;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+	case TVOUT_1080P_23_TB:
+		mode = v1920x1080p_23Hz_TB;
+		video->colorimetry = HDMI_CLRIMETRY_709;
+		break;
+#endif
+
+	default:
+		mode = v720x480p_60Hz;
+		/* report the rejected input, not the fallback mode */
+		tvout_err("Not supported mode : %d\n", disp);
+	}
+
+	return mode;
+}
+
+/*
+ * Fill the Audio Clock Regeneration packet payload for the current sample
+ * rate.  N/CTS pairs follow the original per-frequency table; unknown
+ * frequencies yield N = CTS = 0.  Bytes 1..3 carry CTS, bytes 4..6 carry N,
+ * each big-endian.
+ */
+static void s5p_hdmi_set_acr(struct s5p_hdmi_audio *audio, u8 *acr)
+{
+	u32 n, cts;
+
+	switch (audio->freq) {
+	case 32000:
+		n = 4096;
+		cts = 27000;
+		break;
+	case 44100:
+		n = 6272;
+		cts = 30000;
+		break;
+	case 88200:
+		n = 12544;
+		cts = 30000;
+		break;
+	case 176400:
+		n = 25088;
+		cts = 30000;
+		break;
+	case 48000:
+		n = 6144;
+		cts = 27000;
+		break;
+	case 96000:
+		n = 12288;
+		cts = 27000;
+		break;
+	case 192000:
+		n = 24576;
+		cts = 27000;
+		break;
+	default:
+		n = 0;
+		cts = 0;
+		break;
+	}
+
+	acr[1] = cts >> 16;
+	acr[2] = (cts >> 8) & 0xff;
+	acr[3] = cts & 0xff;
+
+	acr[4] = n >> 16;
+	acr[5] = (n >> 8) & 0xff;
+	acr[6] = n & 0xff;
+
+	tvout_dbg("n value = %d\n", n);
+	tvout_dbg("cts = %d\n", cts);
+}
+
+/* Zero bytes 1-2 of the Audio Sample Packet header. */
+static void s5p_hdmi_set_asp(u8 *header)
+{
+	header[1] = 0;
+	header[2] = 0;
+}
+
+/* Put the audio content-protection type into ACP header byte 1. */
+static void s5p_hdmi_set_acp(struct s5p_hdmi_audio *audio, u8 *header)
+{
+	header[1] = audio->type;
+}
+
+/* Intentionally empty: ISRC packets are not populated by this driver. */
+static void s5p_hdmi_set_isrc(u8 *header)
+{
+}
+
+/* Intentionally empty: Gamut Metadata Packets are not populated. */
+static void s5p_hdmi_set_gmp(u8 *gmp)
+{
+}
+
+/*
+ * Build the AVI InfoFrame payload: pixel format, RGB/YCbCr quantization
+ * range, colorimetry, aspect ratio and the VIC for @mode.
+ *
+ * NOTE(review): the clear masks (~0x3)<<2 == 0xf0 and (~0x3)<<6 == 0x00
+ * zero more bits than the 2-bit Q/YQ fields they precede — possibly
+ * intended as ~(0x3<<2) / ~(0x3<<6); confirm against CEA-861 before
+ * changing.  Also tvout_dbg() is handed KERN_INFO, which likely embeds the
+ * level string into the message — verify the macro definition.
+ */
+static void s5p_hdmi_set_avi(
+	enum s5p_hdmi_v_mode mode, enum s5p_tvout_o_mode out,
+	struct s5p_hdmi_video *video, u8 *avi)
+{
+	struct s5p_hdmi_o_params param = s5p_hdmi_output[out];
+	struct s5p_hdmi_v_frame frame;
+
+	frame = s5p_hdmi_v_fmt[mode].frame;
+	avi[0] = param.reg.pxl_fmt;
+	avi[2] &= (u8)((~0x3) << 2);
+	avi[4] &= (u8)((~0x3) << 6);
+
+	/* RGB or YCbCr */
+	if (s5p_tvif_ctrl_private.curr_if == TVOUT_HDMI_RGB) {
+		avi[0] |= (0x1 << 4);
+		avi[4] |= frame.repetition;
+		if (s5p_tvif_ctrl_private.curr_std == TVOUT_480P_60_4_3) {
+			avi[2] |= HDMI_Q_DEFAULT << 2;
+			avi[4] |= HDMI_AVI_YQ_FULL_RANGE << 6;
+		} else {
+			avi[2] |= HDMI_Q_DEFAULT << 2;
+			avi[4] |= HDMI_AVI_YQ_LIMITED_RANGE << 6;
+		}
+	} else {
+		avi[0] |= (0x5 << 4);
+		avi[4] |= frame.repetition;
+		if (video->q_range == HDMI_Q_FULL_RANGE) {
+			tvout_dbg("Q_Range : %d\n", video->q_range);
+			avi[2] |= HDMI_Q_DEFAULT << 2;
+			avi[4] |= HDMI_AVI_YQ_FULL_RANGE << 6;
+		} else {
+			tvout_dbg("Q_Range : %d\n", video->q_range);
+			avi[2] |= HDMI_Q_DEFAULT << 2;
+			avi[4] |= HDMI_AVI_YQ_LIMITED_RANGE << 6;
+		}
+	}
+
+	avi[1] = video->colorimetry;
+	avi[1] |= video->aspect << 4;
+	avi[1] |= AVI_SAME_WITH_PICTURE_AR;
+#ifdef CONFIG_HDMI_14A_3D
+	avi[3] = (video->aspect == HDMI_PIC_RATIO_16_9) ?
+		frame.vAVI_VIC_16_9 : frame.vAVI_VIC;
+#else
+	avi[3] = (video->aspect == HDMI_PIC_RATIO_16_9) ?
+		frame.vic_16_9 : frame.vic;
+#endif
+	/* 480p/60 4:3 always advertises VIC 1 */
+	if (s5p_tvif_ctrl_private.curr_std == TVOUT_480P_60_4_3)
+		avi[3] = 0x1;
+
+	tvout_dbg(KERN_INFO "AVI BYTE 1 : 0x%x\n", avi[0]);
+	tvout_dbg(KERN_INFO "AVI BYTE 2 : 0x%x\n", avi[1]);
+	tvout_dbg(KERN_INFO "AVI BYTE 3 : 0x%x\n", avi[2]);
+	tvout_dbg(KERN_INFO "AVI BYTE 4 : %d\n", avi[3]);
+	tvout_dbg(KERN_INFO "AVI BYTE 5 : 0x%x\n", avi[4]);
+}
+
+/*
+ * Build the Audio InfoFrame payload.  Byte 0 carries channel count minus
+ * one; bytes 1-2 stay zero (refer-to-stream); byte 3 is the speaker
+ * allocation: 0 for stereo, 0x0b otherwise (5.1 layout).
+ */
+static void s5p_hdmi_set_aui(struct s5p_hdmi_audio *audio, u8 *aui)
+{
+	aui[0] = audio->channel - 1;
+	aui[1] = 0x0;
+	aui[2] = 0;
+	aui[3] = (audio->channel == 2) ? 0x0 : 0x0b;
+}
+
+/*
+ * Build the Source Product Description payload: 8-byte vendor name,
+ * 16-byte product name, then source type 0x1 (Digital STB).
+ */
+static void s5p_hdmi_set_spd(u8 *spd)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+	memcpy(spd, ctrl->vendor, 8);
+	memcpy(&spd[8], ctrl->product, 16);
+
+	spd[24] = 0x1; /* Digital STB */
+}
+
+/* Intentionally empty: MPEG InfoFrame payload is left as-is. */
+static void s5p_hdmi_set_mpg(u8 *mpg)
+{
+}
+
+/*
+ * Enable/disable HDMI audio in hardware, except in DVI mode (DVI carries
+ * no audio).  Always returns 0.
+ */
+static int s5p_hdmi_ctrl_audio_enable(bool en)
+{
+	if (!s5p_hdmi_output[s5p_hdmi_ctrl_private.out].reg.dvi)
+		s5p_hdmi_reg_audio_enable(en);
+
+	return 0;
+}
+
+#if 0 /* This function will be used in the future */
+static void s5p_hdmi_ctrl_bluescreen_clr(u8 cb_b, u8 y_g, u8 cr_r)
+{
+ struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+ ctrl->blue_screen.cb_b = cb_b;
+ ctrl->blue_screen.y_g = y_g;
+ ctrl->blue_screen.cr_r = cr_r;
+
+ s5p_hdmi_reg_bluescreen_clr(cb_b, y_g, cr_r);
+}
+#endif
+
+/* Remember the requested blue-screen state, then program the hardware. */
+static void s5p_hdmi_ctrl_set_bluescreen(bool en)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+	ctrl->blue_screen.enable = en;
+
+	s5p_hdmi_reg_bluescreen(en);
+}
+
+/*
+ * Record the desired audio state and, if the HDMI path is running, apply
+ * it immediately.  Exported (non-static) when earjack-mute support needs
+ * to call it from outside this file.
+ */
+#ifndef CONFIG_HDMI_EARJACK_MUTE
+static void s5p_hdmi_ctrl_set_audio(bool en)
+#else
+void s5p_hdmi_ctrl_set_audio(bool en)
+#endif
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+	ctrl->audio.on = en ? 1 : 0;
+
+	if (ctrl->running)
+		s5p_hdmi_ctrl_audio_enable(en);
+}
+
+/*
+ * AV mute: record the state and, while running, silence audio and blank
+ * the screen together (or restore both on unmute).
+ */
+static void s5p_hdmi_ctrl_set_av_mute(bool en)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+	ctrl->av_mute = en ? 1 : 0;
+
+	if (!ctrl->running)
+		return;
+
+	/* mute: audio off + blue screen on; unmute reverses both */
+	s5p_hdmi_ctrl_audio_enable(!en);
+	s5p_hdmi_ctrl_set_bluescreen(en);
+}
+
+/* Report the current AV-mute state as 1 (muted) or 0 (unmuted). */
+u8 s5p_hdmi_ctrl_get_mute(void)
+{
+	const struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+	return ctrl->av_mute ? 1 : 0;
+}
+
+#if 0 /* This function will be used in the future */
+static void s5p_hdmi_ctrl_mute(bool en)
+{
+ struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+ if (en) {
+ s5p_hdmi_reg_bluescreen(true);
+ s5p_hdmi_ctrl_audio_enable(false);
+ } else {
+ s5p_hdmi_reg_bluescreen(false);
+ if (ctrl->audio.on)
+ s5p_hdmi_ctrl_audio_enable(true);
+ }
+}
+#endif
+
+/* Latch the HDCP request; it takes effect at the next HDMI ctrl start. */
+void s5p_hdmi_ctrl_set_hdcp(bool en)
+{
+	s5p_hdmi_ctrl_private.hdcp_en = en ? 1 : 0;
+}
+
+/* Placeholder: HDMI defaults come from the static initializer above. */
+static void s5p_hdmi_ctrl_init_private(void)
+{
+}
+
+/*
+ * Program the HDMI block for @mode/@out from the cached private state:
+ * blue-screen, color range, all packets/InfoFrames, packet transmission,
+ * output path, timing generator and audio.
+ *
+ * Fix: this was declared 'bool' while returning 0 on success and -1 on
+ * error — under bool the error path converted to true, inverting the
+ * meaning.  It now returns int (0 / -1) like the other ctrl helpers.
+ */
+static int s5p_hdmi_ctrl_set_reg(
+	enum s5p_hdmi_v_mode mode, enum s5p_tvout_o_mode out)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+	struct s5p_hdmi_packet *packet = &ctrl->packet;
+
+	struct s5p_hdmi_bluescreen *bl = &ctrl->blue_screen;
+	struct s5p_hdmi_color_range *cr = &ctrl->video.color_r;
+	struct s5p_hdmi_tg *tg = &ctrl->tg;
+#ifdef CONFIG_HDMI_14A_3D
+	u8 type3D;
+#endif
+
+#ifdef CONFIG_HDMI_14A_3D
+	s5p_hdmi_reg_bluescreen_clr(bl->b, bl->g, bl->r);
+#else
+	s5p_hdmi_reg_bluescreen_clr(bl->cb_b, bl->y_g, bl->cr_r);
+#endif
+	s5p_hdmi_reg_bluescreen(bl->enable);
+
+	s5p_hdmi_reg_clr_range(cr->y_min, cr->y_max, cr->c_min, cr->c_max);
+
+	s5p_hdmi_reg_acr(packet->acr);
+	s5p_hdmi_reg_asp(packet->h_asp, &ctrl->audio);
+#ifdef CONFIG_HDMI_14A_3D
+	s5p_hdmi_reg_gcp(s5p_hdmi_v_fmt[mode].frame.interlaced, packet->gcp);
+#else
+	s5p_hdmi_reg_gcp(s5p_hdmi_v_fmt[mode].frame.i_p, packet->gcp);
+#endif
+
+	s5p_hdmi_reg_acp(packet->h_acp, packet->acp);
+	s5p_hdmi_reg_isrc(packet->isrc1, packet->isrc2);
+	s5p_hdmi_reg_gmp(packet->gmp);
+
+
+#ifdef CONFIG_HDMI_14A_3D
+	if ((mode == v1280x720p_60Hz_SBS_HALF) ||
+		(mode == v1280x720p_59Hz_SBS_HALF))
+		type3D = HDMI_3D_SSH_FORMAT;
+	else if ((mode == v1280x720p_50Hz_TB) ||
+		(mode == v1920x1080p_24Hz_TB) || (mode == v1920x1080p_23Hz_TB))
+		type3D = HDMI_3D_TB_FORMAT;
+	else
+		type3D = HDMI_2D_FORMAT;
+
+	/* NOTE(review): the VSI frame is written twice — possibly a
+	 * deliberate hardware double-write; confirm before removing. */
+	s5p_hdmi_reg_infoframe(&packet->vsi_info, packet->vsi, type3D);
+	s5p_hdmi_reg_infoframe(&packet->vsi_info, packet->vsi, type3D);
+	s5p_hdmi_reg_infoframe(&packet->avi_info, packet->avi, type3D);
+	s5p_hdmi_reg_infoframe(&packet->aui_info, packet->aui, type3D);
+	s5p_hdmi_reg_infoframe(&packet->spd_info, packet->spd, type3D);
+	s5p_hdmi_reg_infoframe(&packet->mpg_info, packet->mpg, type3D);
+#else
+	s5p_hdmi_reg_infoframe(&packet->avi_info, packet->avi);
+	s5p_hdmi_reg_infoframe(&packet->aui_info, packet->aui);
+	s5p_hdmi_reg_infoframe(&packet->spd_info, packet->spd);
+	s5p_hdmi_reg_infoframe(&packet->mpg_info, packet->mpg);
+#endif
+
+	s5p_hdmi_reg_packet_trans(&s5p_hdmi_output[out].trans);
+	s5p_hdmi_reg_output(&s5p_hdmi_output[out].reg);
+
+#ifdef CONFIG_HDMI_14A_3D
+	s5p_hdmi_reg_tg(&s5p_hdmi_v_fmt[mode]);
+#else
+	s5p_hdmi_reg_tg(&s5p_hdmi_v_fmt[mode].frame);
+#endif
+	s5p_hdmi_reg_v_timing(&s5p_hdmi_v_fmt[mode]);
+	s5p_hdmi_reg_tg_cmd(tg->correction_en, tg->bt656_en, true);
+
+	switch (ctrl->audio.type) {
+	case HDMI_GENERIC_AUDIO:
+		break;
+
+	case HDMI_60958_AUDIO:
+		s5p_hdmi_audio_init(PCM, 44100, 16, 0, &ctrl->audio);
+		break;
+
+	case HDMI_DVD_AUDIO:
+	case HDMI_SUPER_AUDIO:
+		break;
+
+	default:
+		tvout_err("Invalid audio type %d\n", ctrl->audio.type);
+		return -1;
+	}
+
+	s5p_hdmi_reg_audio_enable(true);
+
+	return 0;
+}
+
+/*
+ * Quiesce the HDMI block: hand hot-plug detection back to the external
+ * interrupt, stop HDCP if it was enabled, disable HDMI output and the
+ * timing generator.
+ */
+static void s5p_hdmi_ctrl_internal_stop(void)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+	struct s5p_hdmi_tg *tg = &ctrl->tg;
+
+	tvout_dbg("\n");
+#ifdef CONFIG_HDMI_HPD
+	s5p_hpd_set_eint();
+#endif
+	if (ctrl->hdcp_en)
+		s5p_hdcp_stop();
+
+	s5p_hdmi_reg_enable(false);
+
+	s5p_hdmi_reg_tg_cmd(tg->correction_en, tg->bt656_en, false);
+}
+
+/*
+ * Power the HDMI PHY on or off.  Power-off first re-parents sclk_hdmi to
+ * the internal pixel clock (and the DAC mux likewise) so downstream blocks
+ * keep a live clock — this prevents the HDMI block hanging on restart.
+ * Returns 0, or -1 if the clock re-parenting fails.
+ */
+int s5p_hdmi_ctrl_phy_power(bool on)
+{
+	tvout_dbg("on(%d)\n", on);
+	if (on) {
+		/* on */
+		clk_enable(s5ptv_status.i2c_phy_clk);
+		// Restore i2c_hdmi_phy_base address
+		s5p_hdmi_phy_init(s5p_hdmi_ctrl_private.reg_mem[HDMI_PHY].base);
+
+		s5p_hdmi_phy_power(true);
+
+	} else {
+		/*
+		 * for preventing hdmi hang up when restart
+		 * switch to internal clk - SCLK_DAC, SCLK_PIXEL
+		 */
+		s5p_mixer_ctrl_mux_clk(s5ptv_status.sclk_dac);
+		if (clk_set_parent(s5ptv_status.sclk_hdmi,
+				s5ptv_status.sclk_pixel)) {
+			tvout_err("unable to set parent %s of clock %s.\n",
+				s5ptv_status.sclk_pixel->name,
+				s5ptv_status.sclk_hdmi->name);
+			return -1;
+		}
+
+		s5p_hdmi_phy_power(false);
+
+		clk_disable(s5ptv_status.i2c_phy_clk);
+		// Set i2c_hdmi_phy_base to NULL
+		s5p_hdmi_phy_init(NULL);
+	}
+
+	return 0;
+}
+
+/*
+ * Enable/disable the HDMI clock path (mux clock, runtime PM, pclk) and
+ * seed or clear the register base in the low-level HDMI layer so register
+ * access is only possible while clocked.
+ */
+void s5p_hdmi_ctrl_clock(bool on)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+	struct s5p_tvout_clk_info *clk = ctrl->clk;
+
+	tvout_dbg("on(%d)\n", on);
+	if (on) {
+		clk_enable(clk[HDMI_MUX].ptr);
+
+#ifdef CONFIG_ARCH_EXYNOS4
+		s5p_tvout_pm_runtime_get();
+#endif
+		clk_enable(clk[HDMI_PCLK].ptr);
+
+		// Restore hdmi_base address
+		s5p_hdmi_init(s5p_hdmi_ctrl_private.reg_mem[HDMI].base);
+	} else {
+		clk_disable(clk[HDMI_PCLK].ptr);
+
+#ifdef CONFIG_ARCH_EXYNOS4
+		s5p_tvout_pm_runtime_put();
+#endif
+
+		clk_disable(clk[HDMI_MUX].ptr);
+
+		// Set hdmi_base to NULL
+		s5p_hdmi_init(NULL);
+	}
+}
+
+/* Report whether the HDMI controller is currently running. */
+bool s5p_hdmi_ctrl_status(void)
+{
+	return s5p_hdmi_ctrl_private.running;
+}
+
+/*
+ * Stop the HDMI controller if running.  While the driver is in early
+ * suspend the hardware is assumed already quiesced, so only the running
+ * flag is cleared.
+ */
+void s5p_hdmi_ctrl_stop(void)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+	tvout_dbg("running(%d)\n", ctrl->running);
+	if (ctrl->running) {
+		ctrl->running = false;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (suspend_status) {
+			tvout_dbg("driver is suspend_status\n");
+		} else
+#endif
+		{
+			s5p_hdmi_ctrl_internal_stop();
+			s5p_hdmi_ctrl_clock(0);
+		}
+	}
+}
+
+/*
+ * Start HDMI output for display mode @disp on interface @out: resolve the
+ * HDMI video mode, bring up clocks (or quiesce if restarting), configure
+ * the PHY for the mode's pixel clock, build all packets, program the
+ * register set, optionally start HDCP, enable output and switch HPD to
+ * the internal interrupt.  Returns 0 on success, -1 on PHY failure.
+ */
+int s5p_hdmi_ctrl_start(
+	enum s5p_tvout_disp_mode disp, enum s5p_tvout_o_mode out)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+	struct s5p_hdmi_packet *packet = &ctrl->packet;
+	struct s5p_hdmi_v_frame frame;
+
+	enum s5p_hdmi_v_mode mode;
+
+	ctrl->out = out;
+	mode = s5p_hdmi_check_v_fmt(disp);
+	ctrl->mode = mode;
+
+	tvout_dbg("\n");
+	if (ctrl->running)
+		s5p_hdmi_ctrl_internal_stop();
+	else {
+		s5p_hdmi_ctrl_clock(1);
+		ctrl->running = true;
+	}
+	on_start_process = false;
+	tvout_dbg("on_start_process(%d)\n", on_start_process);
+
+	frame = s5p_hdmi_v_fmt[mode].frame;
+
+	if (s5p_hdmi_phy_config(frame.pixel_clock, ctrl->video.depth) < 0) {
+		tvout_err("hdmi phy configuration failed.\n");
+		goto err_on_s5p_hdmi_start;
+	}
+
+
+	s5p_hdmi_set_acr(&ctrl->audio, packet->acr);
+	s5p_hdmi_set_asp(packet->h_asp);
+	s5p_hdmi_set_gcp(ctrl->video.depth, packet->gcp);
+
+	s5p_hdmi_set_acp(&ctrl->audio, packet->h_acp);
+	s5p_hdmi_set_isrc(packet->h_isrc);
+	s5p_hdmi_set_gmp(packet->gmp);
+
+	s5p_hdmi_set_avi(mode, out, &ctrl->video, packet->avi);
+	s5p_hdmi_set_spd(packet->spd);
+	s5p_hdmi_set_aui(&ctrl->audio, packet->aui);
+	s5p_hdmi_set_mpg(packet->mpg);
+
+	s5p_hdmi_ctrl_set_reg(mode, out);
+
+	if (ctrl->hdcp_en)
+		s5p_hdcp_start();
+
+	s5p_hdmi_reg_enable(true);
+
+#ifdef CONFIG_HDMI_HPD
+	s5p_hpd_set_hdmiint();
+#endif
+
+	return 0;
+
+err_on_s5p_hdmi_start:
+	return -1;
+}
+
+/*
+ * Probe-time setup for the HDMI controller: map both register regions
+ * (HDMI core + PHY I2C), acquire both clocks, resolve and request the IRQ,
+ * cycle PHY power once to force it into a known-off state, then init HDCP.
+ * Returns 0 or a negative errno; all resources acquired so far are
+ * released on every error path.
+ *
+ * Fix: the HDCP-failure path previously leaked the requested IRQ; it is
+ * now freed before the clock/memory cleanup.
+ */
+int s5p_hdmi_ctrl_constructor(struct platform_device *pdev)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+	struct reg_mem_info *reg_mem = ctrl->reg_mem;
+	struct s5p_tvout_clk_info *clk = ctrl->clk;
+	struct irq_info *irq = &ctrl->irq;
+	int ret, i, k, j;
+
+	for (i = 0; i < HDMI_NO_OF_MEM_RES; i++) {
+		ret = s5p_tvout_map_resource_mem(pdev, reg_mem[i].name,
+			&(reg_mem[i].base), &(reg_mem[i].res));
+
+		if (ret)
+			goto err_on_res;
+	}
+
+	for (k = HDMI_PCLK; k < HDMI_NO_OF_CLK; k++) {
+		clk[k].ptr = clk_get(&pdev->dev, clk[k].name);
+
+		if (IS_ERR(clk[k].ptr)) {
+			printk(KERN_ERR "%s clk is not found\n", clk[k].name);
+			ret = -ENOENT;
+			goto err_on_clk;
+		}
+	}
+
+	irq->no = platform_get_irq_byname(pdev, irq->name);
+
+	if (irq->no < 0) {
+		printk(KERN_ERR "can not get platform irq by name : %s\n",
+			irq->name);
+		ret = irq->no;
+		goto err_on_irq;
+	}
+
+	s5p_hdmi_init(reg_mem[HDMI].base);
+	s5p_hdmi_phy_init(reg_mem[HDMI_PHY].base);
+
+	ret = request_irq(irq->no, irq->handler, IRQF_DISABLED,
+		irq->name, NULL);
+	if (ret) {
+		printk(KERN_ERR "can not request irq : %s\n", irq->name);
+		goto err_on_irq;
+	}
+
+	s5p_hdmi_ctrl_init_private();
+
+	/* set initial state of HDMI PHY power to off */
+	s5p_hdmi_ctrl_phy_power(1);
+	s5p_hdmi_ctrl_phy_power(0);
+
+	ret = s5p_hdcp_init();
+
+	if (ret) {
+		printk(KERN_ERR "HDCP init failed..\n");
+		goto err_hdcp_init;
+	}
+
+	return 0;
+
+err_hdcp_init:
+	/* undo request_irq(); this was previously leaked on HDCP failure */
+	free_irq(irq->no, NULL);
+err_on_irq:
+err_on_clk:
+	for (j = 0; j < k; j++)
+		clk_put(clk[j].ptr);
+
+err_on_res:
+	for (j = 0; j < i; j++)
+		s5p_tvout_unmap_resource_mem(reg_mem[j].base, reg_mem[j].res);
+
+	return ret;
+}
+
+/*
+ * Remove-time teardown: free the IRQ (if it was acquired), unmap both
+ * register regions, disable-if-running and put every clock, and clear the
+ * low-level register bases.
+ */
+void s5p_hdmi_ctrl_destructor(void)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+	struct reg_mem_info *reg_mem = ctrl->reg_mem;
+	struct s5p_tvout_clk_info *clk = ctrl->clk;
+	struct irq_info *irq = &ctrl->irq;
+
+	int i;
+
+	if (irq->no >= 0)
+		free_irq(irq->no, NULL);
+
+	for (i = 0; i < HDMI_NO_OF_MEM_RES; i++)
+		s5p_tvout_unmap_resource_mem(reg_mem[i].base, reg_mem[i].res);
+
+	for (i = HDMI_PCLK; i < HDMI_NO_OF_CLK; i++)
+		if (clk[i].ptr) {
+			/* balance the clk_enable done while running */
+			if (ctrl->running)
+				clk_disable(clk[i].ptr);
+			clk_put(clk[i].ptr);
+		}
+
+	s5p_hdmi_phy_init(NULL);
+	s5p_hdmi_init(NULL);
+}
+
+/* No HDMI-specific suspend work; handled by s5p_tvif_ctrl_suspend(). */
+void s5p_hdmi_ctrl_suspend(void)
+{
+}
+
+/* No HDMI-specific resume work; handled by s5p_tvif_ctrl_resume(). */
+void s5p_hdmi_ctrl_resume(void)
+{
+}
+
+#ifdef CONFIG_ANALOG_TVENC
+#ifndef CONFIG_VPLL_USE_FOR_TVENC
+/*
+ * Route the analog TV encoder's clock source to the HDMI PHY (used when
+ * no dedicated VPLL is available): clock and power the PHY, configure it
+ * for 54 MHz / 24-bit color, then re-parent sclk_dac to sclk_hdmiphy.
+ * Failures are only logged.
+ */
+static void s5p_tvenc_src_to_hdmiphy_on(void)
+{
+	s5p_hdmi_ctrl_clock(1);
+	s5p_hdmi_ctrl_phy_power(1);
+	if (s5p_hdmi_phy_config(ePHY_FREQ_54, HDMI_CD_24) < 0)
+		tvout_err("hdmi phy configuration failed.\n");
+	if (clk_set_parent(s5ptv_status.sclk_dac, s5ptv_status.sclk_hdmiphy))
+		tvout_err("unable to set parent %s of clock %s.\n",
+			   s5ptv_status.sclk_hdmiphy->name,
+			   s5ptv_status.sclk_dac->name);
+}
+
+/* Undo s5p_tvenc_src_to_hdmiphy_on(): PHY power off, HDMI clocks off. */
+static void s5p_tvenc_src_to_hdmiphy_off(void)
+{
+	s5p_hdmi_ctrl_phy_power(0);
+	s5p_hdmi_ctrl_clock(0);
+}
+#endif
+#endif
+
+/****************************************
+ * Functions for tvif ctrl class
+ ***************************************/
+/*
+ * Capture the device pointers the tvif layer needs: the bus-frequency
+ * device (for OPP locking at HD resolutions) and our own platform device.
+ */
+static void s5p_tvif_ctrl_init_private(struct platform_device *pdev)
+{
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	/* add bus device ptr for using bus frequency with opp */
+	s5p_tvif_ctrl_private.bus_dev = dev_get("exynos-busfreq");
+#endif
+	s5p_tvif_ctrl_private.dev = &pdev->dev;
+}
+
+/*
+ * TV cut off sequence
+ * VP stop -> Mixer stop -> HDMI stop -> HDMI TG stop
+ * Above sequence should be satisfied.
+ */
+/*
+ * TV cut off sequence
+ * VP stop -> Mixer stop -> HDMI stop -> HDMI TG stop
+ * Above sequence should be satisfied.
+ *
+ * Stops the mixer first, then the interface-specific back end (SDO for
+ * composite; HDMI ctrl + PHY power for the digital paths — the PHY is
+ * left alone while in early suspend).  Returns 0, or -1 for an unknown
+ * current interface.
+ */
+static int s5p_tvif_ctrl_internal_stop(void)
+{
+	tvout_dbg("status(%d)\n", s5p_tvif_ctrl_private.curr_if);
+	s5p_mixer_ctrl_stop();
+
+	switch (s5p_tvif_ctrl_private.curr_if) {
+#ifdef CONFIG_ANALOG_TVENC
+	case TVOUT_COMPOSITE:
+		s5p_sdo_ctrl_stop();
+		break;
+#endif
+	case TVOUT_DVI:
+	case TVOUT_HDMI:
+	case TVOUT_HDMI_RGB:
+		s5p_hdmi_ctrl_stop();
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (suspend_status) {
+			tvout_dbg("driver is suspend_status\n");
+		} else
+#endif
+		{
+			s5p_hdmi_ctrl_phy_power(0);
+		}
+		break;
+
+	default:
+		tvout_err("invalid out parameter(%d)\n",
+			s5p_tvif_ctrl_private.curr_if);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Bring up the output path for (@std, @inf) with mixer interrupts masked:
+ * mixer first, then SDO or HDMI (the digital paths power the PHY first).
+ * NOTE(review): sub-start failures only skip the remaining steps via
+ * ret_on_err — they are not reported to the caller (void return); confirm
+ * this best-effort behavior is intended.
+ */
+static void s5p_tvif_ctrl_internal_start(
+		enum s5p_tvout_disp_mode std,
+		enum s5p_tvout_o_mode inf)
+{
+	tvout_dbg("\n");
+	s5p_mixer_ctrl_set_int_enable(false);
+
+	/* Clear All Interrupt Pending */
+	s5p_mixer_ctrl_clear_pend_all();
+
+	switch (inf) {
+#ifdef CONFIG_ANALOG_TVENC
+	case TVOUT_COMPOSITE:
+		if (s5p_mixer_ctrl_start(std, inf) < 0)
+			goto ret_on_err;
+
+		if (0 != s5p_sdo_ctrl_start(std))
+			goto ret_on_err;
+
+		break;
+#endif
+	case TVOUT_HDMI:
+	case TVOUT_HDMI_RGB:
+	case TVOUT_DVI:
+		s5p_hdmi_ctrl_phy_power(1);
+
+		if (s5p_mixer_ctrl_start(std, inf) < 0)
+			goto ret_on_err;
+
+		if (0 != s5p_hdmi_ctrl_start(std, inf))
+			goto ret_on_err;
+		break;
+	default:
+		break;
+	}
+
+ret_on_err:
+	s5p_mixer_ctrl_set_int_enable(true);
+
+	/* Clear All Interrupt Pending */
+	s5p_mixer_ctrl_clear_pend_all();
+}
+
+/*
+ * Forward an audio on/off request to the HDMI layer.  Only meaningful on
+ * the digital interfaces; returns -EINVAL otherwise (e.g. composite).
+ */
+int s5p_tvif_ctrl_set_audio(bool en)
+{
+	enum s5p_tvout_o_mode out = s5p_tvif_ctrl_private.curr_if;
+
+	if (out != TVOUT_HDMI && out != TVOUT_HDMI_RGB && out != TVOUT_DVI)
+		return -EINVAL;
+
+	s5p_hdmi_ctrl_set_audio(en);
+
+	return 0;
+}
+
+/* Record the HDMI audio channel count (takes effect at next start). */
+void s5p_tvif_audio_channel(int channel)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+	ctrl->audio.channel = channel;
+}
+
+/* Select full (non-zero @range) or limited HDMI quantization range. */
+void s5p_tvif_q_color_range(int range)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+	ctrl->video.q_range =
+		range ? HDMI_Q_FULL_RANGE : HDMI_Q_LIMITED_RANGE;
+	tvout_dbg("%s: Set Q range : %d\n", __func__, ctrl->video.q_range);
+}
+
+/* Return 1 if the HDMI quantization range is full, 0 if limited. */
+int s5p_tvif_get_q_range(void)
+{
+	struct s5p_hdmi_ctrl_private_data *ctrl = &s5p_hdmi_ctrl_private;
+
+	tvout_dbg("%s: Get Q range : %d\n", __func__, ctrl->video.q_range);
+	return (ctrl->video.q_range == HDMI_Q_FULL_RANGE) ? 1 : 0;
+}
+
+/*
+ * Forward an AV-mute request to the HDMI layer; only valid on the digital
+ * interfaces.  Returns 0 or -EINVAL.
+ */
+int s5p_tvif_ctrl_set_av_mute(bool en)
+{
+	switch (s5p_tvif_ctrl_private.curr_if) {
+	case TVOUT_HDMI:
+	case TVOUT_HDMI_RGB:
+	case TVOUT_DVI:
+		s5p_hdmi_ctrl_set_av_mute(en);
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Report the currently selected display standard and interface. */
+int s5p_tvif_ctrl_get_std_if(
+		enum s5p_tvout_disp_mode *std, enum s5p_tvout_o_mode *inf)
+{
+	*std = s5p_tvif_ctrl_private.curr_std;
+	*inf = s5p_tvif_ctrl_private.curr_if;
+
+	return 0;
+}
+
+/*
+ * Report whether the TV interface is running.
+ * Fix: the empty parameter list '()' was an old-style (non-prototype)
+ * declaration in C; declared '(void)' to make it a proper prototype.
+ */
+bool s5p_tvif_ctrl_get_run_state(void)
+{
+	return s5p_tvif_ctrl_private.running;
+}
+
+/*
+ * Select and start output (@std, @inf).  Locks the bus frequency for
+ * 1080p modes, short-circuits when the requested configuration is already
+ * running, otherwise stops the current path (unless in early suspend) and
+ * starts the new one.  Returns 0 on success, -1 on an invalid interface
+ * or a no-op restart.
+ *
+ * Fix: curr_std/curr_if were previously committed *before* the interface
+ * validation switch, so rejecting an invalid 'inf' left corrupted state
+ * behind; validation now happens first.
+ */
+int s5p_tvif_ctrl_start(
+	enum s5p_tvout_disp_mode std, enum s5p_tvout_o_mode inf)
+{
+	tvout_dbg("\n");
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	if ((std == TVOUT_1080P_60) || (std == TVOUT_1080P_59)
+		|| (std == TVOUT_1080P_50)) {
+		dev_lock(s5p_tvif_ctrl_private.bus_dev,
+				s5p_tvif_ctrl_private.dev, BUSFREQ_400MHZ);
+	}
+#if defined(CONFIG_MACH_MIDAS)
+	else {
+		dev_lock(s5p_tvif_ctrl_private.bus_dev,
+				s5p_tvif_ctrl_private.dev, BUSFREQ_133MHZ);
+	}
+#endif
+#endif
+	if (s5p_tvif_ctrl_private.running &&
+			(std == s5p_tvif_ctrl_private.curr_std) &&
+			(inf == s5p_tvif_ctrl_private.curr_if)) {
+		on_start_process = false;
+		tvout_dbg("%s() on_start_process(%d)\n",
+			__func__, on_start_process);
+		goto cannot_change;
+	}
+
+	/* validate the interface before committing it to private state */
+	switch (inf) {
+	case TVOUT_COMPOSITE:
+	case TVOUT_HDMI:
+	case TVOUT_HDMI_RGB:
+	case TVOUT_DVI:
+		break;
+	default:
+		tvout_err("invalid out parameter(%d)\n", inf);
+		goto cannot_change;
+	}
+
+	s5p_tvif_ctrl_private.curr_std = std;
+	s5p_tvif_ctrl_private.curr_if = inf;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+	} else
+#endif
+	{
+		/* how to control the clock path on stop time ??? */
+		if (s5p_tvif_ctrl_private.running)
+			s5p_tvif_ctrl_internal_stop();
+
+		s5p_tvif_ctrl_internal_start(std, inf);
+	}
+
+	s5p_tvif_ctrl_private.running = true;
+
+	return 0;
+
+cannot_change:
+	return -1;
+}
+
+/*
+ * Stop the interface if running and release any bus-frequency lock taken
+ * by s5p_tvif_ctrl_start().
+ */
+void s5p_tvif_ctrl_stop(void)
+{
+	if (s5p_tvif_ctrl_private.running) {
+		s5p_tvif_ctrl_internal_stop();
+
+		s5p_tvif_ctrl_private.running = false;
+	}
+#if defined(CONFIG_BUSFREQ_OPP) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+	dev_unlock(s5p_tvif_ctrl_private.bus_dev, s5p_tvif_ctrl_private.dev);
+#endif
+}
+
+/*
+ * Probe-time setup for the whole tvif stack: SDO (when analog output is
+ * configured), then HDMI, then the tvif private data.  Returns 0 or -1.
+ * NOTE(review): if the HDMI constructor fails after SDO succeeded, the
+ * SDO resources are not released here — presumably torn down elsewhere;
+ * verify against the probe error path.
+ */
+int s5p_tvif_ctrl_constructor(struct platform_device *pdev)
+{
+#ifdef CONFIG_ANALOG_TVENC
+	if (s5p_sdo_ctrl_constructor(pdev))
+		goto err;
+#endif
+
+	if (s5p_hdmi_ctrl_constructor(pdev))
+		goto err;
+
+	s5p_tvif_ctrl_init_private(pdev);
+
+	return 0;
+
+err:
+	return -1;
+}
+
+/* Tear down the SDO (if configured) and HDMI controllers. */
+void s5p_tvif_ctrl_destructor(void)
+{
+#ifdef CONFIG_ANALOG_TVENC
+	s5p_sdo_ctrl_destructor();
+#endif
+	s5p_hdmi_ctrl_destructor();
+}
+
+/*
+ * System suspend: quiesce the active output path (the running flag stays
+ * set so resume knows to restart) and deactivate the VCM mapping if used.
+ */
+void s5p_tvif_ctrl_suspend(void)
+{
+	tvout_dbg("\n");
+	if (s5p_tvif_ctrl_private.running) {
+		s5p_tvif_ctrl_internal_stop();
+#ifdef CONFIG_VCM
+		s5p_tvout_vcm_deactivate();
+#endif
+	}
+
+}
+
+/*
+ * System resume: re-activate VCM if used and restart the output path with
+ * the standard/interface that were active at suspend time.
+ */
+void s5p_tvif_ctrl_resume(void)
+{
+	if (s5p_tvif_ctrl_private.running) {
+#ifdef CONFIG_VCM
+		s5p_tvout_vcm_activate();
+#endif
+		s5p_tvif_ctrl_internal_start(
+			s5p_tvif_ctrl_private.curr_std,
+			s5p_tvif_ctrl_private.curr_if);
+	}
+}
+
+#ifdef CONFIG_PM
+/*
+ * After resume, cycle the HDMI PHY once (on then off) to put it back into
+ * a known-off state — but only when no output is running, since a running
+ * path owns the PHY.
+ */
+void s5p_hdmi_ctrl_phy_power_resume(void)
+{
+	tvout_dbg("running(%d)\n", s5p_tvif_ctrl_private.running);
+	if (s5p_tvif_ctrl_private.running)
+		return;
+
+	s5p_hdmi_ctrl_phy_power(1);
+	s5p_hdmi_ctrl_phy_power(0);
+
+	return;
+}
+#endif
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout.c b/drivers/media/video/samsung/tvout/s5p_tvout.c
new file mode 100644
index 0000000..7407670
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout.c
@@ -0,0 +1,666 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvout.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Entry file for Samsung TVOut driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
+#if defined(CONFIG_S5P_SYSMMU_TV)
+#include <plat/sysmmu.h>
+#endif
+
+#if defined(CONFIG_S5P_MEM_CMA)
+#include <linux/cma.h>
+#elif defined(CONFIG_S5P_MEM_BOOTMEM)
+#include <plat/media.h>
+#include <mach/media.h>
+#endif
+
+#include "s5p_tvout_common_lib.h"
+#include "s5p_tvout_ctrl.h"
+#include "s5p_tvout_fb.h"
+#include "s5p_tvout_v4l2.h"
+
+#define TV_CLK_GET_WITH_ERR_CHECK(clk, pdev, clk_name) \
+ do { \
+ clk = clk_get(&pdev->dev, clk_name); \
+ if (IS_ERR(clk)) { \
+ printk(KERN_ERR \
+ "failed to find clock %s\n", clk_name); \
+ return -ENOENT; \
+ } \
+ } while (0);
+
+struct s5p_tvout_status s5ptv_status;
+bool on_stop_process;
+bool on_start_process;
+struct s5p_tvout_vp_bufferinfo s5ptv_vp_buff;
+#ifdef CONFIG_PM
+static struct workqueue_struct *tvout_resume_wq;
+struct work_struct tvout_resume_work;
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+static struct early_suspend s5ptv_early_suspend;
+static DEFINE_MUTEX(s5p_tvout_mutex);
+unsigned int suspend_status;
+static void s5p_tvout_early_suspend(struct early_suspend *h);
+static void s5p_tvout_late_resume(struct early_suspend *h);
+#endif
+bool flag_after_resume;
+
+#ifdef CONFIG_TVOUT_DEBUG
+int tvout_dbg_flag;
+#endif
+
+
+#ifdef CONFIG_HDMI_EARJACK_MUTE
+bool hdmi_audio_ext;
+
+/* Sysfs interface for controlling the HDMI audio path. */
+static ssize_t hdmi_set_audio_read(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	/* Read handler is a stub: it logs the access and reports nothing. */
+	printk(KERN_ERR "[HDMI]: AUDIO PATH\n");
+	return 0;
+}
+
+/*
+ * Sysfs store handler that switches the HDMI audio path.
+ * A leading '1' requests the internal (HDMI) path, anything else the
+ * external one; the switch is only performed when the request matches
+ * the currently stored hdmi_audio_ext state.
+ * Fix: removed the unused local 'char *after'.
+ */
+static ssize_t hdmi_set_audio_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t size)
+{
+	bool value = !strncmp(buf, "1", 1) ? true : false;
+
+	printk(KERN_ERR "[HDMI] Change AUDIO PATH: %d\n", (int)value);
+
+	/* NOTE(review): acting when 'value == hdmi_audio_ext' and then
+	 * inverting the flag looks like a deliberate toggle, but confirm
+	 * the polarity against the userspace consumer. */
+	if (value == hdmi_audio_ext) {
+		if (value) {
+			hdmi_audio_ext = 0;
+			s5p_hdmi_ctrl_set_audio(1);
+		} else {
+			hdmi_audio_ext = 1;
+			s5p_hdmi_ctrl_set_audio(0);
+		}
+	}
+
+	return size;
+}
+
+static DEVICE_ATTR(hdmi_audio_set_ext, 0660,
+ hdmi_set_audio_read, hdmi_set_audio_store);
+#endif
+
+/*
+ * Look up every clock the TV-out block needs and, when
+ * CONFIG_VPLL_USE_FOR_TVENC is set, route VPLL to the DAC at 54 MHz.
+ * Returns 0 on success, -ENOENT when a clock lookup fails (via the
+ * TV_CLK_GET_WITH_ERR_CHECK macro), or -1 on VPLL setup failure.
+ * Fix: the failed-rate-change message passed the int literal 54000000
+ * to a %lu conversion (undefined behavior); use a UL literal.
+ */
+static int __devinit s5p_tvout_clk_get(struct platform_device *pdev,
+	struct s5p_tvout_status *ctrl)
+{
+	struct clk *ext_xtal_clk, *mout_vpll_src, *fout_vpll, *mout_vpll;
+
+	TV_CLK_GET_WITH_ERR_CHECK(ctrl->i2c_phy_clk, pdev, "i2c-hdmiphy");
+
+	TV_CLK_GET_WITH_ERR_CHECK(ctrl->sclk_dac, pdev, "sclk_dac");
+	TV_CLK_GET_WITH_ERR_CHECK(ctrl->sclk_hdmi, pdev, "sclk_hdmi");
+
+	TV_CLK_GET_WITH_ERR_CHECK(ctrl->sclk_pixel, pdev, "sclk_pixel");
+	TV_CLK_GET_WITH_ERR_CHECK(ctrl->sclk_hdmiphy, pdev, "sclk_hdmiphy");
+
+	TV_CLK_GET_WITH_ERR_CHECK(ext_xtal_clk, pdev, "ext_xtal");
+	TV_CLK_GET_WITH_ERR_CHECK(mout_vpll_src, pdev, "vpll_src");
+	TV_CLK_GET_WITH_ERR_CHECK(fout_vpll, pdev, "fout_vpll");
+	TV_CLK_GET_WITH_ERR_CHECK(mout_vpll, pdev, "sclk_vpll");
+
+#ifdef CONFIG_VPLL_USE_FOR_TVENC
+	/* 54 MHz is the rate the analog TV encoder expects from VPLL. */
+	if (clk_set_rate(fout_vpll, 54000000)) {
+		tvout_err("%s rate change failed: %lu\n", fout_vpll->name,
+			54000000UL);
+		return -1;
+	}
+
+	if (clk_set_parent(mout_vpll_src, ext_xtal_clk)) {
+		tvout_err("unable to set parent %s of clock %s.\n",
+			ext_xtal_clk->name, mout_vpll_src->name);
+		return -1;
+	}
+
+	if (clk_set_parent(mout_vpll, fout_vpll)) {
+		tvout_err("unable to set parent %s of clock %s.\n",
+			fout_vpll->name, mout_vpll->name);
+		return -1;
+	}
+
+	/* sclk_dac's parent is fixed as mout_vpll */
+	if (clk_set_parent(ctrl->sclk_dac, mout_vpll)) {
+		tvout_err("unable to set parent %s of clock %s.\n",
+			mout_vpll->name, ctrl->sclk_dac->name);
+		return -1;
+	}
+
+	/* It'll be moved in the future */
+	if (clk_enable(mout_vpll_src) < 0)
+		return -1;
+
+	if (clk_enable(fout_vpll) < 0)
+		return -1;
+
+	if (clk_enable(mout_vpll) < 0)
+		return -1;
+
+	/* Intermediate clocks stay enabled; only the references are
+	 * dropped here. */
+	clk_put(ext_xtal_clk);
+	clk_put(mout_vpll_src);
+	clk_put(fout_vpll);
+	clk_put(mout_vpll);
+#endif
+
+	return 0;
+}
+
+#ifdef CONFIG_TVOUT_DEBUG
+void show_tvout_dbg_flag(void)
+{
+	/* Dump the on/off state of every debug-message category. */
+	pr_info("hw_if/hdmi.c %s\n",
+		(tvout_dbg_flag >> DBG_FLAG_HDMI) & 0x1 ? "On" : "Off");
+	pr_info("s5p_tvout_hpd.c %s\n",
+		(tvout_dbg_flag >> DBG_FLAG_HPD) & 0x1 ? "On" : "Off");
+	pr_info("s5p_tvout_common_lib.h %s\n",
+		(tvout_dbg_flag >> DBG_FLAG_TVOUT) & 0x1 ? "On" : "Off");
+	pr_info("hw_if/hdcp.c %s\n",
+		(tvout_dbg_flag >> DBG_FLAG_HDCP) & 0x1 ? "On" : "Off");
+}
+
+/* Set (value == 1) or clear (any other value) bit 'pos' of *flag. */
+void set_flag_value(int *flag, int pos, int value)
+{
+	if (value == 1)
+		*flag |= 1 << pos;
+	else
+		*flag &= ~(1 << pos);
+}
+
+/*
+ * Sysfs show handler: reports the per-file debug-message switches both
+ * to the kernel log (via show_tvout_dbg_flag) and into 'buf'.
+ */
+static ssize_t sysfs_dbg_msg_show(struct class *class,
+	struct class_attribute *attr, char *buf)
+{
+	pr_info("sysfs_dbg_msg_show\n");
+	show_tvout_dbg_flag();
+	return sprintf(buf, "hw_if/hdmi.c %s\n"
+		"s5p_tvout_hpd.c %s\n"
+		"s5p_tvout_common_lib.h %s\n"
+		"hw_if/hdcp.c %s\n",
+		((tvout_dbg_flag >> DBG_FLAG_HDMI) & 0x1 ? "On" : "Off"),
+		((tvout_dbg_flag >> DBG_FLAG_HPD) & 0x1 ? "On" : "Off"),
+		((tvout_dbg_flag >> DBG_FLAG_TVOUT) & 0x1 ? "On" : "Off"),
+		((tvout_dbg_flag >> DBG_FLAG_HDCP) & 0x1 ? "On" : "Off"));
+}
+
+/*
+ * Sysfs store handler for the debug-message switches.
+ * Accepts "help", a "target:value" pair (e.g. "hdcp:1") or a 4-digit
+ * bitmask plus newline (e.g. "1010\n").
+ * Fixes: dest[1] was dereferenced with strcmp() even when the input
+ * contained no ':' (NULL dereference); the bitmask digits accepted '2';
+ * "Taget" typo in the error message.
+ */
+static ssize_t sysfs_dbg_msg_store(struct class *class,
+	struct class_attribute *attr, const char *buf, size_t size)
+{
+	enum tvout_dbg_flag_bit_num tvout_dbg_flag_bit;
+	int value;
+	int i;
+	char *dest[2];
+	char *buffer = (char *)buf;
+
+	pr_info("TVOUT Debug Message setting : ");
+	/* Split into "target" and optional "value"; strsep() writes NULs
+	 * into the sysfs buffer (writable despite the const parameter). */
+	for (i = 0; i < 2; i++)
+		dest[i] = strsep(&buffer, ":");
+
+	if (dest[0] == NULL)
+		return size;
+
+	if (strcmp(dest[0], "help") == 0) {
+		pr_info(
+			"bit3 : hw_if/hdmi.c\n"
+			"bit2 : s5p_tvout_hpd.c\n"
+			"bit1 : s5p_tvout_common_lib.h\n"
+			"bit0 : hw_if/hdcp.c\n"
+			"ex1) echo 1010 > dbg_msg\n"
+			"	hw_if/hdmi.c On\n"
+			"	s5p_tvout_hpd.c Off\n"
+			"	s5p_tvout_common_lib.h On\n"
+			"	hw_if/hdcp.c Off\n"
+			"ex2) echo hdcp:1 > dbg_msg\n"
+			"	hw_if/hdcp.c On\n"
+			);
+		return size;
+	}
+
+	if (strcmp(dest[0], "hdcp") == 0) {
+		tvout_dbg_flag_bit = DBG_FLAG_HDCP;
+	} else if (strcmp(dest[0], "tvout") == 0) {
+		tvout_dbg_flag_bit = DBG_FLAG_TVOUT;
+	} else if (strcmp(dest[0], "hpd") == 0) {
+		tvout_dbg_flag_bit = DBG_FLAG_HPD;
+	} else if (strcmp(dest[0], "hdmi") == 0) {
+		tvout_dbg_flag_bit = DBG_FLAG_HDMI;
+	} else if (strlen(dest[0]) == 5) {
+		/* four digits plus the trailing newline, e.g. "1010\n" */
+		for (i = 0; i < 4; i++) {
+			value = dest[0][i] - '0';
+			if (value < 0 || 1 < value) {
+				pr_info("error : setting value!\n");
+				return size;
+			}
+			set_flag_value(&tvout_dbg_flag, 3-i, value);
+		}
+		show_tvout_dbg_flag();
+		return size;
+	} else {
+		pr_info("Error : Debug Message Target\n");
+		return size;
+	}
+
+	/* fix: "target" without ":value" used to crash in strcmp(NULL) */
+	if (dest[1] == NULL) {
+		pr_info("Error : Setting value!\n");
+		return size;
+	}
+
+	if (strcmp(dest[1], "1\n") == 0) {
+		value = 1;
+	} else if (strcmp(dest[1], "0\n") == 0) {
+		value = 0;
+	} else {
+		pr_info("Error : Setting value!\n");
+		return size;
+	}
+
+	set_flag_value(&tvout_dbg_flag, tvout_dbg_flag_bit, value);
+	show_tvout_dbg_flag();
+
+	return size;
+}
+
+static CLASS_ATTR(dbg_msg, S_IRUGO | S_IWUSR,
+ sysfs_dbg_msg_show, sysfs_dbg_msg_store);
+#endif
+
+/*
+ * Probe the TV-out platform device: enable runtime PM, bring up the
+ * IOMMU/VCM path when configured, acquire clocks, construct the
+ * vp/mixer/tvif/v4l2 sub-controllers (mixer strictly before tvif),
+ * reserve the video-processor buffers and create the sysfs nodes.
+ * Returns 0 on success, -ENODEV on any failure (after unwinding the
+ * constructors that already succeeded).
+ */
+static int __devinit s5p_tvout_probe(struct platform_device *pdev)
+{
+#if defined(CONFIG_S5P_MEM_CMA)
+	struct cma_info mem_info;
+	int ret;
+#elif defined(CONFIG_S5P_MEM_BOOTMEM)
+	int mdev_id;
+#endif
+	unsigned int vp_buff_vir_addr;
+	unsigned int vp_buff_phy_addr = 0;
+	int i;
+
+#ifdef CONFIG_HDMI_EARJACK_MUTE
+	struct class *hdmi_audio_class;
+	struct device *hdmi_audio_dev;
+#endif
+
+#ifdef CONFIG_TVOUT_DEBUG
+	/* default: only hot-plug-detect debug messages enabled */
+	struct class *sec_tvout;
+	tvout_dbg_flag = 1 << DBG_FLAG_HPD;
+#endif
+	s5p_tvout_pm_runtime_enable(&pdev->dev);
+
+#if defined(CONFIG_S5P_SYSMMU_TV) && defined(CONFIG_VCM)
+	if (s5p_tvout_vcm_create_unified() < 0)
+		goto err;
+
+	if (s5p_tvout_vcm_init() < 0)
+		goto err;
+#elif defined(CONFIG_S5P_SYSMMU_TV) && defined(CONFIG_S5P_VMEM)
+	s5p_sysmmu_enable(&pdev->dev);
+	printk(KERN_WARNING "sysmmu on\n");
+	s5p_sysmmu_set_tablebase_pgd(&pdev->dev, __pa(swapper_pg_dir));
+#endif
+	if (s5p_tvout_clk_get(pdev, &s5ptv_status) < 0)
+		goto err;
+
+	if (s5p_vp_ctrl_constructor(pdev) < 0)
+		goto err;
+
+	/* s5p_mixer_ctrl_constructor must be called
+		before s5p_tvif_ctrl_constructor */
+	if (s5p_mixer_ctrl_constructor(pdev) < 0)
+		goto err_mixer;
+
+	if (s5p_tvif_ctrl_constructor(pdev) < 0)
+		goto err_tvif;
+
+	if (s5p_tvout_v4l2_constructor(pdev) < 0)
+		goto err_v4l2;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	/* Suspend/resume hooks run just after the framebuffer's. */
+	spin_lock_init(&s5ptv_status.tvout_lock);
+	s5ptv_early_suspend.suspend = s5p_tvout_early_suspend;
+	s5ptv_early_suspend.resume = s5p_tvout_late_resume;
+	s5ptv_early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 4;
+	register_early_suspend(&s5ptv_early_suspend);
+	suspend_status = 0;
+#endif
+
+#ifdef CONFIG_TV_FB
+#ifndef CONFIG_USER_ALLOC_TVOUT
+	s5p_hdmi_phy_power(true);
+	if (s5p_tvif_ctrl_start(TVOUT_720P_60, TVOUT_HDMI) < 0)
+		goto err_tvif_start;
+#endif
+
+	/* prepare memory */
+	if (s5p_tvout_fb_alloc_framebuffer(&pdev->dev))
+		goto err_tvif_start;
+
+	if (s5p_tvout_fb_register_framebuffer(&pdev->dev))
+		goto err_tvif_start;
+#endif
+	on_stop_process = false;
+	on_start_process = false;
+	/* Video-processor buffer reservation (CMA or bootmem); on
+	 * EXYNOS4212/4412 the buffers are left unreserved here. */
+#if !defined(CONFIG_CPU_EXYNOS4212) && !defined(CONFIG_CPU_EXYNOS4412)
+#if defined(CONFIG_S5P_MEM_CMA)
+	/* CMA */
+	ret = cma_info(&mem_info, &pdev->dev, 0);
+	tvout_dbg("[cma_info] start_addr : 0x%x, end_addr : 0x%x, "
+		"total_size : 0x%x, free_size : 0x%x\n",
+		mem_info.lower_bound, mem_info.upper_bound,
+		mem_info.total_size, mem_info.free_size);
+	if (ret) {
+		tvout_err("get cma info failed\n");
+		goto err_tvif_start;
+	}
+	s5ptv_vp_buff.size = mem_info.total_size;
+	if (s5ptv_vp_buff.size < S5PTV_VP_BUFF_CNT * S5PTV_VP_BUFF_SIZE) {
+		tvout_err("insufficient vp buffer size\n");
+		goto err_tvif_start;
+	}
+	vp_buff_phy_addr = (unsigned int)cma_alloc
+		(&pdev->dev, (char *)"tvout", (size_t) s5ptv_vp_buff.size,
+		(dma_addr_t) 0);
+
+#elif defined(CONFIG_S5P_MEM_BOOTMEM)
+	mdev_id = S5P_MDEV_TVOUT;
+	/* alloc from bank1 as default */
+	vp_buff_phy_addr = s5p_get_media_memory_bank(mdev_id, 1);
+	s5ptv_vp_buff.size = s5p_get_media_memsize_bank(mdev_id, 1);
+	if (s5ptv_vp_buff.size < S5PTV_VP_BUFF_CNT * S5PTV_VP_BUFF_SIZE) {
+		tvout_err("insufficient vp buffer size\n");
+		goto err_tvif_start;
+	}
+#endif
+
+	tvout_dbg("s5ptv_vp_buff.size = 0x%x\n", s5ptv_vp_buff.size);
+	tvout_dbg("s5ptv_vp_buff phy_base = 0x%x\n", vp_buff_phy_addr);
+
+	/* NOTE(review): assumes the reserved region is lowmem, otherwise
+	 * phys_to_virt() does not yield a usable mapping - confirm. */
+	vp_buff_vir_addr = (unsigned int)phys_to_virt(vp_buff_phy_addr);
+	tvout_dbg("s5ptv_vp_buff vir_base = 0x%x\n", vp_buff_vir_addr);
+
+	if (!vp_buff_vir_addr) {
+		tvout_err("phys_to_virt failed\n");
+		goto err_ioremap;
+	}
+
+	/* Slice the reservation into S5PTV_VP_BUFF_CNT equal buffers. */
+	for (i = 0; i < S5PTV_VP_BUFF_CNT; i++) {
+		s5ptv_vp_buff.vp_buffs[i].phy_base =
+			vp_buff_phy_addr + (i * S5PTV_VP_BUFF_SIZE);
+		s5ptv_vp_buff.vp_buffs[i].vir_base =
+			vp_buff_vir_addr + (i * S5PTV_VP_BUFF_SIZE);
+	}
+#else
+	for (i = 0; i < S5PTV_VP_BUFF_CNT; i++) {
+		s5ptv_vp_buff.vp_buffs[i].phy_base = 0;
+		s5ptv_vp_buff.vp_buffs[i].vir_base = 0;
+	}
+#endif
+
+	/* The last buffer is reserved for VP access; the rest are copy
+	 * buffers. */
+	for (i = 0; i < S5PTV_VP_BUFF_CNT - 1; i++)
+		s5ptv_vp_buff.copy_buff_idxs[i] = i;
+
+	s5ptv_vp_buff.curr_copy_idx = 0;
+	s5ptv_vp_buff.vp_access_buff_idx = S5PTV_VP_BUFF_CNT - 1;
+
+#ifdef CONFIG_TVOUT_DEBUG
+	tvout_dbg("Create tvout class sysfile\n");
+
+	sec_tvout = class_create(THIS_MODULE, "tvout");
+	if (IS_ERR(sec_tvout)) {
+		tvout_err("Failed to create class(sec_tvout)!\n");
+		goto err_class;
+	}
+
+	if (class_create_file(sec_tvout, &class_attr_dbg_msg) < 0) {
+		tvout_err("failed to add sysfs entries\n");
+		goto err_sysfs;
+	}
+#endif
+
+	flag_after_resume = false;
+#ifdef CONFIG_HDMI_EARJACK_MUTE
+	hdmi_audio_class = class_create(THIS_MODULE, "hdmi_audio");
+	if (IS_ERR(hdmi_audio_class))
+		pr_err("Failed to create class(hdmi_audio)!\n");
+	hdmi_audio_dev = device_create(hdmi_audio_class, NULL, 0, NULL,
+		"hdmi_audio");
+	if (IS_ERR(hdmi_audio_dev))
+		pr_err("Failed to create device(hdmi_audio_dev)!\n");
+
+	if (device_create_file(hdmi_audio_dev,
+		&dev_attr_hdmi_audio_set_ext) < 0)
+		printk(KERN_ERR "Failed to create device file(%s)!\n",
+			dev_attr_hdmi_audio_set_ext.attr.name);
+
+	hdmi_audio_ext = false;
+#endif
+
+	return 0;
+
+err_sysfs:
+	class_destroy(sec_tvout);
+err_class:
+err_ioremap:
+/* NOTE(review): cma_free() is reached even when cma_alloc() was never
+ * called or failed - confirm cma_free(0) is harmless on this kernel. */
+#if defined(CONFIG_S5P_MEM_CMA)
+	cma_free(vp_buff_phy_addr);
+#endif
+err_tvif_start:
+	s5p_tvout_v4l2_destructor();
+err_v4l2:
+	s5p_tvif_ctrl_destructor();
+err_tvif:
+	s5p_mixer_ctrl_destructor();
+err_mixer:
+	s5p_vp_ctrl_destructor();
+err:
+	return -ENODEV;
+}
+
+/*
+ * Remove handler: destruct the sub-controllers in reverse dependency
+ * order (vp, tvif, mixer, then v4l2), release the clocks acquired in
+ * probe and disable runtime PM.
+ */
+static int s5p_tvout_remove(struct platform_device *pdev)
+{
+#if defined(CONFIG_S5P_SYSMMU_TV) && defined(CONFIG_S5P_VMEM)
+	s5p_sysmmu_off(&pdev->dev);
+	tvout_dbg("sysmmu off\n");
+#endif
+	s5p_vp_ctrl_destructor();
+	s5p_tvif_ctrl_destructor();
+	s5p_mixer_ctrl_destructor();
+
+	s5p_tvout_v4l2_destructor();
+
+	clk_disable(s5ptv_status.sclk_hdmi);
+
+	/* NOTE(review): i2c_phy_clk acquired in s5p_tvout_clk_get is not
+	 * released here - confirm whether that is intentional. */
+	clk_put(s5ptv_status.sclk_hdmi);
+	clk_put(s5ptv_status.sclk_dac);
+	clk_put(s5ptv_status.sclk_pixel);
+	clk_put(s5ptv_status.sclk_hdmiphy);
+
+	s5p_tvout_pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_HAS_EARLYSUSPEND
+static void s5p_tvout_early_suspend(struct early_suspend *h)
+{
+	tvout_dbg("\n");
+
+	/* Quiesce the pipeline back-to-front under the TV-out mutex and
+	 * publish the suspended state for s5p_tvif_ctrl_start(). */
+	mutex_lock(&s5p_tvout_mutex);
+	s5p_mixer_ctrl_set_vsync_interrupt(false);
+	s5p_vp_ctrl_suspend();
+	s5p_mixer_ctrl_suspend();
+	s5p_tvif_ctrl_suspend();
+	suspend_status = 1;
+	tvout_dbg("suspend_status is true\n");
+	mutex_unlock(&s5p_tvout_mutex);
+}
+
+/*
+ * Late-resume handler: restarts the pipeline front-to-back (tvif,
+ * mixer, vp) and restores the vsync-interrupt state, all under the
+ * TV-out mutex.
+ */
+static void s5p_tvout_late_resume(struct early_suspend *h)
+{
+	tvout_dbg("\n");
+
+	mutex_lock(&s5p_tvout_mutex);
+
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+	/* On 4212/4412 the PHY power cycle deferred from s5p_tvout_resume
+	 * is queued here, once userspace is running again. */
+	if (flag_after_resume) {
+		queue_work_on(0, tvout_resume_wq, &tvout_resume_work);
+		flag_after_resume = false;
+	}
+#endif
+	suspend_status = 0;
+	tvout_dbg("suspend_status is false\n");
+	s5p_tvif_ctrl_resume();
+	s5p_mixer_ctrl_resume();
+	s5p_vp_ctrl_resume();
+	s5p_mixer_ctrl_set_vsync_interrupt(s5p_mixer_ctrl_get_vsync_interrupt());
+	mutex_unlock(&s5p_tvout_mutex);
+
+	return;
+}
+
+/* Serialize external callers against early-suspend/late-resume.
+ * Fix: declare with (void) - the old-style empty parameter list is an
+ * unprototyped declaration in C89/C99. */
+void s5p_tvout_mutex_lock(void)
+{
+	mutex_lock(&s5p_tvout_mutex);
+}
+
+void s5p_tvout_mutex_unlock(void)
+{
+	mutex_unlock(&s5p_tvout_mutex);
+}
+#endif
+
+/*
+ * Deferred resume worker: cycles the HDMI PHY back to a known state
+ * after system resume (see s5p_hdmi_ctrl_phy_power_resume).
+ * NOTE(review): this is queued via INIT_WORK with a (work_func_t)
+ * cast, so 'arg' actually points at the work_struct; it is unused
+ * here, but the cast is fragile - confirm.
+ */
+static void s5p_tvout_resume_work(void *arg)
+{
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	mutex_lock(&s5p_tvout_mutex);
+#endif
+	s5p_hdmi_ctrl_phy_power_resume();
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	mutex_unlock(&s5p_tvout_mutex);
+#endif
+}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+/*
+ * System suspend hook (early-suspend build): the real shutdown already
+ * happened in s5p_tvout_early_suspend, so this is a no-op.
+ */
+static int s5p_tvout_suspend(struct device *dev)
+{
+	tvout_dbg("\n");
+	return 0;
+}
+
+/*
+ * System resume hook (early-suspend build): defer the HDMI PHY power
+ * cycle - on 4212/4412 until late-resume, otherwise via the resume
+ * workqueue immediately.
+ */
+static int s5p_tvout_resume(struct device *dev)
+{
+	tvout_dbg("\n");
+#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412)
+	flag_after_resume = true;
+#else
+	queue_work_on(0, tvout_resume_wq, &tvout_resume_work);
+#endif
+	return 0;
+}
+#else
+/* Without early-suspend, suspend the pipeline back-to-front here. */
+static int s5p_tvout_suspend(struct device *dev)
+{
+	s5p_vp_ctrl_suspend();
+	s5p_mixer_ctrl_suspend();
+	s5p_tvif_ctrl_suspend();
+	return 0;
+}
+
+/* ... and resume it front-to-back. */
+static int s5p_tvout_resume(struct device *dev)
+{
+	s5p_tvif_ctrl_resume();
+	s5p_mixer_ctrl_resume();
+	s5p_vp_ctrl_resume();
+	return 0;
+}
+#endif
+/* Runtime-PM callbacks: nothing to do beyond tracing. */
+static int s5p_tvout_runtime_suspend(struct device *dev)
+{
+	tvout_dbg("\n");
+	return 0;
+}
+
+static int s5p_tvout_runtime_resume(struct device *dev)
+{
+	tvout_dbg("\n");
+	return 0;
+}
+#else
+#define s5p_tvout_suspend NULL
+#define s5p_tvout_resume NULL
+#define s5p_tvout_runtime_suspend NULL
+#define s5p_tvout_runtime_resume NULL
+#endif
+
+static const struct dev_pm_ops s5p_tvout_pm_ops = {
+ .suspend = s5p_tvout_suspend,
+ .resume = s5p_tvout_resume,
+ .runtime_suspend = s5p_tvout_runtime_suspend,
+ .runtime_resume = s5p_tvout_runtime_resume
+};
+
+static struct platform_driver s5p_tvout_driver = {
+ .probe = s5p_tvout_probe,
+ .remove = s5p_tvout_remove,
+ .driver = {
+ .name = "s5p-tvout",
+ .owner = THIS_MODULE,
+ .pm = &s5p_tvout_pm_ops},
+};
+
+static char banner[] __initdata =
+ KERN_INFO "S5P TVOUT Driver v3.0 (c) 2010 Samsung Electronics\n";
+
+/*
+ * Module init: register the platform driver and, with CONFIG_PM,
+ * create the freezable workqueue used to defer the HDMI PHY power
+ * cycle across resume.
+ * Fix: the workqueue-failure message was a copy-paste of the driver
+ * registration error and misreported the actual failure.
+ */
+static int __init s5p_tvout_init(void)
+{
+	int ret;
+
+	printk(banner);
+
+	ret = platform_driver_register(&s5p_tvout_driver);
+	if (ret) {
+		printk(KERN_ERR "Platform Device Register Failed %d\n", ret);
+		return -1;
+	}
+
+#ifdef CONFIG_PM
+	/* Freezable so resume work never races the freezer. */
+	tvout_resume_wq = create_freezable_workqueue("tvout resume work");
+	if (!tvout_resume_wq) {
+		printk(KERN_ERR "Failed to create tvout resume workqueue\n");
+		platform_driver_unregister(&s5p_tvout_driver);
+		return -1;
+	}
+
+	INIT_WORK(&tvout_resume_work, (work_func_t) s5p_tvout_resume_work);
+#endif
+
+	return 0;
+}
+
+/* Module exit: destroy the suspend mutex and unregister the driver. */
+static void __exit s5p_tvout_exit(void)
+{
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	mutex_destroy(&s5p_tvout_mutex);
+#endif
+	platform_driver_unregister(&s5p_tvout_driver);
+}
+
+late_initcall(s5p_tvout_init);
+module_exit(s5p_tvout_exit);
+
+MODULE_AUTHOR("SangPil Moon");
+MODULE_DESCRIPTION("S5P TVOUT driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout_cec.c b/drivers/media/video/samsung/tvout/s5p_tvout_cec.c
new file mode 100644
index 0000000..82e9994
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout_cec.c
@@ -0,0 +1,428 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_cec_ctrl.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * cec interface file for Samsung TVOut driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+#include <linux/clk.h>
+#include <linux/sched.h>
+
+#include <plat/tvout.h>
+
+#include "hw_if/hw_if.h"
+#include "s5p_tvout_common_lib.h"
+
+#define CEC_IOC_MAGIC 'c'
+#define CEC_IOC_SETLADDR _IOW(CEC_IOC_MAGIC, 0, unsigned int)
+
+#define VERSION "1.0" /* Driver version number */
+#define CEC_MINOR 242 /* Major 10, Minor 242, /dev/cec */
+
+
+#define CEC_STATUS_TX_RUNNING (1<<0)
+#define CEC_STATUS_TX_TRANSFERRING (1<<1)
+#define CEC_STATUS_TX_DONE (1<<2)
+#define CEC_STATUS_TX_ERROR (1<<3)
+#define CEC_STATUS_TX_BYTES (0xFF<<8)
+#define CEC_STATUS_RX_RUNNING (1<<16)
+#define CEC_STATUS_RX_RECEIVING (1<<17)
+#define CEC_STATUS_RX_DONE (1<<18)
+#define CEC_STATUS_RX_ERROR (1<<19)
+#define CEC_STATUS_RX_BCAST (1<<20)
+#define CEC_STATUS_RX_BYTES (0xFF<<24)
+
+
+/* CEC Rx buffer size */
+#define CEC_RX_BUFF_SIZE 16
+/* CEC Tx buffer size */
+#define CEC_TX_BUFF_SIZE 16
+
+#define TV_CLK_GET_WITH_ERR_CHECK(clk, pdev, clk_name) \
+ do { \
+ clk = clk_get(&pdev->dev, clk_name); \
+ if (IS_ERR(clk)) { \
+ printk(KERN_ERR \
+ "failed to find clock %s\n", clk_name); \
+ return -ENOENT; \
+ } \
+ } while (0);
+
+static atomic_t hdmi_on = ATOMIC_INIT(0);
+static DEFINE_MUTEX(cec_lock);
+struct clk *hdmi_cec_clk;
+
+/*
+ * Open /dev/cec. Single-open device: a second open fails with -EBUSY.
+ * Enables the CEC clock, resets the block, arms Tx interrupts and puts
+ * the receiver into listening state.
+ * Fix: a rejected open used to leave the clock enabled forever, since
+ * release() is not called for a failed open.
+ */
+static int s5p_cec_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	mutex_lock(&cec_lock);
+	clk_enable(hdmi_cec_clk);
+
+	if (atomic_read(&hdmi_on)) {
+		tvout_dbg("do not allow multiple open for tvout cec\n");
+		ret = -EBUSY;
+		/* balance the clk_enable() above on the failure path */
+		clk_disable(hdmi_cec_clk);
+		goto err_multi_open;
+	} else
+		atomic_inc(&hdmi_on);
+
+	s5p_cec_reset();
+
+	s5p_cec_set_divider();
+
+	s5p_cec_threshold();
+
+	s5p_cec_unmask_tx_interrupts();
+
+	/* arm the receiver immediately so no frame is missed */
+	s5p_cec_set_rx_state(STATE_RX);
+	s5p_cec_unmask_rx_interrupts();
+	s5p_cec_enable_rx();
+
+err_multi_open:
+	mutex_unlock(&cec_lock);
+
+	return ret;
+}
+
+/*
+ * Release /dev/cec: mask both interrupt directions and gate the clock.
+ * Fix: removed clk_put() - the clock reference is taken exactly once
+ * in probe(), but release() runs on every close, so repeated
+ * open/close cycles dropped references that were never taken.
+ */
+static int s5p_cec_release(struct inode *inode, struct file *file)
+{
+	atomic_dec(&hdmi_on);
+
+	s5p_cec_mask_tx_interrupts();
+	s5p_cec_mask_rx_interrupts();
+
+	clk_disable(hdmi_cec_clk);
+
+	return 0;
+}
+
+/*
+ * Read one received CEC frame. Sleeps until the Rx interrupt marks a
+ * frame complete (STATE_DONE), then copies it to userspace and rearms
+ * the receiver.
+ * NOTE(review): returning -1 for an undersized user buffer means
+ * -EPERM to userspace; -EINVAL or -EOVERFLOW would be conventional.
+ */
+static ssize_t s5p_cec_read(struct file *file, char __user *buffer,
+	size_t count, loff_t *ppos)
+{
+	ssize_t retval;
+	unsigned long spin_flags;
+
+	if (wait_event_interruptible(cec_rx_struct.waitq,
+		atomic_read(&cec_rx_struct.state) == STATE_DONE)) {
+		return -ERESTARTSYS;
+	}
+	/* the lock protects buffer/size against the irq handler */
+	spin_lock_irqsave(&cec_rx_struct.lock, spin_flags);
+
+	if (cec_rx_struct.size > count) {
+		spin_unlock_irqrestore(&cec_rx_struct.lock, spin_flags);
+
+		return -1;
+	}
+
+	if (copy_to_user(buffer, cec_rx_struct.buffer, cec_rx_struct.size)) {
+		spin_unlock_irqrestore(&cec_rx_struct.lock, spin_flags);
+		printk(KERN_ERR " copy_to_user() failed!\n");
+
+		return -EFAULT;
+	}
+
+	retval = cec_rx_struct.size;
+
+	/* frame consumed: put the receiver back into listening state */
+	s5p_cec_set_rx_state(STATE_RX);
+	spin_unlock_irqrestore(&cec_rx_struct.lock, spin_flags);
+
+	return retval;
+}
+
+/*
+ * Transmit one CEC frame: copy up to CEC_TX_BUFF_SIZE bytes from
+ * userspace, hand them to the hardware and sleep until the Tx
+ * interrupt reports completion.
+ * Fix: replaced bare -1 returns (seen by userspace as -EPERM) with
+ * conventional error codes (-EINVAL, -ENOMEM, -EIO).
+ */
+static ssize_t s5p_cec_write(struct file *file, const char __user *buffer,
+	size_t count, loff_t *ppos)
+{
+	char *data;
+
+	/* reject empty or oversized frames */
+	if (count > CEC_TX_BUFF_SIZE || count == 0)
+		return -EINVAL;
+
+	data = kmalloc(count, GFP_KERNEL);
+	if (!data) {
+		printk(KERN_ERR " kmalloc() failed!\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(data, buffer, count)) {
+		printk(KERN_ERR " copy_from_user() failed!\n");
+		kfree(data);
+		return -EFAULT;
+	}
+
+	s5p_cec_copy_packet(data, count);
+
+	kfree(data);
+
+	/* wait for the Tx interrupt to move the state off STATE_TX */
+	if (wait_event_interruptible(cec_tx_struct.waitq,
+		atomic_read(&cec_tx_struct.state)
+		!= STATE_TX)) {
+		return -ERESTARTSYS;
+	}
+
+	if (atomic_read(&cec_tx_struct.state) == STATE_ERROR)
+		return -EIO;
+
+	return count;
+}
+
+/*
+ * ioctl handler. CEC_IOC_SETLADDR programs the device's CEC logical
+ * address; everything else is -EINVAL.
+ * Fix: dropped the dead pre-unlocked_ioctl prototype that was kept
+ * under "#if 0".
+ */
+static long s5p_cec_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	u32 laddr;
+
+	switch (cmd) {
+	case CEC_IOC_SETLADDR:
+		if (get_user(laddr, (u32 __user *) arg))
+			return -EFAULT;
+
+		tvout_dbg("logical address = 0x%02x\n", laddr);
+
+		s5p_cec_set_addr(laddr);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Poll handler: readable exactly when a received frame is pending. */
+static u32 s5p_cec_poll(struct file *file, poll_table *wait)
+{
+	poll_wait(file, &cec_rx_struct.waitq, wait);
+
+	return atomic_read(&cec_rx_struct.state) == STATE_DONE
+		? (POLLIN | POLLRDNORM) : 0;
+}
+
+static const struct file_operations cec_fops = {
+ .owner = THIS_MODULE,
+ .open = s5p_cec_open,
+ .release = s5p_cec_release,
+ .read = s5p_cec_read,
+ .write = s5p_cec_write,
+#if 1
+ .unlocked_ioctl = s5p_cec_ioctl,
+#else
+ .ioctl = s5p_cec_ioctl,
+#endif
+ .poll = s5p_cec_poll,
+};
+
+static struct miscdevice cec_misc_device = {
+ .minor = CEC_MINOR,
+ .name = "CEC",
+ .fops = &cec_fops,
+};
+
+/*
+ * CEC interrupt handler. Handles Tx completion (success or error) and
+ * Rx completion (copying the received frame out of the hardware under
+ * the Rx spinlock), then wakes the corresponding sleepers in
+ * read()/write().
+ */
+static irqreturn_t s5p_cec_irq_handler(int irq, void *dev_id)
+{
+
+	u32 status = 0;
+
+	status = s5p_cec_get_status();
+
+	if (status & CEC_STATUS_TX_DONE) {
+		if (status & CEC_STATUS_TX_ERROR) {
+			tvout_dbg(" CEC_STATUS_TX_ERROR!\n");
+			s5p_cec_set_tx_state(STATE_ERROR);
+		} else {
+			tvout_dbg(" CEC_STATUS_TX_DONE!\n");
+			s5p_cec_set_tx_state(STATE_DONE);
+		}
+
+		s5p_clr_pending_tx();
+
+		/* unblock the writer sleeping in s5p_cec_write() */
+		wake_up_interruptible(&cec_tx_struct.waitq);
+	}
+
+	if (status & CEC_STATUS_RX_DONE) {
+		if (status & CEC_STATUS_RX_ERROR) {
+			tvout_dbg(" CEC_STATUS_RX_ERROR!\n");
+			s5p_cec_rx_reset();
+
+		} else {
+			u32 size;
+
+			tvout_dbg(" CEC_STATUS_RX_DONE!\n");
+
+			/* copy data from internal buffer */
+			/* received byte count lives in status bits 31:24 */
+			size = status >> 24;
+
+			spin_lock(&cec_rx_struct.lock);
+
+			s5p_cec_get_rx_buf(size, cec_rx_struct.buffer);
+
+			cec_rx_struct.size = size;
+
+			s5p_cec_set_rx_state(STATE_DONE);
+
+			spin_unlock(&cec_rx_struct.lock);
+
+			s5p_cec_enable_rx();
+		}
+
+		/* clear interrupt pending bit */
+		s5p_clr_pending_rx();
+
+		/* unblock readers/pollers waiting for a frame */
+		wake_up_interruptible(&cec_rx_struct.waitq);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Probe the CEC platform device: map registers, register the misc
+ * device, install the interrupt handler, allocate the Rx buffer and
+ * look up the CEC clock.
+ * Fixes: the success path used to fall straight through into the
+ * cleanup labels (freeing the IRQ and deregistering the misc device,
+ * yet still returning 0); error paths also returned 0 instead of the
+ * error code; and a failed kmalloc() deregistered the misc device
+ * twice.
+ */
+static int __devinit s5p_cec_probe(struct platform_device *pdev)
+{
+	struct s5p_platform_cec *pdata;
+	u8 *buffer;
+	int irq_num;
+	int ret;
+
+	pdata = to_tvout_plat(&pdev->dev);
+
+	if (pdata->cfg_gpio)
+		pdata->cfg_gpio(pdev);
+
+	/* get ioremap addr */
+	ret = s5p_cec_mem_probe(pdev);
+	if (ret != 0) {
+		printk(KERN_ERR "failed to s5p_cec_mem_probe ret = %d\n", ret);
+		goto err_mem_probe;
+	}
+
+	if (misc_register(&cec_misc_device)) {
+		printk(KERN_WARNING " Couldn't register device 10, %d.\n",
+			CEC_MINOR);
+		ret = -EBUSY;
+		goto err_misc_register;
+	}
+
+	irq_num = platform_get_irq(pdev, 0);
+	if (irq_num < 0) {
+		printk(KERN_ERR "failed to get %s irq resource\n", "cec");
+		ret = -ENOENT;
+		goto err_get_irq;
+	}
+
+	ret = request_irq(irq_num, s5p_cec_irq_handler, IRQF_DISABLED,
+		pdev->name, &pdev->id);
+	if (ret != 0) {
+		printk(KERN_ERR "failed to install %s irq (%d)\n", "cec", ret);
+		goto err_request_irq;
+	}
+
+	init_waitqueue_head(&cec_rx_struct.waitq);
+	spin_lock_init(&cec_rx_struct.lock);
+	init_waitqueue_head(&cec_tx_struct.waitq);
+
+	buffer = kmalloc(CEC_TX_BUFF_SIZE, GFP_KERNEL);
+	if (!buffer) {
+		printk(KERN_ERR " kmalloc() failed!\n");
+		ret = -ENOMEM;
+		goto err_kmalloc;
+	}
+
+	cec_rx_struct.buffer = buffer;
+	cec_rx_struct.size = 0;
+	/* NOTE(review): a clock-lookup failure returns from inside this
+	 * macro without unwinding the IRQ/misc registrations - confirm. */
+	TV_CLK_GET_WITH_ERR_CHECK(hdmi_cec_clk, pdev, "hdmicec");
+
+	return 0;
+
+err_kmalloc:
+	free_irq(irq_num, &pdev->id);
+err_request_irq:
+err_get_irq:
+	misc_deregister(&cec_misc_device);
+err_misc_register:
+err_mem_probe:
+
+	return ret;
+}
+
+/*
+ * Remove handler.
+ * NOTE(review): nothing acquired in probe (irq, misc device, Rx
+ * buffer, clock) is released here - confirm intended for this
+ * always-built-in driver.
+ */
+static int __devexit s5p_cec_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* CEC has no state to save/restore across suspend; both hooks are
+ * no-ops. */
+static int s5p_cec_suspend(struct platform_device *dev, pm_message_t state)
+{
+	return 0;
+}
+
+static int s5p_cec_resume(struct platform_device *dev)
+{
+	return 0;
+}
+#else
+#define s5p_cec_suspend NULL
+#define s5p_cec_resume NULL
+#endif
+
+static struct platform_driver s5p_cec_driver = {
+ .probe = s5p_cec_probe,
+ .remove = __devexit_p(s5p_cec_remove),
+ .suspend = s5p_cec_suspend,
+ .resume = s5p_cec_resume,
+ .driver = {
+ .name = "s5p-tvout-cec",
+ .owner = THIS_MODULE,
+ },
+};
+
+static char banner[] __initdata =
+ "S5P CEC Driver, (c) 2009 Samsung Electronics\n";
+
+/* Module init: register the CEC platform driver. */
+static int __init s5p_cec_init(void)
+{
+	int ret;
+
+	printk(banner);
+
+	ret = platform_driver_register(&s5p_cec_driver);
+	if (ret) {
+		printk(KERN_ERR "Platform Device Register Failed %d\n", ret);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Module exit: free the Rx buffer allocated in probe, then unregister
+ * the driver. */
+static void __exit s5p_cec_exit(void)
+{
+	kfree(cec_rx_struct.buffer);
+
+	platform_driver_unregister(&s5p_cec_driver);
+}
+
+module_init(s5p_cec_init);
+module_exit(s5p_cec_exit);
+
+MODULE_AUTHOR("SangPil Moon");
+MODULE_DESCRIPTION("S5P CEC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout_common_lib.c b/drivers/media/video/samsung/tvout/s5p_tvout_common_lib.c
new file mode 100644
index 0000000..dd69187
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout_common_lib.c
@@ -0,0 +1,183 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvout_common_lib.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Common library file for SAMSUNG TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+
+#include <linux/pm_runtime.h>
+
+#include "s5p_tvout_common_lib.h"
+
+#ifdef CONFIG_VCM
+#include <plat/s5p-vcm.h>
+#endif
+
+#ifdef CONFIG_VCM
+static atomic_t s5p_tvout_vcm_usage = ATOMIC_INIT(0);
+
+/* VCM TLB-invalidate callback: skip while the TV block is unused
+ * (its power domain may be off). */
+static void tvout_tlb_invalidator(enum vcm_dev_id id)
+{
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	if (atomic_read(&s5p_tvout_vcm_usage) == 0)
+		return;
+#endif
+}
+
+/* VCM page-directory-base callback: skip while the TV block is unused
+ * (its power domain may be off). */
+static void tvout_pgd_base_specifier(enum vcm_dev_id id, unsigned long base)
+{
+#if (defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME))
+	if (atomic_read(&s5p_tvout_vcm_usage) == 0)
+		return;
+#endif
+}
+
+static struct s5p_vcm_driver s5ptv_vcm_driver = {
+ .tlb_invalidator = &tvout_tlb_invalidator,
+ .pgd_base_specifier = &tvout_pgd_base_specifier,
+ .phys_alloc = NULL,
+ .phys_free = NULL,
+};
+
+#endif
+
+
+#ifdef CONFIG_VCM
+static struct vcm *s5p_vcm;
+
+/* Create the 64 MiB unified VCM region for the TV device.
+ * Returns 0 on success or the PTR_ERR code on failure. */
+int s5p_tvout_vcm_create_unified(void)
+{
+	s5p_vcm = vcm_create_unified((SZ_64M), VCM_DEV_TV,
+					&s5ptv_vcm_driver);
+	return IS_ERR(s5p_vcm) ? PTR_ERR(s5p_vcm) : 0;
+}
+
+int s5p_tvout_vcm_init(void)
+{
+ if (vcm_activate(s5p_vcm) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Point the TV sysmmu at the VCM page table when the block powers up. */
+void s5p_tvout_vcm_activate(void)
+{
+	vcm_set_pgtable_base(VCM_DEV_TV);
+}
+
+/* Deactivation is a deliberate no-op; the table is re-set on activate. */
+void s5p_tvout_vcm_deactivate(void)
+{
+}
+
+
+#endif
+/*
+ * Reserve and ioremap the platform IORESOURCE_MEM named 'name'.
+ * On success *res and *base receive the claimed resource and mapping
+ * (released with s5p_tvout_unmap_resource_mem). Returns 0 on success,
+ * -ENODEV when the named resource does not exist, -ENXIO otherwise.
+ * Improvement: use the resource_size() helper instead of the
+ * open-coded (end - start) + 1.
+ */
+int s5p_tvout_map_resource_mem(
+	struct platform_device *pdev, char *name,
+	void __iomem **base, struct resource **res)
+{
+	size_t size;
+	void __iomem *tmp_base;
+	struct resource *tmp_res;
+
+	tmp_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+
+	if (!tmp_res)
+		goto not_found;
+
+	size = resource_size(tmp_res);
+
+	tmp_res = request_mem_region(tmp_res->start, size, tmp_res->name);
+
+	if (!tmp_res) {
+		tvout_err("%s: fail to get memory region\n", __func__);
+		goto err_on_request_mem_region;
+	}
+
+	tmp_base = ioremap(tmp_res->start, size);
+
+	if (!tmp_base) {
+		tvout_err("%s: fail to ioremap address region\n", __func__);
+		goto err_on_ioremap;
+	}
+
+	*res = tmp_res;
+	*base = tmp_base;
+	return 0;
+
+err_on_ioremap:
+	release_resource(tmp_res);
+	kfree(tmp_res);
+
+err_on_request_mem_region:
+	return -ENXIO;
+
+not_found:
+	tvout_err("%s: fail to get IORESOURCE_MEM for %s\n", __func__, name);
+	return -ENODEV;
+}
+
+/* Undo s5p_tvout_map_resource_mem: unmap, then release the region.
+ * Either argument may be NULL. */
+void s5p_tvout_unmap_resource_mem(
+	void __iomem *base, struct resource *res)
+{
+	if (base)
+		iounmap(base);
+
+	if (!res)
+		return;
+
+	release_resource(res);
+	kfree(res);
+}
+
+/* Libraries for runtime PM */
+static struct device *s5p_tvout_dev;
+
+/* Enable runtime PM and remember the device for the get/put helpers
+ * below. */
+void s5p_tvout_pm_runtime_enable(struct device *dev)
+{
+	pm_runtime_enable(dev);
+
+	s5p_tvout_dev = dev;
+}
+
+void s5p_tvout_pm_runtime_disable(struct device *dev)
+{
+	pm_runtime_disable(dev);
+}
+
+/*
+ * Take a runtime-PM reference; the first user also re-activates the
+ * VCM page table (the power domain may have been off).
+ * NOTE(review): the VCM usage counter is not locked against concurrent
+ * get/put - confirm callers are serialized.
+ */
+void s5p_tvout_pm_runtime_get(void)
+{
+	pm_runtime_get_sync(s5p_tvout_dev);
+
+#ifdef CONFIG_VCM
+	atomic_inc(&s5p_tvout_vcm_usage);
+	if (atomic_read(&s5p_tvout_vcm_usage) == 1)
+		s5p_tvout_vcm_activate();
+#endif
+}
+
+/* Drop a runtime-PM reference; the last user deactivates the VCM. */
+void s5p_tvout_pm_runtime_put(void)
+{
+#ifdef CONFIG_VCM
+	if (atomic_read(&s5p_tvout_vcm_usage) == 1)
+		s5p_tvout_vcm_deactivate();
+
+	atomic_dec(&s5p_tvout_vcm_usage);
+#endif
+
+	pm_runtime_put_sync(s5p_tvout_dev);
+}
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout_common_lib.h b/drivers/media/video/samsung/tvout/s5p_tvout_common_lib.h
new file mode 100644
index 0000000..e43b9c7
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout_common_lib.h
@@ -0,0 +1,268 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvout_common_lib.h
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Header file of common library for SAMSUNG TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _S5P_TVOUT_COMMON_LIB_H_
+#define _S5P_TVOUT_COMMON_LIB_H_
+
+#include <linux/stddef.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/interrupt.h>
+
+/*****************************************************************************
+ * This file includes declarations for TVOUT driver's common library.
+ * All files in TVOUT driver can access function or definition in this file.
+ ****************************************************************************/
+
+/* Tag prepended to every TVOUT kernel log line */
+#define DRV_NAME "TVOUT"
+
+/* Unconditional error log with driver tag and calling function name */
+#define tvout_err(fmt, ...) \
+	printk(KERN_ERR "[%s] %s(): " fmt, \
+		DRV_NAME, __func__, ##__VA_ARGS__)
+
+/* NOTE(review): defining a CONFIG_ symbol inside a header forces debug
+ * support on for every includer, bypassing Kconfig -- confirm this is
+ * intentional rather than leftover debugging. */
+#define CONFIG_TVOUT_DEBUG
+
+/* Debug log, gated at runtime by the DBG_FLAG_TVOUT bit of
+ * tvout_dbg_flag (declared later in this header). */
+#ifndef tvout_dbg
+#ifdef CONFIG_TVOUT_DEBUG
+#define tvout_dbg(fmt, ...) \
+do { \
+	if (unlikely(tvout_dbg_flag & (1 << DBG_FLAG_TVOUT))) { \
+		printk(KERN_INFO "[%s] %s(): " fmt, \
+			DRV_NAME, __func__, ##__VA_ARGS__); \
+	} \
+} while (0)
+#else
+#define tvout_dbg(fmt, ...)
+#endif
+#endif
+
+/* Number of framebuffer (graphic) layers exposed by the mixer */
+#define S5PTV_FB_CNT 2
+/* Number and size (4 MiB each) of video-processor staging buffers */
+#define S5PTV_VP_BUFF_CNT 4
+#define S5PTV_VP_BUFF_SIZE (4*1024*1024)
+
+/* Fetch the board-supplied platform data from a struct device */
+#define to_tvout_plat(d) (to_platform_device(d)->dev.platform_data)
+
+/* First enum value reserved for HDMI modes in s5p_tvout_disp_mode */
+#define HDMI_START_NUM 0x1000
+
+/* Display standard/resolution selection.  Values below HDMI_START_NUM
+ * are analogue SDTV standards; values from HDMI_START_NUM upwards are
+ * HDMI display modes (plus 3D variants when CONFIG_HDMI_14A_3D). */
+enum s5p_tvout_disp_mode {
+	/* analogue (composite) TV standards */
+	TVOUT_NTSC_M = 0,
+	TVOUT_PAL_BDGHI,
+	TVOUT_PAL_M,
+	TVOUT_PAL_N,
+	TVOUT_PAL_NC,
+	TVOUT_PAL_60,
+	TVOUT_NTSC_443,
+
+	/* HDMI modes: <vertical>x<rate>[_<aspect>] */
+	TVOUT_480P_60_16_9 = HDMI_START_NUM,
+	TVOUT_480P_60_4_3,
+	TVOUT_480P_59,
+
+	TVOUT_576P_50_16_9,
+	TVOUT_576P_50_4_3,
+
+	TVOUT_720P_60,
+	TVOUT_720P_50,
+	TVOUT_720P_59,
+
+	TVOUT_1080P_60,
+	TVOUT_1080P_50,
+	TVOUT_1080P_59,
+	TVOUT_1080P_30,
+
+	TVOUT_1080I_60,
+	TVOUT_1080I_50,
+	TVOUT_1080I_59,
+#ifdef CONFIG_HDMI_14A_3D
+	/* HDMI 1.4a 3D: SBS = side-by-side half, TB = top-bottom */
+	TVOUT_720P_60_SBS_HALF,
+	TVOUT_720P_59_SBS_HALF,
+	TVOUT_720P_50_TB,
+	TVOUT_1080P_24_TB,
+	TVOUT_1080P_23_TB,
+#endif
+	/* sentinel meaning "not configured yet" */
+	TVOUT_INIT_DISP_VALUE
+};
+
+#ifdef CONFIG_HDMI_14A_3D
+enum s5p_tvout_3d_type {
+ HDMI_3D_FP_FORMAT,
+ HDMI_3D_SSH_FORMAT,
+ HDMI_3D_TB_FORMAT,
+ HDMI_2D_FORMAT,
+};
+#endif
+
+enum s5p_tvout_o_mode {
+ TVOUT_COMPOSITE,
+ TVOUT_HDMI,
+ TVOUT_HDMI_RGB,
+ TVOUT_DVI,
+ TVOUT_INIT_O_VALUE
+};
+
+enum s5p_mixer_burst_mode {
+ MIXER_BURST_8 = 0,
+ MIXER_BURST_16 = 1
+};
+
+enum s5ptv_audio_channel {
+ TVOUT_AUDIO_2CH = 0,
+ TVOUT_AUDIO_5_1CH = 1,
+ TVOUT_AUDIO_2CH_VAL = 2,
+ TVOUT_AUDIO_5_1CH_VAL = 6,
+};
+
+enum s5ptvfb_data_path_t {
+ DATA_PATH_FIFO = 0,
+ DATA_PATH_DMA = 1,
+};
+
+enum s5ptvfb_alpha_t {
+ LAYER_BLENDING,
+ PIXEL_BLENDING,
+ NONE_BLENDING,
+};
+
+enum s5ptvfb_ver_scaling_t {
+ VERTICAL_X1,
+ VERTICAL_X2,
+};
+
+enum s5ptvfb_hor_scaling_t {
+ HORIZONTAL_X1,
+ HORIZONTAL_X2,
+};
+
+struct s5ptvfb_alpha {
+ enum s5ptvfb_alpha_t mode;
+ int channel;
+ unsigned int value;
+};
+
+struct s5ptvfb_chroma {
+ int enabled;
+ unsigned int key;
+};
+
+struct s5ptvfb_user_window {
+ int x;
+ int y;
+};
+
+struct s5ptvfb_user_plane_alpha {
+ int channel;
+ unsigned char alpha;
+};
+
+struct s5ptvfb_user_chroma {
+ int enabled;
+ unsigned char red;
+ unsigned char green;
+ unsigned char blue;
+};
+
+struct s5ptvfb_user_scaling {
+ enum s5ptvfb_ver_scaling_t ver;
+ enum s5ptvfb_hor_scaling_t hor;
+};
+
+struct s5p_tvout_status {
+ struct clk *i2c_phy_clk;
+ struct clk *sclk_hdmiphy;
+ struct clk *sclk_pixel;
+ struct clk *sclk_dac;
+ struct clk *sclk_hdmi;
+ spinlock_t tvout_lock;
+};
+
+struct s5p_tvout_vp_buff {
+ unsigned int phy_base;
+ unsigned int vir_base;
+ unsigned int size;
+};
+
+struct s5p_tvout_vp_bufferinfo {
+ struct s5p_tvout_vp_buff vp_buffs[S5PTV_VP_BUFF_CNT];
+ unsigned int copy_buff_idxs[S5PTV_VP_BUFF_CNT - 1];
+ unsigned int curr_copy_idx;
+ unsigned int vp_access_buff_idx;
+ unsigned int size;
+};
+
+struct s5ptv_vp_buf_info {
+ unsigned int buff_cnt;
+ struct s5p_tvout_vp_buff *buffs;
+};
+
+struct reg_mem_info {
+ char *name;
+ struct resource *res;
+ void __iomem *base;
+};
+
+struct irq_info {
+ char *name;
+ irq_handler_t handler;
+ int no;
+};
+
+struct s5p_tvout_clk_info {
+ char *name;
+ struct clk *ptr;
+};
+
+#ifdef CONFIG_TVOUT_DEBUG
+enum tvout_dbg_flag_bit_num {
+ DBG_FLAG_HDCP = 0,
+ DBG_FLAG_TVOUT,
+ DBG_FLAG_HPD,
+ DBG_FLAG_HDMI
+};
+
+extern int tvout_dbg_flag;
+#endif
+
+extern struct s5p_tvout_status s5ptv_status;
+
+extern int s5p_tvout_vcm_create_unified(void);
+
+extern int s5p_tvout_vcm_init(void);
+
+extern void s5p_tvout_vcm_activate(void);
+
+extern void s5p_tvout_vcm_deactivate(void);
+
+extern int s5p_tvout_map_resource_mem(
+ struct platform_device *pdev, char *name,
+ void __iomem **base, struct resource **res);
+extern void s5p_tvout_unmap_resource_mem(
+ void __iomem *base, struct resource *res);
+
+extern void s5p_tvout_pm_runtime_enable(struct device *dev);
+extern void s5p_tvout_pm_runtime_disable(struct device *dev);
+extern void s5p_tvout_pm_runtime_get(void);
+extern void s5p_tvout_pm_runtime_put(void);
+
+extern void s5p_hdmi_ctrl_clock(bool on);
+extern bool on_stop_process;
+extern bool on_start_process;
+extern struct s5p_tvout_vp_bufferinfo s5ptv_vp_buff;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+extern unsigned int suspend_status;
+extern int s5p_hpd_get_status(void);
+extern void s5p_tvout_mutex_lock(void);
+extern void s5p_tvout_mutex_unlock(void);
+#endif
+#ifdef CONFIG_PM
+extern void s5p_hdmi_ctrl_phy_power_resume(void);
+#endif
+
+#endif /* _S5P_TVOUT_COMMON_LIB_H_ */
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout_ctrl.h b/drivers/media/video/samsung/tvout/s5p_tvout_ctrl.h
new file mode 100644
index 0000000..43043b4
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout_ctrl.h
@@ -0,0 +1,132 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvout_ctrl.h
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Header file for tvout control class of Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#ifndef _S5P_TVOUT_CTRL_H_
+#define _S5P_TVOUT_CTRL_H_ __FILE__
+
+/*****************************************************************************
+ * This file includes declarations for external functions of
+ * TVOUT driver's control class. So only external functions
+ * to be used by higher layer must exist in this file.
+ *
+ * Higher layer must use only the declarations included in this file.
+ ****************************************************************************/
+
+#include "hw_if/hw_if.h"
+#include "s5p_tvout_common_lib.h"
+
+/****************************************
+ * for Mixer control class
+ ***************************************/
+extern void s5p_mixer_ctrl_init_fb_addr_phy(enum s5p_mixer_layer layer,
+ dma_addr_t fb_addr);
+extern void s5p_mixer_ctrl_init_grp_layer(enum s5p_mixer_layer layer);
+extern int s5p_mixer_ctrl_set_pixel_format(enum s5p_mixer_layer layer, u32 bpp, u32 trans_len);
+extern int s5p_mixer_ctrl_enable_layer(enum s5p_mixer_layer layer);
+extern int s5p_mixer_ctrl_disable_layer(enum s5p_mixer_layer layer);
+extern int s5p_mixer_ctrl_set_priority(enum s5p_mixer_layer layer, u32 prio);
+extern int s5p_mixer_ctrl_set_dst_win_pos(enum s5p_mixer_layer layer,
+ int dst_x, int dst_y, u32 w, u32 h);
+extern int s5p_mixer_ctrl_set_src_win_pos(enum s5p_mixer_layer layer,
+ u32 src_x, u32 src_y, u32 w, u32 h);
+extern int s5p_mixer_ctrl_set_buffer_address(enum s5p_mixer_layer layer,
+ dma_addr_t start_addr);
+extern int s5p_mixer_ctrl_set_chroma_key(enum s5p_mixer_layer layer,
+ struct s5ptvfb_chroma chroma);
+extern int s5p_mixer_ctrl_set_alpha(enum s5p_mixer_layer layer, u32 alpha);
+extern int s5p_mixer_ctrl_set_blend_mode(enum s5p_mixer_layer layer,
+ enum s5ptvfb_alpha_t mode);
+extern int s5p_mixer_ctrl_set_alpha_blending(enum s5p_mixer_layer layer,
+ enum s5ptvfb_alpha_t blend_mode, unsigned int alpha);
+extern int s5p_mixer_ctrl_scaling(enum s5p_mixer_layer,
+ struct s5ptvfb_user_scaling scaling);
+extern int s5p_mixer_ctrl_mux_clk(struct clk *ptr);
+extern void s5p_mixer_ctrl_set_int_enable(bool en);
+extern void s5p_mixer_ctrl_set_vsync_interrupt(bool en);
+extern bool s5p_mixer_ctrl_get_vsync_interrupt(void);
+extern void s5p_mixer_ctrl_clear_pend_all(void);
+extern void s5p_mixer_ctrl_stop(void);
+extern void s5p_mixer_ctrl_internal_start(void);
+extern int s5p_mixer_ctrl_start(enum s5p_tvout_disp_mode disp,
+ enum s5p_tvout_o_mode out);
+extern int s5p_mixer_ctrl_constructor(struct platform_device *pdev);
+extern void s5p_mixer_ctrl_destructor(void);
+extern void s5p_mixer_ctrl_suspend(void);
+extern void s5p_mixer_ctrl_resume(void);
+
+/* Interrupt for Vsync */
+typedef struct {
+ wait_queue_head_t wq;
+ unsigned int wq_count;
+} s5p_tv_irq;
+
+extern wait_queue_head_t s5ptv_wq;
+
+/****************************************
+ * for TV interface control class
+ ***************************************/
+extern int s5p_tvif_ctrl_set_audio(bool en);
+extern void s5p_tvif_audio_channel(int channel);
+extern void s5p_tvif_q_color_range(int range);
+extern int s5p_tvif_get_q_range(void);
+extern int s5p_tvif_ctrl_set_av_mute(bool en);
+extern int s5p_tvif_ctrl_get_std_if(
+ enum s5p_tvout_disp_mode *std, enum s5p_tvout_o_mode *inf);
+extern bool s5p_tvif_ctrl_get_run_state(void);
+extern int s5p_tvif_ctrl_start(
+ enum s5p_tvout_disp_mode std, enum s5p_tvout_o_mode inf);
+extern void s5p_tvif_ctrl_stop(void);
+
+extern int s5p_tvif_ctrl_constructor(struct platform_device *pdev);
+extern void s5p_tvif_ctrl_destructor(void);
+extern void s5p_tvif_ctrl_suspend(void);
+extern void s5p_tvif_ctrl_resume(void);
+
+extern u8 s5p_hdmi_ctrl_get_mute(void);
+extern void s5p_hdmi_ctrl_set_hdcp(bool en);
+
+extern int s5p_hpd_set_hdmiint(void);
+extern int s5p_hpd_set_eint(void);
+
+#ifdef CONFIG_HDMI_EARJACK_MUTE
+extern void s5p_hdmi_ctrl_set_audio(bool en);
+#endif
+
+/****************************************
+ * for VP control class
+ ***************************************/
+enum s5p_vp_src_color {
+ VP_SRC_COLOR_NV12,
+ VP_SRC_COLOR_NV12IW,
+ VP_SRC_COLOR_TILE_NV12,
+ VP_SRC_COLOR_TILE_NV12IW,
+ VP_SRC_COLOR_NV21,
+ VP_SRC_COLOR_NV21IW,
+ VP_SRC_COLOR_TILE_NV21,
+ VP_SRC_COLOR_TILE_NV21IW
+};
+
+extern void s5p_vp_ctrl_set_src_plane(
+ u32 base_y, u32 base_c, u32 width, u32 height,
+ enum s5p_vp_src_color color, enum s5p_vp_field field);
+extern void s5p_vp_ctrl_set_src_win(u32 left, u32 top, u32 width, u32 height);
+extern void s5p_vp_ctrl_set_dest_win(u32 left, u32 top, u32 width, u32 height);
+extern void s5p_vp_ctrl_set_dest_win_alpha_val(u32 alpha);
+extern void s5p_vp_ctrl_set_dest_win_blend(bool enable);
+extern void s5p_vp_ctrl_set_dest_win_priority(u32 prio);
+extern int s5p_vp_ctrl_start(void);
+extern void s5p_vp_ctrl_stop(void);
+extern int s5p_vp_ctrl_constructor(struct platform_device *pdev);
+extern void s5p_vp_ctrl_destructor(void);
+extern void s5p_vp_ctrl_suspend(void);
+void s5p_vp_ctrl_resume(void);
+
+#endif /* _S5P_TVOUT_CTRL_H_ */
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout_fb.c b/drivers/media/video/samsung/tvout/s5p_tvout_fb.c
new file mode 100644
index 0000000..5a2ce5a
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout_fb.c
@@ -0,0 +1,754 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvout_fb.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Frame buffer ftn. file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/fb.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+
+#include "s5p_tvout_common_lib.h"
+#include "s5p_tvout_ctrl.h"
+#include "s5p_tvout_v4l2.h"
+
+#ifdef CONFIG_UMP_VCM_ALLOC
+#include "ump_kernel_interface.h"
+#endif
+
+#define S5PTVFB_NAME "s5ptvfb"
+
+#define S5PTV_FB_LAYER0_MINOR 10
+#define S5PTV_FB_LAYER1_MINOR 11
+
+#define FB_INDEX(id) (id - S5PTV_FB_LAYER0_MINOR)
+
+#define S5PTVFB_CHROMA(r, g, b) \
+ (((r & 0xff) << 16) | ((g & 0xff) << 8) | ((b & 0xff) << 0))
+
+#define S5PTVFB_WIN_POSITION \
+ _IOW('F', 213, struct s5ptvfb_user_window)
+#define S5PTVFB_WIN_SET_PLANE_ALPHA \
+ _IOW('F', 214, struct s5ptvfb_user_plane_alpha)
+#define S5PTVFB_WIN_SET_CHROMA \
+ _IOW('F', 215, struct s5ptvfb_user_chroma)
+#define S5PTVFB_WAITFORVSYNC \
+ _IO('F', 32)
+#define S5PTVFB_SET_VSYNC_INT \
+ _IOW('F', 216, u32)
+#define S5PTVFB_WIN_SET_ADDR \
+ _IOW('F', 219, u32)
+#define S5PTVFB_SCALING \
+ _IOW('F', 222, struct s5ptvfb_user_scaling)
+
+struct s5ptvfb_window {
+ int id;
+ struct device *dev_fb;
+ int enabled;
+ atomic_t in_use;
+ int x;
+ int y;
+ enum s5ptvfb_data_path_t path;
+ int local_channel;
+ int dma_burst;
+ unsigned int pseudo_pal[16];
+ struct s5ptvfb_alpha alpha;
+ struct s5ptvfb_chroma chroma;
+ int (*suspend_fifo)(void);
+ int (*resume_fifo)(void);
+};
+
+struct s5ptvfb_lcd_timing {
+ int h_fp;
+ int h_bp;
+ int h_sw;
+ int v_fp;
+ int v_fpe;
+ int v_bp;
+ int v_bpe;
+ int v_sw;
+};
+
+struct s5ptvfb_lcd_polarity {
+ int rise_vclk;
+ int inv_hsync;
+ int inv_vsync;
+ int inv_vden;
+};
+
+struct s5ptvfb_lcd {
+ int width;
+ int height;
+ int bpp;
+ int freq;
+ struct s5ptvfb_lcd_timing timing;
+ struct s5ptvfb_lcd_polarity polarity;
+
+ void (*init_ldi)(void);
+};
+
+static struct mutex fb_lock;
+
+static struct fb_info *fb[S5PTV_FB_CNT];
+static struct s5ptvfb_lcd lcd = {
+ .width = 1920,
+ .height = 1080,
+ .bpp = 32,
+ .freq = 60,
+
+ .timing = {
+ .h_fp = 49,
+ .h_bp = 17,
+ .h_sw = 33,
+ .v_fp = 4,
+ .v_fpe = 1,
+ .v_bp = 15,
+ .v_bpe = 1,
+ .v_sw = 6,
+ },
+
+ .polarity = {
+ .rise_vclk = 0,
+ .inv_hsync = 1,
+ .inv_vsync = 1,
+ .inv_vden = 0,
+ },
+};
+
+/*
+ * Block until the next mixer vsync wakes s5ptv_wq, or until a 100 ms
+ * timeout expires.  Always reports success.
+ *
+ * NOTE(review): sleep_on_timeout() is inherently racy (a wakeup that
+ * arrives before the sleep starts is lost) and was removed from
+ * mainline; the timeout is the only backstop here -- confirm acceptable.
+ */
+static int s5p_tvout_fb_wait_for_vsync(void)
+{
+	sleep_on_timeout(&s5ptv_wq, HZ / 10);
+
+	return 0;
+}
+
+/*
+ * Scale a 16-bit colour channel value down to the width described by
+ * @bf and shift it into its bitfield position.  Standard fbdev
+ * pseudo-palette helper used by s5p_tvout_fb_setcolreg().
+ */
+static inline unsigned int s5p_tvout_fb_chan_to_field(unsigned int chan,
+		struct fb_bitfield bf)
+{
+	chan &= 0xffff;
+	chan >>= 16 - bf.length;
+
+	return chan << bf.offset;
+}
+
+/*
+ * Derive the window's blending mode from the requested pixel format:
+ * any alpha bits in @var select per-pixel blending, otherwise blending
+ * is switched off.  Layer (plane) alpha is only selected via ioctl
+ * (S5PTVFB_WIN_SET_PLANE_ALPHA).  Always returns 0.
+ */
+static int s5p_tvout_fb_set_alpha_info(struct fb_var_screeninfo *var,
+		struct s5ptvfb_window *win)
+{
+	if (var->transp.length > 0)
+		win->alpha.mode = PIXEL_BLENDING;
+	else
+		win->alpha.mode = NONE_BLENDING;
+
+	return 0;
+}
+#if 0 /* This function will be used in the future */
+static int s5p_tvout_fb_map_video_memory(int id)
+{
+ enum s5p_mixer_layer layer;
+ struct s5ptvfb_window *win = fb[FB_INDEX(id)]->par;
+ struct fb_fix_screeninfo *fix = &fb[FB_INDEX(id)]->fix;
+
+ if (win->path == DATA_PATH_FIFO)
+ return 0;
+
+ fb[FB_INDEX(id)]->screen_base = dma_alloc_writecombine(win->dev_fb,
+ PAGE_ALIGN(fix->smem_len),
+ (unsigned int *) &fix->smem_start, GFP_KERNEL);
+
+ switch (id) {
+ case S5PTV_FB_LAYER0_MINOR:
+ layer = MIXER_GPR0_LAYER;
+ break;
+ case S5PTV_FB_LAYER1_MINOR:
+ layer = MIXER_GPR1_LAYER;
+ break;
+ default:
+ tvout_err("invalid layer\n");
+ return -1;
+ }
+ s5p_mixer_ctrl_init_fb_addr_phy(layer, fix->smem_start);
+
+ if (!fb[FB_INDEX(id)]->screen_base)
+ return -1;
+ else
+ tvout_dbg("[fb%d] dma: 0x%08x, cpu: 0x%08x,size: 0x%08x\n",
+ win->id, (unsigned int) fix->smem_start,
+ (unsigned int) fb[FB_INDEX(id)]->screen_base,
+ fix->smem_len);
+
+ memset(fb[FB_INDEX(id)]->screen_base, 0, fix->smem_len);
+
+ return 0;
+}
+#endif
+/*
+ * Fill in the RGBA bitfield layout of @var for the supported depths:
+ * 16 bpp (ARGB1555 / ARGB4444 / RGB565 depending on transp.length),
+ * 24 bpp (RGB888) and 32 bpp (ARGB8888).  Unsupported depths fall
+ * through untouched.  Always returns 0.
+ *
+ * NOTE(review): the 32 bpp case sets transp.offset = 24 but never sets
+ * transp.length, so alpha detection in s5p_tvout_fb_set_alpha_info()
+ * depends on the caller having primed transp.length -- verify.
+ */
+static int s5p_tvout_fb_set_bitfield(struct fb_var_screeninfo *var)
+{
+	switch (var->bits_per_pixel) {
+	case 16:
+		if (var->transp.length == 1) {
+			/* ARGB1555 */
+			var->red.offset = 10;
+			var->red.length = 5;
+			var->green.offset = 5;
+			var->green.length = 5;
+			var->blue.offset = 0;
+			var->blue.length = 5;
+			var->transp.offset = 15;
+		} else if (var->transp.length == 4) {
+			/* ARGB4444 */
+			var->red.offset = 8;
+			var->red.length = 4;
+			var->green.offset = 4;
+			var->green.length = 4;
+			var->blue.offset = 0;
+			var->blue.length = 4;
+			var->transp.offset = 12;
+		} else {
+			/* RGB565, no alpha */
+			var->red.offset = 11;
+			var->red.length = 5;
+			var->green.offset = 5;
+			var->green.length = 6;
+			var->blue.offset = 0;
+			var->blue.length = 5;
+			var->transp.offset = 0;
+		}
+		break;
+
+	case 24:
+		/* packed RGB888, no alpha */
+		var->red.offset = 16;
+		var->red.length = 8;
+		var->green.offset = 8;
+		var->green.length = 8;
+		var->blue.offset = 0;
+		var->blue.length = 8;
+		var->transp.offset = 0;
+		var->transp.length = 0;
+		break;
+
+	case 32:
+		/* ARGB8888 */
+		var->red.offset = 16;
+		var->red.length = 8;
+		var->green.offset = 8;
+		var->green.length = 8;
+		var->blue.offset = 0;
+		var->blue.length = 8;
+		var->transp.offset = 24;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * fb_setcolreg handler: maintain a 16-entry software pseudo-palette
+ * for FB_VISUAL_TRUECOLOR consoles.  Channel values are packed into
+ * the current pixel format via s5p_tvout_fb_chan_to_field().  Entries
+ * beyond 15 are silently ignored; always returns 0.
+ */
+static int s5p_tvout_fb_setcolreg(unsigned int regno, unsigned int red,
+				unsigned int green, unsigned int blue,
+				unsigned int transp, struct fb_info *fb)
+{
+	unsigned int *pal = (unsigned int *) fb->pseudo_palette;
+	unsigned int val = 0;
+
+	if (regno < 16) {
+		/* fake palette of 16 colors */
+		val |= s5p_tvout_fb_chan_to_field(red, fb->var.red);
+		val |= s5p_tvout_fb_chan_to_field(green, fb->var.green);
+		val |= s5p_tvout_fb_chan_to_field(blue, fb->var.blue);
+		val |= s5p_tvout_fb_chan_to_field(transp, fb->var.transp);
+
+		pal[regno] = val;
+	}
+
+	return 0;
+}
+
+/*
+ * fb_pan_display handler: validate the requested yoffset, then point
+ * the mixer's graphic-layer DMA at the corresponding line within the
+ * (virtual) framebuffer.  Only vertical panning is supported.
+ *
+ * NOTE(review): error paths return -1 rather than a -E* code --
+ * callers in fbmem interpret any negative value as failure, but
+ * -EINVAL would be conventional.
+ */
+static int s5p_tvout_fb_pan_display(struct fb_var_screeninfo *var,
+		struct fb_info *fb)
+{
+	dma_addr_t start_addr;
+	enum s5p_mixer_layer layer;
+	struct fb_fix_screeninfo *fix = &fb->fix;
+
+	if (var->yoffset + var->yres > var->yres_virtual) {
+		tvout_err("invalid y offset value\n");
+		return -1;
+	}
+
+	fb->var.yoffset = var->yoffset;
+
+	/* fb minor number selects which mixer graphic layer to pan */
+	switch (fb->node) {
+	case S5PTV_FB_LAYER0_MINOR:
+		layer = MIXER_GPR0_LAYER;
+		break;
+	case S5PTV_FB_LAYER1_MINOR:
+		layer = MIXER_GPR1_LAYER;
+		break;
+	default:
+		tvout_err("invalid layer\n");
+		return -1;
+	}
+
+	/* byte offset of the first visible line inside the buffer */
+	start_addr = fix->smem_start + (var->xres_virtual *
+		(var->bits_per_pixel / 8) * var->yoffset);
+
+	s5p_mixer_ctrl_set_buffer_address(layer, start_addr);
+
+	return 0;
+}
+
+/*
+ * fb_blank handler: map UNBLANK/POWERDOWN onto enabling/disabling the
+ * corresponding mixer graphic layer.  UNBLANK is refused (quietly, via
+ * debug log) while no framebuffer memory has been attached.
+ *
+ * NOTE(review): returns 1 on success; fbdev blank callbacks
+ * conventionally return 0 (non-zero makes fbcon fall back to generic
+ * blanking) -- confirm this is intended.
+ */
+static int s5p_tvout_fb_blank(int blank_mode, struct fb_info *fb)
+{
+	enum s5p_mixer_layer layer = MIXER_GPR0_LAYER;
+
+	tvout_dbg("change blank mode\n");
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+	switch (fb->node) {
+	case S5PTV_FB_LAYER0_MINOR:
+		layer = MIXER_GPR0_LAYER;
+		break;
+	case S5PTV_FB_LAYER1_MINOR:
+		layer = MIXER_GPR1_LAYER;
+		break;
+	default:
+		tvout_err("not supported layer\n");
+		goto err_fb_blank;
+	}
+
+	switch (blank_mode) {
+	case FB_BLANK_UNBLANK:
+		if (fb->fix.smem_start)
+			s5p_mixer_ctrl_enable_layer(layer);
+		else
+			tvout_dbg("[fb%d] no alloc memory for unblank\n",
+				fb->node);
+		break;
+
+	case FB_BLANK_POWERDOWN:
+		s5p_mixer_ctrl_disable_layer(layer);
+		break;
+
+	default:
+		/* NORMAL/VSYNC/HSYNC suspend levels are not supported */
+		tvout_err("not supported blank mode\n");
+		goto err_fb_blank;
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return 1;
+err_fb_blank:
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return -1;
+}
+
+/*
+ * fb_set_par handler: push the screen parameters accepted by
+ * check_var() down to the mixer graphic layer (pixel format, source
+ * window, alpha blending).  Memory allocation itself is expected to
+ * have happened elsewhere (ioctl with CONFIG_USER_ALLOC_TVOUT, or the
+ * currently-disabled map_video_memory path).
+ */
+static int s5p_tvout_fb_set_par(struct fb_info *fb)
+{
+	u32 bpp, trans_len;
+	u32 src_x, src_y, w, h;
+	struct s5ptvfb_window *win = fb->par;
+	enum s5p_mixer_layer layer = MIXER_GPR0_LAYER;
+
+	tvout_dbg("[fb%d] set_par\n", win->id);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+	/* warn (but continue) when no backing memory is attached yet */
+	if (!fb->fix.smem_start) {
+#ifndef CONFIG_USER_ALLOC_TVOUT
+		printk(KERN_INFO " The frame buffer is allocated here\n");
+		/* s5p_tvout_fb_map_video_memory(win->id);*/
+#else
+		printk(KERN_ERR
+		"[Warning] The frame buffer should be allocated by ioctl\n");
+#endif
+	}
+
+	bpp = fb->var.bits_per_pixel;
+	trans_len = fb->var.transp.length;
+	w = fb->var.xres;
+	h = fb->var.yres;
+	src_x = fb->var.xoffset;
+	src_y = fb->var.yoffset;
+
+	switch (fb->node) {
+	case S5PTV_FB_LAYER0_MINOR:
+		layer = MIXER_GPR0_LAYER;
+		break;
+	case S5PTV_FB_LAYER1_MINOR:
+		layer = MIXER_GPR1_LAYER;
+		break;
+	}
+
+	s5p_mixer_ctrl_init_grp_layer(layer);
+	s5p_mixer_ctrl_set_pixel_format(layer, bpp, trans_len);
+	s5p_mixer_ctrl_set_src_win_pos(layer, src_x, src_y, w, h);
+	s5p_mixer_ctrl_set_alpha_blending(layer, win->alpha.mode,
+		win->alpha.value);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return 0;
+}
+
+/*
+ * fb_check_var handler: clamp the user-requested mode to what the
+ * hardware supports (16/24/32 bpp; screen no larger than the fixed
+ * 1920x1080 "lcd" description; vertical panning only), keep the window
+ * position on screen, and recompute the derived fix-info fields plus
+ * the bitfield/alpha layout.
+ */
+static int s5p_tvout_fb_check_var(struct fb_var_screeninfo *var,
+		struct fb_info *fb)
+{
+	struct fb_fix_screeninfo *fix = &fb->fix;
+	struct s5ptvfb_window *win = fb->par;
+
+	tvout_dbg("[fb%d] check_var\n", win->id);
+
+	if (var->bits_per_pixel != 16 && var->bits_per_pixel != 24 &&
+		var->bits_per_pixel != 32) {
+		tvout_err("invalid bits per pixel\n");
+		return -1;
+	}
+
+	/* clamp resolution to the fixed panel description */
+	if (var->xres > lcd.width)
+		var->xres = lcd.width;
+
+	if (var->yres > lcd.height)
+		var->yres = lcd.height;
+
+	/* no horizontal virtual space; vertical limited by ypanstep */
+	if (var->xres_virtual != var->xres)
+		var->xres_virtual = var->xres;
+
+	if (var->yres_virtual > var->yres * (fb->fix.ypanstep + 1))
+		var->yres_virtual = var->yres * (fb->fix.ypanstep + 1);
+
+	if (var->xoffset != 0)
+		var->xoffset = 0;
+
+	if (var->yoffset + var->yres > var->yres_virtual)
+		var->yoffset = var->yres_virtual - var->yres;
+
+	/* keep the destination window fully on screen */
+	if (win->x + var->xres > lcd.width)
+		win->x = lcd.width - var->xres;
+
+	if (win->y + var->yres > lcd.height)
+		win->y = lcd.height - var->yres;
+
+	/* modify the fix info */
+	fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
+	fix->smem_len = fix->line_length * var->yres_virtual;
+
+	s5p_tvout_fb_set_bitfield(var);
+	s5p_tvout_fb_set_alpha_info(var, win);
+
+	return 0;
+}
+
+/*
+ * fb_release handler: drop the single-open reference taken in
+ * s5p_tvout_fb_open() so the device can be opened again.
+ */
+static int s5p_tvout_fb_release(struct fb_info *fb, int user)
+{
+	struct s5ptvfb_window *win = fb->par;
+
+	atomic_dec(&win->in_use);
+
+	return 0;
+}
+
+/*
+ * fb_ioctl handler for the private S5PTVFB_* commands: window
+ * position, plane alpha, chroma key, vsync interrupt control and
+ * wait, direct buffer-address set, and 2x scaling.  The fb minor
+ * number selects which mixer graphic layer is addressed.
+ *
+ * NOTE(review): copy_from_user() failures set ret = -EFAULT, but the
+ * function unconditionally returns 0 at the end, so those errors are
+ * silently swallowed -- likely should "return ret".  Also
+ * S5PTVFB_SET_VSYNC_INT/WIN_SET_ADDR treat the ioctl argument as an
+ * immediate value cast through a pointer, despite being declared
+ * _IOW(..., u32) -- confirm userspace matches.
+ */
+static int s5p_tvout_fb_ioctl(struct fb_info *fb, unsigned int cmd,
+				unsigned long arg)
+{
+	dma_addr_t start_addr;
+	enum s5p_mixer_layer layer;
+	struct fb_var_screeninfo *var = &fb->var;
+	struct s5ptvfb_window *win = fb->par;
+	int ret = 0;
+	void *argp = (void *) arg;
+
+	/* scratch space for the user-copied command payloads */
+	union {
+		struct s5ptvfb_user_window user_window;
+		struct s5ptvfb_user_plane_alpha user_alpha;
+		struct s5ptvfb_user_chroma user_chroma;
+		struct s5ptvfb_user_scaling user_scaling;
+		int vsync;
+	} p;
+
+	tvout_dbg("\n");
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+	switch (fb->node) {
+	case S5PTV_FB_LAYER0_MINOR:
+		layer = MIXER_GPR0_LAYER;
+		break;
+	case S5PTV_FB_LAYER1_MINOR:
+		layer = MIXER_GPR1_LAYER;
+		break;
+	default:
+		printk(KERN_ERR "[Error] invalid layer\n");
+		goto err_fb_ioctl;
+	}
+
+	switch (cmd) {
+	case S5PTVFB_WIN_POSITION:
+		if (copy_from_user(&p.user_window,
+			(struct s5ptvfb_user_window __user *) arg,
+			sizeof(p.user_window)))
+			ret = -EFAULT;
+		else {
+			/* destination size is the current mode, not user's */
+			s5p_mixer_ctrl_set_dst_win_pos(layer, p.user_window.x,
+				p.user_window.y, var->xres, var->yres);
+		}
+		break;
+
+	case S5PTVFB_WIN_SET_PLANE_ALPHA:
+		if (copy_from_user(&p.user_alpha,
+			(struct s5ptvfb_user_plane_alpha __user *) arg,
+			sizeof(p.user_alpha)))
+			ret = -EFAULT;
+		else {
+			/* plane alpha overrides any per-pixel blending */
+			win->alpha.mode = LAYER_BLENDING;
+			win->alpha.value = p.user_alpha.alpha;
+			s5p_mixer_ctrl_set_alpha_blending(layer,
+				win->alpha.mode, win->alpha.value);
+		}
+		break;
+	case S5PTVFB_WIN_SET_CHROMA:
+		if (copy_from_user(&p.user_chroma,
+			(struct s5ptvfb_user_chroma __user *) arg,
+			sizeof(p.user_chroma)))
+			ret = -EFAULT;
+		else {
+			win->chroma.enabled = p.user_chroma.enabled;
+			win->chroma.key = S5PTVFB_CHROMA(p.user_chroma.red,
+				p.user_chroma.green,
+				p.user_chroma.blue);
+
+			s5p_mixer_ctrl_set_chroma_key(layer, win->chroma);
+		}
+		break;
+	case S5PTVFB_SET_VSYNC_INT:
+		s5p_mixer_ctrl_set_vsync_interrupt((int)argp);
+		break;
+	case S5PTVFB_WAITFORVSYNC:
+		s5p_tvout_fb_wait_for_vsync();
+		break;
+	case S5PTVFB_WIN_SET_ADDR:
+#if defined(CONFIG_S5P_SYSMMU_TV) && defined(CONFIG_UMP_VCM_ALLOC)
+		/* with SysMMU+UMP the argument is a UMP secure id, not
+		 * a physical address */
+		fb->fix.smem_start = ump_dd_dev_virtual_get_from_secure_id(
+			(unsigned int)argp);
+#else
+		fb->fix.smem_start = (unsigned long)argp;
+#endif
+		start_addr = fb->fix.smem_start + (var->xres_virtual *
+			(var->bits_per_pixel / 8) * var->yoffset);
+
+		s5p_mixer_ctrl_set_buffer_address(layer, start_addr);
+		break;
+	case S5PTVFB_SCALING:
+		if (copy_from_user(&p.user_scaling,
+			(struct s5ptvfb_user_scaling __user *) arg,
+			sizeof(p.user_scaling)))
+			ret = -EFAULT;
+		else
+			s5p_mixer_ctrl_scaling(layer, p.user_scaling);
+		break;
+	}
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+
+	return 0;
+err_fb_ioctl:
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return -1;
+
+}
+
+/*
+ * fb_open handler: enforce a single userspace opener per layer.  The
+ * in_use counter is checked and bumped under fb_lock; a second open
+ * attempt fails with -EBUSY until the first holder releases.
+ */
+static int s5p_tvout_fb_open(struct fb_info *fb, int user)
+{
+	struct s5ptvfb_window *win = fb->par;
+	int ret = 0;
+
+	tvout_dbg("\n");
+	mutex_lock(&fb_lock);
+
+	if (atomic_read(&win->in_use)) {
+		tvout_dbg("do not allow multiple open for tvout framebuffer\n");
+		ret = -EBUSY;
+	} else
+		atomic_inc(&win->in_use);
+
+	mutex_unlock(&fb_lock);
+
+	return ret;
+}
+
+/* fbdev entry points shared by both TVOUT graphic layers; software
+ * drawing is delegated to the generic cfb_* helpers (no acceleration). */
+struct fb_ops s5ptvfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+	.fb_check_var = s5p_tvout_fb_check_var,
+	.fb_set_par = s5p_tvout_fb_set_par,
+	.fb_blank = s5p_tvout_fb_blank,
+	.fb_pan_display = s5p_tvout_fb_pan_display,
+	.fb_setcolreg = s5p_tvout_fb_setcolreg,
+	.fb_ioctl = s5p_tvout_fb_ioctl,
+	.fb_open = s5p_tvout_fb_open,
+	.fb_release = s5p_tvout_fb_release,
+};
+
+/*
+ * Initialise the fb_info / window state for layer @id (a minor number,
+ * S5PTV_FB_LAYER0_MINOR or ..LAYER1_MINOR): defaults to a 1920x1080
+ * ARGB8888 mode taken from the static "lcd" description, with no
+ * panning (ypanstep = 0) and full plane alpha.
+ *
+ * NOTE(review): mutex_init(&fb_lock) runs once per layer, i.e. the
+ * shared lock is re-initialised when the second layer is set up --
+ * harmless at probe time but worth confirming.
+ */
+static int s5p_tvout_fb_init_fbinfo(int id, struct device *dev_fb)
+{
+	struct fb_fix_screeninfo *fix = &fb[FB_INDEX(id)]->fix;
+	struct fb_var_screeninfo *var = &fb[FB_INDEX(id)]->var;
+	struct s5ptvfb_window *win = fb[FB_INDEX(id)]->par;
+	struct s5ptvfb_alpha *alpha = &win->alpha;
+
+	memset(win, 0, sizeof(struct s5ptvfb_window));
+
+	platform_set_drvdata(to_platform_device(dev_fb), fb[FB_INDEX(id)]);
+
+	strcpy(fix->id, S5PTVFB_NAME);
+
+	/* fimd specific */
+	win->id = id;
+	win->path = DATA_PATH_DMA;
+	win->dma_burst = 16;
+	win->dev_fb = dev_fb;
+	alpha->mode = LAYER_BLENDING;
+	alpha->value = 0xff;
+
+	/* fbinfo */
+	fb[FB_INDEX(id)]->fbops = &s5ptvfb_ops;
+	fb[FB_INDEX(id)]->flags = FBINFO_FLAG_DEFAULT;
+	fb[FB_INDEX(id)]->pseudo_palette = &win->pseudo_pal;
+	fix->xpanstep = 0;
+	fix->ypanstep = 0;
+	fix->type = FB_TYPE_PACKED_PIXELS;
+	fix->accel = FB_ACCEL_NONE;
+	fix->visual = FB_VISUAL_TRUECOLOR;
+	var->xres = lcd.width;
+	var->yres = lcd.height;
+	var->xres_virtual = var->xres;
+	var->yres_virtual = var->yres + (var->yres * fix->ypanstep);
+	var->bits_per_pixel = 32;
+	var->xoffset = 0;
+	var->yoffset = 0;
+	var->width = 0;
+	var->height = 0;
+	var->transp.length = 0;
+
+	fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
+	fix->smem_len = fix->line_length * var->yres_virtual;
+
+	var->nonstd = 0;
+	var->activate = FB_ACTIVATE_NOW;
+	var->vmode = FB_VMODE_NONINTERLACED;
+	/* timing/pixclock derived from the static lcd description */
+	var->hsync_len = lcd.timing.h_sw;
+	var->vsync_len = lcd.timing.v_sw;
+	var->left_margin = lcd.timing.h_fp;
+	var->right_margin = lcd.timing.h_bp;
+	var->upper_margin = lcd.timing.v_fp;
+	var->lower_margin = lcd.timing.v_bp;
+
+	var->pixclock = lcd.freq * (var->left_margin + var->right_margin +
+				var->hsync_len + var->xres) *
+				(var->upper_margin + var->lower_margin +
+				var->vsync_len + var->yres);
+
+	tvout_dbg("pixclock: %d\n", var->pixclock);
+
+	s5p_tvout_fb_set_bitfield(var);
+	s5p_tvout_fb_set_alpha_info(var, win);
+
+	mutex_init(&fb_lock);
+
+	return 0;
+}
+
+/*
+ * Allocate and initialise the fb_info structures for all S5PTV_FB_CNT
+ * layers.  Actual video memory is NOT allocated here (the map call is
+ * compiled out); it arrives later via S5PTVFB_WIN_SET_ADDR.
+ *
+ * NOTE(review): the error path releases every allocated fb_info but
+ * leaves the stale pointers in fb[] -- callers must not reuse them.
+ */
+int s5p_tvout_fb_alloc_framebuffer(struct device *dev_fb)
+{
+	int ret, i;
+
+	/* alloc for each framebuffer */
+	for (i = 0; i < S5PTV_FB_CNT; i++) {
+		fb[i] = framebuffer_alloc(sizeof(struct s5ptvfb_window),
+				dev_fb);
+		if (!fb[i]) {
+			tvout_err("not enough memory\n");
+			ret = -1;
+			goto err_alloc_fb;
+		}
+
+		ret = s5p_tvout_fb_init_fbinfo(i + S5PTV_FB_LAYER0_MINOR,
+				dev_fb);
+		if (ret) {
+			tvout_err("fail to allocate memory for tv fb\n");
+			ret = -1;
+			goto err_alloc_fb;
+		}
+
+#ifndef CONFIG_USER_ALLOC_TVOUT
+#if 0
+		if (s5p_tvout_fb_map_video_memory(i + S5PTV_FB_LAYER0_MINOR)) {
+			tvout_err("fail to map video mem for default window\n");
+			ret = -1;
+			goto err_alloc_fb;
+		}
+#endif
+#endif
+	}
+
+	return 0;
+
+err_alloc_fb:
+	for (i = 0; i < S5PTV_FB_CNT; i++) {
+		if (fb[i])
+			framebuffer_release(fb[i]);
+	}
+
+	return ret;
+}
+
+/*
+ * Register the pre-allocated fb_info structures with the fbdev core
+ * and (when no fbcon and no user allocation) apply the default mode.
+ *
+ * NOTE(review): the do/while loop appears intended to retry until
+ * fb[0] lands on minor >= S5PTV_FB_LAYER0_MINOR (10), but it calls
+ * register_framebuffer() on the SAME fb_info repeatedly without
+ * unregistering, so earlier minors stay consumed by this device --
+ * confirm this is the intended way of pinning the minor numbers.
+ */
+int s5p_tvout_fb_register_framebuffer(struct device *dev_fb)
+{
+	int ret, j, i = 0;
+
+	do {
+		ret = register_framebuffer(fb[0]);
+		if (ret) {
+			tvout_err("fail to register framebuffer device\n");
+			return -1;
+		}
+	} while (fb[0]->node < S5PTV_FB_LAYER0_MINOR);
+
+	for (i = 1; i < S5PTV_FB_CNT; i++) {
+		ret = register_framebuffer(fb[i]);
+		if (ret) {
+			tvout_err("fail to register framebuffer device\n");
+			ret = -1;
+			goto err;
+		}
+	}
+
+	for (i = 0; i < S5PTV_FB_CNT; i++)
+		tvout_dbg("fb[%d] = %d\n", i, fb[i]->node);
+
+	for (i = 0; i < S5PTV_FB_CNT; i++) {
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+#ifndef CONFIG_USER_ALLOC_TVOUT
+		/* no fbcon: prime each layer with its default mode now */
+		s5p_tvout_fb_check_var(&fb[i]->var, fb[i]);
+		s5p_tvout_fb_set_par(fb[i]);
+#endif
+#endif
+	}
+
+	return 0;
+
+err:
+	/* roll back only the layers registered in THIS loop */
+	for (j = 0; j < i; j++)
+		unregister_framebuffer(fb[j]);
+	return ret;
+}
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout_fb.h b/drivers/media/video/samsung/tvout/s5p_tvout_fb.h
new file mode 100644
index 0000000..9f79bda
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout_fb.h
@@ -0,0 +1,21 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvout_fb.h
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * frame buffer header file. file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _LINUX_S5P_TVOUT_FB_H_
+#define _LINUX_S5P_TVOUT_FB_H_
+
+#include <linux/fb.h>
+
+extern int s5p_tvout_fb_alloc_framebuffer(struct device *dev_fb);
+extern int s5p_tvout_fb_register_framebuffer(struct device *dev_fb);
+
+#endif /* _LINUX_S5P_TVOUT_FB_H_ */
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout_hpd.c b/drivers/media/video/samsung/tvout/s5p_tvout_hpd.c
new file mode 100644
index 0000000..4f35a91
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout_hpd.c
@@ -0,0 +1,672 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvout_hpd.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * HPD interface function file for Samsung TVOut driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/poll.h>
+
+#include <plat/tvout.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#ifdef CONFIG_HDMI_SWITCH_HPD
+#include <linux/switch.h>
+#endif
+
+#include "s5p_tvout_common_lib.h"
+#include "hw_if/hw_if.h"
+
+/* Debug-only trace macro: prints only when the HPD debug flag is set. */
+#ifdef CONFIG_TVOUT_DEBUG
+#define HPDIFPRINTK(fmt, args...) \
+do { \
+	if (unlikely(tvout_dbg_flag & (1 << DBG_FLAG_HPD))) { \
+		printk(KERN_INFO "[HPD_IF] %s: " fmt, \
+			__func__ , ## args); \
+	} \
+} while (0)
+#else
+#define HPDIFPRINTK(fmt, args...)
+#endif
+
+/* Unconditional info-level trace for the important HPD events. */
+#define HPDPRINTK(fmt, args...) \
+	printk(KERN_INFO "[HPD_IF] %s: " fmt, __func__ , ## args)
+
+#define VERSION "1.2" /* Driver version number */
+#define HPD_MINOR 243 /* Major 10, Minor 243, /dev/hpd */
+
+/* cable plug states latched in hpd_struct.state / last_hpd_state */
+#define HPD_LO 0
+#define HPD_HI 1
+
+/* hdmi_status values: which block currently owns HPD detection */
+#define HDMI_ON 1
+#define HDMI_OFF 0
+
+/* max 5 ms waits for the start/stop hand-shake in the uevent worker */
+#define RETRY_COUNT 50
+
+/*
+ * Driver-global HPD state.  int_src_* route the HPD line either to the
+ * HDMI IP's internal detector or to an external GPIO interrupt; read_gpio
+ * samples the raw HPD pin.
+ */
+struct hpd_struct {
+	spinlock_t lock;		/* guards read() vs. state updates */
+	wait_queue_head_t waitq;	/* poll()/read() waiters */
+	atomic_t state;			/* HPD_HI / HPD_LO (or -1 at boot) */
+	void (*int_src_hdmi_hpd) (void);
+	void (*int_src_ext_hpd) (void);
+	int (*read_gpio) (void);
+	int irq_n;			/* external HPD interrupt number */
+#ifdef CONFIG_HDMI_SWITCH_HPD
+	struct switch_dev hpd_switch;	/* "hdmi" switch class device */
+#endif
+#ifdef CONFIG_HDMI_CONTROLLED_BY_EXT_IC
+	void (*ext_ic_control) (bool ic_on);
+#endif
+};
+
+static struct hpd_struct hpd_struct;
+
+/* last state latched by an ISR / last state announced to user space */
+static int last_hpd_state;
+static int last_uevent_state;
+/* HDMI_ON while the HDMI IP owns detection; poll_state arms poll()/read() */
+atomic_t hdmi_status;
+atomic_t poll_state;
+
+static int s5p_hpd_open(struct inode *inode, struct file *file);
+static int s5p_hpd_release(struct inode *inode, struct file *file);
+static ssize_t s5p_hpd_read(struct file *file, char __user *buffer,
+			size_t count, loff_t *ppos);
+static unsigned int s5p_hpd_poll(struct file *file, poll_table *wait);
+static long s5p_hpd_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg);
+
+static const struct file_operations hpd_fops = {
+	.owner = THIS_MODULE,
+	.open = s5p_hpd_open,
+	.release = s5p_hpd_release,
+	.read = s5p_hpd_read,
+	.poll = s5p_hpd_poll,
+	.unlocked_ioctl = s5p_hpd_ioctl,
+};
+
+/* /dev/HPD, fixed minor HPD_MINOR */
+static struct miscdevice hpd_misc_device = {
+	HPD_MINOR,
+	"HPD",
+	&hpd_fops,
+};
+
+#ifdef CONFIG_LSI_HDMI_AUDIO_CH_EVENT
+	/* "ch_hdmi_audio" switch: reports the sink's audio channel count */
+	static struct switch_dev g_audio_ch_switch;
+#endif
+
+/*
+ * s5p_hpd_kobject_uevent - announce an HPD state change to user space
+ *
+ * Runs from the hpd_work workqueue item scheduled by the ISRs.  Waits
+ * (up to RETRY_COUNT * 5 ms) for a pending start/stop hand-shake to
+ * finish, then re-reads the state and emits either a switch-class state
+ * change (CONFIG_HDMI_SWITCH_HPD) or an HDMI_STATE uevent, suppressing
+ * duplicates via last_uevent_state.
+ *
+ * Fix vs. original: the "disconnet" typo in the connect/disconnect log
+ * message is corrected.
+ */
+static void s5p_hpd_kobject_uevent(void)
+{
+	char env_buf[120];
+#ifndef CONFIG_HDMI_SWITCH_HPD
+	char *envp[2];
+	int env_offset = 0;
+#endif
+	int i = 0;
+	int hpd_state = atomic_read(&hpd_struct.state);
+
+	HPDIFPRINTK("++\n");
+	memset(env_buf, 0, sizeof(env_buf));
+
+	/* wait for the opposite transition's hand-shake to drain */
+	if (hpd_state) {
+		while (on_stop_process && (i < RETRY_COUNT)) {
+			HPDIFPRINTK("waiting on_stop_process\n");
+			usleep_range(5000, 5000);
+			i++;
+		}
+	} else {
+		while (on_start_process && (i < RETRY_COUNT)) {
+			HPDIFPRINTK("waiting on_start_process\n");
+			usleep_range(5000, 5000);
+			i++;
+		}
+	}
+
+	if (i == RETRY_COUNT) {
+		/* hand-shake timed out: force both flags clear */
+		on_stop_process = false;
+		on_start_process = false;
+		printk(KERN_ERR "[ERROR] %s() %s fail !!\n", __func__,
+		       hpd_state ? "on_stop_process" : "on_start_process");
+	}
+
+	/* the state may have changed while we waited -- re-read it */
+	hpd_state = atomic_read(&hpd_struct.state);
+	if (hpd_state) {
+		if (last_uevent_state == -1 || last_uevent_state == HPD_LO) {
+#ifdef CONFIG_HDMI_CONTROLLED_BY_EXT_IC
+			hpd_struct.ext_ic_control(true);
+			msleep(20);
+#endif
+#ifdef CONFIG_HDMI_SWITCH_HPD
+			/* force a 0->1 edge so listeners always see it */
+			hpd_struct.hpd_switch.state = 0;
+			switch_set_state(&hpd_struct.hpd_switch, 1);
+#else
+			sprintf(env_buf, "HDMI_STATE=online");
+			envp[env_offset++] = env_buf;
+			envp[env_offset] = NULL;
+			HPDIFPRINTK("online event\n");
+			kobject_uevent_env(&(hpd_misc_device.this_device->kobj),
+					   KOBJ_CHANGE, envp);
+#endif
+			HPDPRINTK("[HDMI] HPD event -connect!!!\n");
+			on_start_process = true;
+			HPDIFPRINTK("%s() on_start_process(%d)\n",
+				    __func__, on_start_process);
+		}
+		last_uevent_state = HPD_HI;
+	} else {
+		if (last_uevent_state == -1 || last_uevent_state == HPD_HI) {
+#ifdef CONFIG_LSI_HDMI_AUDIO_CH_EVENT
+			switch_set_state(&g_audio_ch_switch, (int)-1);
+#endif
+#ifdef CONFIG_HDMI_SWITCH_HPD
+			hpd_struct.hpd_switch.state = 1;
+			switch_set_state(&hpd_struct.hpd_switch, 0);
+#else
+			sprintf(env_buf, "HDMI_STATE=offline");
+			envp[env_offset++] = env_buf;
+			envp[env_offset] = NULL;
+			HPDIFPRINTK("offline event\n");
+			kobject_uevent_env(&(hpd_misc_device.this_device->kobj),
+					   KOBJ_CHANGE, envp);
+#endif
+			HPDPRINTK("[HDMI] HPD event -disconnect!!!\n");
+			on_stop_process = true;
+#ifdef CONFIG_HDMI_CONTROLLED_BY_EXT_IC
+			hpd_struct.ext_ic_control(false);
+#endif
+		}
+		last_uevent_state = HPD_LO;
+	}
+}
+
+static DECLARE_WORK(hpd_work, (void *)s5p_hpd_kobject_uevent);
+
+/* Open: arm poll()/read() so the first query reports the current state. */
+static int s5p_hpd_open(struct inode *inode, struct file *file)
+{
+	atomic_set(&poll_state, 1);
+
+	return 0;
+}
+
+/* Release: nothing to tear down -- state is driver-global. */
+static int s5p_hpd_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+/*
+ * s5p_hpd_read - report the current HPD state to user space
+ *
+ * Snapshots the plug state and disarms poll_state under the spinlock,
+ * then copies the value out.  The user copy is done OUTSIDE the lock:
+ * put_user() may fault and sleep, which is illegal while holding a
+ * spinlock with interrupts disabled (the original called put_user()
+ * inside spin_lock_irqsave()).
+ *
+ * Return value is unchanged: the put_user() result (0 or -EFAULT).
+ */
+static ssize_t s5p_hpd_read(struct file *file, char __user *buffer,
+			size_t count, loff_t *ppos)
+{
+	unsigned long spin_flags;
+	int state;
+
+	spin_lock_irqsave(&hpd_struct.lock, spin_flags);
+	state = atomic_read(&hpd_struct.state);
+	atomic_set(&poll_state, -1);
+	spin_unlock_irqrestore(&hpd_struct.lock, spin_flags);
+
+	return put_user(state, (unsigned int __user *)buffer);
+}
+
+/* Poll: readable whenever a state change is pending (poll_state != -1). */
+static unsigned int s5p_hpd_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+
+	poll_wait(file, &hpd_struct.waitq, wait);
+
+	if (atomic_read(&poll_state) != -1)
+		mask = POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+/* ioctl numbers shared with user space (see s5p_hpd_ioctl) */
+#define HPD_GET_STATE _IOR('H', 100, unsigned int)
+#define AUDIO_CH_SET_STATE _IOR('H', 101, unsigned int)
+
+#ifdef CONFIG_LSI_HDMI_AUDIO_CH_EVENT
+/*
+ * Push the sink's supported audio channel count through the switch
+ * device.  state is forced to 0 first so the write is always seen as a
+ * change by switch_set_state().
+ */
+void hdmi_send_audio_ch_num(
+	int supported_ch_num, struct switch_dev *p_audio_ch_switch)
+{
+	printk(KERN_INFO "%s() hdmi_send_audio_ch_num :: "
+		"HDMI Audio supported ch = %d",
+		__func__, supported_ch_num);
+	p_audio_ch_switch->state = 0;
+	switch_set_state(p_audio_ch_switch, (int)supported_ch_num);
+}
+#endif
+
+/*
+ * s5p_hpd_ioctl - ioctl entry for /dev/HPD
+ *
+ * HPD_GET_STATE copies the current plug state to user space and resets
+ * the start/stop hand-shake flags when the state changed since the last
+ * uevent.  AUDIO_CH_SET_STATE forwards the supported HDMI audio channel
+ * count to the "ch_hdmi_audio" switch device.
+ *
+ * Fix vs. original: HPD_GET_STATE dereferenced the raw user pointer in
+ * @arg directly; it now uses put_user() so a bad address returns -EFAULT
+ * instead of oopsing.
+ */
+static long s5p_hpd_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case HPD_GET_STATE:
+		{
+			unsigned int state = atomic_read(&hpd_struct.state);
+
+			if (last_uevent_state == -1)
+				last_uevent_state = state;
+
+			if (last_uevent_state != state) {
+				on_start_process = false;
+				on_stop_process = false;
+				HPDIFPRINTK("%s() on_start_process, "
+					"on_stop_process = false" , __func__);
+			}
+
+			HPDIFPRINTK("HPD status is %s\n",
+				    state ? "plugged" : "unplugged");
+
+			if (put_user(state, (unsigned int __user *)arg))
+				return -EFAULT;
+
+			return 0;
+		}
+#ifdef CONFIG_LSI_HDMI_AUDIO_CH_EVENT
+	case AUDIO_CH_SET_STATE:
+		{
+			int supported_ch_num;
+			if (copy_from_user(&supported_ch_num,
+				(void __user *)arg, sizeof(supported_ch_num))) {
+				printk(KERN_ERR "%s() -copy_from_user error\n",
+					__func__);
+				return -EFAULT;
+			}
+
+			printk(KERN_INFO "%s() - AUDIO_CH_SET_STATE = 0x%x\n",
+				__func__, supported_ch_num);
+			hdmi_send_audio_ch_num(supported_ch_num,
+				&g_audio_ch_switch);
+			return 0;
+		}
+#endif
+	default:
+		printk(KERN_ERR "(%d) unknown ioctl, HPD_GET_STATE(%d)\n",
+			(unsigned int)cmd, (unsigned int)HPD_GET_STATE);
+		return -EFAULT;
+	}
+}
+
+/*
+ * s5p_hpd_set_hdmiint - hand HPD detection over to the HDMI controller
+ *
+ * Called when the HDMI block powers up (EINT -> HDMI).  Masks the
+ * external GPIO interrupt, disables the stale internal event for the
+ * last known state, routes the HPD source into the HDMI IP, forces an
+ * HPD re-evaluation and finally arms the internal plug/unplug interrupt
+ * that matches the live cable status.
+ */
+int s5p_hpd_set_hdmiint(void)
+{
+	/* EINT -> HDMI */
+
+	HPDIFPRINTK("\n");
+	irq_set_irq_type(hpd_struct.irq_n, IRQ_TYPE_NONE);
+
+	if (last_hpd_state)
+		s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 0);
+	else
+		s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 0);
+
+	atomic_set(&hdmi_status, HDMI_ON);
+
+	hpd_struct.int_src_hdmi_hpd();
+
+	s5p_hdmi_reg_hpd_gen();
+
+	/* arm the interrupt for the opposite of the current status */
+	if (s5p_hdmi_reg_get_hpd_status())
+		s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 1);
+	else
+		s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 1);
+
+	return 0;
+}
+
+/*
+ * s5p_hpd_set_eint - hand HPD detection back to the external GPIO
+ *
+ * Called when the HDMI block powers down (HDMI -> EINT).  Clears and
+ * masks the HDMI-internal plug/unplug interrupts, then re-routes the
+ * HPD source to the external interrupt line.
+ */
+int s5p_hpd_set_eint(void)
+{
+	HPDIFPRINTK("\n");
+	/* HDMI -> EINT */
+	atomic_set(&hdmi_status, HDMI_OFF);
+
+	s5p_hdmi_reg_intc_clear_pending(HDMI_IRQ_HPD_PLUG);
+	s5p_hdmi_reg_intc_clear_pending(HDMI_IRQ_HPD_UNPLUG);
+
+	s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 0);
+	s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 0);
+
+	hpd_struct.int_src_ext_hpd();
+
+	return 0;
+}
+
+/* Return the last latched HPD plug state (HPD_HI or HPD_LO). */
+int s5p_hpd_get_status(void)
+{
+	return atomic_read(&hpd_struct.state);
+}
+
+/*
+ * s5p_hpd_irq_eint - HPD service while the HDMI block is powered off
+ *
+ * Samples the HPD GPIO, flips the level trigger so the opposite edge is
+ * caught next, latches the new state, wakes poll()/read() waiters and
+ * schedules the uevent work.  Returns early (without re-notifying) when
+ * the latched state did not actually change.
+ */
+static int s5p_hpd_irq_eint(int irq)
+{
+
+	HPDIFPRINTK("\n");
+
+	if (hpd_struct.read_gpio()) {
+		HPDIFPRINTK("gpio is high\n");
+		/* wait for the line to drop next */
+		irq_set_irq_type(hpd_struct.irq_n, IRQ_TYPE_LEVEL_LOW);
+		if (atomic_read(&hpd_struct.state) == HPD_HI)
+			return IRQ_HANDLED;
+
+		atomic_set(&hpd_struct.state, HPD_HI);
+		atomic_set(&poll_state, 1);
+
+		last_hpd_state = HPD_HI;
+		wake_up_interruptible(&hpd_struct.waitq);
+	} else {
+		HPDIFPRINTK("gpio is low\n");
+		/* wait for the line to rise next */
+		irq_set_irq_type(hpd_struct.irq_n, IRQ_TYPE_LEVEL_HIGH);
+		if (atomic_read(&hpd_struct.state) == HPD_LO)
+			return IRQ_HANDLED;
+
+		atomic_set(&hpd_struct.state, HPD_LO);
+		atomic_set(&poll_state, 1);
+
+		last_hpd_state = HPD_LO;
+
+		wake_up_interruptible(&hpd_struct.waitq);
+	}
+	schedule_work(&hpd_work);
+
+	HPDIFPRINTK("%s\n", atomic_read(&hpd_struct.state) == HPD_HI ?
+		    "HPD HI" : "HPD LO");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * s5p_hpd_irq_hdmi - HPD service while the HDMI block is powered on
+ *
+ * Reads and acknowledges the HDMI internal interrupt flags, resolves a
+ * simultaneous plug+unplug indication against the last known state,
+ * latches the new state, wakes waiters and schedules the uevent work.
+ * HDCP is stopped on unplug.
+ *
+ * Fix vs. original: the "out" label returned IRQ_HANDLED unconditionally,
+ * so the IRQ_NONE computed for a spurious interrupt was silently
+ * discarded; it now returns @ret.
+ */
+static int s5p_hpd_irq_hdmi(int irq)
+{
+	u8 flag;
+	int ret = IRQ_HANDLED;
+	HPDIFPRINTK("\n");
+
+	/* read flag register */
+	flag = s5p_hdmi_reg_intc_status();
+
+	if (s5p_hdmi_reg_get_hpd_status())
+		s5p_hdmi_reg_intc_clear_pending(HDMI_IRQ_HPD_PLUG);
+	else
+		s5p_hdmi_reg_intc_clear_pending(HDMI_IRQ_HPD_UNPLUG);
+
+	s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 0);
+	s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 0);
+
+	/* is this our interrupt? */
+	if (!(flag & (1 << HDMI_IRQ_HPD_PLUG | 1 << HDMI_IRQ_HPD_UNPLUG))) {
+		printk(KERN_WARNING "%s() flag is wrong : 0x%x\n",
+		       __func__, flag);
+		ret = IRQ_NONE;
+
+		goto out;
+	}
+
+	if (flag == (1 << HDMI_IRQ_HPD_PLUG | 1 << HDMI_IRQ_HPD_UNPLUG)) {
+		HPDIFPRINTK("HPD_HI && HPD_LO\n");
+
+		/* both events pending: pick the one that is a real change */
+		if (last_hpd_state == HPD_HI && s5p_hdmi_reg_get_hpd_status())
+			flag = 1 << HDMI_IRQ_HPD_UNPLUG;
+		else
+			flag = 1 << HDMI_IRQ_HPD_PLUG;
+	}
+
+	if (flag & (1 << HDMI_IRQ_HPD_PLUG)) {
+		HPDIFPRINTK("HPD_HI\n");
+
+		s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 1);
+		if (atomic_read(&hpd_struct.state) == HPD_HI)
+			return IRQ_HANDLED;
+
+		atomic_set(&hpd_struct.state, HPD_HI);
+		atomic_set(&poll_state, 1);
+
+		last_hpd_state = HPD_HI;
+		wake_up_interruptible(&hpd_struct.waitq);
+
+	} else if (flag & (1 << HDMI_IRQ_HPD_UNPLUG)) {
+		HPDIFPRINTK("HPD_LO\n");
+
+		s5p_hdcp_stop();
+
+		s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 1);
+		if (atomic_read(&hpd_struct.state) == HPD_LO)
+			return IRQ_HANDLED;
+
+		atomic_set(&hpd_struct.state, HPD_LO);
+		atomic_set(&poll_state, 1);
+
+		last_hpd_state = HPD_LO;
+
+		wake_up_interruptible(&hpd_struct.waitq);
+	}
+
+	schedule_work(&hpd_work);
+
+ out:
+	return ret;
+}
+
+/*
+ * HPD interrupt handler.
+ *
+ * Dispatches to the HDMI-internal or the external-GPIO service routine
+ * depending on which block currently owns HPD detection, and notifies
+ * waiting threads via the routine it calls.
+ */
+static irqreturn_t s5p_hpd_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t ret;
+
+	if (atomic_read(&hdmi_status)) {
+		/* HDMI block powered: use its internal detector */
+		HPDIFPRINTK("HDMI HPD interrupt\n");
+		ret = s5p_hpd_irq_hdmi(irq);
+		HPDIFPRINTK("HDMI HPD interrupt - end\n");
+	} else {
+		/* HDMI block off: external GPIO interrupt */
+		HPDIFPRINTK("EINT HPD interrupt\n");
+		ret = s5p_hpd_irq_eint(irq);
+		HPDIFPRINTK("EINT HPD interrupt - end\n");
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_SAMSUNG_WORKAROUND_HPD_GLANCE
+/*
+ * Stub ISR installed while the MHL chip owns the HPD line: acknowledges
+ * and masks the HDMI-internal plug/unplug interrupts without touching
+ * the driver state; only debug-logs which event fired.
+ */
+static irqreturn_t s5p_hpd_irq_default_handler(int irq, void *dev_id)
+{
+	u8 flag;
+
+	flag = s5p_hdmi_reg_intc_status();
+
+	if (s5p_hdmi_reg_get_hpd_status())
+		s5p_hdmi_reg_intc_clear_pending(HDMI_IRQ_HPD_PLUG);
+	else
+		s5p_hdmi_reg_intc_clear_pending(HDMI_IRQ_HPD_UNPLUG);
+
+	s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_UNPLUG, 0);
+	s5p_hdmi_reg_intc_enable(HDMI_IRQ_HPD_PLUG, 0);
+
+	if (flag & (1 << HDMI_IRQ_HPD_PLUG))
+		HPDIFPRINTK("HPD_HI\n");
+	else if (flag & (1 << HDMI_IRQ_HPD_UNPLUG))
+		HPDIFPRINTK("HPD_LO\n");
+	else
+		HPDIFPRINTK("UNKNOWN EVENT\n");
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * mhl_hpd_handler - enable/disable real HPD handling on behalf of MHL
+ * @onoff: true to enable the real HPD ISR, false to park it
+ *
+ * Called by the MHL driver.  Ignores repeated calls with the same value;
+ * otherwise swaps between the real ISR (with the external IRQ enabled)
+ * and the default stub (external IRQ disabled).
+ *
+ * Fix vs. original: "aready" typo in the duplicate-call log corrected.
+ */
+void mhl_hpd_handler(bool onoff)
+{
+	static int old_state;
+
+	if (old_state == onoff) {
+		printk(KERN_INFO "%s() state is already %s\n",
+		       __func__, onoff ? "on" : "off");
+		return;
+	} else {
+		printk(KERN_INFO "%s(%d), old_state(%d)\n",
+		       __func__, onoff, old_state);
+		old_state = onoff;
+	}
+
+	if (onoff == true) {
+		enable_irq(hpd_struct.irq_n);
+		s5p_hdmi_reg_intc_set_isr(s5p_hpd_irq_handler,
+					  (u8) HDMI_IRQ_HPD_PLUG);
+	} else {
+		disable_irq_nosync(hpd_struct.irq_n);
+		s5p_hdmi_reg_intc_set_isr(s5p_hpd_irq_default_handler,
+					  (u8) HDMI_IRQ_HPD_PLUG);
+	}
+}
+EXPORT_SYMBOL(mhl_hpd_handler);
+#endif
+
+/*
+ * s5p_hpd_probe - probe the HPD platform device
+ *
+ * Registers /dev/HPD, wires up the platform callbacks, latches the boot
+ * plug state from the HPD GPIO, optionally registers the "hdmi" switch
+ * device and installs the shared HPD interrupt handler.
+ *
+ * Fixes vs. original: the platform_get_irq() result is validated, and
+ * the request_irq() failure path now also unregisters the hpd switch
+ * device (it was previously leaked).
+ */
+static int __devinit s5p_hpd_probe(struct platform_device *pdev)
+{
+	struct s5p_platform_hpd *pdata;
+	int ret;
+
+	if (misc_register(&hpd_misc_device)) {
+		printk(KERN_WARNING " Couldn't register device 10, %d.\n",
+		       HPD_MINOR);
+
+		return -EBUSY;
+	}
+
+	init_waitqueue_head(&hpd_struct.waitq);
+
+	spin_lock_init(&hpd_struct.lock);
+
+	atomic_set(&hpd_struct.state, -1);
+
+	atomic_set(&hdmi_status, HDMI_OFF);
+
+	pdata = to_tvout_plat(&pdev->dev);
+
+	if (pdata->int_src_hdmi_hpd)
+		hpd_struct.int_src_hdmi_hpd =
+			(void (*)(void))pdata->int_src_hdmi_hpd;
+	if (pdata->int_src_ext_hpd)
+		hpd_struct.int_src_ext_hpd =
+			(void (*)(void))pdata->int_src_ext_hpd;
+	if (pdata->read_gpio)
+		hpd_struct.read_gpio = (int (*)(void))pdata->read_gpio;
+#ifdef CONFIG_HDMI_CONTROLLED_BY_EXT_IC
+	if (pdata->ext_ic_control)
+		hpd_struct.ext_ic_control = pdata->ext_ic_control;
+#endif
+	hpd_struct.irq_n = platform_get_irq(pdev, 0);
+	if (hpd_struct.irq_n < 0) {
+		printk(KERN_ERR "failed to get hpd irq resource\n");
+		ret = hpd_struct.irq_n;
+		goto err_misc;
+	}
+
+	/* start on the external interrupt and latch the boot plug state */
+	hpd_struct.int_src_ext_hpd();
+	if (hpd_struct.read_gpio()) {
+		atomic_set(&hpd_struct.state, HPD_HI);
+		last_hpd_state = HPD_HI;
+	} else {
+		atomic_set(&hpd_struct.state, HPD_LO);
+		last_hpd_state = HPD_LO;
+	}
+
+#ifdef CONFIG_HDMI_SWITCH_HPD
+	hpd_struct.hpd_switch.name = "hdmi";
+	switch_dev_register(&hpd_struct.hpd_switch);
+#endif
+	irq_set_irq_type(hpd_struct.irq_n, IRQ_TYPE_EDGE_BOTH);
+
+	ret = request_irq(hpd_struct.irq_n, (irq_handler_t) s5p_hpd_irq_handler,
+			  IRQF_DISABLED, "hpd", (void *)(&pdev->dev));
+	if (ret) {
+		printk(KERN_ERR "failed to install hpd irq\n");
+		ret = -EIO;
+		goto err_switch;
+	}
+#ifdef CONFIG_SAMSUNG_WORKAROUND_HPD_GLANCE
+	disable_irq(hpd_struct.irq_n);
+#endif
+
+	s5p_hdmi_reg_intc_set_isr(s5p_hpd_irq_handler, (u8) HDMI_IRQ_HPD_PLUG);
+	s5p_hdmi_reg_intc_set_isr(s5p_hpd_irq_handler,
+				  (u8) HDMI_IRQ_HPD_UNPLUG);
+
+	last_uevent_state = -1;
+
+#ifdef CONFIG_LSI_HDMI_AUDIO_CH_EVENT
+	g_audio_ch_switch.name = "ch_hdmi_audio";
+	switch_dev_register(&g_audio_ch_switch);
+#endif
+	return 0;
+
+err_switch:
+#ifdef CONFIG_HDMI_SWITCH_HPD
+	switch_dev_unregister(&hpd_struct.hpd_switch);
+#endif
+err_misc:
+	misc_deregister(&hpd_misc_device);
+	return ret;
+}
+
+/*
+ * s5p_hpd_remove - platform device teardown
+ *
+ * Unregisters the optional switch-class devices.  The misc device is
+ * only deregistered in module exit.
+ * NOTE(review): the IRQ requested in probe is not freed here -- verify
+ * this is acceptable (the device is presumably never unbound).
+ */
+static int __devexit s5p_hpd_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_HDMI_SWITCH_HPD
+	switch_dev_unregister(&hpd_struct.hpd_switch);
+#endif
+#ifdef CONFIG_LSI_HDMI_AUDIO_CH_EVENT
+	switch_dev_unregister(&g_audio_ch_switch);
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/* Suspend: route HPD to the external (wake-capable) interrupt source. */
+static int s5p_hpd_suspend(struct platform_device *dev, pm_message_t state)
+{
+	hpd_struct.int_src_ext_hpd();
+	return 0;
+}
+
+/* Resume: restore the HDMI-internal HPD routing if HDMI was active. */
+static int s5p_hpd_resume(struct platform_device *dev)
+{
+	if (atomic_read(&hdmi_status) == HDMI_ON)
+		hpd_struct.int_src_hdmi_hpd();
+
+	return 0;
+}
+#else
+#define s5p_hpd_suspend NULL
+#define s5p_hpd_resume NULL
+#endif
+
+/* Platform glue: binds against the "s5p-tvout-hpd" platform device. */
+static struct platform_driver s5p_hpd_driver = {
+	.probe = s5p_hpd_probe,
+	.remove = __devexit_p(s5p_hpd_remove),
+	.suspend = s5p_hpd_suspend,
+	.resume = s5p_hpd_resume,
+	.driver = {
+		.name = "s5p-tvout-hpd",
+		.owner = THIS_MODULE,
+	},
+};
+
+/* printed once at module load */
+static char banner[] __initdata =
+	"S5P HPD Driver, (c) 2009 Samsung Electronics\n";
+
+/*
+ * Module init: register the platform driver.
+ *
+ * Fixes vs. original: the banner is printed through a literal "%s"
+ * format (never pass a non-literal as the format string), and the real
+ * errno from platform_driver_register() is propagated instead of -1.
+ */
+static int __init s5p_hpd_init(void)
+{
+	int ret;
+
+	printk("%s", banner);
+
+	ret = platform_driver_register(&s5p_hpd_driver);
+	if (ret) {
+		printk(KERN_ERR "Platform Device Register Failed %d\n", ret);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Module exit.
+ *
+ * Fix vs. original: the platform driver registered in init was never
+ * unregistered, leaving a dangling driver after module unload.  It is
+ * unregistered first so remove() runs before /dev/HPD disappears.
+ */
+static void __exit s5p_hpd_exit(void)
+{
+	platform_driver_unregister(&s5p_hpd_driver);
+	misc_deregister(&hpd_misc_device);
+}
+
+module_init(s5p_hpd_init);
+module_exit(s5p_hpd_exit);
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout_v4l2.c b/drivers/media/video/samsung/tvout/s5p_tvout_v4l2.c
new file mode 100644
index 0000000..7af8a15
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout_v4l2.c
@@ -0,0 +1,1427 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvout_v4l2.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * V4L2 API file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+
+#include <linux/videodev2_exynos_camera.h>
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+
+#include "s5p_tvout_common_lib.h"
+#include "s5p_tvout_ctrl.h"
+#include "s5p_tvout_v4l2.h"
+
+#if defined(CONFIG_S5P_SYSMMU_TV)
+#include <plat/sysmmu.h>
+#endif
+
+#ifdef CONFIG_UMP_VCM_ALLOC
+#include "ump_kernel_interface.h"
+#endif
+
+#ifdef CONFIG_VCM
+#include <plat/s5p-vcm.h>
+#endif
+
+/* driver version reported through VIDIOC_QUERYCAP */
+#define MAJOR_VERSION 0
+#define MINOR_VERSION 3
+#define RELEASE_VERSION 0
+
+#if defined(CONFIG_S5P_SYSMMU_TV)
+#ifdef CONFIG_S5P_VMEM
+/* temporary used for testing system mmu */
+extern void *s5p_getaddress(unsigned int cookie);
+#endif
+#endif
+
+extern struct s5p_tvout_vp_bufferinfo s5ptv_vp_buff;
+
+#define V4L2_STD_ALL_HD ((v4l2_std_id)0xffffffff)
+
+/* video-device minors differ between EXYNOS4210 and later SoCs */
+#ifdef CONFIG_CPU_EXYNOS4210
+#define S5P_TVOUT_TVIF_MINOR 14
+#define S5P_TVOUT_VO_MINOR 21
+#else
+#define S5P_TVOUT_TVIF_MINOR 16
+#define S5P_TVOUT_VO_MINOR 20
+#endif
+
+/* private v4l2_output.type values understood by user space */
+#define V4L2_OUTPUT_TYPE_COMPOSITE 5
+#define V4L2_OUTPUT_TYPE_HDMI 10
+#define V4L2_OUTPUT_TYPE_HDMI_RGB 11
+#define V4L2_OUTPUT_TYPE_DVI 12
+
+#define V4L2_STD_PAL_BDGHI (V4L2_STD_PAL_B|\
+	V4L2_STD_PAL_D| \
+	V4L2_STD_PAL_G| \
+	V4L2_STD_PAL_H| \
+	V4L2_STD_PAL_I)
+
+/* driver-private v4l2_std_id values for the digital (HDMI) timings */
+#define V4L2_STD_480P_60_16_9 ((v4l2_std_id)0x04000000)
+#define V4L2_STD_480P_60_4_3 ((v4l2_std_id)0x05000000)
+#define V4L2_STD_576P_50_16_9 ((v4l2_std_id)0x06000000)
+#define V4L2_STD_576P_50_4_3 ((v4l2_std_id)0x07000000)
+#define V4L2_STD_720P_60 ((v4l2_std_id)0x08000000)
+#define V4L2_STD_720P_50 ((v4l2_std_id)0x09000000)
+#define V4L2_STD_1080P_60 ((v4l2_std_id)0x0a000000)
+#define V4L2_STD_1080P_50 ((v4l2_std_id)0x0b000000)
+#define V4L2_STD_1080I_60 ((v4l2_std_id)0x0c000000)
+#define V4L2_STD_1080I_50 ((v4l2_std_id)0x0d000000)
+#define V4L2_STD_480P_59 ((v4l2_std_id)0x0e000000)
+#define V4L2_STD_720P_59 ((v4l2_std_id)0x0f000000)
+#define V4L2_STD_1080I_59 ((v4l2_std_id)0x10000000)
+#define V4L2_STD_1080P_59 ((v4l2_std_id)0x11000000)
+#define V4L2_STD_1080P_30 ((v4l2_std_id)0x12000000)
+
+#ifdef CONFIG_HDMI_14A_3D
+/* HDMI 1.4a 3D packings: side-by-side half and top-bottom */
+#define V4L2_STD_TVOUT_720P_60_SBS_HALF ((v4l2_std_id)0x13000000)
+#define V4L2_STD_TVOUT_720P_59_SBS_HALF ((v4l2_std_id)0x14000000)
+#define V4L2_STD_TVOUT_720P_50_TB ((v4l2_std_id)0x15000000)
+#define V4L2_STD_TVOUT_1080P_24_TB ((v4l2_std_id)0x16000000)
+#define V4L2_STD_TVOUT_1080P_23_TB ((v4l2_std_id)0x17000000)
+#endif
+
+/* every analog standard the composite output can drive */
+#define CVBS_S_VIDEO (V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_JP| \
+	V4L2_STD_PAL | V4L2_STD_PAL_M | V4L2_STD_PAL_N | V4L2_STD_PAL_Nc | \
+	V4L2_STD_PAL_60 | V4L2_STD_NTSC_443)
+
+/* Source description for the video-overlay path: the Y/C base addresses
+ * of the frame plus its pixel format. */
+struct v4l2_vid_overlay_src {
+	void *base_y;
+	void *base_c;
+	struct v4l2_pix_format pix_fmt;
+};
+
+/*
+ * Outputs reported by VIDIOC_ENUMOUTPUT: composite, HDMI (YCbCr/RGB)
+ * and DVI, each with the std mask it supports.
+ * NOTE(review): V4L2_STD_480P_60_16_9 is OR-ed twice in the digital
+ * masks and 480P_60_4_3/576P are absent -- presumably the second
+ * occurrence was meant to be a different standard; confirm.
+ */
+static const struct v4l2_output s5p_tvout_tvif_output[] = {
+	{
+		.index		= 0,
+		.name		= "Analog  COMPOSITE",
+		.type		= V4L2_OUTPUT_TYPE_COMPOSITE,
+		.audioset	= 0,
+		.modulator	= 0,
+		.std		= CVBS_S_VIDEO,
+	}, {
+		.index		= 1,
+		.name		= "Digital HDMI(YCbCr)",
+		.type		= V4L2_OUTPUT_TYPE_HDMI,
+		.audioset	= 2,
+		.modulator	= 0,
+		.std		= V4L2_STD_480P_60_16_9 |
+				V4L2_STD_480P_60_16_9 | V4L2_STD_720P_60 |
+				V4L2_STD_720P_50
+				| V4L2_STD_1080P_60 | V4L2_STD_1080P_50 |
+				V4L2_STD_1080I_60 | V4L2_STD_1080I_50 |
+				V4L2_STD_480P_59 | V4L2_STD_720P_59 |
+				V4L2_STD_1080I_59 | V4L2_STD_1080P_59 |
+				V4L2_STD_1080P_30,
+	}, {
+		.index		= 2,
+		.name		= "Digital HDMI(RGB)",
+		.type		= V4L2_OUTPUT_TYPE_HDMI_RGB,
+		.audioset	= 2,
+		.modulator	= 0,
+		.std		= V4L2_STD_480P_60_16_9 |
+				V4L2_STD_480P_60_16_9 |
+				V4L2_STD_720P_60 | V4L2_STD_720P_50
+				| V4L2_STD_1080P_60 | V4L2_STD_1080P_50 |
+				V4L2_STD_1080I_60 | V4L2_STD_1080I_50 |
+				V4L2_STD_480P_59 | V4L2_STD_720P_59 |
+				V4L2_STD_1080I_59 | V4L2_STD_1080P_59 |
+				V4L2_STD_1080P_30,
+	}, {
+		.index		= 3,
+		.name		= "Digital DVI",
+		.type		= V4L2_OUTPUT_TYPE_DVI,
+		.audioset	= 2,
+		.modulator	= 0,
+		.std		= V4L2_STD_480P_60_16_9 |
+				V4L2_STD_480P_60_16_9 |
+				V4L2_STD_720P_60 | V4L2_STD_720P_50
+				| V4L2_STD_1080P_60 | V4L2_STD_1080P_50 |
+				V4L2_STD_1080I_60 | V4L2_STD_1080I_50 |
+				V4L2_STD_480P_59 | V4L2_STD_720P_59 |
+				V4L2_STD_1080I_59 | V4L2_STD_1080P_59 |
+				V4L2_STD_1080P_30,
+	}
+
+};
+
+/*
+ * Standards reported by VIDIOC_ENUMSTD.
+ *
+ * Fix vs. original: the entries for V4L2_STD_1080P_59 and
+ * V4L2_STD_1080P_30 carried copy-pasted names ("1080I_50" and
+ * "1080I_30") that misreported the timing to user space; they now read
+ * "1080P_59" and "1080P_30".
+ */
+static const struct v4l2_standard s5p_tvout_tvif_standard[] = {
+	{
+		.index	= 0,
+		.id	= V4L2_STD_NTSC_M,
+		.name	= "NTSC_M",
+	}, {
+		.index	= 1,
+		.id	= V4L2_STD_PAL_BDGHI,
+		.name	= "PAL_BDGHI",
+	}, {
+		.index	= 2,
+		.id	= V4L2_STD_PAL_M,
+		.name	= "PAL_M",
+	}, {
+		.index	= 3,
+		.id	= V4L2_STD_PAL_N,
+		.name	= "PAL_N",
+	}, {
+		.index	= 4,
+		.id	= V4L2_STD_PAL_Nc,
+		.name	= "PAL_Nc",
+	}, {
+		.index	= 5,
+		.id	= V4L2_STD_PAL_60,
+		.name	= "PAL_60",
+	}, {
+		.index	= 6,
+		.id	= V4L2_STD_NTSC_443,
+		.name	= "NTSC_443",
+	}, {
+		.index	= 7,
+		.id	= V4L2_STD_480P_60_16_9,
+		.name	= "480P_60_16_9",
+	}, {
+		.index	= 8,
+		.id	= V4L2_STD_480P_60_4_3,
+		.name	= "480P_60_4_3",
+	}, {
+		.index	= 9,
+		.id	= V4L2_STD_576P_50_16_9,
+		.name	= "576P_50_16_9",
+	}, {
+		.index	= 10,
+		.id	= V4L2_STD_576P_50_4_3,
+		.name	= "576P_50_4_3",
+	}, {
+		.index	= 11,
+		.id	= V4L2_STD_720P_60,
+		.name	= "720P_60",
+	}, {
+		.index	= 12,
+		.id	= V4L2_STD_720P_50,
+		.name	= "720P_50",
+	}, {
+		.index	= 13,
+		.id	= V4L2_STD_1080P_60,
+		.name	= "1080P_60",
+	}, {
+		.index	= 14,
+		.id	= V4L2_STD_1080P_50,
+		.name	= "1080P_50",
+	}, {
+		.index	= 15,
+		.id	= V4L2_STD_1080I_60,
+		.name	= "1080I_60",
+	}, {
+		.index	= 16,
+		.id	= V4L2_STD_1080I_50,
+		.name	= "1080I_50",
+	}, {
+		.index	= 17,
+		.id	= V4L2_STD_480P_59,
+		.name	= "480P_59",
+	}, {
+		.index	= 18,
+		.id	= V4L2_STD_720P_59,
+		.name	= "720P_59",
+	}, {
+		.index	= 19,
+		.id	= V4L2_STD_1080I_59,
+		.name	= "1080I_59",
+	}, {
+		.index	= 20,
+		.id	= V4L2_STD_1080P_59,
+		.name	= "1080P_59",
+	}, {
+		.index	= 21,
+		.id	= V4L2_STD_1080P_30,
+		.name	= "1080P_30",
+	},
+#ifdef CONFIG_HDMI_14A_3D
+	{
+		.index	= 22,
+		.id	= V4L2_STD_TVOUT_720P_60_SBS_HALF,
+		.name	= "720P_60_SBS_HALF",
+	},
+	{
+		.index	= 23,
+		.id	= V4L2_STD_TVOUT_720P_59_SBS_HALF,
+		.name	= "720P_59_SBS_HALF",
+	},
+	{
+		.index	= 24,
+		.id	= V4L2_STD_TVOUT_720P_50_TB,
+		.name	= "720P_50_TB",
+	},
+	{
+		.index	= 25,
+		.id	= V4L2_STD_TVOUT_1080P_24_TB,
+		.name	= "1080P_24_TB",
+	},
+	{
+		.index	= 26,
+		.id	= V4L2_STD_TVOUT_1080P_23_TB,
+		.name	= "1080P_23_TB",
+	},
+#endif
+};
+
+
+/* Pixel formats accepted by the video-overlay device (NV12/NV21, linear
+ * and Samsung-tiled variants). */
+static const struct v4l2_fmtdesc s5p_tvout_vo_fmt_desc[] = {
+	{
+		.index		= 0,
+		.type		= V4L2_BUF_TYPE_PRIVATE,
+		.pixelformat	= V4L2_PIX_FMT_NV12,
+		.description	= "NV12 (Linear YUV420 2 Planes)",
+	}, {
+		.index		= 1,
+		.type		= V4L2_BUF_TYPE_PRIVATE,
+		.pixelformat	= V4L2_PIX_FMT_NV12T,
+		.description	= "NV12T (Tiled YUV420 2 Planes)",
+	},
+/* This block will be used on EXYNOS4210 */
+	{
+		.index		= 2,
+		.type		= V4L2_BUF_TYPE_PRIVATE,
+		.pixelformat	= V4L2_PIX_FMT_NV21,
+		.description	= "NV21 (Linear YUV420 2 Planes)",
+	}, {
+		.index		= 3,
+		.type		= V4L2_BUF_TYPE_PRIVATE,
+		.pixelformat	= V4L2_PIX_FMT_NV21T,
+		.description	= "NV21T (Tiled YUV420 2 Planes)",
+	},
+
+
+};
+
+
+/* serialize access to the tv-interface and video-overlay devices */
+static DEFINE_MUTEX(s5p_tvout_tvif_mutex);
+static DEFINE_MUTEX(s5p_tvout_vo_mutex);
+
+/* All mutable state of the v4l2 front-end: the configured overlay
+ * source/destination, the selected output/standard and use counters. */
+struct s5p_tvout_v4l2_private_data {
+	struct v4l2_vid_overlay_src	vo_src_fmt;
+	struct v4l2_rect		vo_src_rect;
+	struct v4l2_window		vo_dst_fmt;
+	struct v4l2_framebuffer		vo_dst_plane;
+
+	int				tvif_output_index;
+	v4l2_std_id			tvif_standard_id;
+
+	atomic_t			tvif_use;
+	atomic_t			vo_use;
+};
+
+/* -1 / 0 mean "not configured yet" */
+static struct s5p_tvout_v4l2_private_data s5p_tvout_v4l2_private = {
+	.tvif_output_index	= -1,
+	.tvif_standard_id	= 0,
+
+	.tvif_use		= ATOMIC_INIT(0),
+	.vo_use			= ATOMIC_INIT(0),
+};
+
+/* placeholder: nothing to initialize at the moment */
+static void s5p_tvout_v4l2_init_private(void)
+{
+}
+
+/*
+ * VIDIOC_QUERYCAP: identify the tv-interface device.
+ *
+ * Fix vs. original: strcpy() into the fixed-size v4l2_capability fields
+ * is replaced with bounded strlcpy() copies.
+ */
+static int s5p_tvout_tvif_querycap(
+		struct file *file, void *fh, struct v4l2_capability *cap)
+{
+	strlcpy(cap->driver, "s5p-tvout-tvif", sizeof(cap->driver));
+	strlcpy(cap->card, "Samsung TVOUT TV Interface", sizeof(cap->card));
+	cap->capabilities = V4L2_CAP_VIDEO_OUTPUT;
+	cap->version = KERNEL_VERSION(
+			MAJOR_VERSION, MINOR_VERSION, RELEASE_VERSION);
+
+	return 0;
+}
+
+/*
+ * VIDIOC_G_STD: report the currently selected standard.
+ *
+ * Fix vs. original: returns -EINVAL (as the sibling g_output does and
+ * as the v4l2 convention requires) instead of the bare -1 when no
+ * standard has been set yet.
+ */
+static int s5p_tvout_tvif_g_std(
+		struct file *file, void *fh, v4l2_std_id *norm)
+{
+	if (s5p_tvout_v4l2_private.tvif_standard_id == 0) {
+		tvout_err("Standard has not set\n");
+		return -EINVAL;
+	}
+
+	*norm = s5p_tvout_v4l2_private.tvif_standard_id;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_S_STD: validate *norm against the standards table and store it.
+ * The standard is only latched here; it takes effect on the next
+ * VIDIOC_S_OUTPUT, which calls s5p_tvif_ctrl_start().
+ */
+static int s5p_tvout_tvif_s_std(
+		struct file *file, void *fh, v4l2_std_id *norm)
+{
+	int i;
+	v4l2_std_id std_id = *norm;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+	for (i = 0; i < S5P_TVOUT_TVIF_NO_OF_STANDARD; i++) {
+		if (s5p_tvout_tvif_standard[i].id == std_id)
+			break;
+	}
+
+	if (i == S5P_TVOUT_TVIF_NO_OF_STANDARD) {
+		tvout_err("There is no TV standard(0x%08Lx)\n", std_id);
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		s5p_tvout_mutex_unlock();
+#endif
+		return -EINVAL;
+	}
+
+	s5p_tvout_v4l2_private.tvif_standard_id = std_id;
+
+	tvout_dbg("standard id=0x%X, name=\"%s\"\n",
+		  (u32) std_id, s5p_tvout_tvif_standard[i].name);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+
+	return 0;
+}
+
+/* VIDIOC_ENUMOUTPUT: copy the table entry selected by a->index. */
+static int s5p_tvout_tvif_enum_output(
+		struct file *file, void *fh, struct v4l2_output *a)
+{
+	unsigned int idx = a->index;
+
+	if (idx >= S5P_TVOUT_TVIF_NO_OF_OUTPUT) {
+		tvout_err("Invalid index(%d)\n", idx);
+
+		return -EINVAL;
+	}
+
+	*a = s5p_tvout_tvif_output[idx];
+
+	return 0;
+}
+
+/* VIDIOC_G_OUTPUT: report the selected output index, or -EINVAL if the
+ * output was never set. */
+static int s5p_tvout_tvif_g_output(
+		struct file *file, void *fh, unsigned int *i)
+{
+	int idx = s5p_tvout_v4l2_private.tvif_output_index;
+
+	if (idx == -1) {
+		tvout_err("Output has not set\n");
+		return -EINVAL;
+	}
+
+	*i = idx;
+
+	return 0;
+}
+
+/*
+ * VIDIOC_S_OUTPUT: select an output and start the tv interface.
+ *
+ * Maps the v4l2 output type and the previously set standard onto the
+ * driver's disp_mode/o_mode enums and calls s5p_tvif_ctrl_start().
+ * on_start_process is raised for the HPD hand-shake.
+ *
+ * Fix vs. original: the failure path returns -EINVAL (matching the bad
+ * index check above and the v4l2 convention) instead of -1; the stray
+ * ';' after the function body is dropped.
+ */
+static int s5p_tvout_tvif_s_output(
+		struct file *file, void *fh, unsigned int i)
+{
+	enum s5p_tvout_disp_mode tv_std;
+	enum s5p_tvout_o_mode tv_if;
+
+	if (i >= S5P_TVOUT_TVIF_NO_OF_OUTPUT) {
+		tvout_err("Invalid index(%d)\n", i);
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+	on_start_process = true;
+	s5p_tvout_v4l2_private.tvif_output_index = i;
+
+	tvout_dbg("output id=%d, name=\"%s\"\n",
+		  (int) i, s5p_tvout_tvif_output[i].name);
+
+	/* output type -> interface mode */
+	switch (s5p_tvout_tvif_output[i].type) {
+	case V4L2_OUTPUT_TYPE_COMPOSITE:
+		tv_if = TVOUT_COMPOSITE;
+		break;
+
+	case V4L2_OUTPUT_TYPE_HDMI:
+		tv_if = TVOUT_HDMI;
+		break;
+
+	case V4L2_OUTPUT_TYPE_HDMI_RGB:
+		tv_if = TVOUT_HDMI_RGB;
+		break;
+
+	case V4L2_OUTPUT_TYPE_DVI:
+		tv_if = TVOUT_DVI;
+		break;
+
+	default:
+		tvout_err("Invalid output type(%d)\n",
+			  s5p_tvout_tvif_output[i].type);
+		goto error_on_tvif_s_output;
+	}
+
+	/* standard id -> display mode */
+	switch (s5p_tvout_v4l2_private.tvif_standard_id) {
+	case V4L2_STD_NTSC_M:
+		tv_std = TVOUT_NTSC_M;
+		break;
+
+	case V4L2_STD_PAL_BDGHI:
+		tv_std = TVOUT_PAL_BDGHI;
+		break;
+
+	case V4L2_STD_PAL_M:
+		tv_std = TVOUT_PAL_M;
+		break;
+
+	case V4L2_STD_PAL_N:
+		tv_std = TVOUT_PAL_N;
+		break;
+
+	case V4L2_STD_PAL_Nc:
+		tv_std = TVOUT_PAL_NC;
+		break;
+
+	case V4L2_STD_PAL_60:
+		tv_std = TVOUT_PAL_60;
+		break;
+
+	case V4L2_STD_NTSC_443:
+		tv_std = TVOUT_NTSC_443;
+		break;
+
+	case V4L2_STD_480P_60_16_9:
+		tv_std = TVOUT_480P_60_16_9;
+		break;
+
+	case V4L2_STD_480P_60_4_3:
+		tv_std = TVOUT_480P_60_4_3;
+		break;
+
+	case V4L2_STD_480P_59:
+		tv_std = TVOUT_480P_59;
+		break;
+	case V4L2_STD_576P_50_16_9:
+		tv_std = TVOUT_576P_50_16_9;
+		break;
+
+	case V4L2_STD_576P_50_4_3:
+		tv_std = TVOUT_576P_50_4_3;
+		break;
+
+	case V4L2_STD_720P_60:
+		tv_std = TVOUT_720P_60;
+		break;
+
+	case V4L2_STD_720P_59:
+		tv_std = TVOUT_720P_59;
+		break;
+
+	case V4L2_STD_720P_50:
+		tv_std = TVOUT_720P_50;
+		break;
+
+	case V4L2_STD_1080I_60:
+		tv_std = TVOUT_1080I_60;
+		break;
+
+	case V4L2_STD_1080I_59:
+		tv_std = TVOUT_1080I_59;
+		break;
+
+	case V4L2_STD_1080I_50:
+		tv_std = TVOUT_1080I_50;
+		break;
+
+	case V4L2_STD_1080P_30:
+		tv_std = TVOUT_1080P_30;
+		break;
+
+	case V4L2_STD_1080P_60:
+		tv_std = TVOUT_1080P_60;
+		break;
+
+	case V4L2_STD_1080P_59:
+		tv_std = TVOUT_1080P_59;
+		break;
+
+	case V4L2_STD_1080P_50:
+		tv_std = TVOUT_1080P_50;
+		break;
+
+#ifdef CONFIG_HDMI_14A_3D
+	case V4L2_STD_TVOUT_720P_60_SBS_HALF:
+		tv_std = TVOUT_720P_60_SBS_HALF;
+		break;
+	case V4L2_STD_TVOUT_720P_59_SBS_HALF:
+		tv_std = TVOUT_720P_59_SBS_HALF;
+		break;
+	case V4L2_STD_TVOUT_720P_50_TB:
+		tv_std = TVOUT_720P_50_TB;
+		break;
+	case V4L2_STD_TVOUT_1080P_24_TB:
+		tv_std = TVOUT_1080P_24_TB;
+		break;
+	case V4L2_STD_TVOUT_1080P_23_TB:
+		tv_std = TVOUT_1080P_23_TB;
+		break;
+#endif
+	default:
+		tvout_err("Invalid standard id(0x%08Lx)\n",
+			  s5p_tvout_v4l2_private.tvif_standard_id);
+		goto error_on_tvif_s_output;
+	}
+
+	s5p_tvif_ctrl_start(tv_std, tv_if);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return 0;
+error_on_tvif_s_output:
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return -EINVAL;
+}
+
+/* Fill both the bounds and the default rectangle of @cropcap with a
+ * full-screen origin-anchored @width x @height region. */
+static void s5p_tvout_tvif_fill_cropcap(struct v4l2_cropcap *cropcap,
+					u32 width, u32 height)
+{
+	cropcap->bounds.top = 0;
+	cropcap->bounds.left = 0;
+	cropcap->bounds.width = width;
+	cropcap->bounds.height = height;
+
+	cropcap->defrect = cropcap->bounds;
+}
+
+/*
+ * VIDIOC_CROPCAP: report the active resolution of the current standard.
+ *
+ * Behavior is unchanged; the eight duplicated bounds/defrect assignment
+ * blocks are factored into s5p_tvout_tvif_fill_cropcap().
+ */
+static int s5p_tvout_tvif_cropcap(
+		struct file *file, void *fh, struct v4l2_cropcap *a)
+{
+	enum s5p_tvout_disp_mode std;
+	enum s5p_tvout_o_mode inf;
+
+	struct v4l2_cropcap *cropcap = a;
+
+	if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+		tvout_err("Invalid buf type(%d)\n", cropcap->type);
+		return -EINVAL;
+	}
+
+	/* below part will be modified and moved to tvif ctrl class */
+	s5p_tvif_ctrl_get_std_if(&std, &inf);
+
+	switch (std) {
+	case TVOUT_NTSC_M:
+	case TVOUT_NTSC_443:
+	case TVOUT_480P_60_16_9:
+	case TVOUT_480P_60_4_3:
+	case TVOUT_480P_59:
+		s5p_tvout_tvif_fill_cropcap(cropcap, 720, 480);
+		break;
+
+	case TVOUT_PAL_M:
+	case TVOUT_PAL_BDGHI:
+	case TVOUT_PAL_N:
+	case TVOUT_PAL_NC:
+	case TVOUT_PAL_60:
+	case TVOUT_576P_50_16_9:
+	case TVOUT_576P_50_4_3:
+		s5p_tvout_tvif_fill_cropcap(cropcap, 720, 576);
+		break;
+
+	case TVOUT_720P_60:
+	case TVOUT_720P_59:
+	case TVOUT_720P_50:
+		s5p_tvout_tvif_fill_cropcap(cropcap, 1280, 720);
+		break;
+
+	case TVOUT_1080I_60:
+	case TVOUT_1080I_59:
+	case TVOUT_1080I_50:
+	case TVOUT_1080P_60:
+	case TVOUT_1080P_59:
+	case TVOUT_1080P_50:
+	case TVOUT_1080P_30:
+		s5p_tvout_tvif_fill_cropcap(cropcap, 1920, 1080);
+		break;
+
+#ifdef CONFIG_HDMI_14A_3D
+	case TVOUT_720P_60_SBS_HALF:
+	case TVOUT_720P_59_SBS_HALF:
+	case TVOUT_720P_50_TB:
+		s5p_tvout_tvif_fill_cropcap(cropcap, 1280, 720);
+		break;
+
+	case TVOUT_1080P_24_TB:
+	case TVOUT_1080P_23_TB:
+		s5p_tvout_tvif_fill_cropcap(cropcap, 1920, 1080);
+		break;
+#endif
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Block the caller for up to 100 ms on the TV-out wait queue as a crude
+ * "wait for vsync".  Always returns 0.
+ * NOTE(review): sleep_on_timeout() is deprecated and racy — a wakeup
+ * arriving before the sleep starts is lost; wait_event_timeout() would
+ * be the safe replacement.
+ */
+static int s5p_tvout_tvif_wait_for_vsync(void)
+{
+	sleep_on_timeout(&s5ptv_wq, HZ / 10);
+
+	return 0;
+}
+
+/*
+ * Standard V4L2 ioctl dispatch table for the TVIF control node; used by
+ * video_ioctl2() for commands not handled in s5p_tvout_tvif_ioctl().
+ */
+const struct v4l2_ioctl_ops s5p_tvout_tvif_ioctl_ops = {
+	.vidioc_querycap	= s5p_tvout_tvif_querycap,
+	.vidioc_g_std		= s5p_tvout_tvif_g_std,
+	.vidioc_s_std		= s5p_tvout_tvif_s_std,
+	.vidioc_enum_output	= s5p_tvout_tvif_enum_output,
+	.vidioc_g_output	= s5p_tvout_tvif_g_output,
+	.vidioc_s_output	= s5p_tvout_tvif_s_output,
+	.vidioc_cropcap		= s5p_tvout_tvif_cropcap,
+};
+
+/*
+ * Driver-private ioctl commands (HDCP, audio init, AV mute, vsync wait,
+ * VP copy-buffer management, quantization range).
+ * NOTE(review): several "set"-style commands below are declared _IOR
+ * (read direction) although userspace passes data in; the direction
+ * bits look wrong, but changing them now would break the existing ABI.
+ */
+#define VIDIOC_HDCP_ENABLE	_IOWR('V', 100, unsigned int)
+#define VIDIOC_HDCP_STATUS	_IOR('V', 101, unsigned int)
+#define VIDIOC_HDCP_PROT_STATUS	_IOR('V', 102, unsigned int)
+#define VIDIOC_INIT_AUDIO	_IOR('V', 103, unsigned int)
+#define VIDIOC_AV_MUTE		_IOR('V', 104, unsigned int)
+#define VIDIOC_G_AVMUTE		_IOR('V', 105, unsigned int)
+#define VIDIOC_SET_VSYNC_INT	_IOR('V', 106, unsigned int)
+#define VIDIOC_WAITFORVSYNC	_IOR('V', 107, unsigned int)
+#define VIDIOC_G_VP_BUFF_INFO	_IOR('V', 108, unsigned int)
+#define VIDIOC_S_VP_BUFF_INFO	_IOR('V', 109, unsigned int)
+#define VIDIOC_S_AUDIO_CHANNEL	_IOR('V', 110, unsigned int)
+#define VIDIOC_S_Q_COLOR_RANGE	_IOR('V', 111, unsigned int)
+
+/*
+ * ioctl entry point for the TVIF control node.  Driver-private commands
+ * are handled inline; anything unrecognized falls through to
+ * video_ioctl2(), which dispatches via s5p_tvout_tvif_ioctl_ops.
+ *
+ * NOTE(review): VIDIOC_S_AUDIO_CHANNEL and VIDIOC_S_Q_COLOR_RANGE use
+ * "break" instead of "goto end_tvif_ioctl", so after doing their work
+ * the private command is still forwarded to video_ioctl2(), which does
+ * not know it and will likely return an error despite the work having
+ * been done — confirm whether that is intentional.
+ */
+long s5p_tvout_tvif_ioctl(
+		struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long ret = 0;
+	void *argp = (void *) arg;
+	int i = 0;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+
+	tvout_dbg("\n");
+
+	switch (cmd) {
+	case VIDIOC_INIT_AUDIO:
+		tvout_dbg("VIDIOC_INIT_AUDIO(%d)\n", (int) arg);
+
+/*		s5ptv_status.hdmi.audio = (unsigned int) arg; */
+
+		if (arg)
+			s5p_tvif_ctrl_set_audio(true);
+		else
+			s5p_tvif_ctrl_set_audio(false);
+
+		goto end_tvif_ioctl;
+
+	case VIDIOC_AV_MUTE:
+		tvout_dbg("VIDIOC_AV_MUTE(%d)\n", (int) arg);
+
+		if (arg)
+			s5p_tvif_ctrl_set_av_mute(true);
+		else
+			s5p_tvif_ctrl_set_av_mute(false);
+
+		goto end_tvif_ioctl;
+
+	case VIDIOC_G_AVMUTE:
+		/* NOTE(review): return value of s5p_hdmi_ctrl_get_mute()
+		 * is discarded; caller always gets 0. */
+		s5p_hdmi_ctrl_get_mute();
+		goto end_tvif_ioctl;
+
+	case VIDIOC_HDCP_ENABLE:
+		tvout_dbg("VIDIOC_HDCP_ENABLE(%d)\n", (int) arg);
+
+/*		s5ptv_status.hdmi.hdcp_en = (unsigned int) arg; */
+
+		s5p_hdmi_ctrl_set_hdcp((bool) arg);
+		goto end_tvif_ioctl;
+
+	case VIDIOC_HDCP_STATUS: {
+		/* NOTE(review): this writes only the kernel-local copy of
+		 * 'arg'; the hard-coded status 1 is never copied back to
+		 * userspace — verify the intended contract. */
+		unsigned int *status = (unsigned int *)&arg;
+
+		*status = 1;
+
+		goto end_tvif_ioctl;
+	}
+
+	case VIDIOC_HDCP_PROT_STATUS: {
+		/* NOTE(review): same issue as VIDIOC_HDCP_STATUS above. */
+		unsigned int *prot = (unsigned int *)&arg;
+
+		*prot = 1;
+
+		goto end_tvif_ioctl;
+	}
+
+	case VIDIOC_ENUMSTD: {
+		/* NOTE(review): 'arg' is treated as a kernel pointer here,
+		 * not copied with copy_from_user/copy_to_user — assumes a
+		 * compat/ioctl shim has already translated it; confirm. */
+		struct v4l2_standard *p = (struct v4l2_standard *)arg;
+
+		if (p->index >= S5P_TVOUT_TVIF_NO_OF_STANDARD) {
+			tvout_dbg("VIDIOC_ENUMSTD: Invalid index(%d)\n",
+				p->index);
+
+			ret = -EINVAL;
+			goto end_tvif_ioctl;
+		}
+
+		memcpy(p, &s5p_tvout_tvif_standard[p->index],
+			sizeof(struct v4l2_standard));
+
+		goto end_tvif_ioctl;
+	}
+
+	case VIDIOC_SET_VSYNC_INT:
+		s5p_mixer_ctrl_set_vsync_interrupt((int)argp);
+		goto end_tvif_ioctl;
+
+	case VIDIOC_WAITFORVSYNC:
+		s5p_tvout_tvif_wait_for_vsync();
+		goto end_tvif_ioctl;
+
+	case VIDIOC_G_VP_BUFF_INFO: {
+		/* Report the fixed set of VP copy buffers (count, physical
+		 * and virtual base, size) back to userspace, field by field.
+		 * copy_to_user/copy_from_user return the number of bytes NOT
+		 * copied; any non-zero value is treated as a fault. */
+		struct s5ptv_vp_buf_info __user *buff_info =
+			(struct s5ptv_vp_buf_info __user *)arg;
+		struct s5p_tvout_vp_buff __user *buffs;
+		unsigned int tmp = S5PTV_VP_BUFF_CNT;
+		ret = copy_to_user(&buff_info->buff_cnt, &tmp, sizeof(tmp));
+		if (WARN_ON(ret))
+			goto end_tvif_ioctl;
+		ret = copy_from_user(&buffs, &buff_info->buffs,
+				sizeof(struct s5p_tvout_vp_buff *));
+		if (WARN_ON(ret))
+			goto end_tvif_ioctl;
+		for (i = 0; i < S5PTV_VP_BUFF_CNT; i++) {
+			ret = copy_to_user(&buffs[i].phy_base,
+					&s5ptv_vp_buff.vp_buffs[i].phy_base,
+					sizeof(unsigned int));
+			if (WARN_ON(ret))
+				goto end_tvif_ioctl;
+			ret = copy_to_user(&buffs[i].vir_base,
+					&s5ptv_vp_buff.vp_buffs[i].vir_base,
+					sizeof(unsigned int));
+			if (WARN_ON(ret))
+				goto end_tvif_ioctl;
+			tmp = S5PTV_VP_BUFF_SIZE;
+			ret = copy_to_user(&buffs[i].size, &tmp, sizeof(tmp));
+			if (WARN_ON(ret))
+				goto end_tvif_ioctl;
+		}
+		goto end_tvif_ioctl;
+	}
+	case VIDIOC_S_VP_BUFF_INFO: {
+		/* Install userspace-supplied VP copy buffers.  Physical
+		 * addresses are mapped with phys_to_virt(), which is only
+		 * valid for lowmem identity-mapped memory — see the matching
+		 * use in s5p_tvout_vo_s_fmt_type_private(). */
+		struct s5ptv_vp_buf_info buff_info;
+		struct s5p_tvout_vp_buff buffs[S5PTV_VP_BUFF_CNT];
+		ret = copy_from_user(&buff_info,
+				(struct s5ptv_vp_buf_info __user *)arg,
+				sizeof(buff_info));
+		if (WARN_ON(ret))
+			goto end_tvif_ioctl;
+		ret = copy_from_user(buffs, buff_info.buffs, sizeof(buffs));
+		if (WARN_ON(ret))
+			goto end_tvif_ioctl;
+
+		if (buff_info.buff_cnt != S5PTV_VP_BUFF_CNT) {
+			tvout_err("Insufficient buffer count (%d, %d)",
+				buff_info.buff_cnt, S5PTV_VP_BUFF_CNT);
+			ret = -EINVAL;
+			goto end_tvif_ioctl;
+		}
+		for (i = 0; i < S5PTV_VP_BUFF_CNT; i++) {
+			s5ptv_vp_buff.vp_buffs[i].phy_base = buffs[i].phy_base;
+			s5ptv_vp_buff.vp_buffs[i].vir_base =
+				(unsigned int)phys_to_virt(buffs[i].phy_base);
+			s5ptv_vp_buff.vp_buffs[i].size = buffs[i].size;
+			tvout_dbg("s5ptv_vp_buff phy_base = 0x%x, vir_base = 0x%8x\n",
+				s5ptv_vp_buff.vp_buffs[i].phy_base,
+				s5ptv_vp_buff.vp_buffs[i].vir_base);
+		}
+		goto end_tvif_ioctl;
+	}
+	case VIDIOC_S_AUDIO_CHANNEL: {
+		/* 0 selects 2-channel audio, anything else 5.1-channel. */
+		if (!arg)
+			s5p_tvif_audio_channel(TVOUT_AUDIO_2CH_VAL);
+		else
+			s5p_tvif_audio_channel(TVOUT_AUDIO_5_1CH_VAL);
+		/* TODO Runtime change
+		s5p_tvif_ctrl_stop();
+		if (s5p_tvif_ctrl_start(TVOUT_720P_60, TVOUT_HDMI) < 0)
+			goto end_tvif_ioctl; */
+		break;
+	}
+
+	case VIDIOC_S_Q_COLOR_RANGE: {
+		/* NOTE(review): "Quantaization" typo in the message below;
+		 * left as-is since a doc pass must not alter runtime
+		 * strings.  Invalid values are silently ignored (ret 0). */
+		if ((int)arg != 0 && (int)arg != 1) {
+			printk(KERN_ERR "Quantaization range has wrong value!\n");
+			goto end_tvif_ioctl;
+		}
+
+		s5p_tvif_q_color_range((int)arg);
+		break;
+	}
+
+	default:
+		break;
+	}
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return video_ioctl2(file, cmd, arg);
+
+end_tvif_ioctl:
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return ret;
+}
+
+
+/*
+ * open() for the TVIF control node.  Multiple concurrent opens are
+ * allowed; only a use count is maintained (hardware is stopped when the
+ * count drops to zero in release).
+ */
+static int s5p_tvout_tvif_open(struct file *file)
+{
+	mutex_lock(&s5p_tvout_tvif_mutex);
+
+	atomic_inc(&s5p_tvout_v4l2_private.tvif_use);
+
+	mutex_unlock(&s5p_tvout_tvif_mutex);
+
+	tvout_dbg("count=%d\n", atomic_read(&s5p_tvout_v4l2_private.tvif_use));
+
+	return 0;
+}
+
+/*
+ * release() for the TVIF control node.  Drops the use count and stops
+ * the TV interface once the last user closes.  The on_start_process /
+ * on_stop_process globals are toggled under the mutex so other paths
+ * can observe that a stop is in progress.
+ */
+static int s5p_tvout_tvif_release(struct file *file)
+{
+	tvout_dbg("count=%d\n", atomic_read(&s5p_tvout_v4l2_private.tvif_use));
+
+	mutex_lock(&s5p_tvout_tvif_mutex);
+
+	on_start_process = false;
+	on_stop_process = true;
+	tvout_dbg("on_stop_process(%d)\n", on_stop_process);
+	atomic_dec(&s5p_tvout_v4l2_private.tvif_use);
+
+	if (atomic_read(&s5p_tvout_v4l2_private.tvif_use) == 0)
+		s5p_tvif_ctrl_stop();
+
+	on_stop_process = false;
+	tvout_dbg("on_stop_process(%d)\n", on_stop_process);
+	mutex_unlock(&s5p_tvout_tvif_mutex);
+
+	return 0;
+}
+
+/* File operations for the TVIF control video device (node [0] below). */
+static struct v4l2_file_operations s5p_tvout_tvif_fops = {
+	.owner		= THIS_MODULE,
+	.open		= s5p_tvout_tvif_open,
+	.release	= s5p_tvout_tvif_release,
+	.ioctl		= s5p_tvout_tvif_ioctl
+};
+
+
+/*
+ * VIDIOC_QUERYCAP for the video-overlay node: static driver identity
+ * and the V4L2_CAP_VIDEO_OVERLAY capability only.
+ */
+static int s5p_tvout_vo_querycap(
+		struct file *file, void *fh, struct v4l2_capability *cap)
+{
+	strcpy(cap->driver, "s5p-tvout-vo");
+	strcpy(cap->card, "Samsung TVOUT Video Overlay");
+	cap->capabilities = V4L2_CAP_VIDEO_OVERLAY;
+	cap->version = KERNEL_VERSION(
+			MAJOR_VERSION, MINOR_VERSION, RELEASE_VERSION);
+
+	return 0;
+}
+
+/*
+ * Enumerate the private-format descriptors supported by the overlay
+ * node by copying from the static s5p_tvout_vo_fmt_desc table.
+ */
+static int s5p_tvout_vo_enum_fmt_type_private(
+		struct file *file, void *fh, struct v4l2_fmtdesc *f)
+{
+	int index = f->index;
+
+	if (index >= ARRAY_SIZE(s5p_tvout_vo_fmt_desc)) {
+		tvout_err("Invalid index(%d)\n", index);
+
+		return -EINVAL;
+	}
+
+	memcpy(f, &s5p_tvout_vo_fmt_desc[index], sizeof(struct v4l2_fmtdesc));
+
+	return 0;
+}
+
+/*
+ * Return the currently stored overlay source format, packed into the
+ * raw_data area of struct v4l2_format (private-format convention).
+ */
+static int s5p_tvout_vo_g_fmt_type_private(
+		struct file *file, void *fh, struct v4l2_format *a)
+{
+	memcpy(a->fmt.raw_data, &s5p_tvout_v4l2_private.vo_src_fmt,
+		sizeof(struct v4l2_vid_overlay_src));
+
+	return 0;
+}
+
+/*
+ * Set the overlay source plane from a private-format ioctl.  The
+ * raw_data area carries a struct v4l2_vid_overlay_src (base_y/base_c
+ * addresses plus pixel format).  The pixel format's 'field' selects
+ * progressive vs interlaced VP source color variants, and
+ * 'pixelformat' selects linear (NV12/NV21) vs tiled (NV12T/NV21T)
+ * layouts.  Depending on the kernel config, base_y/base_c are a UMP
+ * secure ID, an S5P VMEM cookie, or raw physical addresses.
+ * Returns 0 on success, -1 on an unsupported format/field.
+ */
+static int s5p_tvout_vo_s_fmt_type_private(
+		struct file *file, void *fh, struct v4l2_format *a)
+{
+	struct v4l2_vid_overlay_src vparam;
+	struct v4l2_pix_format *pix_fmt;
+	enum s5p_vp_src_color color;
+	enum s5p_vp_field field;
+	unsigned int src_vir_y_addr;
+	unsigned int src_vir_cb_addr;
+	int y_size;
+	int cbcr_size;
+	unsigned int copy_buff_idx;
+
+#if defined(CONFIG_S5P_SYSMMU_TV)
+	unsigned long base_y, base_c;
+#endif
+	memcpy(&vparam, a->fmt.raw_data, sizeof(struct v4l2_vid_overlay_src));
+
+	pix_fmt = &vparam.pix_fmt;
+
+	tvout_dbg("base_y=0x%X, base_c=0x%X, field=%d\n",
+			(u32) vparam.base_y, (u32) vparam.base_c,
+			pix_fmt->field);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+	/* check progressive or not */
+	if (pix_fmt->field == V4L2_FIELD_NONE) {
+		/* progressive */
+		switch (pix_fmt->pixelformat) {
+		case V4L2_PIX_FMT_NV12:
+			/* linear */
+			tvout_dbg("pixelformat=V4L2_PIX_FMT_NV12\n");
+
+			color = VP_SRC_COLOR_NV12;
+			break;
+
+		case V4L2_PIX_FMT_NV12T:
+			/* tiled */
+			tvout_dbg("pixelformat=V4L2_PIX_FMT_NV12T\n");
+			color = VP_SRC_COLOR_TILE_NV12;
+			break;
+		case V4L2_PIX_FMT_NV21:
+			/* linear */
+			color = VP_SRC_COLOR_NV21;
+			break;
+
+		case V4L2_PIX_FMT_NV21T:
+			/* tiled */
+			color = VP_SRC_COLOR_TILE_NV21;
+			break;
+
+		default:
+			tvout_err("src img format not supported\n");
+			goto error_on_s_fmt_type_private;
+		}
+
+		field = VP_TOP_FIELD;
+	} else if ((pix_fmt->field == V4L2_FIELD_TOP) ||
+			(pix_fmt->field == V4L2_FIELD_BOTTOM)) {
+		/* interlaced: use the IW (interleaved-weave) color types */
+		switch (pix_fmt->pixelformat) {
+		case V4L2_PIX_FMT_NV12:
+			/* linear */
+			tvout_dbg("pixelformat=V4L2_PIX_FMT_NV12\n");
+			color = VP_SRC_COLOR_NV12IW;
+			break;
+
+		case V4L2_PIX_FMT_NV12T:
+			/* tiled */
+			tvout_dbg("pixelformat=V4L2_PIX_FMT_NV12T\n");
+			color = VP_SRC_COLOR_TILE_NV12IW;
+			break;
+		case V4L2_PIX_FMT_NV21:
+			/* linear */
+			color = VP_SRC_COLOR_NV21IW;
+			break;
+
+		case V4L2_PIX_FMT_NV21T:
+			/* tiled */
+			color = VP_SRC_COLOR_TILE_NV21IW;
+			break;
+
+		default:
+			tvout_err("src img format not supported\n");
+			goto error_on_s_fmt_type_private;
+		}
+
+		field = (pix_fmt->field == V4L2_FIELD_BOTTOM) ?
+				VP_BOTTOM_FIELD : VP_TOP_FIELD;
+
+	} else {
+		tvout_err("this field id not supported\n");
+
+		goto error_on_s_fmt_type_private;
+	}
+
+	s5p_tvout_v4l2_private.vo_src_fmt = vparam;
+#if defined(CONFIG_S5P_SYSMMU_TV) && defined(CONFIG_UMP_VCM_ALLOC)
+	/*
+	 * For TV system mmu test using UMP and VCMM
+	 * vparam.base_y : secure ID
+	 * vparam.base_c : offset of base_c from base_y
+	 */
+	base_y = ump_dd_dev_virtual_get_from_secure_id((unsigned int)
+		vparam.base_y);
+	base_c = base_y + (unsigned long)vparam.base_c;
+	s5p_vp_ctrl_set_src_plane(base_y, base_c, pix_fmt->width,
+			pix_fmt->height, color, field);
+#elif defined(CONFIG_S5P_SYSMMU_TV) && defined(CONFIG_S5P_VMEM)
+	/*
+	 * For TV system mmu test
+	 * vparam.base_y : cookie
+	 * vparam.base_c : offset of base_c from base_y
+	 */
+	base_y = (unsigned long) s5p_getaddress((unsigned int)vparam.base_y);
+	base_c = base_y + (unsigned long)vparam.base_c;
+	s5p_vp_ctrl_set_src_plane(base_y, base_c, pix_fmt->width,
+			pix_fmt->height, color, field);
+#else
+	/* pix_fmt->priv != 0 requests a CPU copy of the frame into one of
+	 * the driver's VP bounce buffers before programming the VP. */
+	if (pix_fmt->priv) {
+		copy_buff_idx = s5ptv_vp_buff.copy_buff_idxs[s5ptv_vp_buff.curr_copy_idx];
+
+		if ((void *)s5ptv_vp_buff.vp_buffs[copy_buff_idx].vir_base == NULL) {
+			s5p_vp_ctrl_set_src_plane((u32) vparam.base_y, (u32) vparam.base_c,
+					pix_fmt->width, pix_fmt->height, color, field);
+		} else {
+			/* Tiled formats: plane sizes follow the 128x32 tile
+			 * alignment, rounded up to 8 KiB. */
+			if (pix_fmt->pixelformat == V4L2_PIX_FMT_NV12T
+				|| pix_fmt->pixelformat == V4L2_PIX_FMT_NV21T) {
+				y_size = ALIGN(ALIGN(pix_fmt->width, 128) * ALIGN(pix_fmt->height, 32), SZ_8K);
+				cbcr_size = ALIGN(ALIGN(pix_fmt->width, 128) * ALIGN(pix_fmt->height >> 1, 32), SZ_8K);
+			} else {
+				y_size = pix_fmt->width * pix_fmt->height;
+				cbcr_size = pix_fmt->width * (pix_fmt->height >> 1);
+			}
+
+			/* NOTE(review): phys_to_virt() on userspace-supplied
+			 * physical addresses is only valid for identity-mapped
+			 * lowmem and performs no validation — confirm the
+			 * addresses are guaranteed to come from a trusted
+			 * allocator. */
+			src_vir_y_addr = (unsigned int)phys_to_virt((unsigned long)vparam.base_y);
+			src_vir_cb_addr = (unsigned int)phys_to_virt((unsigned long)vparam.base_c);
+
+			memcpy((void *)s5ptv_vp_buff.vp_buffs[copy_buff_idx].vir_base,
+					(void *)src_vir_y_addr, y_size);
+			memcpy((void *)s5ptv_vp_buff.vp_buffs[copy_buff_idx].vir_base + y_size,
+					(void *)src_vir_cb_addr, cbcr_size);
+
+			/* Make the CPU copy visible to the VP DMA engine. */
+			flush_all_cpu_caches();
+			outer_flush_all();
+
+			s5p_vp_ctrl_set_src_plane((u32) s5ptv_vp_buff.vp_buffs[copy_buff_idx].phy_base,
+					(u32) s5ptv_vp_buff.vp_buffs[copy_buff_idx].phy_base + y_size,
+					pix_fmt->width, pix_fmt->height, color, field);
+
+			s5ptv_vp_buff.curr_copy_idx++;
+			if (s5ptv_vp_buff.curr_copy_idx >= S5PTV_VP_BUFF_CNT - 1)
+				s5ptv_vp_buff.curr_copy_idx = 0;
+		}
+	} else {
+		s5p_vp_ctrl_set_src_plane((u32) vparam.base_y, (u32) vparam.base_c,
+				pix_fmt->width, pix_fmt->height, color, field);
+	}
+#endif
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return 0;
+
+error_on_s_fmt_type_private:
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return -1;
+}
+
+/* Return the stored overlay destination window format. */
+static int s5p_tvout_vo_g_fmt_vid_overlay(
+		struct file *file, void *fh, struct v4l2_format *a)
+{
+	a->fmt.win = s5p_tvout_v4l2_private.vo_dst_fmt;
+
+	return 0;
+}
+
+/*
+ * Set the overlay destination window: stores the format, then programs
+ * the VP destination rectangle and global alpha value.
+ */
+static int s5p_tvout_vo_s_fmt_vid_overlay(
+		struct file *file, void *fh, struct v4l2_format *a)
+{
+	struct v4l2_rect *rect = &a->fmt.win.w;
+
+	tvout_dbg("l=%d, t=%d, w=%d, h=%d, g_alpha_value=%d\n",
+		rect->left, rect->top, rect->width, rect->height,
+		a->fmt.win.global_alpha);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+	s5p_tvout_v4l2_private.vo_dst_fmt = a->fmt.win;
+
+	s5p_vp_ctrl_set_dest_win_alpha_val(a->fmt.win.global_alpha);
+	s5p_vp_ctrl_set_dest_win(
+		rect->left, rect->top,
+		rect->width, rect->height);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return 0;
+}
+
+/*
+ * Return the stored overlay source crop rectangle.
+ * NOTE(review): an invalid buffer type only logs an error and still
+ * returns 0 (with a->c untouched) — confirm -EINVAL is not expected.
+ */
+static int s5p_tvout_vo_g_crop(
+		struct file *file, void *fh, struct v4l2_crop *a)
+{
+	switch (a->type) {
+	case V4L2_BUF_TYPE_PRIVATE:
+		a->c = s5p_tvout_v4l2_private.vo_src_rect;
+		break;
+
+	default:
+		tvout_err("Invalid buf type(0x%08x)\n", a->type);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Set the overlay source crop rectangle and program it into the VP.
+ * Only V4L2_BUF_TYPE_PRIVATE is accepted; other types log an error but
+ * still return 0 (mirrors s5p_tvout_vo_g_crop behaviour).
+ */
+static int s5p_tvout_vo_s_crop(
+		struct file *file, void *fh, struct v4l2_crop *a)
+{
+	tvout_dbg("\n");
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+	switch (a->type) {
+	case V4L2_BUF_TYPE_PRIVATE: {
+		struct v4l2_rect *rect =
+			&s5p_tvout_v4l2_private.vo_src_rect;
+
+		*rect = a->c;
+
+		tvout_dbg("l=%d, t=%d, w=%d, h=%d\n",
+			rect->left, rect->top,
+			rect->width, rect->height);
+
+		s5p_vp_ctrl_set_src_win(
+			rect->left, rect->top,
+			rect->width, rect->height);
+		break;
+	}
+	default:
+		tvout_err("Invalid buf type(0x%08x)\n", a->type);
+		break;
+	}
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return 0;
+}
+
+/*
+ * Return the stored overlay destination framebuffer description; the
+ * capability field is always forced to global-alpha support.
+ */
+static int s5p_tvout_vo_g_fbuf(
+		struct file *file, void *fh, struct v4l2_framebuffer *a)
+{
+	*a = s5p_tvout_v4l2_private.vo_dst_plane;
+
+	a->capability = V4L2_FBUF_CAP_GLOBAL_ALPHA;
+
+	return 0;
+}
+
+/*
+ * Set the overlay destination framebuffer: stores the description, then
+ * programs mixer blending (from the GLOBAL_ALPHA flag) and layer
+ * priority (carried in fmt.priv by driver convention).
+ */
+static int s5p_tvout_vo_s_fbuf(
+		struct file *file, void *fh, struct v4l2_framebuffer *a)
+{
+	s5p_tvout_v4l2_private.vo_dst_plane = *a;
+
+	tvout_dbg("g_alpha_enable=%d, priority=%d\n",
+		(a->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) ? 1 : 0,
+		a->fmt.priv);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+
+	s5p_vp_ctrl_set_dest_win_blend(
+		(a->flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) ? 1 : 0);
+
+	s5p_vp_ctrl_set_dest_win_priority(a->fmt.priv);
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return 0;
+}
+
+/*
+ * VIDIOC_OVERLAY: start (i != 0) or stop (i == 0) the video processor
+ * overlay path.
+ */
+static int s5p_tvout_vo_overlay(
+		struct file *file, void *fh, unsigned int i)
+{
+	tvout_dbg("%s\n", (i) ? "start" : "stop");
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_lock();
+#endif
+	if (i)
+		s5p_vp_ctrl_start();
+	else
+		s5p_vp_ctrl_stop();
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	s5p_tvout_mutex_unlock();
+#endif
+	return 0;
+}
+
+/* V4L2 ioctl dispatch table for the video-overlay node (node [1]). */
+const struct v4l2_ioctl_ops s5p_tvout_vo_ioctl_ops = {
+	.vidioc_querycap		= s5p_tvout_vo_querycap,
+
+	.vidioc_enum_fmt_type_private	= s5p_tvout_vo_enum_fmt_type_private,
+	.vidioc_g_fmt_type_private	= s5p_tvout_vo_g_fmt_type_private,
+	.vidioc_s_fmt_type_private	= s5p_tvout_vo_s_fmt_type_private,
+
+	.vidioc_g_fmt_vid_overlay	= s5p_tvout_vo_g_fmt_vid_overlay,
+	.vidioc_s_fmt_vid_overlay	= s5p_tvout_vo_s_fmt_vid_overlay,
+
+	.vidioc_g_crop			= s5p_tvout_vo_g_crop,
+	.vidioc_s_crop			= s5p_tvout_vo_s_crop,
+
+	.vidioc_g_fbuf			= s5p_tvout_vo_g_fbuf,
+	.vidioc_s_fbuf			= s5p_tvout_vo_s_fbuf,
+
+	.vidioc_overlay			= s5p_tvout_vo_overlay,
+};
+
+/*
+ * open() for the video-overlay node.  Unlike the TVIF node this one is
+ * exclusive: a second open fails with -EBUSY.
+ * NOTE(review): the error message names "TVOUT TVIF control" but this
+ * is the overlay node — looks like a copy/paste slip (runtime string
+ * left unchanged by this doc pass).
+ */
+static int s5p_tvout_vo_open(struct file *file)
+{
+	int ret = 0;
+
+	tvout_dbg("\n");
+
+	mutex_lock(&s5p_tvout_vo_mutex);
+
+	if (atomic_read(&s5p_tvout_v4l2_private.vo_use)) {
+		tvout_err("Can't open TVOUT TVIF control\n");
+		ret = -EBUSY;
+	} else
+		atomic_inc(&s5p_tvout_v4l2_private.vo_use);
+
+	mutex_unlock(&s5p_tvout_vo_mutex);
+
+	return ret;
+}
+
+/*
+ * release() for the video-overlay node: stops the VP, disables the
+ * mixer video layer, and clears the exclusive-use flag.
+ */
+static int s5p_tvout_vo_release(struct file *file)
+{
+	tvout_dbg("\n");
+
+	s5p_vp_ctrl_stop();
+
+	s5p_mixer_ctrl_disable_layer(MIXER_VIDEO_LAYER);
+
+	atomic_dec(&s5p_tvout_v4l2_private.vo_use);
+
+	return 0;
+}
+
+/* File operations for the video-overlay device; ioctls go straight to
+ * video_ioctl2() (no driver-private commands on this node). */
+static struct v4l2_file_operations s5p_tvout_vo_fops = {
+	.owner		= THIS_MODULE,
+	.open		= s5p_tvout_vo_open,
+	.release	= s5p_tvout_vo_release,
+	.ioctl		= video_ioctl2
+};
+
+
+/* dummy function for release callback of v4l2 video device */
+/* dummy function for release callback of v4l2 video device
+ * (the devices are static, so there is nothing to free) */
+static void s5p_tvout_video_dev_release(struct video_device *vdev)
+{
+}
+
+/*
+ * The two V4L2 video devices exported by this driver:
+ * [0] TVIF control node (fixed minor S5P_TVOUT_TVIF_MINOR),
+ * [1] video overlay node (fixed minor S5P_TVOUT_VO_MINOR).
+ */
+static struct video_device s5p_tvout_video_dev[] = {
+	[0] = {
+		.name		= "S5P TVOUT TVIF control",
+		.fops		= &s5p_tvout_tvif_fops,
+		.ioctl_ops	= &s5p_tvout_tvif_ioctl_ops,
+		.minor		= S5P_TVOUT_TVIF_MINOR,
+		.release	= s5p_tvout_video_dev_release,
+		.tvnorms	= V4L2_STD_ALL_HD,
+	},
+	[1] = {
+		.name		= "S5P TVOUT Video Overlay",
+		.fops		= &s5p_tvout_vo_fops,
+		.ioctl_ops	= &s5p_tvout_vo_ioctl_ops,
+		.release	= s5p_tvout_video_dev_release,
+		.minor		= S5P_TVOUT_VO_MINOR
+	}
+};
+
+/*
+ * Register both V4L2 video devices and initialize driver-private state.
+ * Returns 0 on success, -1 on registration failure.
+ * NOTE(review): on a partial failure, devices registered in earlier
+ * iterations are not unregistered before returning — confirm cleanup is
+ * handled by the caller.
+ */
+int s5p_tvout_v4l2_constructor(struct platform_device *pdev)
+{
+	int i;
+
+	/* v4l2 video device registration */
+	for (i = 0; i < ARRAY_SIZE(s5p_tvout_video_dev); i++) {
+
+		if (video_register_device(
+				&s5p_tvout_video_dev[i],
+				VFL_TYPE_GRABBER,
+				s5p_tvout_video_dev[i].minor) != 0) {
+			tvout_err("Fail to register v4l2 video device\n");
+
+			return -1;
+		}
+	}
+
+	s5p_tvout_v4l2_init_private();
+
+	return 0;
+}
+
+/*
+ * Tear down V4L2 support: destroy the two node mutexes.
+ * NOTE(review): the registered video devices are not unregistered
+ * here — verify that is done elsewhere on driver removal.
+ */
+void s5p_tvout_v4l2_destructor(void)
+{
+	mutex_destroy(&s5p_tvout_tvif_mutex);
+	mutex_destroy(&s5p_tvout_vo_mutex);
+}
diff --git a/drivers/media/video/samsung/tvout/s5p_tvout_v4l2.h b/drivers/media/video/samsung/tvout/s5p_tvout_v4l2.h
new file mode 100644
index 0000000..62a949c
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_tvout_v4l2.h
@@ -0,0 +1,19 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_tvout_v4l2.h
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Video4Linux API header file. file for Samsung TVOut driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _S5P_TVOUT_V4L2_H_
+#define _S5P_TVOUT_V4L2_H_
+
+extern int s5p_tvout_v4l2_constructor(struct platform_device *pdev);
+extern void s5p_tvout_v4l2_destructor(void);
+
+#endif /* _S5P_TVOUT_V4L2_H_ */
diff --git a/drivers/media/video/samsung/tvout/s5p_vp_ctrl.c b/drivers/media/video/samsung/tvout/s5p_vp_ctrl.c
new file mode 100644
index 0000000..d074da3
--- /dev/null
+++ b/drivers/media/video/samsung/tvout/s5p_vp_ctrl.c
@@ -0,0 +1,742 @@
+/* linux/drivers/media/video/samsung/tvout/s5p_vp_ctrl.c
+ *
+ * Copyright (c) 2009 Samsung Electronics
+ * http://www.samsung.com/
+ *
+ * Control class functions for S5P video processor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "hw_if/hw_if.h"
+#include "s5p_tvout_ctrl.h"
+
+#if defined(CONFIG_BUSFREQ)
+#include <mach/cpufreq.h>
+#endif
+
+#define INTERLACED 0
+#define PROGRESSIVE 1
+
+/* Scan-conversion operating mode for the video processor. */
+struct s5p_vp_ctrl_op_mode {
+	bool ipc;		/* interlace-to-progressive conversion */
+	bool line_skip;		/* read every other source line */
+	bool auto_toggling;	/* hardware field auto-toggle */
+};
+
+/* One brightness/contrast per-line equation (intercept + slope). */
+struct s5p_vp_ctrl_bc_line_eq {
+	enum s5p_vp_line_eq eq_num;
+	u32 intc;
+	u32 slope;
+};
+
+/* Simple rectangle: position (x, y) and size (w, h) in pixels. */
+struct s5p_vp_ctrl_rect {
+	u32 x;
+	u32 y;
+	u32 w;
+	u32 h;
+};
+
+/* Source plane description: top-field Y/C base addresses, dimensions,
+ * color/tiling format, current field and memory layout. */
+struct s5p_vp_ctrl_plane {
+	u32 top_y_addr;
+	u32 top_c_addr;
+	u32 w;
+	u32 h;
+
+	enum s5p_vp_src_color color_t;
+	enum s5p_vp_field field_id;
+	enum s5p_vp_mem_type mem_type;
+	enum s5p_vp_mem_mode mem_mode;
+};
+
+/* Post-processing parameters: CSC, brightness/contrast/saturation,
+ * sharpness, polyphase filtering and chroma expansion. */
+struct s5p_vp_ctrl_pp_param {
+	bool bypass;		/* bypass the whole post-process path */
+
+	bool csc_en;
+	enum s5p_vp_csc_type csc_t;
+	bool csc_default_coef;
+	bool csc_sub_y_offset_en;
+
+	u32 saturation;
+	u8 contrast;
+	bool brightness;
+	u32 bright_offset;
+	struct s5p_vp_ctrl_bc_line_eq bc_line_eq[8];
+
+	/* sharpness */
+	u32 th_hnoise;
+	enum s5p_vp_sharpness_control sharpness;
+
+
+	bool default_poly_filter;
+
+	enum s5p_vp_chroma_expansion chroma_exp;
+};
+
+/* Mixer-side parameters for the video layer. */
+struct s5p_vp_ctrl_mixer_param {
+	bool blend;
+	u32 alpha;
+	u32 prio;
+};
+
+/* Aggregate private state for the VP control class (single instance:
+ * s5p_vp_ctrl_private below). */
+struct s5p_vp_ctrl_private_data {
+	struct s5p_vp_ctrl_plane	src_plane;
+	struct s5p_vp_ctrl_rect		src_win;
+
+	struct s5p_vp_ctrl_rect		dst_win;
+	struct s5p_vp_ctrl_op_mode	op_mode;
+
+	struct s5p_vp_ctrl_pp_param	pp_param;
+	struct s5p_vp_ctrl_mixer_param	mixer_param;
+
+	bool				running;	/* VP currently started */
+
+	struct reg_mem_info		reg_mem;
+
+	struct s5p_tvout_clk_info	clk;
+	char				*pow_name;
+
+	struct device			*dev;
+};
+
+/* Single instance of the VP control state with hardware defaults
+ * (neutral brightness/contrast/saturation, sharpness off, CSC off). */
+static struct s5p_vp_ctrl_private_data s5p_vp_ctrl_private = {
+	.reg_mem = {
+		.name = "s5p-vp",
+		.res = NULL,
+		.base = NULL
+	},
+
+	.clk = {
+		.name = "vp",
+		.ptr = NULL
+	},
+
+	.pow_name = "vp_pd",
+
+	.src_plane = {
+		.field_id = VP_TOP_FIELD,
+	},
+
+	.pp_param = {
+		.default_poly_filter = true,
+		.bypass = false,
+
+		.saturation = 0x80,
+		.brightness = 0x00,
+		.bright_offset = 0x00,
+		.contrast = 0x80,
+
+		.th_hnoise = 0,
+		.sharpness = VP_SHARPNESS_NO,
+
+		.chroma_exp = 0,
+
+		.csc_en = false,
+		.csc_default_coef = true,
+		.csc_sub_y_offset_en = false,
+	},
+
+	.running = false
+};
+
+extern int s5p_vp_get_top_field_address(u32* top_y_addr, u32* top_c_addr);
+
+/*
+ * Classify the current source plane: INTERLACED for the *IW
+ * (interleaved-weave) color formats, PROGRESSIVE otherwise.
+ */
+static u8 s5p_vp_ctrl_get_src_scan_mode(void)
+{
+	struct s5p_vp_ctrl_plane *src_plane = &s5p_vp_ctrl_private.src_plane;
+	u8 ret = PROGRESSIVE;
+
+	if (src_plane->color_t == VP_SRC_COLOR_NV12IW ||
+		src_plane->color_t == VP_SRC_COLOR_TILE_NV12IW ||
+		src_plane->color_t == VP_SRC_COLOR_NV21IW ||
+		src_plane->color_t == VP_SRC_COLOR_TILE_NV21IW)
+		ret = INTERLACED;
+
+	return ret;
+}
+
+/*
+ * Classify the output path: composite is always interlaced; HDMI/DVI
+ * outputs are interlaced only for the 1080i display modes.  Everything
+ * else is progressive.
+ */
+static u8 s5p_vp_ctrl_get_dest_scan_mode(
+		enum s5p_tvout_disp_mode display, enum s5p_tvout_o_mode out)
+{
+	u8 ret = PROGRESSIVE;
+
+	switch (out) {
+	case TVOUT_COMPOSITE:
+		ret = INTERLACED;
+		break;
+
+	case TVOUT_HDMI_RGB:
+	case TVOUT_HDMI:
+	case TVOUT_DVI:
+		if (display == TVOUT_1080I_60 ||
+			display == TVOUT_1080I_59 ||
+			display == TVOUT_1080I_50)
+			ret = INTERLACED;
+		break;
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Program source/destination windows into the VP.  Rectangles are taken
+ * by value on purpose: the field-halving adjustments below (for
+ * interlaced paths, coordinates are per-field, i.e. half-height) must
+ * not modify the caller's stored state.
+ */
+static void s5p_vp_ctrl_set_src_dst_win(
+		struct s5p_vp_ctrl_rect src_win,
+		struct s5p_vp_ctrl_rect dst_win,
+		enum s5p_tvout_disp_mode disp,
+		enum s5p_tvout_o_mode out,
+		enum s5p_vp_src_color color_t,
+		bool ipc)
+{
+	struct s5p_vp_ctrl_op_mode *op_mode = &s5p_vp_ctrl_private.op_mode;
+
+	if (s5p_vp_ctrl_get_dest_scan_mode(disp, out) == INTERLACED) {
+		if (op_mode->line_skip) {
+			src_win.y /= 2;
+			src_win.h /= 2;
+		}
+
+		dst_win.y /= 2;
+		dst_win.h /= 2;
+	} else if (s5p_vp_ctrl_get_src_scan_mode() == INTERLACED) {
+		src_win.y /= 2;
+		src_win.h /= 2;
+	}
+
+	s5p_vp_set_src_position(src_win.x, 0, src_win.y);
+	s5p_vp_set_dest_position(dst_win.x, dst_win.y);
+	s5p_vp_set_src_dest_size(
+		src_win.w, src_win.h, dst_win.w, dst_win.h, ipc);
+}
+
+/*
+ * Read back the current top-field Y/C addresses from the VP hardware;
+ * returns zeros when the VP is not running.  Always returns 0.
+ */
+int s5p_vp_ctrl_get_src_addr(u32* top_y_addr, u32* top_c_addr)
+{
+	if (s5p_vp_ctrl_private.running)
+		s5p_vp_get_top_field_address(top_y_addr, top_c_addr);
+	else {
+		*top_y_addr = 0;
+		*top_c_addr = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Program the top-field (and, for interlaced sources, the derived
+ * bottom-field) base addresses.  For linear IW formats the bottom field
+ * starts one line (img_w bytes) after the top; for tiled IW formats it
+ * is offset by 0x40 bytes within the tile.
+ * Returns 0 on success, -1 if the hardware rejects an address.
+ */
+static int s5p_vp_ctrl_set_src_addr(
+		u32 top_y_addr, u32 top_c_addr,
+		u32 img_w, enum s5p_vp_src_color color_t)
+{
+	if (s5p_vp_set_top_field_address(top_y_addr, top_c_addr))
+		return -1;
+
+	if (s5p_vp_ctrl_get_src_scan_mode() == INTERLACED) {
+		u32 bot_y = 0;
+		u32 bot_c = 0;
+
+		if (color_t == VP_SRC_COLOR_NV12IW ||
+			color_t == VP_SRC_COLOR_NV21IW) {
+			bot_y = top_y_addr + img_w;
+			bot_c = top_c_addr + img_w;
+		} else if (color_t == VP_SRC_COLOR_TILE_NV12IW ||
+			color_t == VP_SRC_COLOR_TILE_NV21IW) {
+			bot_y = top_y_addr + 0x40;
+			bot_c = top_c_addr + 0x40;
+		}
+
+		if (s5p_vp_set_bottom_field_address(bot_y, bot_c))
+			return -1;
+	}
+
+	return 0;
+}
+
+/* Mark all 8 brightness/contrast line equations as unused so
+ * s5p_vp_ctrl_set_reg() skips them until explicitly configured. */
+static void s5p_vp_ctrl_init_private(void)
+{
+	int i;
+	struct s5p_vp_ctrl_pp_param *pp_param = &s5p_vp_ctrl_private.pp_param;
+
+	for (i = 0; i < 8; i++)
+		pp_param->bc_line_eq[i].eq_num = VP_LINE_EQ_DEFAULT;
+}
+
+/*
+ * Push the entire cached VP configuration into hardware and start it:
+ * reset, operating mode, source plane/addresses, windows, polyphase
+ * filter, post-processing (sharpness, saturation, brightness/contrast,
+ * CSC), then enable the mixer video layer.  Skipped (except the mixer
+ * enable) while the driver is early-suspended.
+ * Returns 0 on success, -1 on any hardware-programming failure.
+ */
+static int s5p_vp_ctrl_set_reg(void)
+{
+	int i;
+	int ret = 0;
+
+	enum s5p_tvout_disp_mode tv_std;
+	enum s5p_tvout_o_mode tv_if;
+
+	struct s5p_vp_ctrl_plane *src_plane = &s5p_vp_ctrl_private.src_plane;
+	struct s5p_vp_ctrl_pp_param *pp_param = &s5p_vp_ctrl_private.pp_param;
+	struct s5p_vp_ctrl_op_mode *op_mode = &s5p_vp_ctrl_private.op_mode;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+	} else
+#endif
+	{
+		s5p_tvif_ctrl_get_std_if(&tv_std, &tv_if);
+
+		s5p_vp_sw_reset();
+
+		s5p_vp_set_endian(TVOUT_BIG_ENDIAN);
+
+		s5p_vp_set_op_mode(
+			op_mode->line_skip, src_plane->mem_type,
+			src_plane->mem_mode, pp_param->chroma_exp,
+			op_mode->auto_toggling);
+
+		s5p_vp_set_field_id(src_plane->field_id);
+
+		s5p_vp_set_img_size(src_plane->w, src_plane->h);
+
+		s5p_vp_ctrl_set_src_addr(
+			src_plane->top_y_addr, src_plane->top_c_addr,
+			src_plane->w, src_plane->color_t);
+
+		s5p_vp_ctrl_set_src_dst_win(
+			s5p_vp_ctrl_private.src_win,
+			s5p_vp_ctrl_private.dst_win,
+			tv_std,
+			tv_if,
+			s5p_vp_ctrl_private.src_plane.color_t,
+			op_mode->ipc);
+
+		if (pp_param->default_poly_filter)
+			s5p_vp_set_poly_filter_coef_default(
+				s5p_vp_ctrl_private.src_win.w,
+				s5p_vp_ctrl_private.src_win.h,
+				s5p_vp_ctrl_private.dst_win.w,
+				s5p_vp_ctrl_private.dst_win.h,
+				op_mode->ipc);
+
+		s5p_vp_set_bypass_post_process(pp_param->bypass);
+		s5p_vp_set_sharpness(pp_param->th_hnoise, pp_param->sharpness);
+		s5p_vp_set_saturation(pp_param->saturation);
+		s5p_vp_set_brightness_contrast(
+			pp_param->brightness, pp_param->contrast);
+
+		/* Only equations that were explicitly configured (eq_num
+		 * matches the slot index) are written; 'ret' is re-checked
+		 * every iteration but only changes when a write happened. */
+		for (i = VP_LINE_EQ_0; i <= VP_LINE_EQ_7; i++) {
+			if (pp_param->bc_line_eq[i].eq_num == i)
+				ret = s5p_vp_set_brightness_contrast_control(
+					pp_param->bc_line_eq[i].eq_num,
+					pp_param->bc_line_eq[i].intc,
+					pp_param->bc_line_eq[i].slope);
+
+			if (ret != 0)
+				return -1;
+		}
+
+		s5p_vp_set_brightness_offset(pp_param->bright_offset);
+
+		s5p_vp_set_csc_control(
+				pp_param->csc_sub_y_offset_en,
+				pp_param->csc_en);
+
+		if (pp_param->csc_en && pp_param->csc_default_coef) {
+			if (s5p_vp_set_csc_coef_default(pp_param->csc_t))
+				return -1;
+		}
+
+		if (s5p_vp_start())
+			return -1;
+
+	}
+
+	s5p_mixer_ctrl_enable_layer(MIXER_VIDEO_LAYER);
+
+	/* NOTE(review): busy-wait of 50 ms after start — presumably to let
+	 * the first frame latch; confirm whether a shorter or event-driven
+	 * wait would suffice. */
+	mdelay(50);
+
+	return 0;
+}
+
+/*
+ * Stop the VP hardware (unless early-suspended) and always disable the
+ * mixer video layer.
+ */
+static void s5p_vp_ctrl_internal_stop(void)
+{
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+	} else
+#endif
+		s5p_vp_stop();
+
+	s5p_mixer_ctrl_disable_layer(MIXER_VIDEO_LAYER);
+}
+
+/*
+ * Gate the VP clock (and, on Exynos4, the runtime-PM reference).  The
+ * register base passed to s5p_vp_init() doubles as an "is powered"
+ * flag: restored when the clock comes on, NULLed when it goes off so
+ * no register access can happen while unpowered.
+ */
+static void s5p_vp_ctrl_clock(bool on)
+{
+	if (on) {
+#ifdef CONFIG_ARCH_EXYNOS4
+		s5p_tvout_pm_runtime_get();
+#endif
+		clk_enable(s5p_vp_ctrl_private.clk.ptr);
+		/* Restore vp_base address */
+		s5p_vp_init(s5p_vp_ctrl_private.reg_mem.base);
+
+	} else {
+		clk_disable(s5p_vp_ctrl_private.clk.ptr);
+#ifdef CONFIG_ARCH_EXYNOS4
+		s5p_tvout_pm_runtime_put();
+#endif
+		/* Set vp_base to NULL */
+		s5p_vp_init(NULL);
+	}
+}
+
+
+
+/*
+ * Cache the new source plane (addresses, size, color format, field)
+ * and, if the VP is running and not suspended, program it into the
+ * hardware immediately and latch with s5p_vp_update().
+ */
+void s5p_vp_ctrl_set_src_plane(
+		u32 base_y, u32 base_c, u32 width, u32 height,
+		enum s5p_vp_src_color color, enum s5p_vp_field field)
+{
+	struct s5p_vp_ctrl_plane *src_plane = &s5p_vp_ctrl_private.src_plane;
+
+	src_plane->color_t = color;
+	src_plane->field_id = field;
+
+	src_plane->top_y_addr = base_y;
+	src_plane->top_c_addr = base_c;
+
+	src_plane->w = width;
+	src_plane->h = height;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return;
+	}
+#endif
+	if (s5p_vp_ctrl_private.running) {
+		s5p_vp_set_img_size(width, height);
+
+		s5p_vp_set_field_id(field);
+		s5p_vp_ctrl_set_src_addr(base_y, base_c, width, color);
+
+		s5p_vp_update();
+	}
+}
+
+/*
+ * Cache the new source crop window and, if running and not suspended,
+ * reprogram both windows (the scan-mode halving in
+ * s5p_vp_ctrl_set_src_dst_win depends on the current standard/interface).
+ */
+void s5p_vp_ctrl_set_src_win(u32 left, u32 top, u32 width, u32 height)
+{
+	struct s5p_vp_ctrl_rect *src_win = &s5p_vp_ctrl_private.src_win;
+
+	src_win->x = left;
+	src_win->y = top;
+	src_win->w = width;
+	src_win->h = height;
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return;
+	}
+#endif
+	if (s5p_vp_ctrl_private.running) {
+		enum s5p_tvout_disp_mode tv_std;
+		enum s5p_tvout_o_mode tv_if;
+
+		s5p_tvif_ctrl_get_std_if(&tv_std, &tv_if);
+
+		s5p_vp_ctrl_set_src_dst_win(
+			*src_win,
+			s5p_vp_ctrl_private.dst_win,
+			tv_std,
+			tv_if,
+			s5p_vp_ctrl_private.src_plane.color_t,
+			s5p_vp_ctrl_private.op_mode.ipc);
+
+		s5p_vp_update();
+	}
+}
+
+/*
+ * Cache the new destination window and, if running and not suspended,
+ * reprogram both windows (mirror of s5p_vp_ctrl_set_src_win).
+ */
+void s5p_vp_ctrl_set_dest_win(u32 left, u32 top, u32 width, u32 height)
+{
+	struct s5p_vp_ctrl_rect *dst_win = &s5p_vp_ctrl_private.dst_win;
+
+	dst_win->x = left;
+	dst_win->y = top;
+	dst_win->w = width;
+	dst_win->h = height;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return;
+	}
+#endif
+	if (s5p_vp_ctrl_private.running) {
+		enum s5p_tvout_disp_mode tv_std;
+		enum s5p_tvout_o_mode tv_if;
+
+		s5p_tvif_ctrl_get_std_if(&tv_std, &tv_if);
+
+		s5p_vp_ctrl_set_src_dst_win(
+			s5p_vp_ctrl_private.src_win,
+			*dst_win,
+			tv_std,
+			tv_if,
+			s5p_vp_ctrl_private.src_plane.color_t,
+			s5p_vp_ctrl_private.op_mode.ipc);
+
+		s5p_vp_update();
+	}
+}
+
+/* Cache the video-layer global alpha and apply it unless suspended. */
+void s5p_vp_ctrl_set_dest_win_alpha_val(u32 alpha)
+{
+	s5p_vp_ctrl_private.mixer_param.alpha = alpha;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return;
+	}
+#endif
+	s5p_mixer_ctrl_set_alpha(MIXER_VIDEO_LAYER, alpha);
+}
+
+/*
+ * Cache the blend-enable flag for the video layer.
+ * NOTE(review): the mixer is always set to LAYER_BLENDING here
+ * regardless of 'enable' — the flag is stored but never used to turn
+ * blending off; confirm whether a disable path is missing.
+ */
+void s5p_vp_ctrl_set_dest_win_blend(bool enable)
+{
+	s5p_vp_ctrl_private.mixer_param.blend = enable;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return;
+	}
+#endif
+	s5p_mixer_ctrl_set_blend_mode(MIXER_VIDEO_LAYER,
+		LAYER_BLENDING);
+}
+
+/* Cache the video-layer priority and apply it unless suspended. */
+void s5p_vp_ctrl_set_dest_win_priority(u32 prio)
+{
+	s5p_vp_ctrl_private.mixer_param.prio = prio;
+#ifdef CONFIG_HAS_EARLYSUSPEND
+	if (suspend_status) {
+		tvout_dbg("driver is suspend_status\n");
+		return;
+	}
+#endif
+	s5p_mixer_ctrl_set_priority(MIXER_VIDEO_LAYER, prio);
+}
+
+/*
+ * Stop the VP if running: halt the hardware, gate the clock (unless
+ * suspended, in which case it is already off), clear the running flag
+ * and release the bus-frequency lock taken at start.
+ */
+void s5p_vp_ctrl_stop(void)
+{
+	if (s5p_vp_ctrl_private.running) {
+		s5p_vp_ctrl_internal_stop();
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (suspend_status) {
+			tvout_dbg("driver is suspend_status\n");
+		} else
+#endif
+		{
+			s5p_vp_ctrl_clock(0);
+		}
+
+		s5p_vp_ctrl_private.running = false;
+#if defined(CONFIG_BUSFREQ) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+		exynos4_busfreq_lock_free(DVFS_LOCK_ID_TV);
+#endif
+	}
+}
+
+int s5p_vp_ctrl_start(void)
+{	/* Configure and start the video processor for the current TV-out standard. */
+	struct s5p_vp_ctrl_plane *src_plane = &s5p_vp_ctrl_private.src_plane;
+	enum s5p_tvout_disp_mode disp;
+	enum s5p_tvout_o_mode out;
+
+	struct s5p_vp_ctrl_rect *src_win = &s5p_vp_ctrl_private.src_win;
+	struct s5p_vp_ctrl_rect *dst_win = &s5p_vp_ctrl_private.dst_win;
+
+	bool i_mode, o_mode; /* scan modes; compared against INTERLACED below */
+
+	s5p_tvif_ctrl_get_std_if(&disp, &out);
+
+	switch (disp) {	/* choose the colour-space conversion matrix per standard */
+	case TVOUT_480P_60_16_9:
+	case TVOUT_480P_60_4_3:
+	case TVOUT_576P_50_16_9:
+	case TVOUT_576P_50_4_3:
+	case TVOUT_480P_59:
+		s5p_vp_ctrl_private.pp_param.csc_t = VP_CSC_SD_HD;
+		break;
+
+	case TVOUT_1080I_50:
+	case TVOUT_1080I_60:
+	case TVOUT_1080P_50:
+	case TVOUT_1080P_30:
+	case TVOUT_1080P_60:
+	case TVOUT_720P_59:
+	case TVOUT_1080I_59:
+	case TVOUT_1080P_59:
+	case TVOUT_720P_50:
+	case TVOUT_720P_60:
+		s5p_vp_ctrl_private.pp_param.csc_t = VP_CSC_HD_SD;
+		break;
+#ifdef CONFIG_HDMI_14A_3D
+	case TVOUT_720P_60_SBS_HALF:
+	case TVOUT_720P_59_SBS_HALF:
+	case TVOUT_720P_50_TB:
+	case TVOUT_1080P_24_TB:
+	case TVOUT_1080P_23_TB:
+		s5p_vp_ctrl_private.pp_param.csc_t = VP_CSC_HD_SD;
+		break;
+
+#endif
+
+	default:
+		break;	/* csc_t left unchanged for unlisted modes */
+	}
+
+	i_mode = s5p_vp_ctrl_get_src_scan_mode();
+	o_mode = s5p_vp_ctrl_get_dest_scan_mode(disp, out);
+
+	/* Derive line-skip / IPC / auto-toggle from the (src, dst) scan-mode pair. */
+	if (i_mode == INTERLACED) {
+		if (o_mode == INTERLACED) {
+			/* i to i : line skip 1, ipc 0, auto toggle 0 */
+			s5p_vp_ctrl_private.op_mode.line_skip = true;
+			s5p_vp_ctrl_private.op_mode.ipc = false;
+			s5p_vp_ctrl_private.op_mode.auto_toggling = false;
+		} else {
+			/* i to p : line skip 1, ipc 1, auto toggle 0 */
+			s5p_vp_ctrl_private.op_mode.line_skip = true;
+			s5p_vp_ctrl_private.op_mode.ipc = true;
+			s5p_vp_ctrl_private.op_mode.auto_toggling = false;
+		}
+	} else {
+		if (o_mode == INTERLACED) {
+			/* p to i : line skip 0, ipc 0, auto toggle 0 */
+			if (dst_win->h > src_win->h &&
+			((dst_win->h << 16)/src_win->h < 0x100000))	/* vertical upscale < 16x (16.16 fixed point) */
+				s5p_vp_ctrl_private.op_mode.line_skip = false;
+			/* p to i : line skip 1, ipc 0, auto toggle 0 */
+			else
+				s5p_vp_ctrl_private.op_mode.line_skip = true;
+			s5p_vp_ctrl_private.op_mode.ipc = false;
+			s5p_vp_ctrl_private.op_mode.auto_toggling = false;
+		} else {
+			/* p to p : line skip 0, ipc 0, auto toggle 0 */
+			s5p_vp_ctrl_private.op_mode.line_skip = false;
+			s5p_vp_ctrl_private.op_mode.ipc = false;
+			s5p_vp_ctrl_private.op_mode.auto_toggling = false;
+		}
+	}
+	src_plane->mem_type
+		= ((src_plane->color_t == VP_SRC_COLOR_NV12) ||
+		(src_plane->color_t == VP_SRC_COLOR_NV12IW) ||
+		(src_plane->color_t == VP_SRC_COLOR_TILE_NV12) ||
+		(src_plane->color_t == VP_SRC_COLOR_TILE_NV12IW)) ?
+		VP_YUV420_NV12 : VP_YUV420_NV21;	/* CbCr byte order from the source format */
+
+	src_plane->mem_mode
+		= ((src_plane->color_t == VP_SRC_COLOR_NV12) ||
+		(src_plane->color_t == VP_SRC_COLOR_NV12IW) ||
+		(src_plane->color_t == VP_SRC_COLOR_NV21) ||
+		(src_plane->color_t == VP_SRC_COLOR_NV21IW)) ?
+		VP_LINEAR_MODE : VP_2D_TILE_MODE;	/* TILE_* formats use the 2D tile walk */
+
+	if (s5p_vp_ctrl_private.running)
+		s5p_vp_ctrl_internal_stop();	/* restart: stop h/w, keep clock/busfreq */
+	else {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+		if (suspend_status) {	/* suspended: registers are re-applied on resume */
+			tvout_dbg("driver is suspend_status\n");
+		} else
+#endif
+		{
+#if defined(CONFIG_BUSFREQ) || defined(CONFIG_BUSFREQ_LOCK_WRAPPER)
+			if ((disp == TVOUT_1080P_60) || (disp == TVOUT_1080P_59)
+				|| (disp == TVOUT_1080P_50)) {	/* high-bandwidth modes: pin bus frequency */
+				if (exynos4_busfreq_lock(DVFS_LOCK_ID_TV, BUS_L0))
+					printk(KERN_ERR "%s: failed lock DVFS\n", __func__);
+			}
+#endif
+			s5p_vp_ctrl_clock(1);
+		}
+		s5p_vp_ctrl_private.running = true;
+	}
+	s5p_vp_ctrl_set_reg();
+
+	return 0;
+}
+
+int s5p_vp_ctrl_constructor(struct platform_device *pdev)
+{	/* Probe-time setup: map VP registers, acquire the clock, init driver state. */
+	int ret = 0;
+
+	ret = s5p_tvout_map_resource_mem(
+		pdev,
+		s5p_vp_ctrl_private.reg_mem.name,
+		&(s5p_vp_ctrl_private.reg_mem.base),
+		&(s5p_vp_ctrl_private.reg_mem.res));
+
+	if (ret)
+		goto err_on_res;
+
+	s5p_vp_ctrl_private.clk.ptr =
+		clk_get(&pdev->dev, s5p_vp_ctrl_private.clk.name);
+
+	if (IS_ERR(s5p_vp_ctrl_private.clk.ptr)) {
+		tvout_err("Failed to find clock %s\n",
+			s5p_vp_ctrl_private.clk.name);
+		ret = -ENOENT;
+		goto err_on_clk;
+	}
+
+	s5p_vp_init(s5p_vp_ctrl_private.reg_mem.base);
+	s5p_vp_ctrl_init_private();
+
+	return 0;
+
+err_on_clk:	/* unwind the register mapping taken above */
+	iounmap(s5p_vp_ctrl_private.reg_mem.base);
+	release_resource(s5p_vp_ctrl_private.reg_mem.res);
+	kfree(s5p_vp_ctrl_private.reg_mem.res);
+
+err_on_res:
+	return ret;
+}
+
+void s5p_vp_ctrl_destructor(void)
+{	/* Teardown mirror of the constructor: unmap regs, release resource, drop clock. */
+	if (s5p_vp_ctrl_private.reg_mem.base)
+		iounmap(s5p_vp_ctrl_private.reg_mem.base);
+
+	if (s5p_vp_ctrl_private.reg_mem.res) {
+		release_resource(s5p_vp_ctrl_private.reg_mem.res);
+		kfree(s5p_vp_ctrl_private.reg_mem.res);
+	}
+
+	if (s5p_vp_ctrl_private.clk.ptr) {
+		if (s5p_vp_ctrl_private.running)
+			clk_disable(s5p_vp_ctrl_private.clk.ptr);	/* still streaming: gate the clock first */
+		clk_put(s5p_vp_ctrl_private.clk.ptr);
+	}
+}
+
+void s5p_vp_ctrl_suspend(void)
+{	/* Early-suspend hook: halt the VP and gate its clock; 'running' stays true. */
+	tvout_dbg("running(%d)\n", s5p_vp_ctrl_private.running);
+	if (s5p_vp_ctrl_private.running) {
+		s5p_vp_stop();
+		s5p_vp_ctrl_clock(0);
+	}
+}
+
+void s5p_vp_ctrl_resume(void)
+{	/* Late-resume hook: re-enable the clock and replay cached register state. */
+	tvout_dbg("running(%d)\n", s5p_vp_ctrl_private.running);
+	if (s5p_vp_ctrl_private.running) {
+		s5p_vp_ctrl_clock(1);
+		s5p_vp_ctrl_set_reg();
+	}
+}
diff --git a/drivers/media/video/samsung/ump/Kconfig b/drivers/media/video/samsung/ump/Kconfig
new file mode 100644
index 0000000..aaae26e
--- /dev/null
+++ b/drivers/media/video/samsung/ump/Kconfig
@@ -0,0 +1,51 @@
+
+#
+## S3C Multimedia Mali configuration
+##
+#
+# For UMP
+config VIDEO_UMP
+ bool "Enable UMP(Unified Memory Provider)"
+ default y
+ ---help---
+ This enables UMP memory provider
+
+config UMP_VCM_ALLOC
+ depends on VIDEO_UMP && VCM
+ default y
+ bool "Enable ump-vcm(virtual contiguous memory) memory"
+ help
+ Use VCM(virtual-contiguous-memory) to allocate physical memory.
+
+choice
+depends on VIDEO_UMP
+prompt "UMP MEMORY OPTION"
+default UMP_OSMEM_ONLY
+config UMP_DED_ONLY
+ bool "ump dedicated memory only"
+ ---help---
+ This enables UMP dedicated memory only option
+config UMP_OSMEM_ONLY
+ bool "ump OS memory only"
+ ---help---
+ This enables UMP OS memory only option
+config UMP_VCM_ONLY
+ bool "ump VCM memory"
+ ---help---
+ This enables UMP VCM memory only option
+
+endchoice
+config UMP_MEM_SIZE
+	int "UMP Memory Size"
+ depends on VIDEO_UMP
+ default "512"
+ ---help---
+ This value is dedicated memory size of UMP (unit is MByte).
+# For UMP_DEBUG
+config VIDEO_UMP_DEBUG
+ bool "Enables debug messages"
+ depends on VIDEO_UMP
+ default n
+ help
+ This enables UMP driver debug messages.
+
diff --git a/drivers/media/video/samsung/ump/Makefile b/drivers/media/video/samsung/ump/Makefile
new file mode 100644
index 0000000..f429c40
--- /dev/null
+++ b/drivers/media/video/samsung/ump/Makefile
@@ -0,0 +1,94 @@
+#
+# Copyright (C) 2010 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+ifeq ($(CONFIG_UMP_DED_ONLY),y)
+UMP_MEM_SIZE= $(CONFIG_UMP_MEM_SIZE)
+USING_MEMORY=0
+endif
+
+ifeq ($(CONFIG_UMP_OSMEM_ONLY),y)
+UMP_MEM_SIZE= $(CONFIG_UMP_MEM_SIZE)
+USING_MEMORY=1
+endif
+
+ifeq ($(CONFIG_UMP_VCM_ONLY),y)
+UMP_MEM_SIZE= $(CONFIG_UMP_MEM_SIZE)
+USING_MEMORY=2
+endif
+
+
+# For UMP Debug
+ifeq ($(CONFIG_VIDEO_UMP_DEBUG),y)
+DEFINES += -DDEBUG
+endif
+
+# Set up our defines, which will be passed to gcc
+DEFINES += -DKERNEL_BUILTIN=1
+DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER
+DEFINES += -DUSING_MEMORY=$(USING_MEMORY)
+DEFINES += -DUMP_MEM_SIZE=$(UMP_MEM_SIZE)
+DEFINES += -DMALI_STATE_TRACKING=1
+
+UDD_FILE_PREFIX := drivers/media/video/samsung/ump/
+KBUILDROOT =
+
+# linux build system integration
+
+obj-$(CONFIG_VIDEO_UMP) += ump.o
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+INCLUDES += \
+ -I$(UDD_FILE_PREFIX)\
+ -I$(UDD_FILE_PREFIX)common\
+ -I$(UDD_FILE_PREFIX)linux\
+ -I$(UDD_FILE_PREFIX)include\
+ -I$(UDD_FILE_PREFIX)linux/license/gpl/\
+ -I$(UDD_FILE_PREFIX)../mali/common\
+ -I$(UDD_FILE_PREFIX)../mali/linux
+
+OSKFILES+=\
+ $(KBUILDROOT)../mali/linux/mali_osk_atomics.o \
+ $(KBUILDROOT)../mali/linux/mali_osk_locks.o \
+ $(KBUILDROOT)../mali/linux/mali_osk_math.o \
+ $(KBUILDROOT)../mali/linux/mali_osk_memory.o \
+ $(KBUILDROOT)../mali/linux/mali_osk_misc.o
+
+ump-y := \
+ $(KBUILDROOT)linux/ump_kernel_linux.o \
+ $(KBUILDROOT)linux/ump_kernel_memory_backend_os.o \
+ $(KBUILDROOT)linux/ump_kernel_memory_backend_dedicated.o \
+ $(KBUILDROOT)linux/ump_memory_backend.o \
+ $(KBUILDROOT)linux/ump_ukk_wrappers.o \
+ $(KBUILDROOT)linux/ump_ukk_ref_wrappers.o \
+ $(KBUILDROOT)linux/ump_osk_atomics.o \
+ $(KBUILDROOT)linux/ump_osk_low_level_mem.o \
+ $(KBUILDROOT)linux/ump_osk_misc.o \
+ $(KBUILDROOT)common/ump_kernel_common.o \
+ $(KBUILDROOT)common/ump_kernel_descriptor_mapping.o \
+ $(KBUILDROOT)common/ump_kernel_api.o \
+ $(KBUILDROOT)common/ump_kernel_ref_drv.o\
+ $(OSKFILES)
+
+ump-$(CONFIG_UMP_VCM_ALLOC) += \
+ $(KBUILDROOT)linux/ump_kernel_memory_backend_vcm.o \
+
+EXTRA_CFLAGS += $(INCLUDES) \
+ $(DEFINES)
+
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_REV:=$(shell ((svnversion | grep -qv exported && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+
+EXTRA_CFLAGS += -DSVN_REV=$(SVN_REV)
+EXTRA_CFLAGS += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
diff --git a/drivers/media/video/samsung/ump/Makefile.common b/drivers/media/video/samsung/ump/Makefile.common
new file mode 100644
index 0000000..4b5db24
--- /dev/null
+++ b/drivers/media/video/samsung/ump/Makefile.common
@@ -0,0 +1,17 @@
+#
+# Copyright (C) 2010 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+UMP_FILE_PREFIX = ./drivers/video/samsung/ump
+
+SRC = $(UMP_FILE_PREFIX)/common/ump_kernel_common.c \
+ $(UMP_FILE_PREFIX)/common/ump_kernel_descriptor_mapping.c \
+ $(UMP_FILE_PREFIX)/common/ump_kernel_api.c \
+ $(UMP_FILE_PREFIX)/common/ump_kernel_ref_drv.c
+
diff --git a/drivers/media/video/samsung/ump/Makefile_backup b/drivers/media/video/samsung/ump/Makefile_backup
new file mode 100644
index 0000000..632cb0c
--- /dev/null
+++ b/drivers/media/video/samsung/ump/Makefile_backup
@@ -0,0 +1,80 @@
+#
+# Copyright (C) 2010 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+BUILD ?= debug
+USING_MALI400 ?= 1
+USING_ZBT ?= 0
+USING_MMU ?= 1
+USING_UMP ?= 1
+CPU ?= vega1
+CONFIG ?= marcopolo-vega1-m400
+
+
+# Set up our defines, which will be passed to gcc
+DEFINES += -DUSING_MALI400=$(USING_MALI400)
+DEFINES += -DUSING_ZBT=$(USING_ZBT)
+DEFINES += -DUSING_MMU=$(USING_MMU)
+DEFINES += -DUSING_UMP=$(USING_UMP)
+DEFINES += -DMALI_USE_UNIFIED_MEMORY_PROVIDER
+ifeq ($(BUILD), debug)
+DEFINES += -DDEBUG
+endif
+
+
+UMP_FILE_PREFIX := drivers/video/samsung/ump
+UDD_FILE_PREFIX := drivers/video/samsung/mali
+KBUILDROOT =
+
+# linux build system integration
+
+obj-y += ump.o
+
+# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
+# The ARM proprietary product will only include the license/proprietary directory
+# The GPL product will only include the license/gpl directory
+
+INCLUDES = \
+ -I$(UMP_FILE_PREFIX)\
+ -I$(UMP_FILE_PREFIX)/common\
+ -I$(UMP_FILE_PREFIX)/linux\
+ -I$(UMP_FILE_PREFIX)/include\
+ -I$(UMP_FILE_PREFIX)/linux/license/proprietary/\
+ -I$(UDD_FILE_PREFIX)/common\
+ -I$(UDD_FILE_PREFIX)/linux
+
+ump-y := \
+ $(KBUILDROOT)linux/ump_kernel_linux.o \
+ $(KBUILDROOT)linux/ump_kernel_memory_backend_os.o \
+ $(KBUILDROOT)linux/ump_kernel_memory_backend_dedicated.o \
+ $(KBUILDROOT)linux/ump_memory_backend.o \
+ $(KBUILDROOT)linux/ump_ukk_wrappers.o \
+ $(KBUILDROOT)linux/ump_ukk_ref_wrappers.o \
+ $(KBUILDROOT)linux/ump_osk_atomics.o \
+ $(KBUILDROOT)linux/ump_osk_low_level_mem.o \
+ $(KBUILDROOT)linux/ump_osk_misc.o \
+ $(KBUILDROOT)../mali/linux/mali_osk_atomics.o \
+ $(KBUILDROOT)../mali/linux/mali_osk_locks.o \
+ $(KBUILDROOT)../mali/linux/mali_osk_memory.o \
+ $(KBUILDROOT)../mali/linux/mali_osk_math.o \
+ $(KBUILDROOT)../mali/linux/mali_osk_misc.o \
+ $(KBUILDROOT)common/ump_kernel_common.o \
+ $(KBUILDROOT)common/ump_kernel_descriptor_mapping.o \
+ $(KBUILDROOT)common/ump_kernel_api.o \
+ $(KBUILDROOT)common/ump_kernel_ref_drv.o
+
+EXTRA_CFLAGS += $(INCLUDES) \
+ $(DEFINES)
+
+
+# Get subversion revision number, fall back to 0000 if no svn info is available
+SVN_REV:=$(shell ((svnversion | grep -qv exported && echo -n 'Revision: ' && svnversion) || git svn info | sed -e 's/$$$$/M/' | grep '^Revision: ' || echo ${MALI_RELEASE_NAME}) 2>/dev/null | sed -e 's/^Revision: //')
+
+EXTRA_CFLAGS += -DSVN_REV=$(SVN_REV)
+EXTRA_CFLAGS += -DSVN_REV_STRING=\"$(SVN_REV)\"
+
diff --git a/drivers/media/video/samsung/ump/arch b/drivers/media/video/samsung/ump/arch
new file mode 120000
index 0000000..a65a3fc
--- /dev/null
+++ b/drivers/media/video/samsung/ump/arch
@@ -0,0 +1 @@
+./arch-orion-m400 \ No newline at end of file
diff --git a/drivers/media/video/samsung/ump/arch-marcopolo-vega1-m400/config.h b/drivers/media/video/samsung/ump/arch-marcopolo-vega1-m400/config.h
new file mode 100644
index 0000000..014c4bb
--- /dev/null
+++ b/drivers/media/video/samsung/ump/arch-marcopolo-vega1-m400/config.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 0	/* 0 = dedicated-memory backend (see USING_MEMORY in the UMP Makefile) */
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0x2C000000	/* base of the dedicated pool */
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 0x04000000	/* 64 MiB */
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/media/video/samsung/ump/arch-marcopolo-vega1-m400/config.h.org b/drivers/media/video/samsung/ump/arch-marcopolo-vega1-m400/config.h.org
new file mode 100755
index 0000000..c92a32a
--- /dev/null
+++ b/drivers/media/video/samsung/ump/arch-marcopolo-vega1-m400/config.h.org
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Configuration for the EB platform with ZBT memory enabled */
+#define MALI_BASE_ADDR 0xf0000000
+#define GP_ADDR MALI_BASE_ADDR
+#define L2_ADDR MALI_BASE_ADDR+0x1000
+#define PMU_ADDR MALI_BASE_ADDR+0x2000
+#define GP_MMU_ADDR MALI_BASE_ADDR+0x3000
+#define PP_MMU_ADDR MALI_BASE_ADDR+0x4000
+#define PP_ADDR MALI_BASE_ADDR+0x8000
+
+// See page 3-11 in the TRM; it describes the control register address map. (cglee)
+
+static _mali_osk_resource_t arch_configuration [] =	/* Per-board Mali-400 (1 GP + 1 PP) resource table for the OSK layer */
+{
+	{
+		.type = MALI400GP,
+		.description = "Mali-400 GP",
+		.base = GP_ADDR,
+		.irq = 18+32,	/* NOTE(review): +32 looks like the GIC SPI offset — confirm */
+		.mmu_id = 1
+	},
+	{
+		.type = MALI400PP,
+		.base = PP_ADDR,
+		.irq = 16+32,
+		.description = "Mali-400 PP 0",
+		.mmu_id = 2
+	},
+#if USING_MMU
+	{
+		.type = MMU,
+		.base = GP_MMU_ADDR,
+		.irq = 19+32,
+		.description = "Mali-400 MMU for GP",
+		.mmu_id = 1
+	},
+	{
+		.type = MMU,
+		.base = PP_MMU_ADDR,
+		.irq = 17+32,
+		.description = "Mali-400 MMU for PP 0",
+		.mmu_id = 2
+	},
+	{
+		.type = OS_MEMORY,
+		.description = "System Memory",
+		.size = 0x06000000,	/* 96 MiB */
+		.flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE
+	},
+	{
+		.type = MEM_VALIDATION,
+		.description = "memory validation",
+		.base = 0x204e0000,
+		.size = 0x7B20000,
+		.flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE |
+		_MALI_MMU_READABLE | _MALI_MMU_WRITEABLE
+	},
+#else
+	{
+		.type = MEMORY,
+		.description = "Dedicated Memory",
+		.base = 0x2E000000,
+		.size = 0x02000000,	/* 32 MiB */
+		.flags = _MALI_CPU_WRITEABLE | _MALI_CPU_READABLE | _MALI_PP_READABLE | _MALI_PP_WRITEABLE | _MALI_GP_READABLE | _MALI_GP_WRITEABLE |
+		_MALI_MMU_READABLE | _MALI_MMU_WRITEABLE
+	},
+#endif
+	{
+		.type = MALI400L2,
+		.base = L2_ADDR,
+		.description = "Mali-400 L2 cache"
+	},
+};
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/media/video/samsung/ump/arch-orion-m400/config.h b/drivers/media/video/samsung/ump/arch-orion-m400/config.h
new file mode 100644
index 0000000..117cc6e
--- /dev/null
+++ b/drivers/media/video/samsung/ump/arch-orion-m400/config.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_UMP_H__
+#define __ARCH_CONFIG_UMP_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT USING_MEMORY	/* backend id injected via -D from the UMP Makefile */
+#if (USING_MEMORY == 0) /* Dedicated Memory */
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0x2C000000
+#else
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0
+#endif
+
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT (UMP_MEM_SIZE*1024*1024)	/* parenthesized so the macro expands safely inside expressions */
+#endif /* __ARCH_CONFIG_UMP_H__ */
diff --git a/drivers/media/video/samsung/ump/arch-pb-virtex5/config.h b/drivers/media/video/samsung/ump/arch-pb-virtex5/config.h
new file mode 100644
index 0000000..560eda9
--- /dev/null
+++ b/drivers/media/video/samsung/ump/arch-pb-virtex5/config.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 0	/* 0 = dedicated-memory backend */
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0xC8000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT (32UL * 1024UL * 1024UL)	/* parenthesized so the macro expands safely inside expressions */
+
+#endif /* __ARCH_CONFIG_H__ */
diff --git a/drivers/media/video/samsung/ump/common/ump_kernel_api.c b/drivers/media/video/samsung/ump/common/ump_kernel_api.c
new file mode 100644
index 0000000..ddc9ef7
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_kernel_api.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_common.h"
+
+
+
+/* ---------------- UMP kernel space API functions follows ---------------- */
+
+
+
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
+{	/* Return the global secure ID of a UMP allocation handle. */
+	ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+	DEBUG_ASSERT_POINTER(mem);
+
+	DBG_MSG(5, ("Returning secure ID. ID: %u\n", mem->secure_id));
+
+	return mem->secure_id;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
+{	/* Look up an allocation by secure ID and take a reference on it.
+	 * Caller must balance with ump_dd_reference_release(). */
+	ump_dd_mem * mem;
+
+	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+	DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
+	if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+	{
+		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+		DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
+		return UMP_DD_HANDLE_INVALID;
+	}
+
+	ump_dd_reference_add(mem);	/* keep the allocation alive for the caller */
+
+	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+	return (ump_dd_handle)mem;
+}
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_get(ump_secure_id secure_id)
+{	/* Look up an allocation by secure ID WITHOUT taking a reference. */
+	ump_dd_mem * mem;
+
+	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+	DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
+	if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+	{
+		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+		DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
+		return UMP_DD_HANDLE_INVALID;
+	}
+
+	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);	/* NOTE(review): no ump_dd_reference_add() here (unlike create_from_secure_id); the handle may be freed concurrently — confirm callers hold their own reference */
+
+	return (ump_dd_handle)mem;
+}
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
+{	/* Return the number of physical blocks backing the allocation. */
+	ump_dd_mem * mem = (ump_dd_mem*) memh;
+
+	DEBUG_ASSERT_POINTER(mem);
+
+	return mem->nr_blocks;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
+{	/* Copy the full physical block list into a caller-supplied array.
+	 * num_blocks must equal ump_dd_phys_block_count_get(memh). */
+	ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+	DEBUG_ASSERT_POINTER(mem);
+
+	if (blocks == NULL)
+	{
+		DBG_MSG(1, ("NULL parameter in ump_dd_phys_blocks_get()\n"));
+		return UMP_DD_INVALID;
+	}
+
+	if (mem->nr_blocks != num_blocks)
+	{
+		DBG_MSG(1, ("Specified number of blocks do not match actual number of blocks\n"));
+		return UMP_DD_INVALID;
+	}
+
+	DBG_MSG(5, ("Returning physical block information. ID: %u\n", mem->secure_id));
+
+	_mali_osk_memcpy(blocks, mem->block_array, sizeof(ump_dd_physical_block) * mem->nr_blocks);
+
+	return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
+{	/* Copy a single physical block descriptor by index. */
+	ump_dd_mem * mem = (ump_dd_mem *)memh;
+
+	DEBUG_ASSERT_POINTER(mem);
+
+	if (block == NULL)
+	{
+		DBG_MSG(1, ("NULL parameter in ump_dd_phys_block_get()\n"));
+		return UMP_DD_INVALID;
+	}
+
+	if (index >= mem->nr_blocks)
+	{
+		DBG_MSG(5, ("Invalid index specified in ump_dd_phys_block_get()\n"));
+		return UMP_DD_INVALID;
+	}
+
+	DBG_MSG(5, ("Returning physical block information. ID: %u, index: %lu\n", mem->secure_id, index));
+
+	*block = mem->block_array[index];
+
+	return UMP_DD_SUCCESS;
+}
+
+
+
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
+{	/* Return the allocation size in bytes. */
+	ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+	DEBUG_ASSERT_POINTER(mem);
+
+	DBG_MSG(5, ("Returning size. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+	return mem->size_bytes;
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
+{	/* Increment the allocation's reference count (atomic; no lock needed). */
+	ump_dd_mem * mem = (ump_dd_mem*)memh;
+	int new_ref;
+
+	DEBUG_ASSERT_POINTER(mem);
+
+	new_ref = _ump_osk_atomic_inc_and_read(&mem->ref_count);
+
+	DBG_MSG(4, ("Memory reference incremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+}
+
+
+
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
+{	/* Drop a reference; on the final release, unmap the secure ID and free the memory. */
+	int new_ref;
+	ump_dd_mem * mem = (ump_dd_mem*)memh;
+
+	DEBUG_ASSERT_POINTER(mem);
+
+	/* We must hold this mutex while doing the atomic_dec_and_read, to ensure
+	   that elements in the ump_descriptor_mapping table are always valid. If they
+	   are not, userspace may accidentally map in this secure_id right before it is
+	   freed, giving a mapped backdoor into unallocated memory. */
+	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+	new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
+
+	DBG_MSG(4, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
+
+	if (0 == new_ref)
+	{
+		DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
+
+		ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
+
+		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+		mem->release_func(mem->ctx, mem);	/* backend-specific teardown of the physical blocks */
+		_mali_osk_free(mem);
+	}
+	else
+	{
+		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+	}
+}
+
+
+
+/* --------------- Handling of user space requests follows --------------- */
+
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
+{	/* Negotiate the user/kernel ioctl API version for this session. */
+	ump_session_data * session_data;
+
+	DEBUG_ASSERT_POINTER( args );
+	DEBUG_ASSERT_POINTER( args->ctx );
+
+	session_data = (ump_session_data *)args->ctx;
+
+	/* check compatibility */
+	if (args->version == UMP_IOCTL_API_VERSION)
+	{
+		DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
+		args->compatible = 1;
+		session_data->api_version = args->version;
+	}
+	else if (args->version == MAKE_VERSION_ID(1))
+	{
+		DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
+		args->compatible = 1;
+		session_data->api_version = args->version;
+	}
+	else
+	{
+		DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+		args->compatible = 0;
+		args->version = UMP_IOCTL_API_VERSION; /* report our version */
+	}
+
+	return _MALI_OSK_ERR_OK;
+}
+
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
+{	/* ioctl handler: drop a session's reference to the given secure ID. */
+	ump_session_memory_list_element * session_memory_element;
+	ump_session_memory_list_element * tmp;
+	ump_session_data * session_data;
+	_mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
+	int secure_id;
+
+	DEBUG_ASSERT_POINTER( release_info );
+	DEBUG_ASSERT_POINTER( release_info->ctx );
+
+	/* Retrieve the session data */
+	session_data = (ump_session_data*)release_info->ctx;
+
+	/* If there are many items in the memory session list we
+	 * could be de-referencing this pointer a lot so keep a local copy
+	 */
+	secure_id = release_info->secure_id;
+
+	DBG_MSG(4, ("Releasing memory with IOCTL, ID: %u\n", secure_id));
+
+	/* Iterate through the memory list looking for the requested secure ID */
+	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+	_MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+	{
+		if ( session_memory_element->mem->secure_id == secure_id)
+		{
+			ump_dd_mem *release_mem;
+
+			release_mem = session_memory_element->mem;
+			_mali_osk_list_del(&session_memory_element->list);
+			ump_dd_reference_release(release_mem);	/* may free the allocation on last ref */
+			_mali_osk_free(session_memory_element);
+
+			ret = _MALI_OSK_ERR_OK;
+			break;
+		}
+	}
+
+	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+	DBG_MSG_IF(1, _MALI_OSK_ERR_OK != ret, ("UMP memory with ID %u does not belong to this session.\n", secure_id));
+
+	DBG_MSG(4, ("_ump_ukk_release() returning 0x%x\n", ret));
+	return ret;
+}
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
+{	/* ioctl handler: report the size of the allocation behind a secure ID. */
+	ump_dd_mem * mem;
+	_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+	DEBUG_ASSERT_POINTER( user_interaction );
+
+	/* We lock the mappings so things don't get removed while we are looking for the memory */
+	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+	if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem))
+	{
+		user_interaction->size = mem->size_bytes;
+		DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
+		ret = _MALI_OSK_ERR_OK;
+	}
+	else
+	{
+		user_interaction->size = 0;	/* unknown ID: report zero size */
+		DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
+	}
+
+	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+	return ret;
+}
+
+
+
+void _ump_ukk_msync( _ump_uk_msync_s *args )
+{	/* ioctl handler: cache maintenance (flush/clean) on a UMP allocation. */
+	ump_dd_mem * mem = NULL;
+	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+	ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+	if (NULL==mem)
+	{
+		DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
+		return;
+	}
+
+	/* Returns the cache settings back to Userspace */
+	args->is_cached=mem->is_cached;
+
+	/* If this flag is the only one set, we should not do the actual flush, only the readout */
+	if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op )
+	{
+		DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
+		return;
+	}
+
+	/* Nothing to do if the memory is not cached */
+	if ( 0==mem->is_cached )
+	{
+		DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+		return ;
+	}
+	DBG_MSG(3, ("_ump_ukk_msync FLUSHING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
+
+	/* The actual cache flush - Implemented for each OS*/
+	_ump_osk_msync( mem , args->op, (u32)args->mapping, (u32)args->address, args->size);
+}
diff --git a/drivers/media/video/samsung/ump/common/ump_kernel_common.c b/drivers/media/video/samsung/ump/common/ump_kernel_common.c
new file mode 100644
index 0000000..e5ff198
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_kernel_common.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+/**
+ * Define the initial and maximum size of number of secure_ids on the system
+ */
+#define UMP_SECURE_ID_TABLE_ENTRIES_INITIAL (128 )
+#define UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM (4096 )
+
+
+/**
+ * Define the initial and maximum size of the ump_session_data::cookies_map,
+ * which is a \ref ump_descriptor_mapping. This limits how many secure_ids
+ * may be mapped into a particular process using _ump_ukk_map_mem().
+ */
+
+#define UMP_COOKIES_PER_SESSION_INITIAL (UMP_SECURE_ID_TABLE_ENTRIES_INITIAL )
+#define UMP_COOKIES_PER_SESSION_MAXIMUM (UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM)
+
+struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void)
+{
+ _mali_osk_errcode_t err;
+
+ /* Perform OS Specific initialization */
+ err = _ump_osk_init();
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Failed to initiaze the UMP Device Driver"));
+ return err;
+ }
+
+ /* Init the global device */
+ _mali_osk_memset(&device, 0, sizeof(device) );
+
+ /* Create the descriptor map, which will be used for mapping secure ID to ump_dd_mem structs */
+ device.secure_id_map_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0 , 0);
+ if (NULL == device.secure_id_map_lock)
+ {
+ MSG_ERR(("Failed to create OSK lock for secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ device.secure_id_map = ump_descriptor_mapping_create(UMP_SECURE_ID_TABLE_ENTRIES_INITIAL, UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM);
+ if (NULL == device.secure_id_map)
+ {
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ MSG_ERR(("Failed to create secure id lookup table\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Init memory backend */
+ device.backend = ump_memory_backend_create();
+ if (NULL == device.backend)
+ {
+ MSG_ERR(("Failed to create memory backend\n"));
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void ump_kernel_destructor(void)
+{
+ DEBUG_ASSERT_POINTER(device.secure_id_map);
+ DEBUG_ASSERT_POINTER(device.secure_id_map_lock);
+
+ _mali_osk_lock_term(device.secure_id_map_lock);
+ device.secure_id_map_lock = NULL;
+
+ ump_descriptor_mapping_destroy(device.secure_id_map);
+ device.secure_id_map = NULL;
+
+ device.backend->shutdown(device.backend);
+ device.backend = NULL;
+
+ ump_memory_backend_destroy();
+
+ _ump_osk_term();
+}
+
+/** Creates a new UMP session
+ */
+_mali_osk_errcode_t _ump_ukk_open( void** context )
+{
+ struct ump_session_data * session_data;
+
+ /* allocated struct to track this session */
+ session_data = (struct ump_session_data *)_mali_osk_malloc(sizeof(struct ump_session_data));
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Failed to allocate ump_session_data in ump_file_open()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, 0);
+ if( NULL == session_data->lock )
+ {
+ MSG_ERR(("Failed to initialize lock for ump_session_data in ump_file_open()\n"));
+ _mali_osk_free(session_data);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ session_data->cookies_map = ump_descriptor_mapping_create( UMP_COOKIES_PER_SESSION_INITIAL, UMP_COOKIES_PER_SESSION_MAXIMUM );
+
+ if ( NULL == session_data->cookies_map )
+ {
+ MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
+
+ _mali_osk_lock_term( session_data->lock );
+ _mali_osk_free( session_data );
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_list);
+
+ _MALI_OSK_INIT_LIST_HEAD(&session_data->list_head_session_memory_mappings_list);
+
+ /* Since initial version of the UMP interface did not use the API_VERSION ioctl we have to assume
+ that it is this version, and not the "latest" one: UMP_IOCTL_API_VERSION
+ Current and later API versions would do an additional call to this IOCTL and update this variable
+ to the correct one.*/
+ session_data->api_version = MAKE_VERSION_ID(1);
+
+ *context = (void*)session_data;
+
+ DBG_MSG(2, ("New session opened\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_close( void** context )
+{
+ struct ump_session_data * session_data;
+ ump_session_memory_list_element * item;
+ ump_session_memory_list_element * tmp;
+
+ session_data = (struct ump_session_data *)*context;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_close()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ /* Unmap any descriptors mapped in. */
+ if (0 == _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list))
+ {
+ ump_memory_allocation *descriptor;
+ ump_memory_allocation *temp;
+
+ DBG_MSG(1, ("Memory mappings found on session usage list during session termination\n"));
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->list_head_session_memory_mappings_list, ump_memory_allocation, list)
+ {
+ _ump_uk_unmap_mem_s unmap_args;
+ DBG_MSG(4, ("Freeing block with phys address 0x%x size 0x%x mapped in user space at 0x%x\n",
+ descriptor->phys_addr, descriptor->size, descriptor->mapping));
+ unmap_args.ctx = (void*)session_data;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.size = descriptor->size;
+ unmap_args._ukk_private = NULL; /* NOTE: unused */
+ unmap_args.cookie = descriptor->cookie;
+
+ /* NOTE: This modifies the list_head_session_memory_mappings_list */
+ _ump_ukk_unmap_mem( &unmap_args );
+ }
+ }
+
+ /* ASSERT that we really did free everything, because _ump_ukk_unmap_mem()
+ * can fail silently. */
+ DEBUG_ASSERT( _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list) );
+
+ _MALI_OSK_LIST_FOREACHENTRY(item, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list)
+ {
+ _mali_osk_list_del(&item->list);
+ DBG_MSG(2, ("Releasing UMP memory %u as part of file close\n", item->mem->secure_id));
+ ump_dd_reference_release(item->mem);
+ _mali_osk_free(item);
+ }
+
+ ump_descriptor_mapping_destroy( session_data->cookies_map );
+
+ _mali_osk_lock_term(session_data->lock);
+ _mali_osk_free(session_data);
+
+ DBG_MSG(2, ("Session closed\n"));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor; /* Describes current mapping of memory */
+ _mali_osk_errcode_t err;
+ unsigned long offset = 0;
+ unsigned long left;
+ ump_dd_handle handle; /* The real UMP handle for this memory. Its real datatype is ump_dd_mem* */
+ ump_dd_mem * mem; /* The real UMP memory. It is equal to the handle, but with exposed struct */
+ u32 block;
+ int map_id;
+
+ session_data = (ump_session_data *)args->ctx;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ /* SEC kernel stability 2012-02-17 */
+ if (NULL == session_data->cookies_map)
+ {
+ MSG_ERR(("session_data->cookies_map is NULL in _ump_ukk_map_mem()\n"));
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ descriptor = (ump_memory_allocation*) _mali_osk_calloc( 1, sizeof(ump_memory_allocation));
+ if (NULL == descriptor)
+ {
+ MSG_ERR(("ump_ukk_map_mem: descriptor allocation failed\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ handle = ump_dd_handle_create_from_secure_id(args->secure_id);
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ _mali_osk_free(descriptor);
+ DBG_MSG(1, ("Trying to map unknown secure ID %u\n", args->secure_id));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mem = (ump_dd_mem*)handle;
+ DEBUG_ASSERT(mem);
+ if (mem->size_bytes != args->size)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("Trying to map too much or little. ID: %u, virtual size=%lu, UMP size: %lu\n", args->secure_id, args->size, mem->size_bytes));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ map_id = ump_descriptor_mapping_allocate_mapping( session_data->cookies_map, (void*) descriptor );
+
+ if (map_id < 0)
+ {
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(handle);
+ DBG_MSG(1, ("ump_ukk_map_mem: unable to allocate a descriptor_mapping for return cookie\n"));
+
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ descriptor->size = args->size;
+ descriptor->handle = handle;
+ descriptor->phys_addr = args->phys_addr;
+ descriptor->process_mapping_info = args->_ukk_private;
+ descriptor->ump_session = session_data;
+ descriptor->cookie = (u32)map_id;
+
+ if ( mem->is_cached )
+ {
+ descriptor->is_cached = 1;
+ args->is_cached = 1;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
+ }
+ else if ( args->is_cached)
+ {
+ mem->is_cached = 1;
+ descriptor->is_cached = 1;
+ DBG_MSG(3, ("Warning mapping UMP secure_id: %d. As cached, while it was allocated uncached.\n", args->secure_id));
+ }
+ else
+ {
+ descriptor->is_cached = 0;
+ args->is_cached = 0;
+ DBG_MSG(3, ("Mapping UMP secure_id: %d as Uncached.\n", args->secure_id));
+ }
+
+ _mali_osk_list_init( &descriptor->list );
+
+ err = _ump_osk_mem_mapregion_init( descriptor );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ DBG_MSG(1, ("Failed to initialize memory mapping in _ump_ukk_map_mem(). ID: %u\n", args->secure_id));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ _mali_osk_free(descriptor);
+ ump_dd_reference_release(mem);
+ return err;
+ }
+
+ DBG_MSG(4, ("Mapping virtual to physical memory: ID: %u, size:%lu, first physical addr: 0x%08lx, number of regions: %lu\n",
+ mem->secure_id,
+ mem->size_bytes,
+ ((NULL != mem->block_array) ? mem->block_array->addr : 0),
+ mem->nr_blocks));
+
+ left = descriptor->size;
+ /* loop over all blocks and map them in */
+ for (block = 0; block < mem->nr_blocks; block++)
+ {
+ unsigned long size_to_map;
+
+ if (left > mem->block_array[block].size)
+ {
+ size_to_map = mem->block_array[block].size;
+ }
+ else
+ {
+ size_to_map = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *)&(mem->block_array[block].addr), size_to_map ) )
+ {
+ DBG_MSG(1, ("WARNING: _ump_ukk_map_mem failed to map memory into userspace\n"));
+ ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ ump_dd_reference_release(mem);
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ left -= size_to_map;
+ offset += size_to_map;
+ }
+
+ /* Add to the ump_memory_allocation tracking list */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_add( &descriptor->list, &session_data->list_head_session_memory_mappings_list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->mapping = descriptor->mapping;
+ args->cookie = descriptor->cookie;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args )
+{
+ struct ump_session_data * session_data;
+ ump_memory_allocation * descriptor;
+ ump_dd_handle handle;
+
+ session_data = (ump_session_data *)args->ctx;
+
+ if (NULL == session_data)
+ {
+ MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
+ return;
+ }
+
+ /* SEC kernel stability 2012-02-17 */
+ if (NULL == session_data->cookies_map)
+ {
+ MSG_ERR(("session_data->cookies_map is NULL in _ump_ukk_map_mem()\n"));
+ return;
+ }
+
+ if (0 != ump_descriptor_mapping_get( session_data->cookies_map, (int)args->cookie, (void**)&descriptor) )
+ {
+ MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie ));
+ return;
+ }
+
+ DEBUG_ASSERT_POINTER(descriptor);
+
+ handle = descriptor->handle;
+ if ( UMP_DD_HANDLE_INVALID == handle)
+ {
+ DBG_MSG(1, ("WARNING: Trying to unmap unknown handle: UNKNOWN\n"));
+ return;
+ }
+
+ /* Remove the ump_memory_allocation from the list of tracked mappings */
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_del( &descriptor->list );
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ ump_descriptor_mapping_free( session_data->cookies_map, (int)args->cookie );
+
+ ump_dd_reference_release(handle);
+
+ _ump_osk_mem_mapregion_term( descriptor );
+ _mali_osk_free(descriptor);
+}
+
+u32 _ump_ukk_report_memory_usage( void )
+{
+ if(device.backend->stat)
+ return device.backend->stat(device.backend);
+ else
+ return 0;
+}
diff --git a/drivers/media/video/samsung/ump/common/ump_kernel_common.h b/drivers/media/video/samsung/ump/common/ump_kernel_common.h
new file mode 100644
index 0000000..c8b5541
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_kernel_common.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_H__
+#define __UMP_KERNEL_H__
+
+#include "ump_kernel_types.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+
+
+#ifdef DEBUG
+ extern int ump_debug_level;
+ #define UMP_DEBUG_PRINT(args) _mali_osk_dbgmsg args
+ #define UMP_DEBUG_CODE(args) args
+ #define DBG_MSG(level,args) do { /* args should be in brackets */ \
+ ((level) <= ump_debug_level)?\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")), \
+ UMP_DEBUG_PRINT(args):0; \
+ } while (0)
+
+ #define DBG_MSG_IF(level,condition,args) /* args should be in brackets */ \
+ if((condition)&&((level) <= ump_debug_level)) {\
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DBG_MSG_ELSE(level,args) /* args should be in brackets */ \
+ else if((level) <= ump_debug_level) { \
+ UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
+ UMP_DEBUG_PRINT(args); \
+ }
+
+ #define DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0)
+ #define DEBUG_ASSERT(condition) do {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0)
+#else /* DEBUG */
+ #define UMP_DEBUG_PRINT(args) do {} while(0)
+ #define UMP_DEBUG_CODE(args)
+ #define DBG_MSG(level,args) do {} while(0)
+ #define DBG_MSG_IF(level,condition,args) do {} while(0)
+ #define DBG_MSG_ELSE(level,args) do {} while(0)
+ #define DEBUG_ASSERT(condition) do {} while(0)
+ #define DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#endif /* DEBUG */
+
+#define MSG_ERR(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
+ _mali_osk_dbgmsg( " %s()%4d\n", __FUNCTION__, __LINE__) ; \
+ _mali_osk_dbgmsg args ; \
+ _mali_osk_dbgmsg("\n"); \
+ } while(0)
+
+#define MSG(args) do{ /* args should be in brackets */ \
+ _mali_osk_dbgmsg("UMP: "); \
+ _mali_osk_dbgmsg args; \
+ } while (0)
+
+
+
+/*
+ * This struct is used to store per session data.
+ * A session is created when someone open() the device, and
+ * closed when someone close() it or the user space application terminates.
+ */
+typedef struct ump_session_data
+{
+ _mali_osk_list_t list_head_session_memory_list; /**< List of ump allocations made by the process (elements are ump_session_memory_list_element) */
+ _mali_osk_list_t list_head_session_memory_mappings_list; /**< List of ump_memory_allocations mapped in */
+ int api_version;
+ _mali_osk_lock_t * lock;
+ ump_descriptor_mapping * cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
+} ump_session_data;
+
+
+
+/*
+ * This struct is used to track the UMP memory references a session has.
+ * We need to track this in order to be able to clean up after user space processes
+ * which don't do it themself (e.g. due to a crash or premature termination).
+ */
+typedef struct ump_session_memory_list_element
+{
+ struct ump_dd_mem * mem;
+ _mali_osk_list_t list;
+} ump_session_memory_list_element;
+
+
+
+/*
+ * Device specific data, created when device driver is loaded, and then kept as the global variable device.
+ */
+typedef struct ump_dev
+{
+ _mali_osk_lock_t * secure_id_map_lock;
+ ump_descriptor_mapping * secure_id_map;
+ ump_memory_backend * backend;
+} ump_dev;
+
+
+
+extern int ump_debug_level;
+extern struct ump_dev device;
+
+_mali_osk_errcode_t ump_kernel_constructor(void);
+void ump_kernel_destructor(void);
+int map_errcode( _mali_osk_errcode_t err );
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/media/video/samsung/ump/common/ump_kernel_descriptor_mapping.c b/drivers/media/video/samsung/ump/common/ump_kernel_descriptor_mapping.c
new file mode 100644
index 0000000..5174839
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_kernel_descriptor_mapping.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static ump_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(ump_descriptor_table * table);
+
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ ump_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping) );
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+ map->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_READERWRITER, 0 , 0);
+ if ( NULL != map->lock )
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term( map->lock );
+ _mali_osk_free(map);
+}
+
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target)
+{
+ int descriptor = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (descriptor == map->current_nr_mappings)
+ {
+ int nr_mappings_new;
+ /* no free descriptor, try to expand the table */
+ ump_descriptor_table * new_table;
+ ump_descriptor_table * old_table = map->table;
+ nr_mappings_new= map->current_nr_mappings *2;
+
+ if (map->current_nr_mappings >= map->max_nr_mappings_allowed)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ new_table = descriptor_table_alloc(nr_mappings_new);
+ if (NULL == new_table)
+ {
+ descriptor = -1;
+ goto unlock_and_exit;
+ }
+
+ _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+ map->table = new_table;
+ map->current_nr_mappings = nr_mappings_new;
+ descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(descriptor, map->table->usage);
+ map->table->mappings[descriptor] = target;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ return descriptor;
+}
+
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target)
+{
+ int result = -1;/*-EFAULT;*/
+ DEBUG_ASSERT(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = 0;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target)
+{
+ int result = -1;/*-EFAULT;*/
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = 0;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ return result;
+}
+
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor)
+{
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static ump_descriptor_table * descriptor_table_alloc(int count)
+{
+ ump_descriptor_table * table;
+
+ table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count) );
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(ump_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(ump_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
+
diff --git a/drivers/media/video/samsung/ump/common/ump_kernel_descriptor_mapping.h b/drivers/media/video/samsung/ump/common/ump_kernel_descriptor_mapping.h
new file mode 100644
index 0000000..319cc3b
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_kernel_descriptor_mapping.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_descriptor_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __UMP_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct ump_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} ump_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ ump_descriptor_table * table; /**< Pointer to the current mapping table */
+} ump_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries growable to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Number of entries to max support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @return The descriptor allocated, a negative value on error
+ */
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ */
+void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor);
+
+#endif /* __UMP_KERNEL_DESCRIPTOR_MAPPING_H__ */
diff --git a/drivers/media/video/samsung/ump/common/ump_kernel_memory_backend.h b/drivers/media/video/samsung/ump/common/ump_kernel_memory_backend.h
new file mode 100644
index 0000000..d329bb5
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_kernel_memory_backend.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_H__
+
+#include "ump_kernel_interface.h"
+#include "ump_kernel_types.h"
+
+
+typedef struct ump_memory_allocation
+{
+ void * phys_addr;
+ void * mapping;
+ unsigned long size;
+ ump_dd_handle handle;
+ void * process_mapping_info;
+ u32 cookie; /**< necessary on some U/K interface implementations */
+ struct ump_session_data * ump_session; /**< Session that this allocation belongs to */
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+ u32 is_cached;
+} ump_memory_allocation;
+
+typedef struct ump_memory_backend
+{
+ int (*allocate)(void* ctx, ump_dd_mem * descriptor);
+ void (*release)(void* ctx, ump_dd_mem * descriptor);
+ void (*shutdown)(struct ump_memory_backend * backend);
+ u32 (*stat)(struct ump_memory_backend *backend);
+ int (*pre_allocate_physical_check)(void *ctx, u32 size);
+ u32 (*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
+ void *(*get)(ump_dd_mem *mem, void *args);
+ void (*set)(ump_dd_mem *mem, void *args);
+ void * ctx;
+} ump_memory_backend;
+
+ump_memory_backend * ump_memory_backend_create ( void );
+void ump_memory_backend_destroy( void );
+
+#endif /*__UMP_KERNEL_MEMORY_BACKEND_H__ */
+
diff --git a/drivers/media/video/samsung/ump/common/ump_kernel_ref_drv.c b/drivers/media/video/samsung/ump/common/ump_kernel_ref_drv.c
new file mode 100644
index 0000000..4dcbe21
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_kernel_ref_drv.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_descriptor_mapping.h"
+
+#define UMP_MINIMUM_SIZE 4096
+#define UMP_MINIMUM_SIZE_MASK (~(UMP_MINIMUM_SIZE-1))
+#define UMP_SIZE_ALIGN(x) (((x)+UMP_MINIMUM_SIZE-1)&UMP_MINIMUM_SIZE_MASK)
+#define UMP_ADDR_ALIGN_OFFSET(x) ((x)&(UMP_MINIMUM_SIZE-1))
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor);
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
+{
+ ump_dd_mem * mem;
+ unsigned long size_total = 0;
+ int map_id;
+ u32 i;
+
+ /* Go through the input blocks and verify that they are sane */
+ for (i=0; i < num_blocks; i++)
+ {
+ unsigned long addr = blocks[i].addr;
+ unsigned long size = blocks[i].size;
+
+ DBG_MSG(5, ("Adding physical memory to new handle. Address: 0x%08lx, size: %lu\n", addr, size));
+ size_total += blocks[i].size;
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(addr))
+ {
+ MSG_ERR(("Trying to create UMP memory from unaligned physical address. Address: 0x%08lx\n", addr));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ if (0 != UMP_ADDR_ALIGN_OFFSET(size))
+ {
+ MSG_ERR(("Trying to create UMP memory with unaligned size. Size: %lu\n", size));
+ return UMP_DD_HANDLE_INVALID;
+ }
+ }
+
+ /* Allocate the ump_dd_mem struct for this allocation */
+ mem = _mali_osk_malloc(sizeof(*mem));
+ if (NULL == mem)
+ {
+ DBG_MSG(1, ("Could not allocate ump_dd_mem in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Find a secure ID for this allocation */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);
+
+ if (map_id < 0)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ /* Now, make a copy of the block information supplied by the user */
+ mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
+ if (NULL == mem->block_array)
+ {
+ ump_descriptor_mapping_free(device.secure_id_map, map_id);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ _mali_osk_memcpy(mem->block_array, blocks, sizeof(ump_dd_physical_block) * num_blocks);
+
+ /* And setup the rest of the ump_dd_mem struct */
+ _mali_osk_atomic_init(&mem->ref_count, 1);
+ mem->secure_id = (ump_secure_id)map_id;
+ mem->size_bytes = size_total;
+ mem->nr_blocks = num_blocks;
+ mem->backend_info = NULL;
+ mem->ctx = NULL;
+ mem->release_func = phys_blocks_release;
+ /* For now UMP handles created by ump_dd_handle_create_from_phys_blocks() is forced to be Uncached */
+ mem->is_cached = 0;
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
+
+ return (ump_dd_handle)mem;
+}
+
+static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor)
+{
+ _mali_osk_free(descriptor->block_array);
+ descriptor->block_array = NULL;
+}
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
+{
+ ump_session_data * session_data = NULL;
+ ump_dd_mem *new_allocation = NULL;
+ ump_session_memory_list_element * session_memory_element = NULL;
+ int map_id;
+
+ DEBUG_ASSERT_POINTER( user_interaction );
+ DEBUG_ASSERT_POINTER( user_interaction->ctx );
+
+ session_data = (ump_session_data *) user_interaction->ctx;
+
+ session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
+ if (NULL == session_memory_element)
+ {
+ DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+
+ new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
+ if (NULL==new_allocation)
+ {
+ _mali_osk_free(session_memory_element);
+ DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Create a secure ID for this allocation */
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);
+
+ if (map_id < 0)
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(session_memory_element);
+ _mali_osk_free(new_allocation);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
+ return _MALI_OSK_ERR_INVALID_FUNC; /* return the errcode unnegated, matching the backend-allocate failure path below */
+ }
+
+ /* Initialize the part of the new_allocation that we know so far */
+ new_allocation->secure_id = (ump_secure_id)map_id;
+ _mali_osk_atomic_init(&new_allocation->ref_count,1);
+ if ( 0==(UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints) )
+ new_allocation->is_cached = 0;
+ else new_allocation->is_cached = 1;
+
+ new_allocation->backend_info = (void*)user_interaction->constraints;
+
+ /* special case a size of 0, we should try to emulate what malloc does in this case, which is to return a valid pointer that must be freed, but can't be dereferenced */
+ if (0 == user_interaction->size)
+ {
+ user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
+ }
+
+ new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
+
+ /* Now, ask the active memory backend to do the actual memory allocation */
+ if (!device.backend->allocate( device.backend->ctx, new_allocation ) )
+ {
+ DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
+ ump_descriptor_mapping_free(device.secure_id_map, map_id);
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(new_allocation);
+ _mali_osk_free(session_memory_element);
+ return _MALI_OSK_ERR_INVALID_FUNC;
+ }
+
+ new_allocation->ctx = device.backend->ctx;
+ new_allocation->release_func = device.backend->release;
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* Initialize the session_memory_element, and add it to the session object */
+ session_memory_element->mem = new_allocation;
+ _mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
+ _mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+ user_interaction->secure_id = new_allocation->secure_id;
+ user_interaction->size = new_allocation->size_bytes;
+ DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));
+
+ return _MALI_OSK_ERR_OK;
+}
+
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_meminfo_set(ump_dd_handle memh, void* args)
+{
+ ump_dd_mem * mem;
+ ump_secure_id secure_id;
+
+ DEBUG_ASSERT_POINTER(memh);
+
+ secure_id = ump_dd_secure_id_get(memh);
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+ {
+ device.backend->set(mem, args);
+ }
+ else
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("Failed to look up mapping in ump_meminfo_set(). ID: %u\n", (ump_secure_id)secure_id));
+ return UMP_DD_INVALID;
+ }
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ return UMP_DD_SUCCESS;
+}
+
+UMP_KERNEL_API_EXPORT void *ump_dd_meminfo_get(ump_secure_id secure_id, void* args)
+{
+ ump_dd_mem * mem;
+ void *result;
+
+ _mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem))
+ {
+ result = device.backend->get(mem, args);
+ }
+ else
+ {
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+ DBG_MSG(1, ("Failed to look up mapping in ump_meminfo_get(). ID: %u\n", (ump_secure_id)secure_id));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
+ _mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_get_from_vaddr(unsigned long vaddr)
+{
+ ump_dd_mem * mem;
+
+ DBG_MSG(5, ("Getting handle from Virtual address. vaddr: %lu\n", vaddr));
+
+ _ump_osk_mem_mapregion_get(&mem, vaddr);
+
+ DBG_MSG(1, ("Getting handle's Handle : 0x%08lx\n", (unsigned long)mem));
+
+ return (ump_dd_handle)mem;
+}
+
diff --git a/drivers/media/video/samsung/ump/common/ump_kernel_types.h b/drivers/media/video/samsung/ump/common/ump_kernel_types.h
new file mode 100644
index 0000000..ca03dec
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_kernel_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_TYPES_H__
+#define __UMP_KERNEL_TYPES_H__
+
+#include "ump_kernel_interface.h"
+#include "mali_osk.h"
+
+/*
+ * This struct is what is "behind" a ump_dd_handle
+ */
+typedef struct ump_dd_mem
+{
+ ump_secure_id secure_id;
+ _mali_osk_atomic_t ref_count;
+ unsigned long size_bytes;
+ unsigned long nr_blocks;
+ ump_dd_physical_block * block_array;
+ void (*release_func)(void * ctx, struct ump_dd_mem * descriptor);
+ void * ctx;
+ void * backend_info;
+ int is_cached;
+} ump_dd_mem;
+
+
+
+#endif /* __UMP_KERNEL_TYPES_H__ */
diff --git a/drivers/media/video/samsung/ump/common/ump_osk.h b/drivers/media/video/samsung/ump/common/ump_osk.h
new file mode 100644
index 0000000..bd9254b
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_osk.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk.h
+ * Defines the OS abstraction layer for the UMP kernel device driver (OSK)
+ */
+
+#ifndef __UMP_OSK_H__
+#define __UMP_OSK_H__
+
+#include <mali_osk.h>
+#include <ump_kernel_memory_backend.h>
+#include <ump_uk_types.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+_mali_osk_errcode_t _ump_osk_init( void );
+
+_mali_osk_errcode_t _ump_osk_term( void );
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom );
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation *descriptor );
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size );
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor );
+
+void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op, u32 start, u32 address, u32 size);
+
+void _ump_osk_mem_mapregion_get( ump_dd_mem ** mem, unsigned long vaddr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/media/video/samsung/ump/common/ump_uk_types.h b/drivers/media/video/samsung/ump/common/ump_uk_types.h
new file mode 100644
index 0000000..2bac454
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_uk_types.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __UMP_UK_TYPES_H__
+#define __UMP_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/* Helpers for API version handling */
+#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * So for version 1 the value would be 0x00010001
+ */
+#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(2)
+
+typedef enum
+{
+ _UMP_IOC_QUERY_API_VERSION = 1,
+ _UMP_IOC_ALLOCATE,
+ _UMP_IOC_RELEASE,
+ _UMP_IOC_SIZE_GET,
+ _UMP_IOC_MAP_MEM, /* not used in Linux */
+ _UMP_IOC_UNMAP_MEM, /* not used in Linux */
+ _UMP_IOC_MSYNC,
+#ifdef CONFIG_ION_EXYNOS
+ _UMP_IOC_ION_IMPORT,
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ _UMP_IOC_DMABUF_IMPORT,
+#endif
+}_ump_uk_functions;
+
+typedef enum
+{
+ UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
+ UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+ UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 128,
+} ump_uk_alloc_constraints;
+
+typedef enum
+{
+ _UMP_UK_MSYNC_CLEAN = 0,
+ _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
+ _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
+} ump_uk_msync_op;
+
+/**
+ * Get API version ([in,out] u32 api_version, [out] u32 compatible)
+ */
+typedef struct _ump_uk_api_version_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 version; /**< Set to the user space version on entry, stores the device driver version on exit */
+ u32 compatible; /**< Non-null if the device is compatible with the client */
+} _ump_uk_api_version_s;
+
+/**
+ * ALLOCATE ([out] u32 secure_id, [in,out] u32 size, [in] constraints)
+ */
+typedef struct _ump_uk_allocate_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Return value from DD to Userdriver */
+ u32 size; /**< Input and output. Requested size; input. Returned size; output */
+ ump_uk_alloc_constraints constraints; /**< Only input to Devicedriver */
+} _ump_uk_allocate_s;
+
+#ifdef CONFIG_ION_EXYNOS
+typedef struct _ump_uk_ion_import_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ int ion_fd;
+ u32 secure_id; /**< Return value from DD to Userdriver */
+ u32 size; /**< Input and output. Requested size; input. Returned size; output */
+ ump_uk_alloc_constraints constraints; /**< Only input to Devicedriver */
+} _ump_uk_ion_import_s;
+#endif
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+struct ump_uk_dmabuf {
+ void *ctx;
+ int fd;
+ size_t size;
+ uint32_t ump_handle;
+};
+#endif
+
+/**
+ * SIZE_GET ([in] u32 secure_id, [out]size )
+ */
+typedef struct _ump_uk_size_get_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+ u32 size; /**< Returned size; output */
+} _ump_uk_size_get_s;
+
+/**
+ * Release ([in] u32 secure_id)
+ */
+typedef struct _ump_uk_release_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+} _ump_uk_release_s;
+
+typedef struct _ump_uk_map_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ void *phys_addr; /**< [in] physical address */
+ unsigned long size; /**< [in] size */
+ u32 secure_id; /**< [in] secure_id to assign to mapping */
+ void * _ukk_private; /**< Only used inside linux port between kernel frontend and common part to store vma */
+ u32 cookie;
+ u32 is_cached; /**< [in,out] caching of CPU mappings */
+} _ump_uk_map_mem_s;
+
+typedef struct _ump_uk_unmap_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping;
+ u32 size;
+ void * _ukk_private;
+ u32 cookie;
+} _ump_uk_unmap_mem_s;
+
+typedef struct _ump_uk_msync_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] mapping addr */
+ void *address; /**< [in] flush start addr */
+ u32 size; /**< [in] size to flush */
+ ump_uk_msync_op op; /**< [in] flush operation */
+ u32 cookie; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 secure_id; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 is_cached; /**< [out] caching of CPU mappings */
+} _ump_uk_msync_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UK_TYPES_H__ */
diff --git a/drivers/media/video/samsung/ump/common/ump_ukk.h b/drivers/media/video/samsung/ump/common/ump_ukk.h
new file mode 100644
index 0000000..db48cd6
--- /dev/null
+++ b/drivers/media/video/samsung/ump/common/ump_ukk.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __UMP_UKK_H__
+#define __UMP_UKK_H__
+
+#include "mali_osk.h"
+#include "ump_uk_types.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+_mali_osk_errcode_t _ump_ukk_open( void** context );
+
+_mali_osk_errcode_t _ump_ukk_close( void** context );
+
+_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info );
+
+_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction );
+
+_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args );
+
+_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
+
+void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args );
+
+void _ump_ukk_msync( _ump_uk_msync_s *args );
+
+u32 _ump_ukk_report_memory_usage( void );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_H__ */
diff --git a/drivers/media/video/samsung/ump/include/ump_kernel_interface.h b/drivers/media/video/samsung/ump/include/ump_kernel_interface.h
new file mode 100644
index 0000000..ba81a07
--- /dev/null
+++ b/drivers/media/video/samsung/ump/include/ump_kernel_interface.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface.h
+ *
+ * This file contains the kernel space part of the UMP API.
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_H__
+#define __UMP_KERNEL_INTERFACE_H__
+
+
+/** @defgroup ump_kernel_space_api UMP Kernel Space API
+ * @{ */
+
+
+#include "ump_kernel_platform.h"
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/**
+ * External representation of a UMP handle in kernel space.
+ */
+typedef void * ump_dd_handle;
+
+/**
+ * Typedef for a secure ID, a system wide identificator for UMP memory buffers.
+ */
+typedef unsigned int ump_secure_id;
+
+
+/**
+ * Value to indicate an invalid UMP memory handle.
+ */
+#define UMP_DD_HANDLE_INVALID ((ump_dd_handle)0)
+
+
+/**
+ * Value to indicate an invalid secure Id.
+ */
+#define UMP_INVALID_SECURE_ID ((ump_secure_id)-1)
+
+
+/**
+ * UMP error codes for kernel space.
+ */
+typedef enum
+{
+ UMP_DD_SUCCESS, /**< indicates success */
+ UMP_DD_INVALID, /**< indicates failure */
+} ump_dd_status_code;
+
+
+/**
+ * Struct used to describe a physical block used by UMP memory
+ */
+typedef struct ump_dd_physical_block
+{
+ unsigned long addr; /**< The physical address of the block */
+ unsigned long size; /**< The length of the block, typically page aligned */
+} ump_dd_physical_block;
+
+
+/**
+ * Retrieves the secure ID for the specified UMP memory.
+ *
+ * This identificator is unique across the entire system, and uniquely identifies
+ * the specified UMP memory. This identificator can later be used through the
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id" or
+ * @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ * functions in order to access this UMP memory, for instance from another process.
+ *
+ * @note There is a user space equivalent function called @ref ump_secure_id_get "ump_secure_id_get"
+ *
+ * @see ump_dd_handle_create_from_secure_id
+ * @see ump_handle_create_from_secure_id
+ * @see ump_secure_id_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the secure ID for the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves a handle to allocated UMP memory.
+ *
+ * The usage of UMP memory is reference counted, so this will increment the reference
+ * count by one for the specified UMP memory.
+ * Use @ref ump_dd_reference_release "ump_dd_reference_release" when there is no longer any
+ * use for the retrieved handle.
+ *
+ * @note There is a user space equivalent function called @ref ump_handle_create_from_secure_id "ump_handle_create_from_secure_id"
+ *
+ * @see ump_dd_reference_release
+ * @see ump_handle_create_from_secure_id
+ *
+ * @param secure_id The secure ID of the UMP memory to open, that can be retrieved using the @ref ump_secure_id_get "ump_secure_id_get " function.
+ *
+ * @return UMP_INVALID_MEMORY_HANDLE indicates failure, otherwise a valid handle is returned.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id);
+
+
+/**
+ * Retrieves the number of physical blocks used by the specified UMP memory.
+ *
+ * This function retrieves the number of @ref ump_dd_physical_block "ump_dd_physical_block" structs needed
+ * to describe the physical memory layout of the given UMP memory. This can later be used when calling
+ * the functions @ref ump_dd_phys_blocks_get "ump_dd_phys_blocks_get" and
+ * @ref ump_dd_phys_block_get "ump_dd_phys_block_get".
+ *
+ * @see ump_dd_phys_blocks_get
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return The number of ump_dd_physical_block structs required to describe the physical memory layout of the specified UMP memory.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle mem);
+
+
+/**
+ * Retrieves all physical memory block information for specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will fail if the num_blocks parameter is either too large or too small.
+ *
+ * @see ump_dd_phys_block_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param blocks An array of @ref ump_dd_physical_block "ump_dd_physical_block" structs that will receive the physical description.
+ * @param num_blocks The number of blocks to return in the blocks array. Use the function
+ * @ref ump_dd_phys_block_count_get "ump_dd_phys_block_count_get" first to determine the number of blocks required.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle mem, ump_dd_physical_block * blocks, unsigned long num_blocks);
+
+
+/**
+ * Retrieves the physical memory block information for specified block for the specified UMP memory.
+ *
+ * This function can be used by other device drivers in order to create MMU tables.
+ *
+ * @note This function will return UMP_DD_INVALID if the specified index is out of range.
+ *
+ * @see ump_dd_phys_blocks_get
+ *
+ * @param mem Handle to UMP memory.
+ * @param index Which physical info block to retrieve.
+ * @param block Pointer to a @ref ump_dd_physical_block "ump_dd_physical_block" struct which will receive the requested information.
+ *
+ * @return UMP_DD_SUCCESS indicates success, UMP_DD_INVALID indicates failure.
+ */
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle mem, unsigned long index, ump_dd_physical_block * block);
+
+
+/**
+ * Retrieves the actual size of the specified UMP memory.
+ *
+ * The size is reported in bytes, and is typically page aligned.
+ *
+ * @note There is a user space equivalent function called @ref ump_size_get "ump_size_get"
+ *
+ * @see ump_size_get
+ *
+ * @param mem Handle to UMP memory.
+ *
+ * @return Returns the allocated size of the specified UMP memory, in bytes.
+ */
+UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle mem);
+
+
+/**
+ * Adds an extra reference to the specified UMP memory.
+ *
+ * This function adds an extra reference to the specified UMP memory. This function should
+ * be used every time a UMP memory handle is duplicated, that is, assigned to another ump_dd_handle
+ * variable. The function @ref ump_dd_reference_release "ump_dd_reference_release" must then be used
+ * to release each copy of the UMP memory handle.
+ *
+ * @note You are not required to call @ref ump_dd_reference_add "ump_dd_reference_add"
+ * for UMP handles returned from
+ * @ref ump_dd_handle_create_from_secure_id "ump_dd_handle_create_from_secure_id",
+ * because these handles are already reference counted by this function.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_add "ump_reference_add"
+ *
+ * @see ump_reference_add
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle mem);
+
+
+/**
+ * Releases a reference from the specified UMP memory.
+ *
+ * This function should be called once for every reference to the UMP memory handle.
+ * When the last reference is released, all resources associated with this UMP memory
+ * handle are freed.
+ *
+ * @note There is a user space equivalent function called @ref ump_reference_release "ump_reference_release"
+ *
+ * @see ump_reference_release
+ *
+ * @param mem Handle to UMP memory.
+ */
+UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle mem);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_INTERFACE_H__ */
diff --git a/drivers/media/video/samsung/ump/include/ump_kernel_interface_ref_drv.h b/drivers/media/video/samsung/ump/include/ump_kernel_interface_ref_drv.h
new file mode 100644
index 0000000..3efe165
--- /dev/null
+++ b/drivers/media/video/samsung/ump/include/ump_kernel_interface_ref_drv.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface_ref_drv.h
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_REF_DRV_H__
+#define __UMP_KERNEL_INTERFACE_REF_DRV_H__
+
+#include "ump_kernel_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Turn specified physical memory into UMP memory. */
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks);
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_get(ump_secure_id secure_id);
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_meminfo_set(ump_dd_handle memh, void* args);
+UMP_KERNEL_API_EXPORT void *ump_dd_meminfo_get(ump_secure_id secure_id, void* args);
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_get_from_vaddr(unsigned long vaddr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_INTERFACE_REF_DRV_H__ */
diff --git a/drivers/media/video/samsung/ump/include/ump_kernel_interface_vcm.h b/drivers/media/video/samsung/ump/include/ump_kernel_interface_vcm.h
new file mode 100644
index 0000000..a784241
--- /dev/null
+++ b/drivers/media/video/samsung/ump/include/ump_kernel_interface_vcm.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_interface_vcm.h
+ */
+
+#ifndef __UMP_KERNEL_INTERFACE_VCM_H__
+#define __UMP_KERNEL_INTERFACE_VCM_H__
+
+#include <linux/vcm-drv.h>
+#include <plat/s5p-vcm.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Turn specified physical memory into UMP memory. */
+struct ump_vcm {
+ struct vcm *vcm;
+ struct vcm_res *vcm_res;
+ unsigned int dev_id;
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_INTERFACE_VCM_H__ */
diff --git a/drivers/media/video/samsung/ump/include/ump_kernel_platform.h b/drivers/media/video/samsung/ump/include/ump_kernel_platform.h
new file mode 100644
index 0000000..1b5af40
--- /dev/null
+++ b/drivers/media/video/samsung/ump/include/ump_kernel_platform.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_platform.h
+ *
+ * This file should define UMP_KERNEL_API_EXPORT,
+ * which dictates how the UMP kernel API should be exported/imported.
+ * Modify this file, if needed, to match your platform setup.
+ */
+
+#ifndef __UMP_KERNEL_PLATFORM_H__
+#define __UMP_KERNEL_PLATFORM_H__
+
+/** @addtogroup ump_kernel_space_api
+ * @{ */
+
+/**
+ * A define which controls how UMP kernel space API functions are imported and exported.
+ * This define should be set by the implementor of the UMP API.
+ */
+
+#if defined(_WIN32)
+
+#if defined(UMP_BUILDING_UMP_LIBRARY)
+#define UMP_KERNEL_API_EXPORT __declspec(dllexport)
+#else
+#define UMP_KERNEL_API_EXPORT __declspec(dllimport)
+#endif
+
+#else
+
+#define UMP_KERNEL_API_EXPORT
+
+#endif
+
+
+/** @} */ /* end group ump_kernel_space_api */
+
+
+#endif /* __UMP_KERNEL_PLATFORM_H__ */
diff --git a/drivers/media/video/samsung/ump/linux/license/gpl/ump_kernel_license.h b/drivers/media/video/samsung/ump/linux/license/gpl/ump_kernel_license.h
new file mode 100644
index 0000000..17b930d
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/license/gpl/ump_kernel_license.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_license.h
+ * Defines for the macro MODULE_LICENSE.
+ */
+
+#ifndef __UMP_KERNEL_LICENSE_H__
+#define __UMP_KERNEL_LICENSE_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define UMP_KERNEL_LINUX_LICENSE "GPL"
+#define UMP_LICENSE_IS_GPL 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_KERNEL_LICENSE_H__ */
diff --git a/drivers/media/video/samsung/ump/linux/ump_ioctl.h b/drivers/media/video/samsung/ump/linux/ump_ioctl.h
new file mode 100644
index 0000000..50ef9df
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_ioctl.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_IOCTL_H__
+#define __UMP_IOCTL_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#include "../common/ump_uk_types.h"
+
+#ifndef __user
+#define __user
+#endif
+
+
+/**
+ * @file ump_ioctl.h
+ * This file describes the interface needed to use the Linux device driver.
+ * The interface is used by the userspace UMP driver.
+ */
+
+#define UMP_IOCTL_NR 0x90
+
+
+#define UMP_IOC_QUERY_API_VERSION _IOR(UMP_IOCTL_NR, _UMP_IOC_QUERY_API_VERSION, _ump_uk_api_version_s)
+#define UMP_IOC_ALLOCATE _IOWR(UMP_IOCTL_NR, _UMP_IOC_ALLOCATE, _ump_uk_allocate_s)
+#define UMP_IOC_RELEASE _IOR(UMP_IOCTL_NR, _UMP_IOC_RELEASE, _ump_uk_release_s)
+#define UMP_IOC_SIZE_GET _IOWR(UMP_IOCTL_NR, _UMP_IOC_SIZE_GET, _ump_uk_size_get_s)
+#define UMP_IOC_MSYNC _IOW(UMP_IOCTL_NR, _UMP_IOC_MSYNC, _ump_uk_size_get_s)
+#ifdef CONFIG_ION_EXYNOS
+#define UMP_IOC_ION_IMPORT _IOW(UMP_IOCTL_NR, _UMP_IOC_ION_IMPORT, _ump_uk_ion_import_s)
+#endif
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#define UMP_IOC_DMABUF_IMPORT _IOW(UMP_IOCTL_NR, _UMP_IOC_DMABUF_IMPORT,\
+ struct ump_uk_dmabuf)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_IOCTL_H__ */
diff --git a/drivers/media/video/samsung/ump/linux/ump_kernel_linux.c b/drivers/media/video/samsung/ump/linux/ump_kernel_linux.c
new file mode 100644
index 0000000..69f55c5
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_kernel_linux.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/fs.h> /* file system operations */
+#include <linux/cdev.h> /* character device definitions */
+#include <linux/ioport.h> /* request_mem_region */
+#include <linux/mm.h> /* memory management functions and types */
+#include <asm/uaccess.h> /* user space access */
+#include <asm/atomic.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+
+#include "arch/config.h" /* Configuration for current platform. The symlink for arch is set by the Makefile */
+#include "ump_ioctl.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_interface.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_memory_backend.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+#include "ump_kernel_license.h"
+
+#include "ump_osk.h"
+#include "ump_ukk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk_wrappers.h"
+#include "ump_ukk_ref_wrappers.h"
+
+#ifdef CONFIG_ION_EXYNOS
+#include <linux/ion.h>
+extern struct ion_device *ion_exynos;
+struct ion_client *ion_client_ump = NULL;
+#endif
+
+/* Module parameter to control log level */
+int ump_debug_level = 3;
+module_param(ump_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(ump_debug_level, "Higher number, more dmesg output");
+
+/* By default the module uses any available major, but it's possible to set it at load time to a specific number */
+int ump_major = 243;
+module_param(ump_major, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_major, "Device major number");
+
+/* Name of the UMP device driver */
+static char ump_dev_name[] = "ump"; /* should be const, but the functions we call require non-const */
+
+
+static struct dentry *ump_debugfs_dir = NULL;
+
+/*
+ * The data which we attached to each virtual memory mapping request we get.
+ * Each memory mapping has a reference to the UMP memory it maps.
+ * We release this reference when the last memory mapping is unmapped.
+ */
+typedef struct ump_vma_usage_tracker
+{
+ int references; /* number of live mappings sharing this tracker */
+ ump_dd_handle handle; /* UMP allocation pinned while mappings exist */
+} ump_vma_usage_tracker;
+
+/* Per-driver device state: the character device plus, in GPL builds,
+ * the sysfs class used to create /dev/ump via udev. */
+struct ump_device
+{
+ struct cdev cdev; /* character device registered in ump_kernel_device_initialize() */
+#if UMP_LICENSE_IS_GPL
+ struct class * ump_class; /* device class; only available to GPL modules */
+#endif
+};
+
+/* The global variable containing the global device data */
+static struct ump_device ump_device;
+
+
+/* Forward declare static functions */
+static int ump_file_open(struct inode *inode, struct file *filp);
+static int ump_file_release(struct inode *inode, struct file *filp);
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
+#endif
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma);
+
+#ifdef CONFIG_VIDEO_MALI400MP_R2P3
+extern int map_errcode( _mali_osk_errcode_t err );
+#endif
+
+/* This variable defines the file operations this UMP device driver offers.
+ * The ioctl entry point is selected at compile time: kernels providing
+ * HAVE_UNLOCKED_IOCTL use the BKL-free variant. */
+static struct file_operations ump_fops =
+{
+ .owner = THIS_MODULE,
+ .open = ump_file_open,
+ .release = ump_file_release,
+#ifdef HAVE_UNLOCKED_IOCTL
+ .unlocked_ioctl = ump_file_ioctl,
+#else
+ .ioctl = ump_file_ioctl,
+#endif
+ .mmap = ump_file_mmap
+};
+
+
+/* Module init hook (registered via arch_initcall below, not module_init):
+ * constructs the OS-independent UMP core and translates any failure
+ * into a Linux errno via map_errcode().
+ */
+static int ump_initialize_module(void)
+{
+ _mali_osk_errcode_t err;
+
+ DBG_MSG(2, ("Inserting UMP device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__));
+
+ err = ump_kernel_constructor();
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("UMP device driver init failed\n"));
+ return map_errcode(err); /* _mali_osk_errcode_t -> negative errno */
+ }
+
+ MSG(("UMP device driver %s loaded\n", SVN_REV_STRING));
+ return 0;
+}
+
+
+
+/*
+ * Module exit hook: destroys the ION client (if one was created elsewhere)
+ * and tears down the OS-independent UMP core.
+ */
+static void ump_cleanup_module(void)
+{
+#ifdef CONFIG_ION_EXYNOS
+ /* ion_client_ump is created lazily by other code; only destroy if set */
+ if (ion_client_ump)
+ ion_client_destroy(ion_client_ump);
+#endif
+
+ DBG_MSG(2, ("Unloading UMP device driver\n"));
+ ump_kernel_destructor();
+ DBG_MSG(2, ("Module unloaded\n"));
+}
+
+
+
+/* debugfs "memory_usage" read handler: formats the current UMP memory
+ * usage reported by _ump_ukk_report_memory_usage() as a decimal line. */
+static ssize_t ump_memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ size_t r;
+ u32 mem = _ump_ukk_report_memory_usage();
+
+ /* a u32 plus newline always fits in 64 bytes */
+ r = snprintf(buf, 64, "%u\n", mem);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+/* fops for the read-only debugfs "memory_usage" file */
+static const struct file_operations ump_memory_usage_fops = {
+ .owner = THIS_MODULE,
+ .read = ump_memory_used_read,
+};
+
+/*
+ * Initialize the UMP device driver.
+ *
+ * Creates the (optional) debugfs entries, registers a char device region
+ * (auto-selected major when ump_major is 0, fixed major otherwise), adds
+ * the cdev and, in GPL builds, creates the device class and /dev node.
+ * Returns 0 on success or a negative errno; on failure every resource
+ * acquired so far is released.
+ */
+int ump_kernel_device_initialize(void)
+{
+ int err;
+ dev_t dev = 0;
+ /* debugfs is best-effort: only the "debugfs disabled" sentinel is
+ * checked here; NOTE(review): other IS_ERR values would slip through
+ * -- confirm debugfs_create_dir cannot return them on this kernel */
+ ump_debugfs_dir = debugfs_create_dir(ump_dev_name, NULL);
+ if (ERR_PTR(-ENODEV) == ump_debugfs_dir)
+ {
+ ump_debugfs_dir = NULL;
+ }
+ else
+ {
+ debugfs_create_file("memory_usage", 0400, ump_debugfs_dir, NULL, &ump_memory_usage_fops);
+ }
+
+ if (0 == ump_major)
+ {
+ /* auto select a major */
+ err = alloc_chrdev_region(&dev, 0, 1, ump_dev_name);
+ ump_major = MAJOR(dev);
+ }
+ else
+ {
+ /* use load time defined major number */
+ dev = MKDEV(ump_major, 0);
+ err = register_chrdev_region(dev, 1, ump_dev_name);
+ }
+
+ if (0 == err)
+ {
+ memset(&ump_device, 0, sizeof(ump_device));
+
+ /* initialize our char dev data */
+ cdev_init(&ump_device.cdev, &ump_fops);
+ ump_device.cdev.owner = THIS_MODULE;
+ ump_device.cdev.ops = &ump_fops;
+
+ /* register char dev with the kernel */
+ err = cdev_add(&ump_device.cdev, dev, 1/*count*/);
+ if (0 == err)
+ {
+
+#if UMP_LICENSE_IS_GPL
+ ump_device.ump_class = class_create(THIS_MODULE, ump_dev_name);
+ if (IS_ERR(ump_device.ump_class))
+ {
+ err = PTR_ERR(ump_device.ump_class);
+ }
+ else
+ {
+ struct device * mdev;
+ mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name);
+ if (!IS_ERR(mdev))
+ {
+ return 0;
+ }
+
+ err = PTR_ERR(mdev);
+ /* BUGFIX: the class was leaked on device_create failure */
+ class_destroy(ump_device.ump_class);
+ }
+ cdev_del(&ump_device.cdev);
+#else
+ return 0;
+#endif
+ }
+
+ unregister_chrdev_region(dev, 1);
+ }
+
+ return err;
+}
+
+
+
+/*
+ * Terminate the UMP device driver: undoes ump_kernel_device_initialize()
+ * in reverse order (device node + class, cdev, major number, debugfs).
+ */
+void ump_kernel_device_terminate(void)
+{
+ dev_t dev = MKDEV(ump_major, 0);
+
+#if UMP_LICENSE_IS_GPL
+ device_destroy(ump_device.ump_class, dev);
+ class_destroy(ump_device.ump_class);
+#endif
+
+ /* unregister char device */
+ cdev_del(&ump_device.cdev);
+
+ /* free major */
+ unregister_chrdev_region(dev, 1);
+
+ if(ump_debugfs_dir)
+ debugfs_remove_recursive(ump_debugfs_dir);
+}
+
+/*
+ * Open a new session. User space has called open() on us.
+ * Only minor 0 is served; the per-open session is created by the
+ * OS-independent core and stashed in filp->private_data.
+ */
+static int ump_file_open(struct inode *inode, struct file *filp)
+{
+ struct ump_session_data * session_data;
+ _mali_osk_errcode_t err;
+
+ /* input validation: the driver registers exactly one minor */
+ if (0 != MINOR(inode->i_rdev))
+ {
+ MSG_ERR(("Minor not zero in ump_file_open()\n"));
+ return -ENODEV;
+ }
+
+ /* Call the OS-Independent UMP Open function */
+ err = _ump_ukk_open((void**) &session_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ MSG_ERR(("Ump failed to open a new session\n"));
+ return map_errcode( err );
+ }
+
+ filp->private_data = (void*)session_data;
+ filp->f_pos = 0;
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Close a session. User space has called close() or crashed/terminated.
+ * Hands filp->private_data back to the OS-independent core for cleanup.
+ */
+static int ump_file_release(struct inode *inode, struct file *filp)
+{
+ _mali_osk_errcode_t err;
+
+ err = _ump_ukk_close((void**) &filp->private_data );
+ if( _MALI_OSK_ERR_OK != err )
+ {
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
+
+
+
+/*
+ * Handle IOCTL requests.
+ *
+ * The signature differs by kernel: with HAVE_UNLOCKED_IOCTL the BKL-free
+ * entry point is used. Each UMP_IOC_* command is dispatched to the
+ * matching ump_*_wrapper(), which copies arguments from/to user space.
+ */
+#ifdef HAVE_UNLOCKED_IOCTL
+static long ump_file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+#else
+static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
+#endif
+{
+ int err = -ENOTTY;
+ void __user * argument;
+ struct ump_session_data * session_data;
+
+#ifndef HAVE_UNLOCKED_IOCTL
+ (void)inode; /* inode not used */
+#endif
+
+ /* the session was attached by ump_file_open() */
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data)
+ {
+ MSG_ERR(("No session data attached to file object\n"));
+ return -ENOTTY;
+ }
+
+ /* interpret the argument as a user pointer to something */
+ argument = (void __user *)arg;
+
+ switch (cmd)
+ {
+ case UMP_IOC_QUERY_API_VERSION:
+ err = ump_get_api_version_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_ALLOCATE :
+ err = ump_allocate_wrapper((u32 __user *)argument, session_data);
+ break;
+#ifdef CONFIG_ION_EXYNOS
+ case UMP_IOC_ION_IMPORT:
+ err = ump_ion_import_wrapper((u32 __user *)argument, session_data);
+ break;
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ case UMP_IOC_DMABUF_IMPORT:
+ err = ump_dmabuf_import_wrapper((u32 __user *)argument,
+ session_data);
+ break;
+#endif
+ case UMP_IOC_RELEASE:
+ err = ump_release_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_SIZE_GET:
+ err = ump_size_get_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ case UMP_IOC_MSYNC:
+ err = ump_msync_wrapper((u32 __user *)argument, session_data);
+ break;
+
+ default:
+ DBG_MSG(1, ("No handler for IOCTL. cmd: 0x%08x, arg: 0x%08lx\n", cmd, arg));
+ /* NOTE(review): -ENOTTY is the conventional errno for an unknown
+ * ioctl; -EFAULT is kept here to preserve the existing ABI */
+ err = -EFAULT;
+ break;
+ }
+
+ return err;
+}
+
+/* map_errcode() translates OSK error codes to negative Linux errnos.
+ * It is only compiled here when no Mali driver variant is configured,
+ * because those drivers export their own copy (see the extern
+ * declaration guarded by CONFIG_VIDEO_MALI400MP_R2P3 above). */
+#ifndef CONFIG_VIDEO_MALI400MP_R2P3
+#ifndef CONFIG_VIDEO_MALI400MP
+#ifndef CONFIG_VIDEO_MALI400MP_R3P0
+int map_errcode( _mali_osk_errcode_t err )
+{
+ switch(err)
+ {
+ case _MALI_OSK_ERR_OK : return 0;
+ case _MALI_OSK_ERR_FAULT: return -EFAULT;
+ case _MALI_OSK_ERR_INVALID_FUNC: return -ENOTTY;
+ case _MALI_OSK_ERR_INVALID_ARGS: return -EINVAL;
+ case _MALI_OSK_ERR_NOMEM: return -ENOMEM;
+ case _MALI_OSK_ERR_TIMEOUT: return -ETIMEDOUT;
+ case _MALI_OSK_ERR_RESTARTSYSCALL: return -ERESTARTSYS;
+ case _MALI_OSK_ERR_ITEM_NOT_FOUND: return -ENOENT;
+ default: return -EFAULT;
+ }
+}
+#endif
+#endif
+#endif
+
+/*
+ * Handle from OS to map specified virtual memory to specified UMP memory.
+ * The UMP secure id is conveyed by user space through the mmap() offset
+ * (vma->vm_pgoff); the actual mapping is done by the common handler
+ * _ump_ukk_map_mem().
+ */
+static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma)
+{
+ _ump_uk_map_mem_s args;
+ _mali_osk_errcode_t err;
+ struct ump_session_data * session_data;
+
+ /* Validate the session data */
+ session_data = (struct ump_session_data *)filp->private_data;
+ if (NULL == session_data || NULL == session_data->cookies_map->table->mappings)
+ {
+ MSG_ERR(("mmap() called without any session data available\n"));
+ return -EFAULT;
+ }
+
+ /* Re-pack the arguments that mmap() packed for us */
+ args.ctx = session_data;
+ args.phys_addr = 0;
+ args.size = vma->vm_end - vma->vm_start;
+ args._ukk_private = vma;
+ args.secure_id = vma->vm_pgoff; /* secure id smuggled via the mmap offset */
+ args.is_cached = 0;
+
+ /* A private (non-MAP_SHARED) request is taken to mean "cached": the
+ * mapping is forced shared anyway so the kernel and CPU views agree */
+ if (!(vma->vm_flags & VM_SHARED))
+ {
+ args.is_cached = 1;
+ vma->vm_flags = vma->vm_flags | VM_SHARED | VM_MAYSHARE ;
+ DBG_MSG(3, ("UMP Map function: Forcing the CPU to use cache\n"));
+ }
+
+ DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags ));
+
+ /* Call the common mmap handler */
+ err = _ump_ukk_map_mem( &args );
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()"));
+ return map_errcode( err );
+ }
+
+ return 0; /* success */
+}
+
+/* Export UMP kernel space API functions */
+EXPORT_SYMBOL(ump_dd_secure_id_get);
+EXPORT_SYMBOL(ump_dd_handle_create_from_secure_id);
+EXPORT_SYMBOL(ump_dd_phys_block_count_get);
+EXPORT_SYMBOL(ump_dd_phys_block_get);
+EXPORT_SYMBOL(ump_dd_phys_blocks_get);
+EXPORT_SYMBOL(ump_dd_size_get);
+EXPORT_SYMBOL(ump_dd_reference_add);
+EXPORT_SYMBOL(ump_dd_reference_release);
+EXPORT_SYMBOL(ump_dd_meminfo_get);
+EXPORT_SYMBOL(ump_dd_meminfo_set);
+EXPORT_SYMBOL(ump_dd_handle_get_from_vaddr);
+
+/* Export our own extended kernel space allocator */
+EXPORT_SYMBOL(ump_dd_handle_create_from_phys_blocks);
+
+/* Setup init and exit functions for this module */
+//module_init(ump_initialize_module);
+arch_initcall(ump_initialize_module);
+module_exit(ump_cleanup_module);
+
+/* And some module information */
+MODULE_LICENSE(UMP_KERNEL_LINUX_LICENSE);
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_VERSION(SVN_REV_STRING);
diff --git a/drivers/media/video/samsung/ump/linux/ump_kernel_linux.h b/drivers/media/video/samsung/ump/linux/ump_kernel_linux.h
new file mode 100644
index 0000000..b93c814e
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_kernel_linux.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMP_KERNEL_H__
+#define __UMP_KERNEL_H__
+
+int ump_kernel_device_initialize(void);
+void ump_kernel_device_terminate(void);
+
+
+#endif /* __UMP_KERNEL_H__ */
diff --git a/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_dedicated.c b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_dedicated.c
new file mode 100644
index 0000000..4e6c9b5
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_dedicated.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+#define UMP_BLOCK_SIZE (256UL * 1024UL) /* 256kB, remember to keep the ()s */
+
+
+
+/* Free-list node; one per UMP_BLOCK_SIZE block. The block's physical
+ * address is derived from its index in all_blocks (see get_phys()). */
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+
+
+/* State of one dedicated-memory backend instance. */
+typedef struct block_allocator
+{
+ struct semaphore mutex; /* serialises allocate/release on the free list */
+ block_info * all_blocks; /* array of num_blocks nodes */
+ block_info * first_free; /* head of the singly linked free list */
+ u32 base; /* physical base address of the managed region */
+ u32 num_blocks;
+ u32 num_free;
+} block_allocator;
+
+
+static void block_allocator_shutdown(ump_memory_backend * backend);
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem);
+static void block_allocator_release(void * ctx, ump_dd_mem * handle);
+static inline u32 get_phys(block_allocator * allocator, block_info * block);
+
+
+
+/*
+ * Create dedicated memory backend
+ *
+ * Carves the physical range [base_address, base_address + size) into
+ * UMP_BLOCK_SIZE blocks managed through a singly linked free list.
+ * Returns the backend, or NULL on allocation failure or zero usable size.
+ */
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
+{
+ ump_memory_backend * backend;
+ block_allocator * allocator;
+ u32 usable_size;
+ u32 num_blocks;
+
+ /* round up to whole UMP_BLOCK_SIZE blocks */
+ usable_size = (size + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1);
+ num_blocks = usable_size / UMP_BLOCK_SIZE;
+
+ if (0 == usable_size)
+ {
+ DBG_MSG(1, ("Memory block of size %u is unusable\n", size));
+ return NULL;
+ }
+
+ DBG_MSG(5, ("Creating dedicated UMP memory backend. Base address: 0x%08x, size: 0x%08x\n", base_address, size));
+ DBG_MSG(6, ("%u usable bytes which becomes %u blocks\n", usable_size, num_blocks));
+
+ backend = kzalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL != backend)
+ {
+ allocator = kmalloc(sizeof(block_allocator), GFP_KERNEL);
+ if (NULL != allocator)
+ {
+ /* BUGFIX: was sizeof(block_allocator) per element, grossly
+ * over-allocating; the array elements are block_info */
+ allocator->all_blocks = kmalloc(sizeof(block_info) * num_blocks, GFP_KERNEL);
+ if (NULL != allocator->all_blocks)
+ {
+ u32 i; /* was int: avoids signed/unsigned compare with num_blocks */
+
+ allocator->first_free = NULL;
+ allocator->num_blocks = num_blocks;
+ allocator->num_free = num_blocks;
+ allocator->base = base_address;
+ sema_init(&allocator->mutex, 1);
+
+ /* thread every block onto the free list */
+ for (i = 0; i < num_blocks; i++)
+ {
+ allocator->all_blocks[i].next = allocator->first_free;
+ allocator->first_free = &allocator->all_blocks[i];
+ }
+
+ backend->ctx = allocator;
+ backend->allocate = block_allocator_allocate;
+ backend->release = block_allocator_release;
+ backend->shutdown = block_allocator_shutdown;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+ backend->get = NULL;
+ backend->set = NULL;
+
+ return backend;
+ }
+ kfree(allocator); /* unwind partial construction */
+ }
+ kfree(backend);
+ }
+
+ return NULL;
+}
+
+
+
+/*
+ * Destroy specified dedicated memory backend.
+ * Warns (at debug level 1) if blocks are still allocated, then frees
+ * the block array, the allocator state and the backend itself.
+ */
+static void block_allocator_shutdown(ump_memory_backend * backend)
+{
+ block_allocator * allocator;
+
+ BUG_ON(!backend);
+ BUG_ON(!backend->ctx);
+
+ allocator = (block_allocator*)backend->ctx;
+
+ DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));
+
+ kfree(allocator->all_blocks);
+ kfree(allocator);
+ kfree(backend);
+}
+
+
+
+/*
+ * Allocate UMP memory from the dedicated block pool.
+ *
+ * Rounds mem->size_bytes up to whole UMP_BLOCK_SIZE blocks, fills
+ * mem->block_array with each block's physical address/size and chains
+ * the allocated block_infos through mem->backend_info for release().
+ * Returns 1 on success, 0 on failure (all-or-nothing: a partial
+ * allocation is rolled back).
+ */
+static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
+{
+ block_allocator * allocator;
+ u32 left;
+ block_info * last_allocated = NULL;
+ int i = 0;
+
+ BUG_ON(!ctx);
+ BUG_ON(!mem);
+
+ allocator = (block_allocator*)ctx;
+ left = mem->size_bytes;
+
+ BUG_ON(!left);
+ BUG_ON(!&allocator->mutex);
+
+ mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
+ mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
+ if (NULL == mem->block_array)
+ {
+ MSG_ERR(("Failed to allocate block array\n"));
+ return 0;
+ }
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Could not get mutex to do block_allocate\n"));
+ /* BUGFIX: the block array allocated above used to leak here */
+ vfree(mem->block_array);
+ mem->block_array = NULL;
+ return 0;
+ }
+
+ mem->size_bytes = 0;
+
+ /* pop blocks off the free list until the request is satisfied */
+ while ((left > 0) && (allocator->first_free))
+ {
+ block_info * block;
+
+ block = allocator->first_free;
+ allocator->first_free = allocator->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+ allocator->num_free--;
+
+ mem->block_array[i].addr = get_phys(allocator, block);
+ mem->block_array[i].size = UMP_BLOCK_SIZE;
+ mem->size_bytes += UMP_BLOCK_SIZE;
+
+ i++;
+
+ if (left < UMP_BLOCK_SIZE) left = 0;
+ else left -= UMP_BLOCK_SIZE;
+ }
+
+ if (left)
+ {
+ block_info * block;
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ block = last_allocated->next;
+ last_allocated->next = allocator->first_free;
+ allocator->first_free = last_allocated;
+ last_allocated = block;
+ allocator->num_free++;
+ }
+
+ vfree(mem->block_array);
+ mem->backend_info = NULL;
+ mem->block_array = NULL;
+
+ DBG_MSG(4, ("Could not find a mem-block for the allocation.\n"));
+ up(&allocator->mutex);
+
+ return 0;
+ }
+
+ mem->backend_info = last_allocated;
+
+ up(&allocator->mutex);
+
+ return 1;
+}
+
+
+
+/*
+ * Return all blocks of @handle (chained via handle->backend_info by
+ * block_allocator_allocate) to the free list, then free the block array.
+ */
+static void block_allocator_release(void * ctx, ump_dd_mem * handle)
+{
+ block_allocator * allocator;
+ block_info * block, * next;
+
+ BUG_ON(!ctx);
+ BUG_ON(!handle);
+
+ allocator = (block_allocator*)ctx;
+ block = (block_info*)handle->backend_info;
+ BUG_ON(!block);
+
+ if (down_interruptible(&allocator->mutex))
+ {
+ MSG_ERR(("Allocator release: Failed to get mutex - memory leak\n"));
+ return;
+ }
+
+ while (block)
+ {
+ next = block->next;
+
+ /* BUGFIX: upper bound was '>', which let the invalid one-past-end
+ * pointer (all_blocks + num_blocks) slip through the sanity check */
+ BUG_ON( (block < allocator->all_blocks) || (block >= (allocator->all_blocks + allocator->num_blocks)));
+
+ block->next = allocator->first_free;
+ allocator->first_free = block;
+ allocator->num_free++;
+
+ block = next;
+ }
+ DBG_MSG(3, ("%d blocks free after release call\n", allocator->num_free));
+ up(&allocator->mutex);
+
+ vfree(handle->block_array);
+ handle->block_array = NULL;
+}
+
+
+
+/*
+ * Compute the physical base address of @block. Blocks are laid out
+ * contiguously from allocator->base in UMP_BLOCK_SIZE units, so the
+ * address follows directly from the block's index within all_blocks.
+ */
+static inline u32 get_phys(block_allocator * allocator, block_info * block)
+{
+ u32 index = block - allocator->all_blocks;
+
+ return allocator->base + index * UMP_BLOCK_SIZE;
+}
diff --git a/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_dedicated.h b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_dedicated.h
new file mode 100644
index 0000000..fa4bdcc
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_dedicated.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_dedicated.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__ */
+
diff --git a/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_os.c b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_os.c
new file mode 100644
index 0000000..323c13c
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_os.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+
+
+
+/* State of one OS (page-allocator based) memory backend instance. */
+typedef struct os_allocator
+{
+ struct semaphore mutex; /* serialises the page accounting below */
+ u32 num_pages_max; /**< Maximum number of pages to allocate from the OS */
+ u32 num_pages_allocated; /**< Number of pages allocated from the OS */
+} os_allocator;
+
+
+
+static void os_free(void* ctx, ump_dd_mem * descriptor);
+static int os_allocate(void* ctx, ump_dd_mem * descriptor);
+static void os_memory_backend_destroy(ump_memory_backend * backend);
+static u32 os_stat(struct ump_memory_backend *backend);
+
+
+
+/*
+ * Create OS memory backend.
+ * @max_allocation caps the backend's total usage, in bytes (converted
+ * to pages here). Returns the backend or NULL on allocation failure.
+ */
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
+{
+ ump_memory_backend * backend;
+ os_allocator * info;
+
+ info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
+ if (NULL == info)
+ {
+ return NULL;
+ }
+
+ info->num_pages_max = max_allocation >> PAGE_SHIFT;
+ info->num_pages_allocated = 0;
+
+ sema_init(&info->mutex, 1);
+
+ backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL == backend)
+ {
+ kfree(info);
+ return NULL;
+ }
+
+ /* every vtable slot is set explicitly since backend is not zeroed */
+ backend->ctx = info;
+ backend->allocate = os_allocate;
+ backend->release = os_free;
+ backend->shutdown = os_memory_backend_destroy;
+ backend->stat = os_stat;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+ backend->get = NULL;
+ backend->set = NULL;
+
+ return backend;
+}
+
+
+
+/*
+ * Destroy specified OS memory backend.
+ * Warns (at debug level 1) if pages are still accounted as allocated.
+ */
+static void os_memory_backend_destroy(ump_memory_backend * backend)
+{
+ os_allocator * info = (os_allocator*)backend->ctx;
+
+ DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
+
+ kfree(info);
+ kfree(backend);
+}
+
+
+
+/*
+ * Allocate UMP memory page by page from the OS.
+ *
+ * Rounds descriptor->size_bytes up to whole pages and records each page
+ * in descriptor->block_array. Uncached allocations are DMA-mapped so
+ * the returned addresses are device-usable. All-or-nothing: if the
+ * request cannot be fully satisfied (OOM or backend quota reached),
+ * everything is rolled back. Returns 1 on success, 0 on failure.
+ */
+static int os_allocate(void* ctx, ump_dd_mem * descriptor)
+{
+ u32 left;
+ os_allocator * info;
+ int pages_allocated = 0;
+ int is_cached;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!ctx);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size_bytes;
+ is_cached = descriptor->is_cached;
+
+ if (down_interruptible(&info->mutex))
+ {
+ /* BUGFIX: message used to name os_free */
+ DBG_MSG(1, ("Failed to get mutex in os_allocate\n"));
+ return 0; /* failure */
+ }
+
+ descriptor->backend_info = NULL;
+ descriptor->nr_blocks = ((left + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+
+ DBG_MSG(5, ("Allocating page array. Size: %lu\n", descriptor->nr_blocks * sizeof(ump_dd_physical_block)));
+
+ descriptor->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * descriptor->nr_blocks);
+ if (NULL == descriptor->block_array)
+ {
+ up(&info->mutex);
+ DBG_MSG(1, ("Block array could not be allocated\n"));
+ return 0; /* failure */
+ }
+
+ while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max))
+ {
+ struct page * new_page;
+
+ if (is_cached)
+ {
+ new_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN );
+ } else
+ {
+ new_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD);
+ }
+ if (NULL == new_page)
+ {
+ MSG_ERR(("UMP memory allocated: Out of Memory !!\n"));
+ break;
+ }
+
+ /* Ensure page caches are flushed. */
+ if ( is_cached )
+ {
+ descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ } else
+ {
+ descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+ descriptor->block_array[pages_allocated].size = PAGE_SIZE;
+ }
+
+ DBG_MSG(5, ("Allocated page 0x%08lx cached: %d\n", descriptor->block_array[pages_allocated].addr, is_cached));
+
+ if (left < PAGE_SIZE)
+ {
+ left = 0;
+ }
+ else
+ {
+ left -= PAGE_SIZE;
+ }
+
+ pages_allocated++;
+ }
+
+ /* BUGFIX: format string had three conversions but only two arguments
+ * (undefined behaviour when debug level >= 5); is_cached added */
+ DBG_MSG(5, ("Alloc for ID:%2d got %d pages, cached: %d\n", descriptor->secure_id, pages_allocated, is_cached));
+
+ if (left)
+ {
+ MSG_ERR(("Failed to allocate needed pages\n"));
+ MSG_ERR(("UMP memory allocated: %d kB Configured maximum OS memory usage: %d kB\n",
+ (pages_allocated * _MALI_OSK_CPU_PAGE_SIZE)/1024, (info->num_pages_max* _MALI_OSK_CPU_PAGE_SIZE)/1024));
+
+ while(pages_allocated)
+ {
+ pages_allocated--;
+ if ( !is_cached )
+ {
+ dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
+ }
+
+ /* BUGFIX: the block array used to leak on this failure path
+ * (vfree(NULL) in any later caller cleanup stays a safe no-op) */
+ vfree(descriptor->block_array);
+ descriptor->block_array = NULL;
+
+ up(&info->mutex);
+
+ return 0; /* failure */
+ }
+
+ info->num_pages_allocated += pages_allocated;
+
+ DBG_MSG(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ up(&info->mutex);
+
+ return 1; /* success*/
+}
+
+
+/*
+ * Free specified UMP memory: updates the backend's page accounting under
+ * the mutex, then unmaps (uncached case) and frees every page outside it.
+ */
+static void os_free(void* ctx, ump_dd_mem * descriptor)
+{
+ os_allocator * info;
+ int i;
+
+ BUG_ON(!ctx);
+ BUG_ON(!descriptor);
+
+ info = (os_allocator*)ctx;
+
+ BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
+
+ if (down_interruptible(&info->mutex))
+ {
+ DBG_MSG(1, ("Failed to get mutex in os_free\n"));
+ return;
+ }
+
+ DBG_MSG(5, ("Releasing %lu OS pages\n", descriptor->nr_blocks));
+
+ info->num_pages_allocated -= descriptor->nr_blocks;
+
+ up(&info->mutex);
+
+ /* pages themselves are released outside the lock; each block holds
+ * exactly one page (see os_allocate) */
+ for ( i = 0; i < descriptor->nr_blocks; i++)
+ {
+ DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
+ if ( ! descriptor->is_cached)
+ {
+ /* undo the dma_map_page() done at allocation time */
+ dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ __free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
+ }
+
+ vfree(descriptor->block_array);
+}
+
+
+/* Report how many bytes this OS backend currently has allocated. */
+static u32 os_stat(struct ump_memory_backend *backend)
+{
+ os_allocator *info = (os_allocator *)backend->ctx;
+
+ return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
+}
diff --git a/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_os.h b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_os.h
new file mode 100644
index 0000000..f924705
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_os.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_os.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_OS_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_os_memory_backend_create(const int max_allocation);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_OS_H__ */
+
diff --git a/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_vcm.c b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_vcm.c
new file mode 100644
index 0000000..de7f212
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_vcm.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/* create by boojin.kim@samsung.com */
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/semaphore.h>
+#else /* pre 2.6.26 the file was in the arch specific location */
+#include <asm/semaphore.h>
+#endif
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend.h"
+#include "ump_kernel_interface_ref_drv.h"
+#include "ump_kernel_memory_backend_vcm.h"
+#include "../common/ump_uk_types.h"
+#include <linux/vcm-drv.h>
+#include <plat/s5p-vcm.h>
+#include <linux/dma-mapping.h>
+
+#define UMP_REF_DRV_UK_VCM_DEV_G2D 12
+
+typedef struct ump_vcm {
+ struct vcm *vcm;
+ struct vcm_res *vcm_res;
+ unsigned int dev_id;
+} ump_vcm;
+
+typedef struct vcm_allocator {
+ struct semaphore mutex;
+ u32 num_vcm_blocks;
+} vcm_allocator;
+
+static void ump_vcm_free(void* ctx, ump_dd_mem * descriptor);
+static int ump_vcm_allocate(void* ctx, ump_dd_mem * descriptor);
+static void *vcm_res_get(ump_dd_mem *mem, void* args);
+static void vcm_attr_set(ump_dd_mem *mem, void* args);
+static int vcm_mem_allocator(vcm_allocator *info, ump_dd_mem *descriptor);
+static void vcm_memory_backend_destroy(ump_memory_backend * backend);
+
+
+/*
+ * Create VCM memory backend
+ */
+ump_memory_backend * ump_vcm_memory_backend_create(const int max_allocation)
+{
+ ump_memory_backend * backend;
+ vcm_allocator * info;
+
+ info = kmalloc(sizeof(vcm_allocator), GFP_KERNEL);
+ if (NULL == info)
+ {
+ return NULL;
+ }
+
+ info->num_vcm_blocks = 0;
+
+
+ sema_init(&info->mutex, 1);
+
+ backend = kmalloc(sizeof(ump_memory_backend), GFP_KERNEL);
+ if (NULL == backend)
+ {
+ kfree(info);
+ return NULL;
+ }
+
+ backend->ctx = info;
+ backend->allocate = ump_vcm_allocate;
+ backend->release = ump_vcm_free;
+ backend->shutdown = vcm_memory_backend_destroy;
+ backend->pre_allocate_physical_check = NULL;
+ backend->adjust_to_mali_phys = NULL;
+
+ backend->get = vcm_res_get;
+ backend->set = vcm_attr_set;
+
+
+ return backend;
+}
+
+/*
+ * Destroy specified VCM memory backend
+ */
+static void vcm_memory_backend_destroy(ump_memory_backend * backend)
+{
+ vcm_allocator * info = (vcm_allocator*)backend->ctx;
+#if 0
+ DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
+#endif
+ kfree(info);
+ kfree(backend);
+}
+
+/*
+ * Allocate UMP memory
+ */
+static int ump_vcm_allocate(void *ctx, ump_dd_mem * descriptor)
+{
+ int ret; /* success */
+ vcm_allocator *info;
+ struct ump_vcm *ump_vcm;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!ctx);
+
+ info = (vcm_allocator*)ctx;
+
+ ump_vcm = kmalloc(sizeof(struct ump_vcm), GFP_KERNEL);
+ if (NULL == ump_vcm)
+ {
+ return 0;
+ }
+
+ ump_vcm->dev_id = (int)descriptor->backend_info & ~UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE;
+
+ if(ump_vcm->dev_id == UMP_REF_DRV_UK_CONSTRAINT_NONE) { /* None */
+ ump_vcm->dev_id = UMP_REF_DRV_UK_VCM_DEV_G2D; /* this ID is G2D */
+ }
+ else if(ump_vcm->dev_id == UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR) { /* Physical Linear */
+ return 0;
+ }
+ else { /* Other VCM */
+ ump_vcm->dev_id -= 2;
+ }
+
+ DBG_MSG(5, ("Device ID for VCM : %d\n", ump_vcm->dev_id));
+ ump_vcm->vcm = vcm_find_vcm(ump_vcm->dev_id);
+
+ if (!ump_vcm->vcm)
+ {
+ return 0;
+ }
+ descriptor->backend_info = (void*)ump_vcm;
+
+ if (down_interruptible(&info->mutex)) {
+ DBG_MSG(1, ("Failed to get mutex in ump_vcm_allocate\n"));
+ return 0; /* failure */
+ }
+
+ ret = vcm_mem_allocator(info, descriptor);
+ up(&info->mutex);
+
+ return ret; /* success */
+}
+
+static int vcm_mem_allocator(vcm_allocator *info, ump_dd_mem *descriptor)
+{
+ unsigned long num_blocks;
+ int i;
+ struct vcm_phys *phys;
+ struct vcm_phys_part *part;
+ int size_total = 0;
+ struct ump_vcm *ump_vcm;
+
+ ump_vcm = (struct ump_vcm*)descriptor->backend_info;
+
+ ump_vcm->vcm_res =
+ vcm_make_binding(ump_vcm->vcm, descriptor->size_bytes,
+ ump_vcm->dev_id, 0);
+
+ phys = ump_vcm->vcm_res->phys;
+ part = phys->parts;
+ num_blocks = phys->count;
+
+ DBG_MSG(5,
+ ("Allocating page array. Size: %lu, VCM Reservation : 0x%x\n",
+ phys->count * sizeof(ump_dd_physical_block),
+ ump_vcm->vcm_res->start));
+
+ /* Now, make a copy of the block information supplied by the user */
+ descriptor->block_array =
+ (ump_dd_physical_block *) vmalloc(sizeof(ump_dd_physical_block) *
+ num_blocks);
+
+ if (NULL == descriptor->block_array) {
+ vfree(descriptor->block_array);
+ DBG_MSG(1, ("Could not allocate a mem handle for function.\n"));
+ return 0; /* failure */
+ }
+
+ for (i = 0; i < num_blocks; i++) {
+ descriptor->block_array[i].addr = part->start;
+ descriptor->block_array[i].size = part->size;
+
+ dmac_unmap_area(phys_to_virt(part->start), part->size, DMA_FROM_DEVICE);
+ outer_inv_range(part->start, part->start + part->size);
+
+ ++part;
+ size_total += descriptor->block_array[i].size;
+ DBG_MSG(6,
+ ("UMP memory created with VCM. addr 0x%x, size: 0x%x\n",
+ descriptor->block_array[i].addr,
+ descriptor->block_array[i].size));
+ }
+
+ descriptor->size_bytes = size_total;
+ descriptor->nr_blocks = num_blocks;
+ descriptor->ctx = NULL;
+
+ info->num_vcm_blocks += num_blocks;
+ return 1;
+}
+
+/*
+ * Free specified UMP memory
+ */
+static void ump_vcm_free(void *ctx, ump_dd_mem * descriptor)
+{
+ struct ump_vcm *ump_vcm;
+ vcm_allocator *info;
+
+ BUG_ON(!descriptor);
+ BUG_ON(!ctx);
+
+ ump_vcm = (struct ump_vcm*)descriptor->backend_info;
+ info = (vcm_allocator*)ctx;
+
+ BUG_ON(descriptor->nr_blocks > info->num_vcm_blocks);
+
+ if (down_interruptible(&info->mutex)) {
+ DBG_MSG(1, ("Failed to get mutex in ump_vcm_free\n"));
+ return;
+ }
+
+ DBG_MSG(5, ("Releasing %lu VCM pages\n", descriptor->nr_blocks));
+
+ info->num_vcm_blocks -= descriptor->nr_blocks;
+
+ up(&info->mutex);
+
+ DBG_MSG(6, ("Freeing physical page by VCM\n"));
+ vcm_destroy_binding(ump_vcm->vcm_res);
+ ump_vcm->vcm = NULL;
+ ump_vcm->vcm_res = NULL;
+
+ kfree(ump_vcm);
+ vfree(descriptor->block_array);
+}
+
+static void *vcm_res_get(ump_dd_mem *mem, void *args)
+{
+ struct ump_vcm *ump_vcm;
+ enum vcm_dev_id vcm_id;
+
+ ump_vcm = (struct ump_vcm*)mem->backend_info;
+ vcm_id = (enum vcm_dev_id)args;
+
+ if (vcm_reservation_in_vcm
+ (vcm_find_vcm(vcm_id), ump_vcm->vcm_res)
+ == S5PVCM_RES_NOT_IN_VCM)
+ return NULL;
+ else
+ return ump_vcm->vcm_res;
+}
+
+static void vcm_attr_set(ump_dd_mem *mem, void *args)
+{
+ struct ump_vcm *ump_vcm, *ump_vcmh;
+
+ ump_vcm = (struct ump_vcm*)args;
+
+ ump_vcmh = kmalloc(sizeof(struct ump_vcm), GFP_KERNEL);
+ if (NULL == ump_vcmh)
+ {
+ return;
+ }
+
+ ump_vcmh->dev_id = ump_vcm->dev_id;
+ ump_vcmh->vcm = ump_vcm->vcm;
+ ump_vcmh->vcm_res = ump_vcm->vcm_res;
+
+ mem->backend_info= (void*)ump_vcmh;
+
+ return;
+}
+
+
diff --git a/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_vcm.h b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_vcm.h
new file mode 100644
index 0000000..62f6d12
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_kernel_memory_backend_vcm.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_memory_backend_vcm.h
+ */
+
+#ifndef __UMP_KERNEL_MEMORY_BACKEND_VCM_H__
+#define __UMP_KERNEL_MEMORY_BACKEND_VCM_H__
+
+#include "ump_kernel_memory_backend.h"
+
+ump_memory_backend * ump_vcm_memory_backend_create(const int max_allocation);
+
+#endif /* __UMP_KERNEL_MEMORY_BACKEND_VCM_H__ */
diff --git a/drivers/media/video/samsung/ump/linux/ump_memory_backend.c b/drivers/media/video/samsung/ump/linux/ump_memory_backend.c
new file mode 100644
index 0000000..f2a6bd6
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_memory_backend.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/ioport.h> /* request_mem_region */
+
+#include "arch/config.h" /* Configuration for current platform. The symlink for arch is set by Makefile */
+
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_memory_backend_os.h"
+#include "ump_kernel_memory_backend_dedicated.h"
+#include "ump_kernel_memory_backend_vcm.h"
+
+/* Configure which dynamic memory allocator to use */
+int ump_backend = ARCH_UMP_BACKEND_DEFAULT;
+module_param(ump_backend, int, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_backend, "0 = dedicated memory backend (default), 1 = OS memory backend");
+
+/* The base address of the memory block for the dedicated memory backend */
+unsigned int ump_memory_address = ARCH_UMP_MEMORY_ADDRESS_DEFAULT;
+module_param(ump_memory_address, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_address, "The physical address to map for the dedicated memory backend");
+
+/* The size of the memory block for the dedicated memory backend */
+unsigned int ump_memory_size = ARCH_UMP_MEMORY_SIZE_DEFAULT;
+module_param(ump_memory_size, uint, S_IRUGO); /* r--r--r-- */
+MODULE_PARM_DESC(ump_memory_size, "The size of fixed memory to map in the dedicated memory backend");
+
+ump_memory_backend* ump_memory_backend_create ( void )
+{
+ ump_memory_backend * backend = NULL;
+
+ /* Create the dynamic memory allocator backend */
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Using dedicated memory backend\n"));
+
+ DBG_MSG(2, ("Requesting dedicated memory: 0x%08x, size: %u\n", ump_memory_address, ump_memory_size));
+ /* Ask the OS if we can use the specified physical memory */
+ if (NULL == request_mem_region(ump_memory_address, ump_memory_size, "UMP Memory"))
+ {
+ MSG_ERR(("Failed to request memory region (0x%08X - 0x%08X). Is Mali DD already loaded?\n", ump_memory_address, ump_memory_address + ump_memory_size - 1));
+ return NULL;
+ }
+ backend = ump_block_allocator_create(ump_memory_address, ump_memory_size);
+ }
+ else if (1 == ump_backend)
+ {
+ DBG_MSG(2, ("Using OS memory backend, allocation limit: %d\n", ump_memory_size));
+ backend = ump_os_memory_backend_create(ump_memory_size);
+ }
+#ifdef CONFIG_UMP_VCM_ALLOC
+ else if (2 == ump_backend)
+ {
+ DBG_MSG(2, ("Using VCM memory backend, allocation limit: %d\n", ump_memory_size));
+ backend = ump_vcm_memory_backend_create(ump_memory_size);
+ }
+#endif
+
+ return backend;
+}
+
+void ump_memory_backend_destroy( void )
+{
+ if (0 == ump_backend)
+ {
+ DBG_MSG(2, ("Releasing dedicated memory: 0x%08x\n", ump_memory_address));
+ release_mem_region(ump_memory_address, ump_memory_size);
+ }
+}
diff --git a/drivers/media/video/samsung/ump/linux/ump_osk_atomics.c b/drivers/media/video/samsung/ump/linux/ump_osk_atomics.c
new file mode 100644
index 0000000..ef1902e
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_osk_atomics.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_atomics.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+#include "ump_osk.h"
+#include <asm/atomic.h>
+
+int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_dec_return((atomic_t *)&atom->u.val);
+}
+
+int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom )
+{
+ return atomic_inc_return((atomic_t *)&atom->u.val);
+}
diff --git a/drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c b/drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c
new file mode 100644
index 0000000..17af2bd
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c
@@ -0,0 +1,441 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_memory.c
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+/* needed to detect kernel version specific code */
+#include <linux/version.h>
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+#include <linux/module.h> /* kernel module definitions */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/memory.h>
+#include <asm/cacheflush.h>
+#include <linux/dma-mapping.h>
+
+typedef struct ump_vma_usage_tracker
+{
+ atomic_t references;
+ ump_memory_allocation *descriptor;
+} ump_vma_usage_tracker;
+
+static void ump_vma_open(struct vm_area_struct * vma);
+static void ump_vma_close(struct vm_area_struct * vma);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+#endif
+
+static struct vm_operations_struct ump_vm_ops =
+{
+ .open = ump_vma_open,
+ .close = ump_vma_close,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ .fault = ump_cpu_page_fault_handler
+#else
+ .nopfn = ump_cpu_page_fault_handler
+#endif
+};
+
+/*
+ * Page fault for VMA region
+ * This should never happen since we always map in the entire virtual memory range.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
+#else
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+#endif
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ void __user * address;
+ address = vmf->virtual_address;
+#endif
+ MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
+ MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, address));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+ return VM_FAULT_SIGBUS;
+#else
+ return NOPFN_SIGBUS;
+#endif
+}
+
+static void ump_vma_open(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_inc_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+}
+
+static void ump_vma_close(struct vm_area_struct * vma)
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ _ump_uk_unmap_mem_s args;
+ int new_val;
+
+ vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ BUG_ON(NULL == vma_usage_tracker);
+
+ new_val = atomic_dec_return(&vma_usage_tracker->references);
+
+ DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
+
+ vma_usage_tracker->descriptor->process_mapping_info = vma;
+
+ if (0 == new_val)
+ {
+ ump_memory_allocation * descriptor;
+
+ descriptor = vma_usage_tracker->descriptor;
+
+ args.ctx = descriptor->ump_session;
+ args.cookie = descriptor->cookie;
+ args.mapping = descriptor->mapping;
+ args.size = descriptor->size;
+
+ args._ukk_private = NULL; /** @note unused */
+
+ DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
+ _ump_ukk_unmap_mem( & args );
+
+ /* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
+ }
+}
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
+{
+ ump_vma_usage_tracker * vma_usage_tracker;
+ struct vm_area_struct *vma;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
+ if (NULL == vma_usage_tracker)
+ {
+ DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _mali_osk_mem_mapregion_init\n"));
+ return -_MALI_OSK_ERR_FAULT;
+ }
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+ if (NULL == vma )
+ {
+ kfree(vma_usage_tracker);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ vma->vm_private_data = vma_usage_tracker;
+ vma->vm_flags |= VM_IO;
+ vma->vm_flags |= VM_RESERVED;
+
+ if (0==descriptor->is_cached)
+ {
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ }
+ DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));
+
+ /* Setup the functions which handle further VMA handling */
+ vma->vm_ops = &ump_vm_ops;
+
+ /* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
+ descriptor->mapping = (void __user*)vma->vm_start;
+
+ atomic_set(&vma_usage_tracker->references, 1); /*this can later be increased if process is forked, see ump_vma_open() */
+ vma_usage_tracker->descriptor = descriptor;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
+{
+ struct vm_area_struct* vma;
+ ump_vma_usage_tracker * vma_usage_tracker;
+
+ if (NULL == descriptor) return;
+
+ /* Linux does the right thing as part of munmap to remove the mapping
+ * All that remains is that we remove the vma_usage_tracker setup in init() */
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ vma_usage_tracker = vma->vm_private_data;
+
+ /* We only get called if mem_mapregion_init succeeded */
+ kfree(vma_usage_tracker);
+ return;
+}
+
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
+{
+ struct vm_area_struct *vma;
+ _mali_osk_errcode_t retval;
+
+ if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
+
+ vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+
+ if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+
+ retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;;
+
+ DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
+ ump_dd_secure_id_get(descriptor->handle),
+ (unsigned long)vma,
+ (unsigned long)(vma->vm_start + offset),
+ (unsigned long)*phys_addr,
+ size,
+ (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
+
+ return retval;
+}
+
+static u32 _ump_osk_virt_to_phys_start(ump_dd_mem * mem, u32 start, u32 address, int *index)
+{
+ int i;
+ u32 offset = address - start;
+ ump_dd_physical_block *block;
+ u32 sum = 0;
+
+ for (i=0; i<mem->nr_blocks; i++) {
+ block = &mem->block_array[i];
+ sum += block->size;
+ if (sum > offset) {
+ *index = i;
+ DBG_MSG(3, ("_ump_osk_virt_to_phys : index : %d, virtual 0x%x, phys 0x%x\n", i, address, (u32)block->addr + offset - (sum -block->size)));
+ return (u32)block->addr + offset - (sum -block->size);
+ }
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+static u32 _ump_osk_virt_to_phys_end(ump_dd_mem * mem, u32 start, u32 address, int *index)
+{
+ int i;
+ u32 offset = address - start;
+ ump_dd_physical_block *block;
+ u32 sum = 0;
+
+ for (i=0; i<mem->nr_blocks; i++) {
+ block = &mem->block_array[i];
+ sum += block->size;
+ if (sum >= offset) {
+ *index = i;
+ DBG_MSG(3, ("_ump_osk_virt_to_phys : index : %d, virtual 0x%x, phys 0x%x\n", i, address, (u32)block->addr + offset - (sum -block->size)));
+ return (u32)block->addr + offset - (sum -block->size);
+ }
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+static void _ump_osk_msync_with_virt(ump_dd_mem * mem, ump_uk_msync_op op, u32 start, u32 address, u32 size)
+{
+ int start_index, end_index;
+ u32 start_p, end_p;
+
+ DBG_MSG(3, ("Cache flush with user virtual address. start : 0x%x, end : 0x%x, address 0x%x, size 0x%x\n", start, start+mem->size_bytes, address, size));
+
+ start_p = _ump_osk_virt_to_phys_start(mem, start, address, &start_index);
+ end_p = _ump_osk_virt_to_phys_end(mem, start, address+size, &end_index);
+
+ if (start_index==end_index) {
+ if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE)
+ outer_flush_range(start_p, end_p);
+ else
+ outer_clean_range(start_p, end_p);
+ } else {
+ ump_dd_physical_block *block;
+ int i;
+
+ for (i=start_index; i<=end_index; i++) {
+ block = &mem->block_array[i];
+
+ if (i == start_index) {
+ if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
+ outer_flush_range(start_p, block->addr+block->size);
+ } else {
+ outer_clean_range(start_p, block->addr+block->size);
+ }
+ }
+ else if (i == end_index) {
+ if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
+ outer_flush_range(block->addr, end_p);
+ } else {
+ outer_clean_range(block->addr, end_p);
+ }
+ break;
+ }
+ else {
+ if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
+ outer_flush_range(block->addr, block->addr+block->size);
+ } else {
+ outer_clean_range(block->addr, block->addr+block->size);
+ }
+ }
+ }
+ }
+ return;
+}
+
+void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op, u32 start, u32 address, u32 size)
+{
+ int i;
+ u32 start_p, end_p;
+ ump_dd_physical_block *block;
+
+ DBG_MSG(3,
+ ("Flushing nr of blocks: %u. First: paddr: 0x%08x vaddr: 0x%08x size:%dB\n",
+ mem->nr_blocks, mem->block_array[0].addr,
+ phys_to_virt(mem->block_array[0].addr),
+ mem->block_array[0].size));
+
+#ifndef USING_DMA_FLUSH
+ if (address) {
+ if ((address >= start)
+ && ((address + size) <= start + mem->size_bytes)) {
+ if (size >= SZ_64K) {
+ flush_all_cpu_caches();
+ } else if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE)
+ dmac_flush_range((void *)address,
+ (void *)(address + size - 1));
+ else
+ dmac_map_area((void *)address, size,
+ DMA_TO_DEVICE);
+#ifdef CONFIG_CACHE_L2X0
+ if (size >= SZ_1M)
+ outer_clean_all();
+ else
+ _ump_osk_msync_with_virt(mem, op, start, address, size);
+#endif
+ return;
+ }
+ }
+
+ if ((op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE)) {
+ if ((mem->size_bytes >= SZ_1M)) {
+ flush_all_cpu_caches();
+#ifdef CONFIG_CACHE_L2X0
+ outer_flush_all();
+#endif
+ return;
+ } else if ((mem->size_bytes >= SZ_64K)) {
+ flush_all_cpu_caches();
+#ifdef CONFIG_CACHE_L2X0
+ for (i = 0; i < mem->nr_blocks; i++) {
+ block = &mem->block_array[i];
+ start_p = (u32) block->addr;
+ end_p = start_p + block->size - 1;
+ outer_flush_range(start_p, end_p);
+ }
+#endif
+ return;
+ }
+ } else {
+ if ((mem->size_bytes >= SZ_1M)) {
+ flush_all_cpu_caches();
+#ifdef CONFIG_CACHE_L2X0
+ outer_clean_all();
+#endif
+ return;
+ } else if ((mem->size_bytes >= SZ_64K)) {
+ flush_all_cpu_caches();
+#ifdef CONFIG_CACHE_L2X0
+ for (i = 0; i < mem->nr_blocks; i++) {
+ block = &mem->block_array[i];
+ start_p = (u32) block->addr;
+ end_p = start_p + block->size - 1;
+ outer_clean_range(start_p, end_p);
+ }
+#endif
+ return;
+ }
+ }
+#endif
+
+ for (i = 0; i < mem->nr_blocks; i++) {
+ /* TODO: Find out which flush method is best of 1)Dma OR 2)Normal flush functions */
+ /*#define USING_DMA_FLUSH */
+#ifdef USING_DMA_FLUSH
+ DEBUG_ASSERT((PAGE_SIZE == mem->block_array[i].size));
+ dma_map_page(NULL,
+ pfn_to_page(mem->block_array[i].
+ addr >> PAGE_SHIFT), 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ /*dma_unmap_page(NULL, mem->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL); */
+#else
+ block = &mem->block_array[i];
+ start_p = (u32) block->addr;
+ end_p = start_p + block->size - 1;
+ if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
+ dmac_flush_range(phys_to_virt(start_p),
+ phys_to_virt(end_p));
+ outer_flush_range(start_p, end_p);
+ } else {
+ dmac_map_area(phys_to_virt(start_p), block->size,
+ DMA_TO_DEVICE);
+ outer_clean_range(start_p, end_p);
+ }
+#endif
+ }
+}
+
+
+void _ump_osk_mem_mapregion_get( ump_dd_mem ** mem, unsigned long vaddr)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ ump_vma_usage_tracker * vma_usage_tracker;
+ ump_memory_allocation *descriptor;
+ ump_dd_handle handle;
+
+ DBG_MSG(3, ("_ump_osk_mem_mapregion_get: vaddr 0x%08lx\n", vaddr));
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, vaddr);
+ up_read(&mm->mmap_sem);
+ if(!vma)
+ {
+ DBG_MSG(3, ("Not found VMA\n"));
+ *mem = NULL;
+ return;
+ }
+ DBG_MSG(4, ("Get vma: 0x%08lx vma->vm_start: 0x%08lx\n", (unsigned long)vma, vma->vm_start));
+
+ vma_usage_tracker = (struct ump_vma_usage_tracker*)vma->vm_private_data;
+ if(vma_usage_tracker == NULL)
+ {
+ DBG_MSG(3, ("Not found vma_usage_tracker\n"));
+ *mem = NULL;
+ return;
+ }
+
+ descriptor = (struct ump_memory_allocation*)vma_usage_tracker->descriptor;
+ handle = (ump_dd_handle)descriptor->handle;
+
+ DBG_MSG(3, ("Get handle: 0x%08lx\n", handle));
+ *mem = (ump_dd_mem*)handle;
+}
diff --git a/drivers/media/video/samsung/ump/linux/ump_osk_misc.c b/drivers/media/video/samsung/ump/linux/ump_osk_misc.c
new file mode 100644
index 0000000..12066eb
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_osk_misc.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_osk_misc.c
+ * Implementation of the OS abstraction layer for the UMP kernel device driver
+ */
+
+
+#include "ump_osk.h"
+
+#include <linux/kernel.h>
+#include "ump_kernel_linux.h"
+
+/* is called from ump_kernel_constructor in common code */
+_mali_osk_errcode_t _ump_osk_init( void )
+{
+ if (0 != ump_kernel_device_initialize())
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _ump_osk_term( void )
+{
+ ump_kernel_device_terminate();
+ return _MALI_OSK_ERR_OK;
+}
diff --git a/drivers/media/video/samsung/ump/linux/ump_ukk_ref_wrappers.c b/drivers/media/video/samsung/ump/linux/ump_ukk_ref_wrappers.c
new file mode 100644
index 0000000..3e355c0
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_ukk_ref_wrappers.c
@@ -0,0 +1,315 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+#if defined(CONFIG_ION_EXYNOS) || defined(CONFIG_DMA_SHARED_BUFFER)
+#include <linux/scatterlist.h>
+#include "ump_kernel_interface_ref_drv.h"
+#include "mali_osk_list.h"
+#ifdef CONFIG_ION_EXYNOS
+#include <linux/ion.h>
+#include "../../../../../gpu/ion/ion_priv.h"
+extern struct ion_device *ion_exynos;
+extern struct ion_client *ion_client_ump;
+#endif
+#ifdef CONFIG_DMA_SHARED_BUFFER
+#include <linux/dma-buf.h>
+#endif
+#endif
+
+/*
+ * IOCTL operation; Allocate UMP memory.
+ *
+ * @argument:     user-space pointer to an _ump_uk_allocate_s request
+ * @session_data: per-fd session the new allocation is tracked under
+ *
+ * Copies the request from user space, allocates through _ump_ukk_allocate(),
+ * and copies the updated request (carrying the new secure ID) back to user
+ * space. If that final copy fails, the freshly allocated memory is released
+ * again via _ump_ukk_release() so the kernel does not leak it.
+ *
+ * Returns 0 on success, -ENOTTY for NULL parameters, -EFAULT on a failed
+ * user copy, or the mapped error code from _ump_ukk_allocate().
+ */
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_allocate_s user_interaction;
+	_mali_osk_errcode_t err;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
+		return -ENOTTY;
+	}
+
+	/* Copy the user space memory to kernel space (so we safely can read it) */
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	err = _ump_ukk_allocate( &user_interaction );
+	if( _MALI_OSK_ERR_OK != err )
+	{
+		DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n"));
+		return map_errcode(err);
+	}
+	/* Do not leak the kernel-side context pointer back to user space */
+	user_interaction.ctx = NULL;
+
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+	{
+		/* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */
+		_ump_uk_release_s release_args;
+
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
+
+		release_args.ctx = (void *) session_data;
+		release_args.secure_id = user_interaction.secure_id;
+
+		err = _ump_ukk_release( &release_args );
+		if(_MALI_OSK_ERR_OK != err)
+		{
+			MSG_ERR(("_ump_ukk_release() also failed when trying to release newly allocated memory in ump_ioctl_allocate()\n"));
+		}
+
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
+
+#ifdef CONFIG_ION_EXYNOS
+/*
+ * IOCTL operation; Import an ION buffer fd as UMP memory.
+ *
+ * @argument:     user-space pointer to an _ump_uk_ion_import_s request
+ * @session_data: per-fd session the imported memory is tracked under
+ *
+ * Walks the ION buffer's scatterlist, copies each segment's physical
+ * address and length into a ump_dd_physical_block array (capped at 1024
+ * segments), wraps the blocks in a new UMP handle, adds it to the session's
+ * memory list, and copies the resulting secure ID and size back to user
+ * space.
+ *
+ * Returns 0 on success, -ENOTTY for NULL parameters, -EFAULT otherwise.
+ * NOTE(review): the MSG_ERR strings below still say "ump_ioctl_allocate()";
+ * they were copied from the allocate wrapper.
+ */
+int ump_ion_import_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_ion_import_s user_interaction;
+	ump_dd_handle *ump_handle;
+	ump_dd_physical_block * blocks;
+	unsigned long num_blocks;
+	struct ion_handle *ion_hnd;
+	struct scatterlist *sg;
+	struct scatterlist *sg_ion;
+	unsigned long i = 0;
+
+	ump_session_memory_list_element * session_memory_element = NULL;
+	/* Lazily create the shared ION client on first import */
+	if (ion_client_ump==NULL)
+		ion_client_ump = ion_client_create(ion_exynos, -1, "ump");
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_allocate()\n"));
+		return -ENOTTY;
+	}
+
+	/* Copy the user space memory to kernel space (so we safely can read it) */
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_allocate()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	/* translate fd to secure ID*/
+	/* NOTE(review): neither ion_import_fd() nor ion_map_dma() results are
+	 * error-checked here; an invalid fd would be dereferenced below —
+	 * confirm the caller guarantees a valid ION fd. */
+	ion_hnd = ion_import_fd(ion_client_ump, user_interaction.ion_fd);
+	sg_ion = ion_map_dma(ion_client_ump,ion_hnd);
+
+	/* Room for up to 1024 physical segments.
+	 * NOTE(review): allocation result is not NULL-checked before use. */
+	blocks = (ump_dd_physical_block*)_mali_osk_malloc(sizeof(ump_dd_physical_block)*1024);
+	sg = sg_ion;
+	do {
+		blocks[i].addr = sg_phys(sg);
+		blocks[i].size = sg_dma_len(sg);
+		i++;
+		if (i>=1024) {
+			_mali_osk_free(blocks);
+			MSG_ERR(("ion_import fail() in ump_ioctl_allocate()\n"));
+			/* NOTE(review): returns without ion_unmap_dma()/ion_free();
+			 * the ION handle and DMA mapping leak on this path. */
+			return -EFAULT;
+		}
+		sg = sg_next(sg);
+	} while(sg);
+
+	num_blocks = i;
+
+	/* Initialize the session_memory_element, and add it to the session object */
+	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
+
+	if (NULL == session_memory_element)
+	{
+		_mali_osk_free(blocks);
+		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
+		return -EFAULT;
+	}
+
+	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
+	if (UMP_DD_HANDLE_INVALID == ump_handle)
+	{
+		_mali_osk_free(session_memory_element);
+		_mali_osk_free(blocks);
+		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
+		return -EFAULT;
+	}
+
+	/* Track the new handle in this session so it is cleaned up on close */
+	session_memory_element->mem = (ump_dd_mem*)ump_handle;
+	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
+	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+	/* Physical block info has been copied; the ION mapping and handle can go */
+	ion_unmap_dma(ion_client_ump,ion_hnd);
+	ion_free(ion_client_ump, ion_hnd);
+
+	_mali_osk_free(blocks);
+
+	user_interaction.secure_id = ump_dd_secure_id_get(ump_handle);
+	user_interaction.size = ump_dd_size_get(ump_handle);
+	user_interaction.ctx = NULL;
+
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+	{
+		/* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */
+
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_allocate()\n"));
+
+		return -EFAULT;
+	}
+	return 0; /* success */
+}
+#endif
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+/*
+ * IOCTL operation; Import a dma-buf fd as UMP memory.
+ *
+ * @argument:     user-space pointer to a struct ump_uk_dmabuf request
+ * @session_data: per-fd session the imported memory is tracked under
+ *
+ * Attaches to the dma-buf, maps its sg_table, copies every segment's
+ * physical address and length into a ump_dd_physical_block array, wraps
+ * the blocks in a new UMP handle, adds it to the session's memory list,
+ * and copies the handle and size back to user space.
+ *
+ * Returns 0 on success, -EINVAL for NULL parameters, -EFAULT on user-copy
+ * or allocation failure, or the error from the dma-buf API calls.
+ */
+int ump_dmabuf_import_wrapper(u32 __user *argument,
+				struct ump_session_data *session_data)
+{
+	ump_session_memory_list_element *session = NULL;
+	struct ump_uk_dmabuf ump_dmabuf;
+	ump_dd_handle *ump_handle;
+	ump_dd_physical_block *blocks;
+	struct dma_buf_attachment *attach;
+	struct dma_buf *dma_buf;
+	struct sg_table *sgt;
+	struct scatterlist *sgl;
+	unsigned long block_size;
+	/* FIXME: this uninitialized stack struct device is handed to
+	 * dma_buf_attach() below; the exporter may dereference it.
+	 * A real device (e.g. the UMP platform device) should be used. */
+	struct device dev;
+	unsigned int i = 0, npages;
+	int ret;
+
+	/* Sanity check input parameters */
+	if (!argument || !session_data) {
+		MSG_ERR(("NULL parameter.\n"));
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&ump_dmabuf, argument,
+				sizeof(struct ump_uk_dmabuf))) {
+		MSG_ERR(("copy_from_user() failed.\n"));
+		return -EFAULT;
+	}
+
+	dma_buf = dma_buf_get(ump_dmabuf.fd);
+	if (IS_ERR(dma_buf))
+		return PTR_ERR(dma_buf);
+
+	/*
+	 * check whether dma_buf imported already exists or not.
+	 *
+	 * TODO
+	 * if already imported then dma_buf_put() should be called
+	 * and then just return dma_buf imported.
+	 */
+
+	attach = dma_buf_attach(dma_buf, &dev);
+	if (IS_ERR(attach)) {
+		ret = PTR_ERR(attach);
+		goto err_dma_buf_put;
+	}
+
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		goto err_dma_buf_detach;
+	}
+
+	npages = sgt->nents;
+
+	/* really need? */
+	ump_dmabuf.ctx = (void *)session_data;
+
+	block_size = sizeof(ump_dd_physical_block) * npages;
+
+	/* NOTE(review): allocation result is not NULL-checked before the
+	 * copy loop below. */
+	blocks = (ump_dd_physical_block *)_mali_osk_malloc(block_size);
+	sgl = sgt->sgl;
+
+	/* Copy one physical block descriptor per scatterlist segment */
+	while (i < npages) {
+		blocks[i].addr = sg_phys(sgl);
+		blocks[i].size = sg_dma_len(sgl);
+		sgl = sg_next(sgl);
+		i++;
+	}
+
+	/*
+	 * Initialize the session memory list element, and add it
+	 * to the session object
+	 */
+	session = _mali_osk_calloc(1, sizeof(*session));
+	if (!session) {
+		DBG_MSG(1, ("Failed to allocate session.\n"));
+		ret = -EFAULT;
+		goto err_free_block;
+	}
+
+	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, i);
+	if (UMP_DD_HANDLE_INVALID == ump_handle) {
+		DBG_MSG(1, ("Failed to create ump handle.\n"));
+		ret = -EFAULT;
+		goto err_free_session;
+	}
+
+	session->mem = (ump_dd_mem *)ump_handle;
+
+	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+	_mali_osk_list_add(&(session->list),
+			&(session_data->list_head_session_memory_list));
+	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
+
+	_mali_osk_free(blocks);
+
+	ump_dmabuf.ump_handle = (uint32_t)ump_handle;
+	ump_dmabuf.size = ump_dd_size_get(ump_handle);
+
+	if (copy_to_user(argument, &ump_dmabuf,
+				sizeof(struct ump_uk_dmabuf))) {
+		MSG_ERR(("copy_to_user() failed.\n"));
+		ret = -EFAULT;
+		goto err_release_ump_handle;
+	}
+
+	/* NOTE(review): on success the attachment, sg_table and dma_buf
+	 * reference taken above are not released here, and nothing stores
+	 * them for later release — presumably they must outlive the UMP
+	 * handle, but this looks like a reference leak; confirm. */
+	return 0;
+
+err_release_ump_handle:
+	ump_dd_reference_release(ump_handle);
+err_free_session:
+	_mali_osk_free(session);
+err_free_block:
+	_mali_osk_free(blocks);
+	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+err_dma_buf_detach:
+	dma_buf_detach(dma_buf, attach);
+err_dma_buf_put:
+	dma_buf_put(dma_buf);
+	return ret;
+}
+#endif
diff --git a/drivers/media/video/samsung/ump/linux/ump_ukk_ref_wrappers.h b/drivers/media/video/samsung/ump/linux/ump_ukk_ref_wrappers.h
new file mode 100644
index 0000000..7bd4660
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_ukk_ref_wrappers.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_ref_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls for the reference implementation
+ */
+
+#ifndef __UMP_UKK_REF_WRAPPERS_H__
+#define __UMP_UKK_REF_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+#ifdef CONFIG_ION_EXYNOS
+int ump_ion_import_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+#endif
+
+#ifdef CONFIG_DMA_SHARED_BUFFER
+int ump_dmabuf_import_wrapper(u32 __user *argument,
+ struct ump_session_data *session_data);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UKK_REF_WRAPPERS_H__ */
diff --git a/drivers/media/video/samsung/ump/linux/ump_ukk_wrappers.c b/drivers/media/video/samsung/ump/linux/ump_ukk_wrappers.c
new file mode 100644
index 0000000..8b73ca8
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_ukk_wrappers.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.c
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#include <asm/uaccess.h> /* user space access */
+
+#include "ump_osk.h"
+#include "ump_uk_types.h"
+#include "ump_ukk.h"
+#include "ump_kernel_common.h"
+
+/*
+ * IOCTL operation; Negotiate version of IOCTL API.
+ *
+ * @argument:     user-space pointer to an _ump_uk_api_version_s struct
+ * @session_data: per-fd session used as the call context
+ *
+ * Copies the version request from user space, negotiates through
+ * _ump_uku_get_api_version(), and copies the result back.
+ *
+ * Returns 0 on success, -ENOTTY for NULL parameters, -EFAULT on a failed
+ * user copy, or the mapped error code from _ump_uku_get_api_version().
+ */
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_api_version_s version_info;
+	_mali_osk_errcode_t err;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_get_api_version()\n"));
+		return -ENOTTY;
+	}
+
+	/* Copy the user space memory to kernel space (so we safely can read it) */
+	if (0 != copy_from_user(&version_info, argument, sizeof(version_info)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+		return -EFAULT;
+	}
+
+	version_info.ctx = (void*) session_data;
+	err = _ump_uku_get_api_version( &version_info );
+	if( _MALI_OSK_ERR_OK != err )
+	{
+		MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n"));
+		return map_errcode(err);
+	}
+
+	/* Do not leak the kernel-side context pointer back to user space */
+	version_info.ctx = NULL;
+
+	/* Copy output data back to user space */
+	if (0 != copy_to_user(argument, &version_info, sizeof(version_info)))
+	{
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_get_api_version()\n"));
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
+
+
+/*
+ * IOCTL operation; Release reference to specified UMP memory.
+ *
+ * @argument:     user-space pointer to an _ump_uk_release_s struct
+ * @session_data: per-fd session owning the reference
+ *
+ * Copies the release request from user space and forwards it to
+ * _ump_ukk_release(). Nothing is copied back on success.
+ *
+ * Returns 0 on success, -ENOTTY for a NULL session, -EFAULT on a failed
+ * user copy, or the mapped error code from _ump_ukk_release().
+ * Note: unlike the other wrappers, only session_data is NULL-checked;
+ * a NULL argument is caught by copy_from_user() failing.
+ */
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_release_s release_args;
+	_mali_osk_errcode_t err;
+
+	/* Sanity check input parameters */
+	if (NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_release()\n"));
+		return -ENOTTY;
+	}
+
+	/* Copy the user space memory to kernel space (so we safely can read it) */
+	/* NOTE(review): the error string below says get_api_version; it was
+	 * copied from that wrapper. */
+	if (0 != copy_from_user(&release_args, argument, sizeof(release_args)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_get_api_version()\n"));
+		return -EFAULT;
+	}
+
+	release_args.ctx = (void*) session_data;
+	err = _ump_ukk_release( &release_args );
+	if( _MALI_OSK_ERR_OK != err )
+	{
+		MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n"));
+		return map_errcode(err);
+	}
+
+
+	return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Return size for specified UMP memory.
+ *
+ * @argument:     user-space pointer to an _ump_uk_size_get_s struct
+ * @session_data: per-fd session used as the call context
+ *
+ * Copies the request from user space, looks up the size through
+ * _ump_ukk_size_get(), and copies the filled-in struct back.
+ *
+ * Returns 0 on success, -ENOTTY for NULL parameters, -EFAULT on a failed
+ * user copy, or the mapped error code from _ump_ukk_size_get().
+ */
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_size_get_s user_interaction;
+	_mali_osk_errcode_t err;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_size_get()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+	err = _ump_ukk_size_get( &user_interaction );
+	if( _MALI_OSK_ERR_OK != err )
+	{
+		MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n"));
+		return map_errcode(err);
+	}
+
+	/* Do not leak the kernel-side context pointer back to user space */
+	user_interaction.ctx = NULL;
+
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_size_get()\n"));
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
+
+/*
+ * IOCTL operation; Perform cache maintenance (msync) on specified UMP memory.
+ * (The original header comment here said "Return size" — copy-paste from
+ * the size_get wrapper.)
+ *
+ * @argument:     user-space pointer to an _ump_uk_msync_s struct
+ * @session_data: per-fd session used as the call context
+ *
+ * Copies the msync request from user space, performs the operation via
+ * _ump_ukk_msync() (which reports no error code), and copies the struct
+ * back to user space.
+ *
+ * Returns 0 on success, -ENOTTY for NULL parameters, -EFAULT on a failed
+ * user copy.
+ * NOTE(review): the first MSG_ERR string below says ump_ioctl_size_get();
+ * it was copied from that wrapper.
+ */
+ int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+{
+	_ump_uk_msync_s user_interaction;
+
+	/* Sanity check input parameters */
+	if (NULL == argument || NULL == session_data)
+	{
+		MSG_ERR(("NULL parameter in ump_ioctl_size_get()\n"));
+		return -ENOTTY;
+	}
+
+	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_from_user() in ump_ioctl_msync()\n"));
+		return -EFAULT;
+	}
+
+	user_interaction.ctx = (void *) session_data;
+
+	_ump_ukk_msync( &user_interaction );
+
+	/* Do not leak the kernel-side context pointer back to user space */
+	user_interaction.ctx = NULL;
+
+	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
+	{
+		MSG_ERR(("copy_to_user() failed in ump_ioctl_msync()\n"));
+		return -EFAULT;
+	}
+
+	return 0; /* success */
+}
diff --git a/drivers/media/video/samsung/ump/linux/ump_ukk_wrappers.h b/drivers/media/video/samsung/ump/linux/ump_ukk_wrappers.h
new file mode 100644
index 0000000..4892c31
--- /dev/null
+++ b/drivers/media/video/samsung/ump/linux/ump_ukk_wrappers.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_ukk_wrappers.h
+ * Defines the wrapper functions which turn Linux IOCTL calls into _ukk_ calls
+ */
+
+#ifndef __UMP_UKK_WRAPPERS_H__
+#define __UMP_UKK_WRAPPERS_H__
+
+#include <linux/kernel.h>
+#include "ump_kernel_common.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+
+int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+#endif /* __UMP_UKK_WRAPPERS_H__ */