diff options
Diffstat (limited to 'drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c')
-rw-r--r-- | drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c | 213 |
1 file changed, 123 insertions(+), 90 deletions(-)
diff --git a/drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c b/drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c index 17af2bd..b38c714 100644 --- a/drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c +++ b/drivers/media/video/samsung/ump/linux/ump_osk_low_level_mem.c @@ -1,9 +1,9 @@ /* - * Copyright (C) 2010 ARM Limited. All rights reserved. - * + * Copyright (C) 2010-2012 ARM Limited. All rights reserved. + * * This program is free software and is provided to you under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence. - * + * * A copy of the licence is included with the program, and can also be obtained from Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ @@ -27,6 +27,7 @@ #include <linux/slab.h> #include <asm/memory.h> +#include <asm/uaccess.h> /* to verify pointers from user space */ #include <asm/cacheflush.h> #include <linux/dma-mapping.h> @@ -301,108 +302,140 @@ static void _ump_osk_msync_with_virt(ump_dd_mem * mem, ump_uk_msync_op op, u32 s return; } -void _ump_osk_msync( ump_dd_mem * mem, ump_uk_msync_op op, u32 start, u32 address, u32 size) +static void level1_cache_flush_all(void) +{ + DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n")); + __cpuc_flush_kern_all(); +} + +void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data ) { int i; - u32 start_p, end_p; - ump_dd_physical_block *block; + const void *start_v, *end_v; - DBG_MSG(3, - ("Flushing nr of blocks: %u. 
First: paddr: 0x%08x vaddr: 0x%08x size:%dB\n", - mem->nr_blocks, mem->block_array[0].addr, - phys_to_virt(mem->block_array[0].addr), - mem->block_array[0].size)); - -#ifndef USING_DMA_FLUSH - if (address) { - if ((address >= start) - && ((address + size) <= start + mem->size_bytes)) { - if (size >= SZ_64K) { - flush_all_cpu_caches(); - } else if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) - dmac_flush_range((void *)address, - (void *)(address + size - 1)); - else - dmac_map_area((void *)address, size, - DMA_TO_DEVICE); -#ifdef CONFIG_CACHE_L2X0 - if (size >= SZ_1M) - outer_clean_all(); + /* Flush L1 using virtual address, the entire range in one go. + * Only flush if user space process has a valid write mapping on given address. */ + if( (mem) && (virt!=NULL) && (access_ok(VERIFY_WRITE, virt, size)) ) + { + start_v = (void *)virt; + end_v = (void *)(start_v + size - 1); + /* There is no dmac_clean_range, so the L1 is always flushed, + * also for UMP_MSYNC_CLEAN. */ + dmac_flush_range(start_v, end_v); + DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. Cpu address: %x-%x\n", mem->secure_id, start_v,end_v)); + } + else + { + if (session_data) + { + if (op == _UMP_UK_MSYNC_FLUSH_L1 ) + { + DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush)); + session_data->has_pending_level1_cache_flush = 0; + level1_cache_flush_all(); + return; + } else - _ump_osk_msync_with_virt(mem, op, start, address, size); -#endif - return; + { + if (session_data->cache_operations_ongoing) + { + session_data->has_pending_level1_cache_flush++; + DBG_MSG(4, ("UMP[%02u] Defering the L1 flush. 
Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush) ); + } + else + { + /* Flushing the L1 cache for each switch_user() if ump_cache_operations_control(START) is not called */ + level1_cache_flush_all(); + } + } + } + else + { + DBG_MSG(4, ("Unkown state %s %d\n", __FUNCTION__, __LINE__)); + level1_cache_flush_all(); } } - if ((op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE)) { - if ((mem->size_bytes >= SZ_1M)) { - flush_all_cpu_caches(); -#ifdef CONFIG_CACHE_L2X0 - outer_flush_all(); -#endif - return; - } else if ((mem->size_bytes >= SZ_64K)) { - flush_all_cpu_caches(); -#ifdef CONFIG_CACHE_L2X0 - for (i = 0; i < mem->nr_blocks; i++) { - block = &mem->block_array[i]; - start_p = (u32) block->addr; - end_p = start_p + block->size - 1; - outer_flush_range(start_p, end_p); - } -#endif - return; + if ( NULL == mem ) return; + + if ( mem->size_bytes==size) + { + DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n",mem->secure_id)); + } + else + { + DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n", + mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr)); + } + + + /* Flush L2 using physical addresses, block for block. */ + for (i=0 ; i < mem->nr_blocks; i++) + { + u32 start_p, end_p; + ump_dd_physical_block *block; + block = &mem->block_array[i]; + + if(offset >= block->size) + { + offset -= block->size; + continue; } - } else { - if ((mem->size_bytes >= SZ_1M)) { - flush_all_cpu_caches(); -#ifdef CONFIG_CACHE_L2X0 - outer_clean_all(); -#endif - return; - } else if ((mem->size_bytes >= SZ_64K)) { - flush_all_cpu_caches(); -#ifdef CONFIG_CACHE_L2X0 - for (i = 0; i < mem->nr_blocks; i++) { - block = &mem->block_array[i]; - start_p = (u32) block->addr; + + if(offset) + { + start_p = (u32)block->addr + offset; + /* We'll zero the offset later, after using it to calculate end_p. 
*/ + } + else + { + start_p = (u32)block->addr; + } + + if(size < block->size - offset) + { + end_p = start_p + size - 1; + size = 0; + } + else + { + if(offset) + { + end_p = start_p + (block->size - offset - 1); + size -= block->size - offset; + offset = 0; + } + else + { end_p = start_p + block->size - 1; - outer_clean_range(start_p, end_p); + size -= block->size; } -#endif - return; } - } -#endif - for (i = 0; i < mem->nr_blocks; i++) { - /* TODO: Find out which flush method is best of 1)Dma OR 2)Normal flush functions */ - /*#define USING_DMA_FLUSH */ -#ifdef USING_DMA_FLUSH - DEBUG_ASSERT((PAGE_SIZE == mem->block_array[i].size)); - dma_map_page(NULL, - pfn_to_page(mem->block_array[i]. - addr >> PAGE_SHIFT), 0, PAGE_SIZE, - DMA_BIDIRECTIONAL); - /*dma_unmap_page(NULL, mem->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL); */ -#else - block = &mem->block_array[i]; - start_p = (u32) block->addr; - end_p = start_p + block->size - 1; - if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) { - dmac_flush_range(phys_to_virt(start_p), - phys_to_virt(end_p)); - outer_flush_range(start_p, end_p); - } else { - dmac_map_area(phys_to_virt(start_p), block->size, - DMA_TO_DEVICE); - outer_clean_range(start_p, end_p); + switch(op) + { + case _UMP_UK_MSYNC_CLEAN: + outer_clean_range(start_p, end_p); + break; + case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE: + outer_flush_range(start_p, end_p); + break; + case _UMP_UK_MSYNC_INVALIDATE: + outer_inv_range(start_p, end_p); + break; + default: + break; + } + + if(0 == size) + { + /* Nothing left to flush. */ + break; } -#endif } -} + return; +} void _ump_osk_mem_mapregion_get( ump_dd_mem ** mem, unsigned long vaddr) { |