author    Dorian Snyder <dastin1015@gmail.com>    2014-06-15 01:24:33 -0700
committer Dorian Snyder <dastin1015@gmail.com>    2014-08-04 18:46:06 +0000
commit    d90b43b963027b4d1559cec14e97117827a6458d (patch)
tree      00d1334c383628e7d6f38a4f7ca0c575a4916d91 /drivers/media/video/samsung/mali/common
parent    89e6992285b4274996b39cd953c0adb5e95a3236 (diff)
mali: add r3p1 for devices that need it
d710 needs to use the old Mali drivers because the new ones cause the signal to be completely killed.

Change-Id: I450c356b50e3f3521a63717a1c241e3b818b936f
Diffstat (limited to 'drivers/media/video/samsung/mali/common')
-rw-r--r--drivers/media/video/samsung/mali/common/mali_block_allocator.c391
-rw-r--r--drivers/media/video/samsung/mali/common/mali_block_allocator.h18
-rw-r--r--drivers/media/video/samsung/mali/common/mali_cluster.c218
-rw-r--r--drivers/media/video/samsung/mali/common/mali_cluster.h44
-rw-r--r--drivers/media/video/samsung/mali/common/mali_device_pause_resume.c46
-rw-r--r--drivers/media/video/samsung/mali/common/mali_device_pause_resume.h31
-rw-r--r--drivers/media/video/samsung/mali/common/mali_dlbu.c285
-rw-r--r--drivers/media/video/samsung/mali/common/mali_dlbu.h45
-rw-r--r--drivers/media/video/samsung/mali/common/mali_gp.c746
-rw-r--r--drivers/media/video/samsung/mali/common/mali_gp.h46
-rw-r--r--drivers/media/video/samsung/mali/common/mali_gp_job.c49
-rw-r--r--drivers/media/video/samsung/mali/common/mali_gp_job.h131
-rw-r--r--drivers/media/video/samsung/mali/common/mali_gp_scheduler.c443
-rw-r--r--drivers/media/video/samsung/mali/common/mali_gp_scheduler.h30
-rw-r--r--drivers/media/video/samsung/mali/common/mali_group.c841
-rw-r--r--drivers/media/video/samsung/mali/common/mali_group.h146
-rw-r--r--drivers/media/video/samsung/mali/common/mali_hw_core.c46
-rw-r--r--drivers/media/video/samsung/mali/common/mali_hw_core.h71
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_common.h183
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_core.c980
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_core.h39
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.c184
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.h101
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_mem_os.c354
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_mem_os.h37
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.c376
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.h152
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_utilization.c218
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_utilization.h44
-rw-r--r--drivers/media/video/samsung/mali/common/mali_kernel_vsync.c51
-rw-r--r--drivers/media/video/samsung/mali/common/mali_l2_cache.c414
-rw-r--r--drivers/media/video/samsung/mali/common/mali_l2_cache.h43
-rw-r--r--drivers/media/video/samsung/mali/common/mali_mem_validation.c71
-rw-r--r--drivers/media/video/samsung/mali/common/mali_mem_validation.h19
-rw-r--r--drivers/media/video/samsung/mali/common/mali_memory.c1319
-rw-r--r--drivers/media/video/samsung/mali/common/mali_memory.h82
-rw-r--r--drivers/media/video/samsung/mali/common/mali_mmu.c619
-rw-r--r--drivers/media/video/samsung/mali/common/mali_mmu.h55
-rw-r--r--drivers/media/video/samsung/mali/common/mali_mmu_page_directory.c475
-rw-r--r--drivers/media/video/samsung/mali/common/mali_mmu_page_directory.h100
-rw-r--r--drivers/media/video/samsung/mali/common/mali_osk.h1798
-rw-r--r--drivers/media/video/samsung/mali/common/mali_osk_bitops.h166
-rw-r--r--drivers/media/video/samsung/mali/common/mali_osk_list.h184
-rw-r--r--drivers/media/video/samsung/mali/common/mali_osk_mali.h222
-rw-r--r--drivers/media/video/samsung/mali/common/mali_osk_profiling.h147
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pm.c552
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pm.h56
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pmu.c199
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pmu.h70
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pp.c710
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pp.h47
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pp_job.c95
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pp_job.h273
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pp_scheduler.c594
-rw-r--r--drivers/media/video/samsung/mali/common/mali_pp_scheduler.h38
-rw-r--r--drivers/media/video/samsung/mali/common/mali_scheduler.c37
-rw-r--r--drivers/media/video/samsung/mali/common/mali_scheduler.h21
-rw-r--r--drivers/media/video/samsung/mali/common/mali_session.c47
-rw-r--r--drivers/media/video/samsung/mali/common/mali_session.h65
-rw-r--r--drivers/media/video/samsung/mali/common/mali_ukk.h612
-rw-r--r--drivers/media/video/samsung/mali/common/mali_user_settings_db.c88
-rw-r--r--drivers/media/video/samsung/mali/common/mali_user_settings_db.h40
62 files changed, 15604 insertions, 0 deletions
diff --git a/drivers/media/video/samsung/mali/common/mali_block_allocator.c b/drivers/media/video/samsung/mali/common/mali_block_allocator.c
new file mode 100644
index 0000000..269e662
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_block_allocator.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_osk.h"
+
+#define MALI_BLOCK_SIZE (256UL * 1024UL) /* 256 kB, remember to keep the ()s */
+
+typedef struct block_info
+{
+ struct block_info * next;
+} block_info;
+
+/* The structure used as the handle produced by block_allocator_allocate,
+ * and removed by block_allocator_release */
+typedef struct block_allocator_allocation
+{
+ /* The list will be released in reverse order */
+ block_info *last_allocated;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 start_offset;
+ u32 mapping_length;
+} block_allocator_allocation;
+
+
+typedef struct block_allocator
+{
+ _mali_osk_lock_t *mutex;
+ block_info * all_blocks;
+ block_info * first_free;
+ u32 base;
+ u32 cpu_usage_adjust;
+ u32 num_blocks;
+} block_allocator;
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block);
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static void block_allocator_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block );
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ block_allocator * info;
+ u32 usable_size;
+ u32 num_blocks;
+
+ usable_size = size & ~(MALI_BLOCK_SIZE - 1);
+ MALI_DEBUG_PRINT(3, ("Mali block allocator create for region starting at 0x%08X length 0x%08X\n", base_address, size));
+ MALI_DEBUG_PRINT(4, ("%d usable bytes\n", usable_size));
+ num_blocks = usable_size / MALI_BLOCK_SIZE;
+ MALI_DEBUG_PRINT(4, ("which becomes %d blocks\n", num_blocks));
+
+ if (usable_size == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory block of size %d is unusable\n", size));
+ return NULL;
+ }
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(block_allocator));
+ if (NULL != info)
+ {
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO);
+ if (NULL != info->mutex)
+ {
+ info->all_blocks = _mali_osk_malloc(sizeof(block_info) * num_blocks);
+ if (NULL != info->all_blocks)
+ {
+ u32 i;
+ info->first_free = NULL;
+ info->num_blocks = num_blocks;
+
+ info->base = base_address;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ for ( i = 0; i < num_blocks; i++)
+ {
+ info->all_blocks[i].next = info->first_free;
+ info->first_free = &info->all_blocks[i];
+ }
+
+ allocator->allocate = block_allocator_allocate;
+ allocator->allocate_page_table_block = block_allocator_allocate_page_table_block;
+ allocator->destroy = block_allocator_destroy;
+ allocator->stat = block_allocator_stat;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_lock_term(info->mutex);
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static void block_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ block_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (block_allocator*)allocator->ctx;
+
+ _mali_osk_free(info->all_blocks);
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+MALI_STATIC_INLINE u32 get_phys(block_allocator * info, block_info * block)
+{
+ return info->base + ((block - info->all_blocks) * MALI_BLOCK_SIZE);
+}
+
+static mali_physical_memory_allocation_result block_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ block_allocator * info;
+ u32 left;
+ block_info * last_allocated = NULL;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ block_allocator_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (block_allocator*)ctx;
+ left = descriptor->size - *offset;
+ MALI_DEBUG_ASSERT(0 != left);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ret_allocation = _mali_osk_malloc( sizeof(block_allocator_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ /* Failure; try another allocator by returning MALI_MEM_ALLOC_NONE */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return result;
+ }
+
+ ret_allocation->start_offset = *offset;
+ ret_allocation->mapping_length = 0;
+
+ while ((left > 0) && (info->first_free))
+ {
+ block_info * block;
+ u32 phys_addr;
+ u32 padding;
+ u32 current_mapping_size;
+
+ block = info->first_free;
+ info->first_free = info->first_free->next;
+ block->next = last_allocated;
+ last_allocated = block;
+
+ phys_addr = get_phys(info, block);
+
+ padding = *offset & (MALI_BLOCK_SIZE-1);
+
+ if (MALI_BLOCK_SIZE - padding < left)
+ {
+ current_mapping_size = MALI_BLOCK_SIZE - padding;
+ }
+ else
+ {
+ current_mapping_size = left;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, phys_addr + padding, info->cpu_usage_adjust, current_mapping_size))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->start_offset, ret_allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ /* release all memory back to the pool */
+ while (last_allocated)
+ {
+ /* This relinks every block we've just allocated back into the free-list */
+ block = last_allocated->next;
+ last_allocated->next = info->first_free;
+ info->first_free = last_allocated;
+ last_allocated = block;
+ }
+
+ break;
+ }
+
+ *offset += current_mapping_size;
+ left -= current_mapping_size;
+ ret_allocation->mapping_length += current_mapping_size;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ if (last_allocated)
+ {
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Record all the information about this allocation */
+ ret_allocation->last_allocated = last_allocated;
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+
+ alloc_info->ctx = info;
+ alloc_info->handle = ret_allocation;
+ alloc_info->release = block_allocator_release;
+ }
+ else
+ {
+ /* Free the allocation information - nothing to be passed back */
+ _mali_osk_free( ret_allocation );
+ }
+
+ return result;
+}
+
+static void block_allocator_release(void * ctx, void * handle)
+{
+ block_allocator * info;
+ block_info * block, * next;
+ block_allocator_allocation *allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (block_allocator*)ctx;
+ allocation = (block_allocator_allocation*)handle;
+ block = allocation->last_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* unmap */
+ mali_allocation_engine_unmap_physical(allocation->engine, allocation->descriptor, allocation->start_offset, allocation->mapping_length, (_mali_osk_mem_mapregion_flags_t)0);
+
+ while (block)
+ {
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ next = block->next;
+
+ /* relink into free-list */
+ block->next = info->first_free;
+ info->first_free = block;
+
+ /* advance the loop */
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free( allocation );
+}
+
+
+static mali_physical_memory_allocation_result block_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+ block_allocator * info;
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(block);
+ info = (block_allocator*)ctx;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ if (NULL != info->first_free)
+ {
+ void * virt;
+ u32 phys;
+ u32 size;
+ block_info * alloc;
+ alloc = info->first_free;
+
+ phys = get_phys(info, alloc); /* Does not modify info or alloc */
+ size = MALI_BLOCK_SIZE; /* Must be multiple of MALI_MMU_PAGE_SIZE */
+ virt = _mali_osk_mem_mapioregion( phys, size, "Mali block allocator page tables" );
+
+ /* Failure of _mali_osk_mem_mapioregion will result in MALI_MEM_ALLOC_INTERNAL_FAILURE,
+ * because it's unlikely another allocator will be able to map in. */
+
+ if ( NULL != virt )
+ {
+ block->ctx = info; /* same as incoming ctx */
+ block->handle = alloc;
+ block->phys_base = phys;
+ block->size = size;
+ block->release = block_allocator_release_page_table_block;
+ block->mapping = virt;
+
+ info->first_free = alloc->next;
+
+ alloc->next = NULL; /* Could potentially link many blocks together instead */
+
+ result = MALI_MEM_ALLOC_FINISHED;
+ }
+ }
+ else result = MALI_MEM_ALLOC_NONE;
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+
+static void block_allocator_release_page_table_block( mali_page_table_block *page_table_block )
+{
+ block_allocator * info;
+ block_info * block, * next;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (block_allocator*)page_table_block->ctx;
+ block = (block_info*)page_table_block->handle;
+
+ MALI_DEBUG_ASSERT_POINTER(info);
+ MALI_DEBUG_ASSERT_POINTER(block);
+
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ /* Unmap all the physical memory at once */
+ _mali_osk_mem_unmapioregion( page_table_block->phys_base, page_table_block->size, page_table_block->mapping );
+
+ /** @note This loop handles the case where more than one block_info was linked.
+ * Probably unnecessary for page table block releasing. */
+ while (block)
+ {
+ next = block->next;
+
+ MALI_DEBUG_ASSERT(!((block < info->all_blocks) || (block > (info->all_blocks + info->num_blocks))));
+
+ block->next = info->first_free;
+ info->first_free = block;
+
+ block = next;
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
+
+static u32 block_allocator_stat(mali_physical_memory_allocator * allocator)
+{
+ block_allocator * info;
+ block_info *block;
+ u32 free_blocks = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+
+ info = (block_allocator*)allocator->ctx;
+ block = info->first_free;
+
+ while(block)
+ {
+ free_blocks++;
+ block = block->next;
+ }
+ return (info->num_blocks - free_blocks) * MALI_BLOCK_SIZE;
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_block_allocator.h b/drivers/media/video/samsung/mali/common/mali_block_allocator.h
new file mode 100644
index 0000000..6c6f13e
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_block_allocator.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_BLOCK_ALLOCATOR_H__
+#define __MALI_BLOCK_ALLOCATOR_H__
+
+#include "mali_kernel_memory_engine.h"
+
+mali_physical_memory_allocator * mali_block_allocator_create(u32 base_address, u32 cpu_usage_adjust, u32 size, const char *name);
+
+#endif /* __MALI_BLOCK_ALLOCATOR_H__ */
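
As a point of reference, here is a minimal sketch of how a platform integration might instantiate and tear down this allocator; the base address, size, and name below are illustrative assumptions, not values taken from this commit.

#include "mali_block_allocator.h"

static mali_physical_memory_allocator *example_dedicated_allocator = NULL;

/* Hypothetical setup for a dedicated 16 MB memory bank at an example physical
 * address; cpu_usage_adjust is 0 when the CPU and GPU see the same addresses. */
static _mali_osk_errcode_t example_dedicated_memory_setup(void)
{
	example_dedicated_allocator = mali_block_allocator_create(0x80000000, 0, 16 * 1024 * 1024, "example dedicated bank");
	if (NULL == example_dedicated_allocator)
	{
		return _MALI_OSK_ERR_FAULT;
	}
	return _MALI_OSK_ERR_OK;
}

static void example_dedicated_memory_teardown(void)
{
	if (NULL != example_dedicated_allocator)
	{
		/* The allocator is destroyed through its own function pointer,
		 * which mali_block_allocator_create() points at block_allocator_destroy() */
		example_dedicated_allocator->destroy(example_dedicated_allocator);
		example_dedicated_allocator = NULL;
	}
}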
diff --git a/drivers/media/video/samsung/mali/common/mali_cluster.c b/drivers/media/video/samsung/mali/common/mali_cluster.c
new file mode 100644
index 0000000..f0fb2b6
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_cluster.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_cluster.h"
+#include "mali_osk.h"
+#include "mali_group.h"
+#include "mali_l2_cache.h"
+#include "mali_scheduler.h"
+
+static struct mali_cluster *mali_global_clusters[MALI_MAX_NUMBER_OF_CLUSTERS] = { NULL, NULL, NULL };
+static u32 mali_global_num_clusters = 0;
+
+/**
+ * This structure represents a render cluster.
+ * A render cluster is defined by all the cores that share the same Mali L2 cache.
+ */
+struct mali_cluster
+{
+ struct mali_l2_cache_core *l2;
+ u32 number_of_groups;
+ struct mali_group* groups[MALI_MAX_NUMBER_OF_GROUPS_PER_CLUSTER];
+ u32 last_invalidated_id;
+ mali_bool power_is_enabled;
+};
+
+struct mali_cluster *mali_cluster_create(struct mali_l2_cache_core *l2_cache)
+{
+ struct mali_cluster *cluster = NULL;
+
+ if (mali_global_num_clusters >= MALI_MAX_NUMBER_OF_CLUSTERS)
+ {
+ MALI_PRINT_ERROR(("Mali cluster: Too many cluster objects created\n"));
+ return NULL;
+ }
+
+ cluster = _mali_osk_malloc(sizeof(struct mali_cluster));
+ if (NULL != cluster)
+ {
+ _mali_osk_memset(cluster, 0, sizeof(struct mali_cluster));
+ cluster->l2 = l2_cache; /* This cluster now owns this L2 cache object */
+ cluster->last_invalidated_id = 0;
+ cluster->power_is_enabled = MALI_TRUE;
+
+ mali_global_clusters[mali_global_num_clusters] = cluster;
+ mali_global_num_clusters++;
+
+ return cluster;
+ }
+
+ return NULL;
+}
+
+void mali_cluster_power_is_enabled_set(struct mali_cluster * cluster, mali_bool power_is_enabled)
+{
+ cluster->power_is_enabled = power_is_enabled;
+}
+
+mali_bool mali_cluster_power_is_enabled_get(struct mali_cluster * cluster)
+{
+ return cluster->power_is_enabled;
+}
+
+
+void mali_cluster_add_group(struct mali_cluster *cluster, struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(cluster);
+
+ if (cluster->number_of_groups < MALI_MAX_NUMBER_OF_GROUPS_PER_CLUSTER)
+ {
+ /* This cluster now owns the group object */
+ cluster->groups[cluster->number_of_groups] = group;
+ cluster->number_of_groups++;
+ }
+}
+
+void mali_cluster_delete(struct mali_cluster *cluster)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(cluster);
+
+ /* Free all the resources we own */
+ for (i = 0; i < cluster->number_of_groups; i++)
+ {
+ mali_group_delete(cluster->groups[i]);
+ }
+
+ if (NULL != cluster->l2)
+ {
+ mali_l2_cache_delete(cluster->l2);
+ }
+
+ for (i = 0; i < mali_global_num_clusters; i++)
+ {
+ if (mali_global_clusters[i] == cluster)
+ {
+ mali_global_clusters[i] = NULL;
+ mali_global_num_clusters--;
+ break;
+ }
+ }
+
+ _mali_osk_free(cluster);
+}
+
+void mali_cluster_reset(struct mali_cluster *cluster)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(cluster);
+
+ /* Reset all the groups we own */
+ for (i = 0; i < cluster->number_of_groups; i++)
+ {
+ struct mali_group *group = cluster->groups[i];
+
+ mali_group_reset(group);
+ }
+
+ if (NULL != cluster->l2)
+ {
+ mali_l2_cache_reset(cluster->l2);
+ }
+}
+
+struct mali_l2_cache_core* mali_cluster_get_l2_cache_core(struct mali_cluster *cluster)
+{
+ MALI_DEBUG_ASSERT_POINTER(cluster);
+ return cluster->l2;
+}
+
+struct mali_group *mali_cluster_get_group(struct mali_cluster *cluster, u32 index)
+{
+ MALI_DEBUG_ASSERT_POINTER(cluster);
+
+ if (index < cluster->number_of_groups)
+ {
+ return cluster->groups[index];
+ }
+
+ return NULL;
+}
+
+struct mali_cluster *mali_cluster_get_global_cluster(u32 index)
+{
+ if (MALI_MAX_NUMBER_OF_CLUSTERS > index)
+ {
+ return mali_global_clusters[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_cluster_get_glob_num_clusters(void)
+{
+ return mali_global_num_clusters;
+}
+
+mali_bool mali_cluster_l2_cache_invalidate_all(struct mali_cluster *cluster, u32 id)
+{
+ MALI_DEBUG_ASSERT_POINTER(cluster);
+
+ if (NULL != cluster->l2)
+ {
+ /* If the last cache invalidation was done by a job with a higher id we
+ * don't have to flush. Since user space will store jobs w/ their
+ * corresponding memory in sequence (first job #0, then job #1, ...),
+ * we don't have to flush for job n-1 if job n has already invalidated
+ * the cache since we know for sure that job n-1's memory was already
+ * written when job n was started. */
+ if (((s32)id) <= ((s32)cluster->last_invalidated_id))
+ {
+ return MALI_FALSE;
+ }
+ else
+ {
+ cluster->last_invalidated_id = mali_scheduler_get_new_id();
+ }
+
+ mali_l2_cache_invalidate_all(cluster->l2);
+ }
+ return MALI_TRUE;
+}
+
+void mali_cluster_l2_cache_invalidate_all_force(struct mali_cluster *cluster)
+{
+ MALI_DEBUG_ASSERT_POINTER(cluster);
+
+ if (NULL != cluster->l2)
+ {
+ cluster->last_invalidated_id = mali_scheduler_get_new_id();
+ mali_l2_cache_invalidate_all(cluster->l2);
+ }
+}
+
+void mali_cluster_invalidate_pages(u32 *pages, u32 num_pages)
+{
+ u32 i;
+
+ for (i = 0; i < mali_global_num_clusters; i++)
+ {
+ /*additional check for cluster*/
+ if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_clusters[i]->l2))
+ {
+ mali_l2_cache_invalidate_pages(mali_global_clusters[i]->l2, pages, num_pages);
+ }
+ mali_l2_cache_unlock_power_state(mali_global_clusters[i]->l2);
+ /*check for failed power locking???*/
+ }
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_cluster.h b/drivers/media/video/samsung/mali/common/mali_cluster.h
new file mode 100644
index 0000000..33debdb
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_cluster.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_CLUSTER_H__
+#define __MALI_CLUSTER_H__
+
+#include "mali_osk.h"
+#include "mali_l2_cache.h"
+
+/* Maximum 1 GP and 4 PP for a cluster (Mali-400 Quad-core) */
+#define MALI_MAX_NUMBER_OF_GROUPS_PER_CLUSTER 5
+#define MALI_MAX_NUMBER_OF_CLUSTERS 3
+
+struct mali_cluster;
+struct mali_group;
+
+struct mali_cluster *mali_cluster_create(struct mali_l2_cache_core *l2_cache);
+void mali_cluster_add_group(struct mali_cluster *cluster, struct mali_group *group);
+void mali_cluster_delete(struct mali_cluster *cluster);
+
+void mali_cluster_power_is_enabled_set(struct mali_cluster * cluster, mali_bool power_is_enabled);
+mali_bool mali_cluster_power_is_enabled_get(struct mali_cluster * cluster);
+
+void mali_cluster_reset(struct mali_cluster *cluster);
+
+struct mali_l2_cache_core* mali_cluster_get_l2_cache_core(struct mali_cluster *cluster);
+struct mali_group *mali_cluster_get_group(struct mali_cluster *cluster, u32 index);
+
+struct mali_cluster *mali_cluster_get_global_cluster(u32 index);
+u32 mali_cluster_get_glob_num_clusters(void);
+
+/* Returns MALI_TRUE if it did the flush */
+mali_bool mali_cluster_l2_cache_invalidate_all(struct mali_cluster *cluster, u32 id);
+void mali_cluster_l2_cache_invalidate_all_force(struct mali_cluster *cluster);
+void mali_cluster_invalidate_pages(u32 *pages, u32 num_pages);
+
+#endif /* __MALI_CLUSTER_H__ */
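
For orientation, a minimal sketch (not part of this commit) of how a caller could walk the global cluster list and request an L2 invalidate using only the functions declared above; the job id argument is illustrative.

#include "mali_cluster.h"

/* Invalidate the L2 cache of every registered cluster for a given job id.
 * mali_cluster_l2_cache_invalidate_all() skips the flush when a newer job
 * has already invalidated the cache, as documented in mali_cluster.c. */
static void example_invalidate_all_clusters(u32 job_id)
{
	u32 i;
	u32 num_clusters = mali_cluster_get_glob_num_clusters();

	for (i = 0; i < num_clusters; i++)
	{
		struct mali_cluster *cluster = mali_cluster_get_global_cluster(i);
		if (NULL != cluster)
		{
			(void)mali_cluster_l2_cache_invalidate_all(cluster, job_id);
		}
	}
}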
diff --git a/drivers/media/video/samsung/mali/common/mali_device_pause_resume.c b/drivers/media/video/samsung/mali/common/mali_device_pause_resume.c
new file mode 100644
index 0000000..6af1279
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_device_pause_resume.c
@@ -0,0 +1,46 @@
+/**
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_device_pause_resume.c
+ * Implementation of the Mali pause/resume functionality
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_pm.h"
+
+void mali_dev_pause(mali_bool *power_is_on)
+{
+ mali_gp_scheduler_suspend();
+ mali_pp_scheduler_suspend();
+
+ /*
+ * Take and hold the PM lock to be sure we don't change power state as well.
+ * (it might be unsafe, for instance, to change the frequency while the Mali GPU is powered off)
+ */
+ mali_pm_execute_state_change_lock();
+ if (NULL != power_is_on)
+ {
+ *power_is_on = mali_pm_is_powered_on();
+ }
+}
+
+void mali_dev_resume(void)
+{
+ mali_pm_execute_state_change_unlock();
+ mali_gp_scheduler_resume();
+ mali_pp_scheduler_resume();
+}
+
+/*
+EXPORT_SYMBOL(mali_dev_pause);
+EXPORT_SYMBOL(mali_dev_resume);
+*/
diff --git a/drivers/media/video/samsung/mali/common/mali_device_pause_resume.h b/drivers/media/video/samsung/mali/common/mali_device_pause_resume.h
new file mode 100644
index 0000000..6be75b0
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_device_pause_resume.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DEVICE_PAUSE_RESUME_H__
+#define __MALI_DEVICE_PAUSE_RESUME_H__
+
+#include "mali_osk.h"
+
+/**
+ * Pause the scheduling and power state changes of Mali device driver.
+ * mali_dev_resume() must always be called as soon as possible after this function
+ * in order to resume normal operation of the Mali driver.
+ *
+ * @param power_is_on Receives the current power status of the Mali GPU. MALI_TRUE if the GPU is powered on
+ */
+void mali_dev_pause(mali_bool *power_is_on);
+
+/**
+ * Resume scheduling and allow power changes in Mali device driver.
+ * This must always be called after mali_dev_pause().
+ */
+void mali_dev_resume(void);
+
+#endif /* __MALI_DEVICE_PAUSE_RESUME_H__ */
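
A minimal usage sketch, assuming a hypothetical platform helper mali_platform_set_clock() that is not part of this commit, showing how a DVFS handler might bracket a clock change with the pause/resume pair declared above.

#include "mali_device_pause_resume.h"

/* mali_platform_set_clock() is a hypothetical platform-specific helper used
 * here only to illustrate the calling pattern. */
extern void mali_platform_set_clock(void);

static void example_change_gpu_clock(void)
{
	mali_bool power_is_on = MALI_FALSE;

	mali_dev_pause(&power_is_on);
	if (MALI_TRUE == power_is_on)
	{
		/* Scheduling and power state changes are held off here, so the
		 * frequency can be changed without racing a power transition. */
		mali_platform_set_clock();
	}
	mali_dev_resume();
}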
diff --git a/drivers/media/video/samsung/mali/common/mali_dlbu.c b/drivers/media/video/samsung/mali/common/mali_dlbu.c
new file mode 100644
index 0000000..fcc51fa
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_dlbu.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_dlbu.h"
+#include "mali_memory.h"
+#include "mali_pp.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_hw_core.h"
+
+/**
+ * Size of DLBU registers in bytes
+ */
+#define MALI_DLBU_SIZE 0x400
+
+u32 mali_dlbu_phys_addr = 0;
+static mali_io_address mali_dlbu_cpu_addr = 0;
+
+static u32 mali_dlbu_tile_position;
+
+/**
+ * DLBU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_dlbu_register {
+ MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR = 0x0000, /**< Master tile list physical base address;
+ 31:12 Physical address to the page used for the DLBU
+ 0 DLBU enable - setting this bit to 1 enables the AXI bus
+ between PPs and L2s; setting it to 0 disables the router and
+ no further transactions are sent to the DLBU */
+ MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR = 0x0004, /**< Master tile list virtual base address;
+ 31:12 Virtual address to the page used for the DLBU */
+ MALI_DLBU_REGISTER_TLLIST_VBASEADDR = 0x0008, /**< Tile list virtual base address;
+ 31:12 Virtual address to the tile list. This address is used when
+ calculating the call address sent to PP.*/
+ MALI_DLBU_REGISTER_FB_DIM = 0x000C, /**< Framebuffer dimension;
+ 23:16 Number of tiles in Y direction-1
+ 7:0 Number of tiles in X direction-1 */
+ MALI_DLBU_REGISTER_TLLIST_CONF = 0x0010, /**< Tile list configuration;
+ 29:28 select the size of each allocated block: 0=128 bytes, 1=256, 2=512, 3=1024
+ 21:16 2^n number of tiles to be binned to one tile list in Y direction
+ 5:0 2^n number of tiles to be binned to one tile list in X direction */
+ MALI_DLBU_REGISTER_START_TILE_POS = 0x0014, /**< Start tile positions;
+ 31:24 start position in Y direction for group 1
+ 23:16 start position in X direction for group 1
+ 15:8 start position in Y direction for group 0
+ 7:0 start position in X direction for group 0 */
+ MALI_DLBU_REGISTER_PP_ENABLE_MASK = 0x0018, /**< PP enable mask;
+ 7 enable PP7 for load balancing
+ 6 enable PP6 for load balancing
+ 5 enable PP5 for load balancing
+ 4 enable PP4 for load balancing
+ 3 enable PP3 for load balancing
+ 2 enable PP2 for load balancing
+ 1 enable PP1 for load balancing
+ 0 enable PP0 for load balancing */
+} mali_dlbu_register;
+
+typedef enum
+{
+ PP0ENABLE = 0,
+ PP1ENABLE,
+ PP2ENABLE,
+ PP3ENABLE,
+ PP4ENABLE,
+ PP5ENABLE,
+ PP6ENABLE,
+ PP7ENABLE
+} mali_dlbu_pp_enable;
+
+struct mali_dlbu_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ u32 pp_cores_mask; /**< This is a mask for the PP cores whose operation will be controlled by the DLBU;
+ see the MALI_DLBU_REGISTER_PP_ENABLE_MASK register */
+};
+
+_mali_osk_errcode_t mali_dlbu_initialize(void)
+{
+
+ MALI_DEBUG_PRINT(2, ("Dynamic Load Balancing Unit initializing\n"));
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr))
+ {
+ MALI_SUCCESS;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_terminate(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
+
+ mali_mmu_release_table_page(mali_dlbu_phys_addr);
+}
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource)
+{
+ struct mali_dlbu_core *core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali DLBU: Creating Mali dynamic load balancing unit: %s\n", resource->description));
+
+ core = _mali_osk_malloc(sizeof(struct mali_dlbu_core));
+ if (NULL != core)
+ {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI_DLBU_SIZE))
+ {
+ if (_MALI_OSK_ERR_OK == mali_dlbu_reset(core))
+ {
+ mali_hw_core_register_write(&core->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_VADDR, MALI_DLB_VIRT_ADDR);
+
+ return core;
+ }
+ MALI_PRINT_ERROR(("Failed to reset DLBU %s\n", core->hw_core.description));
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali DLBU: Failed to allocate memory for DLBU core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
+{
+ mali_dlbu_reset(dlbu);
+ mali_hw_core_delete(&dlbu->hw_core);
+ _mali_osk_free(dlbu);
+}
+
+void mali_dlbu_enable(struct mali_dlbu_core *dlbu)
+{
+ u32 wval = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR);
+
+ wval |= 0x1;
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, wval);
+}
+
+void mali_dlbu_disable(struct mali_dlbu_core *dlbu)
+{
+ u32 wval = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR);
+
+ wval &= ~0x1; /* clear the DLBU enable bit (bit 0) */
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, wval);
+}
+
+_mali_osk_errcode_t mali_dlbu_enable_pp_core(struct mali_dlbu_core *dlbu, u32 pp_core_enable, u32 val)
+{
+ u32 wval = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK);
+
+ if ((pp_core_enable < mali_pp_get_glob_num_pp_cores()) && ((0 == val) || (1 == val))) /* check for valid input parameters */
+ {
+ if (1 == val)
+ {
+ wval |= (1 << pp_core_enable); /* set the enable bit for this PP core, see MALI_DLBU_REGISTER_PP_ENABLE_MASK */
+ }
+ else
+ {
+ wval &= ~(1 << pp_core_enable); /* clear the enable bit for this PP core */
+ }
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, wval);
+ dlbu->pp_cores_mask = wval;
+
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_dlbu_enable_all_pp_cores(struct mali_dlbu_core *dlbu)
+{
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+}
+
+void mali_dlbu_disable_all_pp_cores(struct mali_dlbu_core *dlbu)
+{
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, 0x0);
+}
+
+void mali_dlbu_setup(struct mali_dlbu_core *dlbu, u8 fb_xdim, u8 fb_ydim, u8 xtilesize, u8 ytilesize, u8 blocksize, u8 xgr0, u8 ygr0, u8 xgr1, u8 ygr1)
+{
+ u32 wval = 0x0;
+
+ /* write the framebuffer dimensions */
+ wval = ((u32)fb_ydim << 16) | (u32)fb_xdim; /* bits 23:16 = Y tiles - 1, bits 7:0 = X tiles - 1 */
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_FB_DIM, wval);
+
+ /* write the tile list configuration */
+ wval = 0x0;
+ wval = ((u32)blocksize << 28) | ((u32)ytilesize << 16) | ((u32)xtilesize); /* bits 29:28 = block size, 21:16 = Y binning, 5:0 = X binning */
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_CONF, wval);
+
+ /* write the start tile position */
+ wval = 0x0;
+ wval = ((u32)ygr1 << 24) | ((u32)xgr1 << 16) | ((u32)ygr0 << 8) | (u32)xgr0; /* per-group start positions, see MALI_DLBU_REGISTER_START_TILE_POS */
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_START_TILE_POS, wval);
+}
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(dlbu);
+
+ MALI_DEBUG_PRINT(4, ("Mali DLBU: mali_dlbu_reset: %s\n", dlbu->hw_core.description));
+
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_MASTER_TLLIST_PHYS_ADDR, mali_dlbu_phys_addr);
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, 0x00);
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_FB_DIM, 0x00);
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_CONF, 0x00);
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_START_TILE_POS, 0x00);
+
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, dlbu->pp_cores_mask);
+
+ err = _MALI_OSK_ERR_OK;
+
+ return err;
+}
+
+_mali_osk_errcode_t mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ u32 wval, rval;
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+
+ /* find the core id and set the mask */
+
+ if (NULL != pp_core)
+ {
+ wval = mali_pp_core_get_id(pp_core);
+ rval = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK);
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_PP_ENABLE_MASK, (1 << wval) | rval); /* set the enable bit for this PP core id */
+ err = _MALI_OSK_ERR_OK;
+ }
+
+ return err;
+}
+
+void mali_dlbu_set_tllist_base_address(struct mali_dlbu_core *dlbu, u32 val)
+{
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_TLLIST_VBASEADDR, val);
+}
+
+void mali_dlbu_pp_jobs_stop(struct mali_dlbu_core *dlbu)
+{
+ /* What this function should implement (see the documentation):
+ * 1) clear all bits in the enable register
+ * 2) wait until all PPs have finished - mali_pp_scheduler.c code - is this done in the interrupt handlers?
+ * 3) read the current tile position registers to get the current tile positions -
+ * note that the current tile position register is the same as the start tile position register - perhaps the name should be changed */
+
+ /* 1) */
+ mali_dlbu_disable_all_pp_cores(dlbu);
+
+ /* 3) */
+ mali_dlbu_tile_position = mali_hw_core_register_read(&dlbu->hw_core, MALI_DLBU_REGISTER_START_TILE_POS);
+}
+
+void mali_dlbu_pp_jobs_restart(struct mali_dlbu_core *dlbu)
+{
+ /* What this function should implement (see the documentation):
+ * 1) configure the dynamic load balancing unit as normal
+ * 2) set the current tile position registers to the values read when the job was stopped
+ * 3) configure the PPs to start the job as normal - done by another part of the system (the scheduler) */
+
+ /* 1) */
+ mali_dlbu_reset(dlbu);
+ /* ++ set up the remaining configuration values (see mali_dlbu_setup()) */
+
+ /* 2) */
+ mali_hw_core_register_write(&dlbu->hw_core, MALI_DLBU_REGISTER_START_TILE_POS, mali_dlbu_tile_position);
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_dlbu.h b/drivers/media/video/samsung/mali/common/mali_dlbu.h
new file mode 100644
index 0000000..e3c3b9d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_dlbu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DLBU_H__
+#define __MALI_DLBU_H__
+
+#include "mali_osk.h"
+#include "mali_group.h"
+
+#define MALI_DLB_VIRT_ADDR 0xFFF00000 /* master tile virtual address fixed at this value and mapped into every session */
+
+extern u32 mali_dlbu_phys_addr;
+
+struct mali_dlbu_core;
+
+_mali_osk_errcode_t mali_dlbu_initialize(void);
+void mali_dlbu_terminate(void);
+
+struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t * resource);
+void mali_dlbu_delete(struct mali_dlbu_core *dlbu);
+
+void mali_dlbu_enable(struct mali_dlbu_core *dlbu);
+void mali_dlbu_disable(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_enable_pp_core(struct mali_dlbu_core *dlbu, u32 pp_core_enable, u32 val);
+void mali_dlbu_enable_all_pp_cores(struct mali_dlbu_core *dlbu);
+void mali_dlbu_disable_all_pp_cores(struct mali_dlbu_core *dlbu);
+
+_mali_osk_errcode_t mali_dlbu_reset(struct mali_dlbu_core *dlbu);
+void mali_dlbu_setup(struct mali_dlbu_core *dlbu, u8 fb_xdim, u8 fb_ydim, u8 xtilesize, u8 ytilesize, u8 blocksize, u8 xgr0, u8 ygr0, u8 xgr1, u8 ygr1);
+
+_mali_osk_errcode_t mali_dlbu_add_group(struct mali_dlbu_core *dlbu, struct mali_group *group);
+void mali_dlbu_set_tllist_base_address(struct mali_dlbu_core *dlbu, u32 val);
+
+void mali_dlbu_pp_jobs_stop(struct mali_dlbu_core *dlbu);
+void mali_dlbu_pp_jobs_restart(struct mali_dlbu_core *dlbu);
+
+#endif /* __MALI_DLBU_H__ */
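
As an illustration only (the resource and group pointers are assumed to come from the core setup code elsewhere in this series), a typical bring-up order for the DLBU using the functions declared above might look like this.

#include "mali_dlbu.h"

static _mali_osk_errcode_t example_dlbu_bringup(const _mali_osk_resource_t *resource, struct mali_group *group)
{
	struct mali_dlbu_core *dlbu;

	/* mali_dlbu_initialize() must already have provided the master tile list page */
	dlbu = mali_dlbu_create(resource);
	if (NULL == dlbu)
	{
		return _MALI_OSK_ERR_FAULT;
	}

	/* Register the group's PP core in the enable mask, then switch the unit on */
	if (_MALI_OSK_ERR_OK != mali_dlbu_add_group(dlbu, group))
	{
		mali_dlbu_delete(dlbu);
		return _MALI_OSK_ERR_FAULT;
	}

	mali_dlbu_enable(dlbu);

	return _MALI_OSK_ERR_OK;
}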
diff --git a/drivers/media/video/samsung/mali/common/mali_gp.c b/drivers/media/video/samsung/mali/common/mali_gp.c
new file mode 100644
index 0000000..1624e46
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_gp.c
@@ -0,0 +1,746 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_gp_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_osk_profiling.h"
+#endif
+
+/**
+ * Definition of the GP core struct
+ * Used to track a GP core in the system.
+ */
+struct mali_gp_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ struct mali_group *group; /**< Parent group for this core */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+ struct mali_gp_job *running_job; /**< Current running job */
+ _mali_osk_timer_t *timeout_timer; /**< timeout timer for this core */
+ u32 timeout_job_id; /**< job id for the timed out job - relevant only if core_timed_out == MALI_TRUE */
+ mali_bool core_timed_out; /**< if MALI_TRUE, this gp core has timed out; if MALI_FALSE, no timeout on this gp core */
+ u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src0_used; /**< The selected performance counter 0 when a job is running */
+ u32 counter_src1_used; /**< The selected performance counter 1 when a job is running */
+};
+
+static struct mali_gp_core *mali_global_gp_core = NULL;
+
+/* Interrupt handlers */
+static _mali_osk_errcode_t mali_gp_upper_half(void *data);
+static void mali_gp_bottom_half(void *data);
+static void mali_gp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data);
+static void mali_gp_post_process_job(struct mali_gp_core *core, mali_bool suspend);
+static void mali_gp_timeout(void *data);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group)
+{
+ struct mali_gp_core* core = NULL;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_gp_core);
+ MALI_DEBUG_PRINT(2, ("Mali GP: Creating Mali GP core: %s\n", resource->description));
+
+ core = _mali_osk_malloc(sizeof(struct mali_gp_core));
+ if (NULL != core)
+ {
+ core->group = group;
+ core->running_job = NULL;
+ core->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ core->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
+ core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALIGP2_REGISTER_ADDRESS_SPACE_SIZE))
+ {
+ _mali_osk_errcode_t ret;
+
+ mali_group_lock(group);
+ ret = mali_gp_reset(core);
+ mali_group_unlock(group);
+
+ if (_MALI_OSK_ERR_OK == ret)
+ {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ core->irq = _mali_osk_irq_init(resource->irq,
+ mali_gp_upper_half,
+ mali_gp_bottom_half,
+ mali_gp_irq_probe_trigger,
+ mali_gp_irq_probe_ack,
+ core,
+ "mali_gp_irq_handlers");
+ if (NULL != core->irq)
+ {
+ /* Initialise the timeout timer */
+ core->timeout_timer = _mali_osk_timer_init();
+ if(NULL != core->timeout_timer)
+ {
+ _mali_osk_timer_setcallback(core->timeout_timer, mali_gp_timeout, (void *)core);
+ MALI_DEBUG_PRINT(4, ("Mali GP: set global gp core from 0x%08X to 0x%08X\n", mali_global_gp_core, core));
+ mali_global_gp_core = core;
+
+ return core;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to setup timeout timer for GP core %s\n", core->hw_core.description));
+ /* Release IRQ handlers */
+ _mali_osk_irq_term(core->irq);
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to setup interrupt handlers for GP core %s\n", core->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to allocate memory for GP core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_gp_delete(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ _mali_osk_timer_term(core->timeout_timer);
+ _mali_osk_irq_term(core->irq);
+ mali_hw_core_delete(&core->hw_core);
+ mali_global_gp_core = NULL;
+ _mali_osk_free(core);
+}
+
+void mali_gp_stop_bus(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+}
+
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core)
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ /* Send the stop bus command. */
+ mali_gp_stop_bus(core);
+
+ /* Wait for bus to be stopped */
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_gp_hard_reset(struct mali_gp_core *core)
+{
+ const int reset_finished_loop_count = 15;
+ const u32 reset_wait_target_register = MALIGP2_REG_ADDR_MGMT_WRITE_BOUND_LOW;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ const u32 reset_default_value = 0;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(4, ("Mali GP: Hard reset of core %s\n", core->hw_core.description));
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ mali_gp_post_process_job(core, MALI_FALSE);
+
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_invalid_value);
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_RESET);
+
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_check_value);
+ if (reset_check_value == mali_hw_core_register_read(&core->hw_core, reset_wait_target_register))
+ {
+ break;
+ }
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_PRINT_ERROR(("Mali GP: The hard reset loop didn't work, unable to recover\n"));
+ }
+
+ mali_hw_core_register_write(&core->hw_core, reset_wait_target_register, reset_default_value); /* set it back to the default */
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+}
+
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core)
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(4, ("Mali GP: Reset of core %s\n", core->hw_core.description));
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ mali_gp_post_process_job(core, MALI_FALSE);
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+
+#if defined(USING_MALI200)
+
+ /* On Mali-200, stop the bus, then do a hard reset of the core */
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_STOP_BUS);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Mali GP: Failed to stop bus for core %s, unable to recover\n", core->hw_core.description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* the bus was stopped OK, do the hard reset */
+ mali_gp_hard_reset(core);
+
+#elif defined(USING_MALI400)
+
+ /* Mali-300 and Mali-400 have a safe reset command which we use */
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALI400GP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALI400GP_REG_VAL_CMD_SOFT_RESET);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400GP_REG_VAL_IRQ_RESET_COMPLETED)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Mali GP: Failed to reset core %s, unable to recover\n", core->hw_core.description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+#error "no supported mali core defined"
+#endif
+
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job)
+{
+ u32 startcmd = 0;
+ u32 *frame_registers = mali_gp_job_get_frame_registers(job);
+ core->counter_src0_used = core->counter_src0;
+ core->counter_src1_used = core->counter_src1;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ if (mali_gp_job_has_vs_job(job))
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_VS;
+ }
+
+ if (mali_gp_job_has_plbu_job(job))
+ {
+ startcmd |= (u32) MALIGP2_REG_VAL_CMD_START_PLBU;
+ }
+
+ MALI_DEBUG_ASSERT(0 != startcmd);
+
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VSCL_START_ADDR, frame_registers, MALIGP2_NUM_REGS_FRAME);
+
+#if PROFILING_PRINT_L2_HITRATE_ON_GP_FINISH
+ {
+ /* Read hits and Read misses*/
+ mali_l2_cache_core_set_counter_src0(mali_l2_cache_core_get_glob_l2_core(0), 20);
+ mali_l2_cache_core_set_counter_src1(mali_l2_cache_core_get_glob_l2_core(0), 21);
+ }
+#endif
+
+ /* This selects which performance counters we are reading */
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+ {
+ /* global_config has enabled HW counters, this will override anything specified by user space */
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+ }
+ else
+ {
+ /* Use HW counters from job object, if any */
+ u32 perf_counter_flag = mali_gp_job_get_perf_counter_flag(job);
+ if (0 != perf_counter_flag)
+ {
+ if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ core->counter_src0_used = mali_gp_job_get_perf_counter_src0(job);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ core->counter_src1_used = mali_gp_job_get_perf_counter_src1(job);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALIGP2_REG_VAL_PERF_CNT_ENABLE);
+ }
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Starting job (0x%08x) on core %s with command 0x%08X\n", job, core->hw_core.description, startcmd));
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core. */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, startcmd);
+
+ /* Barrier to make sure the previous register write is finished */
+ _mali_osk_write_mem_barrier();
+
+ /* Setup the timeout timer value and save the job id for the job running on the gp core */
+
+ _mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+ core->timeout_job_id = mali_gp_job_get_id(job);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ job->frame_builder_id, job->flush_id, 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), job->pid, job->tid, 0, 0, 0);
+#endif
+
+ core->running_job = job;
+}
+
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr)
+{
+ u32 irq_readout;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
+
+ if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM | MALIGP2_REG_VAL_IRQ_HANG));
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* re-enable interrupts */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR, start_addr);
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_END_ADDR, end_addr);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: Resuming job\n"));
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_CMD, MALIGP2_REG_VAL_CMD_UPDATE_PLBU_ALLOC);
+ _mali_osk_write_mem_barrier();
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
+#endif
+ }
+ /*
+ * else: core has been reset between PLBU_OUT_OF_MEM interrupt and this new heap response.
+ * A timeout or a page fault on Mali-200 PP core can cause this behaviour.
+ */
+}
+
+void mali_gp_abort_job(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ if (_MALI_OSK_ERR_FAULT != mali_gp_reset(core))
+ {
+ _mali_osk_timer_del(core->timeout_timer);
+ }
+}
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_VERSION);
+}
+
+mali_bool mali_gp_core_set_counter_src0(struct mali_gp_core *core, u32 counter)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ core->counter_src0 = counter;
+ return MALI_TRUE;
+}
+
+mali_bool mali_gp_core_set_counter_src1(struct mali_gp_core *core, u32 counter)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ core->counter_src1 = counter;
+ return MALI_TRUE;
+}
+
+u32 mali_gp_core_get_counter_src0(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->counter_src0;
+}
+
+u32 mali_gp_core_get_counter_src1(struct mali_gp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->counter_src1;
+}
+
+struct mali_gp_core *mali_gp_get_global_gp_core(void)
+{
+ return mali_global_gp_core;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static _mali_osk_errcode_t mali_gp_upper_half(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+ if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout)
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+#endif
+
+ /* We do need to handle this in a bottom half */
+ _mali_osk_irq_schedulework(core->irq);
+ return _MALI_OSK_ERR_OK;
+ }
+
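+	/* No interrupt pending from this core */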
+ return _MALI_OSK_ERR_FAULT;
+}
+
+static void mali_gp_bottom_half(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+ u32 irq_readout;
+ u32 irq_errors;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#if 0 /* Bottom half TLP logging is currently not supported */
+ _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
+#endif
+#endif
+
+ mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */
+
+ if ( MALI_FALSE == mali_group_power_is_on(core->group) )
+ {
+ MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
+ mali_group_unlock(core->group);
+ return;
+ }
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+ MALI_DEBUG_PRINT(4, ("Mali GP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));
+
+ if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST))
+ {
+ u32 core_status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE))
+ {
+ mali_gp_post_process_job(core, MALI_FALSE);
+ MALI_DEBUG_PRINT(4, ("Mali GP: Job completed, calling group handler\n"));
+ mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_COMPLETED); /* Will release group lock */
+ return;
+ }
+ }
+
+ /*
+	 * Now let's look at the possible error cases (IRQ indicating error or timeout).
+	 * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered errors.
+ */
+ irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST|MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST|MALIGP2_REG_VAL_IRQ_HANG|MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
+ if (0 != irq_errors)
+ {
+ mali_gp_post_process_job(core, MALI_FALSE);
+ MALI_PRINT_ERROR(("Mali GP: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, core->hw_core.description));
+ mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_FAILED); /* Will release group lock */
+ return;
+ }
+	else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
+	{
+		if (core->timeout_job_id == mali_gp_job_get_id(core->running_job))
+		{
+			mali_gp_post_process_job(core, MALI_FALSE);
+			MALI_DEBUG_PRINT(2, ("Mali GP: Job %d timed out\n", mali_gp_job_get_id(core->running_job)));
+			mali_group_bottom_half(core->group, GROUP_EVENT_GP_JOB_TIMED_OUT); /* Will release group lock */
+		}
+		else
+		{
+			/* Stale timeout for a job that is no longer running; just release the group lock */
+			mali_group_unlock(core->group);
+		}
+		core->core_timed_out = MALI_FALSE;
+		return;
+	}
+ else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM)
+ {
+ /* GP wants more memory in order to continue.
+ *
+ * This must be handled prior to HANG because this actually can
+ * generate a HANG while waiting for more memory.
+ * And it must be handled before the completion interrupts,
+ * since the PLBU can run out of memory after VS is complete;
+	 * in which case the OOM must be handled first so that the
+	 * PLBU work can complete.
+ */
+ mali_gp_post_process_job(core, MALI_TRUE);
+ MALI_DEBUG_PRINT(3, ("Mali GP: PLBU needs more heap memory\n"));
+ mali_group_bottom_half(core->group, GROUP_EVENT_GP_OOM); /* Will release group lock */
+ return;
+ }
+ else if (irq_readout & MALIGP2_REG_VAL_IRQ_HANG)
+ {
+ /* Just ignore hang interrupts, the job timer will detect hanging jobs anyways */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_HANG);
+ }
+
+ /*
+	 * The only way to get here is if we got a HANG interrupt, which we ignore, or received only one of the two needed END_CMD_LST interrupts.
+ * Re-enable interrupts and let core continue to run.
+ */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED);
+ mali_group_unlock(core->group);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#if 0 /* Bottom half TLP logging is currently not supported */
+ _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_STOP| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid()+11000, 0, 0, 0);
+#endif
+#endif
+}
+
+static void mali_gp_irq_probe_trigger(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_USED); /* @@@@ This should not be needed */
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT, MALIGP2_REG_VAL_CMD_FORCE_HANG);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_gp_irq_probe_ack(void *data)
+{
+ struct mali_gp_core *core = (struct mali_gp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+ if (MALIGP2_REG_VAL_IRQ_FORCE_HANG & irq_readout)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_CLEAR, MALIGP2_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+/* ------ local helper functions below --------- */
+
+static void mali_gp_post_process_job(struct mali_gp_core *core, mali_bool suspend)
+{
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ if (NULL != core->running_job)
+ {
+ u32 val0 = 0;
+ u32 val1 = 0;
+#if MALI_TIMELINE_PROFILING_ENABLED
+ u32 event_id;
+#endif
+
+#if PROFILING_PRINT_L2_HITRATE_ON_GP_FINISH
+ {
+ u32 src0, value0, src1, value1, sum, per_thousand, per_thousand_now, diff0, diff1;
+ static u32 print_nr=0;
+ static u32 prev0=0;
+ static u32 prev1=0;
+ if ( !(++print_nr&511) )
+ {
+ mali_l2_cache_core_get_counter_values(mali_l2_cache_core_get_glob_l2_core(0), &src0, &value0, &src1, &value1);
+ MALI_DEBUG_ASSERT( src0==20 ); /* Read hits */
+ MALI_DEBUG_ASSERT( src1==21 ); /* Read misses */
+
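+				/* Compute read hits in parts per thousand; divide the sum first when the counts are large to avoid overflowing the 32-bit multiply */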
+ sum = value0+value1;
+ if ( sum > 1000000 )
+ {
+ per_thousand = value0 / (sum/1000);
+ }
+ else
+ {
+ per_thousand = (value0*1000) / (sum);
+ }
+ diff0 = value0-prev0;
+ diff1 = value1-prev1;
+
+ sum = diff0 + diff1 ;
+ if ( sum > 1000000 )
+ {
+ per_thousand_now = diff0 / (sum/1000);
+ }
+ else
+ {
+ per_thousand_now = (diff0*1000) / (sum);
+ }
+
+ prev0=value0;
+ prev1=value1;
+ if (per_thousand_now<=1000)
+ {
+					MALI_DEBUG_PRINT(2, ("Mali L2: Read hits/misses: %d/%d = %d per thousand (total), %d per thousand since previous\n", value0, value1, per_thousand, per_thousand_now));
+ }
+
+ }
+ }
+#endif
+
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+ {
+ val0 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+			if ((mali_gp_job_get_perf_counter_flag(core->running_job) &
+			     _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) && mali_gp_job_get_perf_counter_src0(core->running_job) == core->counter_src0_used)
+ {
+ /* We retrieved the counter that user space asked for, so return the value through the job object */
+ mali_gp_job_set_perf_counter_value0(core->running_job, val0);
+ }
+ else
+ {
+				/* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
+ mali_gp_job_set_perf_counter_value0(core->running_job, MALI_HW_CORE_INVALID_VALUE);
+ }
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_report_hw_counter(COUNTER_VP_C0, val0);
+#endif
+
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+ {
+ val1 = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+			if ((mali_gp_job_get_perf_counter_flag(core->running_job) &
+			     _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) && mali_gp_job_get_perf_counter_src1(core->running_job) == core->counter_src1_used)
+ {
+ /* We retrieved the counter that user space asked for, so return the value through the job object */
+ mali_gp_job_set_perf_counter_value1(core->running_job, val1);
+ }
+ else
+ {
+ /* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
+ mali_gp_job_set_perf_counter_value1(core->running_job, MALI_HW_CORE_INVALID_VALUE);
+ }
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_report_hw_counter(COUNTER_VP_C1, val1);
+#endif
+ }
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ if (MALI_TRUE == suspend)
+ {
+ event_id = MALI_PROFILING_EVENT_TYPE_SUSPEND|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0);
+ }
+ else
+ {
+ event_id = MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0);
+ }
+ _mali_osk_profiling_add_event(event_id, val0, val1, core->counter_src0_used | (core->counter_src1_used << 8), 0, 0);
+#endif
+
+ mali_gp_job_set_current_heap_addr(core->running_job,
+ mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_PLBU_ALLOC_START_ADDR));
+
+ if (MALI_TRUE != suspend)
+ {
+ /* We are no longer running a job... */
+ core->running_job = NULL;
+ _mali_osk_timer_del(core->timeout_timer);
+ }
+ }
+}
+
+/* callback function for gp core timeout */
+static void mali_gp_timeout(void *data)
+{
+ struct mali_gp_core * core = ((struct mali_gp_core *)data);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP: TIMEOUT callback \n"));
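+	/* Flag the timeout and defer handling to the bottom half, which completes the job with a timeout status */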
+ core->core_timed_out = MALI_TRUE;
+ _mali_osk_irq_schedulework(core->irq);
+}
+
+#if 0
+void mali_gp_print_state(struct mali_gp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali GP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) ));
+}
+#endif
+
+#if MALI_STATE_TRACKING
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\tGP: %s\n", core->hw_core.description);
+
+ return n;
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/common/mali_gp.h b/drivers/media/video/samsung/mali/common/mali_gp.h
new file mode 100644
index 0000000..3175b75
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_gp.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_H__
+#define __MALI_GP_H__
+
+#include "mali_osk.h"
+#include "mali_gp_job.h"
+
+struct mali_gp_core;
+struct mali_group;
+
+_mali_osk_errcode_t mali_gp_initialize(void);
+void mali_gp_terminate(void);
+
+struct mali_gp_core *mali_gp_create(const _mali_osk_resource_t * resource, struct mali_group *group);
+void mali_gp_delete(struct mali_gp_core *core);
+
+void mali_gp_stop_bus(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_stop_bus_wait(struct mali_gp_core *core);
+void mali_gp_hard_reset(struct mali_gp_core *core);
+_mali_osk_errcode_t mali_gp_reset(struct mali_gp_core *core);
+
+void mali_gp_job_start(struct mali_gp_core *core, struct mali_gp_job *job);
+void mali_gp_resume_with_new_heap(struct mali_gp_core *core, u32 start_addr, u32 end_addr);
+
+void mali_gp_abort_job(struct mali_gp_core *core);
+
+u32 mali_gp_core_get_version(struct mali_gp_core *core);
+
+mali_bool mali_gp_core_set_counter_src0(struct mali_gp_core *core, u32 counter);
+mali_bool mali_gp_core_set_counter_src1(struct mali_gp_core *core, u32 counter);
+u32 mali_gp_core_get_counter_src0(struct mali_gp_core *core);
+u32 mali_gp_core_get_counter_src1(struct mali_gp_core *core);
+struct mali_gp_core *mali_gp_get_global_gp_core(void);
+
+u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+
+#endif /* __MALI_GP_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_gp_job.c b/drivers/media/video/samsung/mali/common/mali_gp_job.c
new file mode 100644
index 0000000..abe1d93
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_gp_job.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *args, u32 id)
+{
+ struct mali_gp_job *job;
+
+ job = _mali_osk_malloc(sizeof(struct mali_gp_job));
+ if (NULL != job)
+ {
+ _mali_osk_list_init(&job->list);
+ job->session = session;
+ job->id = id;
+ job->user_id = args->user_job_ptr;
+ _mali_osk_memcpy(job->frame_registers, args->frame_registers, sizeof(job->frame_registers));
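+		/* Frame register 4 holds the PLBU heap start address; track it as the job's current heap pointer so it can be reported back to user space */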
+ job->heap_current_addr = args->frame_registers[4];
+ job->perf_counter_flag = args->perf_counter_flag;
+ job->perf_counter_src0 = args->perf_counter_src0;
+ job->perf_counter_src1 = args->perf_counter_src1;
+ job->perf_counter_value0 = 0;
+ job->perf_counter_value1 = 0;
+
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+ job->frame_builder_id = args->frame_builder_id;
+ job->flush_id = args->flush_id;
+
+ return job;
+ }
+
+ return NULL;
+}
+
+void mali_gp_job_delete(struct mali_gp_job *job)
+{
+ _mali_osk_free(job);
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_gp_job.h b/drivers/media/video/samsung/mali/common/mali_gp_job.h
new file mode 100644
index 0000000..9c29f1c
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_gp_job.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_JOB_H__
+#define __MALI_GP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+
+/**
+ * The structure represents a GP job, including all sub-jobs
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_gp_job
+{
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ u32 id; /**< identifier for this job in kernel space (sequential numbering) */
+ u32 user_id; /**< identifier for the job in user space */
+ u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< core specific registers associated with this job, see ARM DDI0415A */
+ u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
+ u32 perf_counter_flag; /**< bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
+ u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ u32 frame_builder_id; /**< id of the originating frame builder */
+ u32 flush_id; /**< flush id within the originating frame builder */
+};
+
+struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *args, u32 id);
+void mali_gp_job_delete(struct mali_gp_job *job);
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
+{
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_user_id(struct mali_gp_job *job)
+{
+ return job->user_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
+{
+ return job->frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
+{
+ return job->flush_id;
+}
+
+MALI_STATIC_INLINE u32* mali_gp_job_get_frame_registers(struct mali_gp_job *job)
+{
+ return job->frame_registers;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
+{
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
+{
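+	/* A vertex shader job is present when the VS command list start and end addresses (frame registers 0 and 1) differ */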
+ return (job->frame_registers[0] != job->frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
+{
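+	/* A PLBU job is present when the PLBU command list start and end addresses (frame registers 2 and 3) differ */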
+ return (job->frame_registers[2] != job->frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
+{
+ return job->heap_current_addr;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
+{
+ job->heap_current_addr = heap_addr;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
+{
+ return job->perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
+{
+ return job->perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
+{
+ return job->perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
+{
+ return job->perf_counter_value0;
+}
+
+MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
+{
+ return job->perf_counter_value1;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
+{
+ job->perf_counter_value0 = value;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
+{
+ job->perf_counter_value1 = value;
+}
+
+#endif /* __MALI_GP_JOB_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_gp_scheduler.c b/drivers/media/video/samsung/mali/common/mali_gp_scheduler.c
new file mode 100644
index 0000000..f06d899
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_gp_scheduler.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_gp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_gp.h"
+#include "mali_gp_job.h"
+#include "mali_group.h"
+#include "mali_cluster.h"
+
+enum mali_gp_slot_state
+{
+ MALI_GP_SLOT_STATE_IDLE,
+ MALI_GP_SLOT_STATE_WORKING,
+};
+
+/* A render slot is an entity which jobs can be scheduled onto */
+struct mali_gp_slot
+{
+ struct mali_group *group;
+ /*
+ * We keep track of the state here as well as in the group object
+ * so we don't need to take the group lock so often (and also avoid clutter with the working lock)
+ */
+ enum mali_gp_slot_state state;
+ u32 returned_cookie;
+};
+
+static u32 gp_version = 0;
+static _MALI_OSK_LIST_HEAD(job_queue); /* List of jobs with some unscheduled work */
+static struct mali_gp_slot slot;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *gp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
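+/* pause_count > 0 prevents new jobs from being scheduled; the wait queue is used to wait until the slot goes idle */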
+
+static mali_bool mali_gp_scheduler_is_suspended(void);
+
+static _mali_osk_lock_t *gp_scheduler_lock = NULL;
+/* Contains tid of thread that locked the scheduler or 0, if not locked */
+
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void)
+{
+ u32 i;
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue);
+
+ gp_scheduler_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == gp_scheduler_lock)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ gp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == gp_scheduler_working_wait_queue)
+ {
+ _mali_osk_lock_term(gp_scheduler_lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Find all the available GP cores */
+ for (i = 0; i < mali_cluster_get_glob_num_clusters(); i++)
+ {
+ u32 group_id = 0;
+ struct mali_cluster *curr_cluster = mali_cluster_get_global_cluster(i);
+ struct mali_group *group = mali_cluster_get_group(curr_cluster, group_id);
+ while (NULL != group)
+ {
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ if (NULL != gp_core)
+ {
+ if (0 == gp_version)
+ {
+ /* Retrieve GP version */
+ gp_version = mali_gp_core_get_version(gp_core);
+ }
+ slot.group = group;
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+				break; /* There is only one GP core, so no point in looking for more */
+ }
+ group_id++;
+ group = mali_cluster_get_group(curr_cluster, group_id);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_gp_scheduler_terminate(void)
+{
+ _mali_osk_wait_queue_term(gp_scheduler_working_wait_queue);
+ _mali_osk_lock_term(gp_scheduler_lock);
+}
+
+MALI_STATIC_INLINE void mali_gp_scheduler_lock(void)
+{
+ if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
+ {
+ /* Non-interruptable lock failed: this should never happen. */
+ MALI_DEBUG_ASSERT(0);
+ }
+ MALI_DEBUG_PRINT(5, ("Mali GP scheduler: GP scheduler lock taken\n"));
+}
+
+MALI_STATIC_INLINE void mali_gp_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali GP scheduler: Releasing GP scheduler lock\n"));
+ _mali_osk_lock_signal(gp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+MALI_STATIC_INLINE void mali_gp_scheduler_assert_locked(void)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(gp_scheduler_lock);
+}
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED() mali_gp_scheduler_assert_locked()
+#else
+#define MALI_ASSERT_GP_SCHEDULER_LOCKED()
+#endif
+
+static void mali_gp_scheduler_schedule(void)
+{
+ struct mali_gp_job *job;
+
+ MALI_ASSERT_GP_SCHEDULER_LOCKED();
+
+ if (0 < pause_count || MALI_GP_SLOT_STATE_IDLE != slot.state || _mali_osk_list_empty(&job_queue))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+ pause_count, MALI_GP_SLOT_STATE_IDLE == slot.state ? 1 : 0));
+ return; /* Nothing to do, so early out */
+ }
+
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_gp_job, list);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Starting job %u (0x%08X)\n", mali_gp_job_get_id(job), job));
+ if (_MALI_OSK_ERR_OK == mali_group_start_gp_job(slot.group, job))
+ {
+ /* Mark slot as busy */
+ slot.state = MALI_GP_SLOT_STATE_WORKING;
+
+ /* Remove from queue of unscheduled jobs */
+ _mali_osk_list_del(&job->list);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Failed to start GP job\n"));
+ }
+}
+
+static void mali_gp_scheduler_return_job_to_user(struct mali_gp_job *job, mali_bool success)
+{
+ _mali_osk_notification_t *notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_FINISHED, sizeof(_mali_uk_gp_job_finished_s));
+ if (NULL != notobj)
+ {
+ _mali_uk_gp_job_finished_s *jobres = notobj->result_buffer;
+ _mali_osk_memset(jobres, 0, sizeof(_mali_uk_gp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ if (MALI_TRUE == success)
+ {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ }
+ else
+ {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+ jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+ jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+ mali_session_send_notification(mali_gp_job_get_session(job), notobj);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali GP scheduler: Unable to allocate notification object\n"));
+ }
+
+ mali_gp_job_delete(job);
+}
+
+
+void mali_gp_scheduler_do_schedule(void)
+{
+ mali_gp_scheduler_lock();
+
+ mali_gp_scheduler_schedule();
+
+ mali_gp_scheduler_unlock();
+}
+
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success)
+{
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) completed (%s)\n", mali_gp_job_get_id(job), job, success ? "success" : "failure"));
+
+ mali_gp_scheduler_lock();
+
+ /* Mark slot as idle again */
+ slot.state = MALI_GP_SLOT_STATE_IDLE;
+
+ /* If paused, then this was the last job, so wake up sleeping workers */
+ if (pause_count > 0)
+ {
+ _mali_osk_wait_queue_wake_up(gp_scheduler_working_wait_queue);
+ }
+ else
+ {
+ mali_gp_scheduler_schedule();
+ }
+
+ mali_gp_scheduler_unlock();
+
+ mali_gp_scheduler_return_job_to_user(job, success);
+}
+
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job)
+{
+ _mali_osk_notification_t *notobj;
+
+ notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_GP_STALLED, sizeof(_mali_uk_gp_job_suspended_s));
+
+ if (NULL != notobj)
+ {
+ _mali_uk_gp_job_suspended_s * jobres;
+
+ mali_gp_scheduler_lock();
+
+ jobres = (_mali_uk_gp_job_suspended_s *)notobj->result_buffer;
+
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ jobres->reason = _MALIGP_JOB_SUSPENDED_OUT_OF_MEMORY;
+ jobres->cookie = mali_gp_job_get_id(job);
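+		/* Remember the cookie so the later suspend response from user space can be validated against it */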
+ slot.returned_cookie = jobres->cookie;
+
+ mali_session_send_notification(mali_gp_job_get_session(job), notobj);
+
+ mali_gp_scheduler_unlock();
+ }
+
+ /*
+	 * If the notification object could not be allocated, we could return the job
+	 * to user space right away, but the job timer will eventually do that anyway.
+	 * This is not exactly a common case.
+ */
+}
+
+void mali_gp_scheduler_suspend(void)
+{
+ mali_gp_scheduler_lock();
+ pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+ mali_gp_scheduler_unlock();
+
+ _mali_osk_wait_queue_wait_event(gp_scheduler_working_wait_queue, mali_gp_scheduler_is_suspended);
+}
+
+void mali_gp_scheduler_resume(void)
+{
+ mali_gp_scheduler_lock();
+ pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ if (0 == pause_count)
+ {
+ mali_gp_scheduler_schedule();
+ }
+ mali_gp_scheduler_unlock();
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job(_mali_uk_gp_start_job_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ if (NULL == args->ctx)
+ {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ session = (struct mali_session_data*)args->ctx;
+ if (NULL == session)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ job = mali_gp_job_create(session, args, mali_scheduler_get_new_id());
+ if (NULL == job)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+#if PROFILING_SKIP_PP_AND_GP_JOBS
+#warning GP jobs will not be executed
+ mali_gp_scheduler_return_job_to_user(job, MALI_TRUE);
+ return _MALI_OSK_ERR_OK;
+#endif
+
+ mali_gp_scheduler_lock();
+
+ _mali_osk_list_addtail(&job->list, &job_queue);
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n", mali_gp_job_get_id(job), job));
+
+ mali_gp_scheduler_schedule();
+
+ mali_gp_scheduler_unlock();
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ args->number_of_cores = 1;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ args->version = gp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+ struct mali_session_data *session;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ if (NULL == args->ctx)
+ {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+
+ session = (struct mali_session_data*)args->ctx;
+ if (NULL == session)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_gp_scheduler_lock();
+
+ /* Make sure that the cookie returned by user space is the same as we provided in the first place */
+ if (args->cookie != slot.returned_cookie)
+ {
+		MALI_DEBUG_PRINT(2, ("Mali GP scheduler: Got an illegal cookie from user space, expected %u but got %u (job id)\n", slot.returned_cookie, args->cookie));
+ mali_gp_scheduler_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_gp_scheduler_unlock();
+
+ switch (args->code)
+ {
+ case _MALIGP_JOB_RESUME_WITH_NEW_HEAP:
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Resuming job %u with new heap; 0x%08X - 0x%08X\n", args->cookie, args->arguments[0], args->arguments[1]));
+ mali_group_resume_gp_with_new_heap(slot.group, args->cookie, args->arguments[0], args->arguments[1]);
+ ret = _MALI_OSK_ERR_OK;
+ break;
+
+ case _MALIGP_JOB_ABORT:
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting job %u, no new heap provided\n", args->cookie));
+ mali_group_abort_gp_job(slot.group, args->cookie);
+ ret = _MALI_OSK_ERR_OK;
+ break;
+
+ default:
+ MALI_PRINT_ERROR(("Mali GP scheduler: Wrong suspend response from user space\n"));
+ ret = _MALI_OSK_ERR_FAULT;
+ break;
+ }
+
+ return ret;
+
+}
+
+void mali_gp_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_gp_job *job, *tmp;
+
+ mali_gp_scheduler_lock();
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Aborting all jobs from session 0x%08x\n", session));
+
+ /* Check queue for jobs and remove */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_gp_job, list)
+ {
+ if (mali_gp_job_get_session(job) == session)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Removing GP job 0x%08x from queue\n", job));
+ _mali_osk_list_del(&(job->list));
+ mali_gp_job_delete(job);
+ }
+ }
+
+ mali_gp_scheduler_unlock();
+
+ /* Abort running jobs from this session. It is safe to do this outside
+ * the scheduler lock as there is only one GP core, and the queue has
+ * already been emptied, as long as there are no new jobs coming in
+ * from user space. */
+ mali_group_abort_session(slot.group, session);
+}
+
+static mali_bool mali_gp_scheduler_is_suspended(void)
+{
+ mali_bool ret;
+
+ mali_gp_scheduler_lock();
+ ret = pause_count > 0 && slot.state == MALI_GP_SLOT_STATE_IDLE;
+ mali_gp_scheduler_unlock();
+
+ return ret;
+}
+
+
+#if MALI_STATE_TRACKING
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "GP\n");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+
+ n += mali_group_dump_state(slot.group, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\t\tState: %d\n", mali_group_gp_state(slot.group));
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/common/mali_gp_scheduler.h b/drivers/media/video/samsung/mali/common/mali_gp_scheduler.h
new file mode 100644
index 0000000..ef58509
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_gp_scheduler.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GP_SCHEDULER_H__
+#define __MALI_GP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_cluster.h"
+#include "mali_gp_job.h"
+
+_mali_osk_errcode_t mali_gp_scheduler_initialize(void);
+void mali_gp_scheduler_terminate(void);
+
+void mali_gp_scheduler_do_schedule(void);
+void mali_gp_scheduler_job_done(struct mali_group *group, struct mali_gp_job *job, mali_bool success);
+void mali_gp_scheduler_oom(struct mali_group *group, struct mali_gp_job *job);
+void mali_gp_scheduler_abort_session(struct mali_session_data *session);
+u32 mali_gp_scheduler_dump_state(char *buf, u32 size);
+
+void mali_gp_scheduler_suspend(void);
+void mali_gp_scheduler_resume(void);
+
+#endif /* __MALI_GP_SCHEDULER_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_group.c b/drivers/media/video/samsung/mali/common/mali_group.c
new file mode 100644
index 0000000..94bf774
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_group.c
@@ -0,0 +1,841 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "mali_cluster.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_mmu.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_pm.h"
+
+/*
+ * The group object is the most important object in the device driver,
+ * and acts as the center of many HW operations.
+ * The reason for this is that operations on the MMU will affect all
+ * cores connected to this MMU (a group is defined by the MMU and the
+ * cores which are connected to this).
+ * The group lock is thus the most important lock, followed by the
+ * GP and PP scheduler locks. They must be taken in the following
+ * order:
+ * GP/PP lock first, then group lock(s).
+ */
+
+/**
+ * The structure represents a render group
+ * A render group is defined by all the cores that share the same Mali MMU
+ */
+
+struct mali_group
+{
+ struct mali_cluster *cluster;
+
+ struct mali_mmu_core *mmu;
+ struct mali_session_data *session;
+ int page_dir_ref_count;
+ mali_bool power_is_on;
+#if defined(USING_MALI200)
+ mali_bool pagedir_activation_failed;
+#endif
+
+ struct mali_gp_core *gp_core;
+ enum mali_group_core_state gp_state;
+ struct mali_gp_job *gp_running_job;
+
+ struct mali_pp_core *pp_core;
+ enum mali_group_core_state pp_state;
+ struct mali_pp_job *pp_running_job;
+ u32 pp_running_sub_job;
+
+ _mali_osk_lock_t *lock;
+};
+
+static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS];
+static u32 mali_global_num_groups = 0;
+
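+/**
+ * Result of activating a session's page directory on the group MMU:
+ * FAILED means the MMU is busy with a different session,
+ * OK_KEPT_PD means the requested page directory was already active (caller zaps the TLB),
+ * OK_SWITCHED_PD means a new page directory was programmed (zapping is done as part of the switch).
+ */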
+enum mali_group_activate_pd_status
+{
+ MALI_GROUP_ACTIVATE_PD_STATUS_FAILED,
+ MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD,
+ MALI_GROUP_ACTIVATE_PD_STATUS_OK_SWITCHED_PD,
+};
+
+/* local helper functions */
+static enum mali_group_activate_pd_status mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_deactivate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
+static void mali_group_complete_jobs(struct mali_group *group, mali_bool complete_gp, mali_bool complete_pp, mali_bool success);
+
+void mali_group_lock(struct mali_group *group)
+{
+ if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(group->lock, _MALI_OSK_LOCKMODE_RW))
+ {
+ /* Non-interruptable lock failed: this should never happen. */
+ MALI_DEBUG_ASSERT(0);
+ }
+ MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
+}
+
+void mali_group_unlock(struct mali_group *group)
+{
+ MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
+ _mali_osk_lock_signal(group->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+}
+#endif
+
+
+struct mali_group *mali_group_create(struct mali_cluster *cluster, struct mali_mmu_core *mmu)
+{
+ struct mali_group *group = NULL;
+
+ if (mali_global_num_groups >= MALI_MAX_NUMBER_OF_GROUPS)
+ {
+ MALI_PRINT_ERROR(("Mali group: Too many group objects created\n"));
+ return NULL;
+ }
+
+ group = _mali_osk_malloc(sizeof(struct mali_group));
+ if (NULL != group)
+ {
+ _mali_osk_memset(group, 0, sizeof(struct mali_group));
+ group->lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_GROUP);
+ if (NULL != group->lock)
+ {
+ group->cluster = cluster;
+ group->mmu = mmu; /* This group object now owns the MMU object */
+ group->session = NULL;
+ group->page_dir_ref_count = 0;
+ group->power_is_on = MALI_TRUE;
+
+ group->gp_state = MALI_GROUP_CORE_STATE_IDLE;
+ group->pp_state = MALI_GROUP_CORE_STATE_IDLE;
+#if defined(USING_MALI200)
+ group->pagedir_activation_failed = MALI_FALSE;
+#endif
+ mali_global_groups[mali_global_num_groups] = group;
+ mali_global_num_groups++;
+
+ return group;
+ }
+ _mali_osk_free(group);
+ }
+
+ return NULL;
+}
+
+void mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core)
+{
+ /* This group object now owns the GP core object */
+ group->gp_core = gp_core;
+}
+
+void mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core)
+{
+ /* This group object now owns the PP core object */
+ group->pp_core = pp_core;
+}
+
+void mali_group_delete(struct mali_group *group)
+{
+ u32 i;
+
+ /* Delete the resources that this group owns */
+ if (NULL != group->gp_core)
+ {
+ mali_gp_delete(group->gp_core);
+ }
+
+ if (NULL != group->pp_core)
+ {
+ mali_pp_delete(group->pp_core);
+ }
+
+ if (NULL != group->mmu)
+ {
+ mali_mmu_delete(group->mmu);
+ }
+
+ for (i = 0; i < mali_global_num_groups; i++)
+ {
+ if (mali_global_groups[i] == group)
+ {
+ mali_global_groups[i] = NULL;
+ mali_global_num_groups--;
+ break;
+ }
+ }
+
+ _mali_osk_lock_term(group->lock);
+
+ _mali_osk_free(group);
+}
+
+/* Called from mali_cluster_reset() when the system is re-turned on */
+void mali_group_reset(struct mali_group *group)
+{
+ mali_group_lock(group);
+
+ group->session = NULL;
+
+ if (NULL != group->mmu)
+ {
+ mali_mmu_reset(group->mmu);
+ }
+
+ if (NULL != group->gp_core)
+ {
+ mali_gp_reset(group->gp_core);
+ }
+
+ if (NULL != group->pp_core)
+ {
+ mali_pp_reset(group->pp_core);
+ }
+
+ mali_group_unlock(group);
+}
+
+struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group)
+{
+ return group->gp_core;
+}
+
+struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group)
+{
+ return group->pp_core;
+}
+
+_mali_osk_errcode_t mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+ enum mali_group_activate_pd_status activate_status;
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state);
+
+ mali_pm_core_event(MALI_CORE_EVENT_GP_START);
+
+ session = mali_gp_job_get_session(job);
+
+ mali_group_lock(group);
+
+ mali_cluster_l2_cache_invalidate_all(group->cluster, mali_gp_job_get_id(job));
+
+ activate_status = mali_group_activate_page_directory(group, session);
+ if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
+ {
+		/* If the page directory was kept, zap the TLB here; on a switch the zap was done as part of the session switch */
+ if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
+ {
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+ }
+ mali_gp_job_start(group->gp_core, job);
+ group->gp_running_job = job;
+ group->gp_state = MALI_GROUP_CORE_STATE_WORKING;
+
+ mali_group_unlock(group);
+
+ return _MALI_OSK_ERR_OK;
+ }
+
+#if defined(USING_MALI200)
+ group->pagedir_activation_failed = MALI_TRUE;
+#endif
+
+ mali_group_unlock(group);
+
+ mali_pm_core_event(MALI_CORE_EVENT_GP_STOP); /* Failed to start, so "cancel" the MALI_CORE_EVENT_GP_START */
+ return _MALI_OSK_ERR_FAULT;
+}
+
+_mali_osk_errcode_t mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+{
+ struct mali_session_data *session;
+ enum mali_group_activate_pd_status activate_status;
+
+ MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);
+
+ mali_pm_core_event(MALI_CORE_EVENT_PP_START);
+
+ session = mali_pp_job_get_session(job);
+
+ mali_group_lock(group);
+
+ mali_cluster_l2_cache_invalidate_all(group->cluster, mali_pp_job_get_id(job));
+
+ activate_status = mali_group_activate_page_directory(group, session);
+ if (MALI_GROUP_ACTIVATE_PD_STATUS_FAILED != activate_status)
+ {
+		/* If the page directory was kept, zap the TLB here; on a switch the zap was done as part of the session switch */
+ if (MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD == activate_status)
+ {
+ MALI_DEBUG_PRINT(3, ("PP starting job PD_Switch 0 Flush 1 Zap 1\n"));
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+ }
+ mali_pp_job_start(group->pp_core, job, sub_job);
+ group->pp_running_job = job;
+ group->pp_running_sub_job = sub_job;
+ group->pp_state = MALI_GROUP_CORE_STATE_WORKING;
+
+ mali_group_unlock(group);
+
+ return _MALI_OSK_ERR_OK;
+ }
+
+#if defined(USING_MALI200)
+ group->pagedir_activation_failed = MALI_TRUE;
+#endif
+
+ mali_group_unlock(group);
+
+ mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* Failed to start, so "cancel" the MALI_CORE_EVENT_PP_START */
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+{
+ mali_group_lock(group);
+
+ if (group->gp_state != MALI_GROUP_CORE_STATE_OOM ||
+ mali_gp_job_get_id(group->gp_running_job) != job_id)
+ {
+ mali_group_unlock(group);
+ return; /* Illegal request or job has already been aborted */
+ }
+
+ mali_cluster_l2_cache_invalidate_all_force(group->cluster);
+ mali_mmu_zap_tlb_without_stall(group->mmu);
+
+ mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
+ group->gp_state = MALI_GROUP_CORE_STATE_WORKING;
+
+ mali_group_unlock(group);
+}
+
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
+{
+ mali_group_lock(group);
+
+ if (group->gp_state == MALI_GROUP_CORE_STATE_IDLE ||
+ mali_gp_job_get_id(group->gp_running_job) != job_id)
+ {
+ mali_group_unlock(group);
+ return; /* No need to cancel or job has already been aborted or completed */
+ }
+
+ mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_FALSE); /* Will release group lock */
+}
+
+void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
+{
+ mali_group_lock(group);
+
+ if (group->pp_state == MALI_GROUP_CORE_STATE_IDLE ||
+ mali_pp_job_get_id(group->pp_running_job) != job_id)
+ {
+ mali_group_unlock(group);
+ return; /* No need to cancel or job has already been aborted or completed */
+ }
+
+ mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_FALSE); /* Will release group lock */
+}
+
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
+{
+ struct mali_gp_job *gp_job;
+ struct mali_pp_job *pp_job;
+ u32 gp_job_id = 0;
+ u32 pp_job_id = 0;
+ mali_bool abort_pp = MALI_FALSE;
+ mali_bool abort_gp = MALI_FALSE;
+
+ mali_group_lock(group);
+
+ gp_job = group->gp_running_job;
+ pp_job = group->pp_running_job;
+
+ if (gp_job && mali_gp_job_get_session(gp_job) == session)
+ {
+ MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));
+
+ gp_job_id = mali_gp_job_get_id(gp_job);
+ abort_gp = MALI_TRUE;
+ }
+
+ if (pp_job && mali_pp_job_get_session(pp_job) == session)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));
+
+ pp_job_id = mali_pp_job_get_id(pp_job);
+ abort_pp = MALI_TRUE;
+ }
+
+ mali_group_unlock(group);
+
+	/* These functions take and release the group lock */
+ if (0 != abort_gp)
+ {
+ mali_group_abort_gp_job(group, gp_job_id);
+ }
+ if (0 != abort_pp)
+ {
+ mali_group_abort_pp_job(group, pp_job_id);
+ }
+
+ mali_group_lock(group);
+ mali_group_remove_session_if_unused(group, session);
+ mali_group_unlock(group);
+}
+
+enum mali_group_core_state mali_group_gp_state(struct mali_group *group)
+{
+ return group->gp_state;
+}
+
+enum mali_group_core_state mali_group_pp_state(struct mali_group *group)
+{
+ return group->pp_state;
+}
+
+/* group lock need to be taken before calling mali_group_bottom_half */
+void mali_group_bottom_half(struct mali_group *group, enum mali_group_event_t event)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ switch (event)
+ {
+ case GROUP_EVENT_PP_JOB_COMPLETED:
+ mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_TRUE); /* PP job SUCCESS */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_PP_JOB_FAILED:
+ mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_FALSE); /* PP job FAIL */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_PP_JOB_TIMED_OUT:
+ mali_group_complete_jobs(group, MALI_FALSE, MALI_TRUE, MALI_FALSE); /* PP job TIMEOUT */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_GP_JOB_COMPLETED:
+ mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_TRUE); /* GP job SUCCESS */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_GP_JOB_FAILED:
+ mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_FALSE); /* GP job FAIL */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_GP_JOB_TIMED_OUT:
+ mali_group_complete_jobs(group, MALI_TRUE, MALI_FALSE, MALI_FALSE); /* GP job TIMEOUT */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ case GROUP_EVENT_GP_OOM:
+ group->gp_state = MALI_GROUP_CORE_STATE_OOM;
+ mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
+ mali_gp_scheduler_oom(group, group->gp_running_job);
+ break;
+ case GROUP_EVENT_MMU_PAGE_FAULT:
+ mali_group_complete_jobs(group, MALI_TRUE, MALI_TRUE, MALI_FALSE); /* GP and PP job FAIL */
+ /* group lock is released by mali_group_complete_jobs() call above */
+ break;
+ default:
+ break;
+ }
+}
+
+struct mali_mmu_core *mali_group_get_mmu(struct mali_group *group)
+{
+ return group->mmu;
+}
+
+struct mali_session_data *mali_group_get_session(struct mali_group *group)
+{
+ return group->session;
+}
+
+struct mali_group *mali_group_get_glob_group(u32 index)
+{
+ if(mali_global_num_groups > index)
+ {
+ return mali_global_groups[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_group_get_glob_num_groups(void)
+{
+ return mali_global_num_groups;
+}
+
+/* Used to check if scheduler for the other core type needs to be called on job completion.
+ *
+ * Used only for Mali-200, where job start may fail if the only MMU is busy
+ * with another session's address space.
+ */
+static inline mali_bool mali_group_other_reschedule_needed(struct mali_group *group)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+#if defined(USING_MALI200)
+ if (group->pagedir_activation_failed)
+ {
+ group->pagedir_activation_failed = MALI_FALSE;
+ return MALI_TRUE;
+ }
+ else
+#endif
+ {
+ return MALI_FALSE;
+ }
+}
+
+static enum mali_group_activate_pd_status mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+ enum mali_group_activate_pd_status retval;
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
+ MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count);
+
+ if (0 != group->page_dir_ref_count)
+ {
+ if (group->session != session)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali group: Activating session FAILED: 0x%08x on group 0x%08X. Existing session: 0x%08x\n", session, group, group->session));
+ return MALI_GROUP_ACTIVATE_PD_STATUS_FAILED;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("Mali group: Activating session already activated: 0x%08x on group 0x%08X. New Ref: %d\n", session, group, 1+group->page_dir_ref_count));
+			retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD;
+		}
+ }
+ else
+ {
+ /* There might be another session here, but it is ok to overwrite it since group->page_dir_ref_count==0 */
+ if (group->session != session)
+ {
+ mali_bool activate_success;
+ MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X. Ref: %d\n", session, group->session, group, 1+group->page_dir_ref_count));
+
+ activate_success = mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
+ MALI_DEBUG_ASSERT(activate_success);
+			if (MALI_FALSE == activate_success) return MALI_GROUP_ACTIVATE_PD_STATUS_FAILED;
+ group->session = session;
+ retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_SWITCHED_PD;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X. Ref: %d\n", session->page_directory, group, 1+group->page_dir_ref_count));
+ retval = MALI_GROUP_ACTIVATE_PD_STATUS_OK_KEPT_PD;
+ }
+ }
+
+ group->page_dir_ref_count++;
+ return retval;
+}
+
+static void mali_group_deactivate_page_directory(struct mali_group *group, struct mali_session_data *session)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ MALI_DEBUG_ASSERT(0 < group->page_dir_ref_count);
+ MALI_DEBUG_ASSERT(session == group->session);
+
+ group->page_dir_ref_count--;
+
+ /* As an optimization, the MMU still points to the group->session even if (0 == group->page_dir_ref_count),
+ and we do not call mali_mmu_activate_empty_page_directory(group->mmu); */
+ MALI_DEBUG_ASSERT(0 <= group->page_dir_ref_count);
+}
+
+void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ if (0 == group->page_dir_ref_count)
+ {
+ MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state);
+ MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);
+
+ if (group->session == session)
+ {
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
+ mali_mmu_activate_empty_page_directory(group->mmu);
+ group->session = NULL;
+ }
+ }
+}
+
+void mali_group_power_on(void)
+{
+ int i;
+ for (i = 0; i < mali_global_num_groups; i++)
+ {
+ struct mali_group *group = mali_global_groups[i];
+ mali_group_lock(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state);
+ MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);
+ MALI_DEBUG_ASSERT_POINTER(group->cluster);
+ group->power_is_on = MALI_TRUE;
+ mali_cluster_power_is_enabled_set(group->cluster, MALI_TRUE);
+ mali_group_unlock(group);
+ }
+ MALI_DEBUG_PRINT(3,("group: POWER ON\n"));
+}
+
+mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+ return group->power_is_on;
+}
+
+void mali_group_power_off(void)
+{
+ int i;
+	/* It is necessary to set group->session = NULL so that the powered-off MMU is not written to on map/unmap */
+	/* It is necessary to set group->power_is_on = MALI_FALSE so that pending bottom halves do not access powered-off cores */
+ for (i = 0; i < mali_global_num_groups; i++)
+ {
+ struct mali_group *group = mali_global_groups[i];
+ mali_group_lock(group);
+ MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->gp_state);
+ MALI_DEBUG_ASSERT(MALI_GROUP_CORE_STATE_IDLE == group->pp_state);
+ MALI_DEBUG_ASSERT_POINTER(group->cluster);
+ group->session = NULL;
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ group->power_is_on = MALI_FALSE;
+ mali_cluster_power_is_enabled_set(group->cluster, MALI_FALSE);
+ mali_group_unlock(group);
+ }
+ MALI_DEBUG_PRINT(3,("group: POWER OFF\n"));
+}
+
+
+static void mali_group_recovery_reset(struct mali_group *group)
+{
+ MALI_ASSERT_GROUP_LOCKED(group);
+
+ /* Stop cores, bus stop */
+ if (NULL != group->pp_core)
+ {
+ mali_pp_stop_bus(group->pp_core);
+ }
+ if (NULL != group->gp_core)
+ {
+ mali_gp_stop_bus(group->gp_core);
+ }
+
+ /* Flush MMU */
+ mali_mmu_activate_fault_flush_page_directory(group->mmu);
+ mali_mmu_page_fault_done(group->mmu);
+
+ /* Wait for cores to stop bus */
+ if (NULL != group->pp_core)
+ {
+ mali_pp_stop_bus_wait(group->pp_core);
+ }
+ if (NULL != group->gp_core)
+ {
+ mali_gp_stop_bus_wait(group->gp_core);
+ }
+
+ /* Reset cores */
+ if (NULL != group->pp_core)
+ {
+ mali_pp_hard_reset(group->pp_core);
+ }
+ if (NULL != group->gp_core)
+ {
+ mali_gp_hard_reset(group->gp_core);
+ }
+
+ /* Reset MMU */
+ mali_mmu_reset(group->mmu);
+ group->session = NULL;
+}
+
+/* Group lock need to be taken before calling mali_group_complete_jobs. Will release the lock here. */
+static void mali_group_complete_jobs(struct mali_group *group, mali_bool complete_gp, mali_bool complete_pp, mali_bool success)
+{
+ mali_bool need_group_reset = MALI_FALSE;
+ mali_bool gp_success = success;
+ mali_bool pp_success = success;
+
+ MALI_ASSERT_GROUP_LOCKED(group);
+
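+	/* Try a soft reset of the affected core first; if that fails, or both GP and PP must be completed (e.g. MMU page fault), fall back to a recovery reset of the whole group */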
+ if (complete_gp && !complete_pp)
+ {
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ if (_MALI_OSK_ERR_OK == mali_gp_reset(group->gp_core))
+ {
+ struct mali_gp_job *gp_job_to_return = group->gp_running_job;
+ group->gp_state = MALI_GROUP_CORE_STATE_IDLE;
+ group->gp_running_job = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(gp_job_to_return);
+
+ mali_group_deactivate_page_directory(group, mali_gp_job_get_session(gp_job_to_return));
+
+ if(mali_group_other_reschedule_needed(group))
+ {
+ mali_group_unlock(group);
+ mali_pp_scheduler_do_schedule();
+ }
+ else
+ {
+ mali_group_unlock(group);
+ }
+
+ mali_gp_scheduler_job_done(group, gp_job_to_return, gp_success);
+ mali_pm_core_event(MALI_CORE_EVENT_GP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+
+ return;
+ }
+ else
+ {
+ need_group_reset = MALI_TRUE;
+ MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset GP, need to reset entire group\n"));
+ pp_success = MALI_FALSE; /* This might kill PP as well, so this should fail */
+ }
+ }
+ if (complete_pp && !complete_gp)
+ {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ if (_MALI_OSK_ERR_OK == mali_pp_reset(group->pp_core))
+ {
+ struct mali_pp_job *pp_job_to_return = group->pp_running_job;
+ u32 pp_sub_job_to_return = group->pp_running_sub_job;
+ group->pp_state = MALI_GROUP_CORE_STATE_IDLE;
+ group->pp_running_job = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(pp_job_to_return);
+
+ mali_group_deactivate_page_directory(group, mali_pp_job_get_session(pp_job_to_return));
+
+ if(mali_group_other_reschedule_needed(group))
+ {
+ mali_group_unlock(group);
+ mali_gp_scheduler_do_schedule();
+ }
+ else
+ {
+ mali_group_unlock(group);
+ }
+
+ mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, pp_success);
+ mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+
+ return;
+ }
+ else
+ {
+ need_group_reset = MALI_TRUE;
+ MALI_DEBUG_PRINT(3, ("Mali group: Failed to reset PP, need to reset entire group\n"));
+ gp_success = MALI_FALSE; /* The following group reset may also kill the running GP job, so report it as failed */
+ }
+ }
+ else if (complete_gp && complete_pp)
+ {
+ need_group_reset = MALI_TRUE;
+ }
+
+ if (MALI_TRUE == need_group_reset)
+ {
+ struct mali_gp_job *gp_job_to_return = group->gp_running_job;
+ struct mali_pp_job *pp_job_to_return = group->pp_running_job;
+ u32 pp_sub_job_to_return = group->pp_running_sub_job;
+ mali_bool schedule_other = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(3, ("Mali group: Resetting entire group\n"));
+
+ group->gp_state = MALI_GROUP_CORE_STATE_IDLE;
+ group->gp_running_job = NULL;
+ if (NULL != gp_job_to_return)
+ {
+ mali_group_deactivate_page_directory(group, mali_gp_job_get_session(gp_job_to_return));
+ }
+
+ group->pp_state = MALI_GROUP_CORE_STATE_IDLE;
+ group->pp_running_job = NULL;
+ if (NULL != pp_job_to_return)
+ {
+ mali_group_deactivate_page_directory(group, mali_pp_job_get_session(pp_job_to_return));
+ }
+
+ /* The reset has to be done after mali_group_deactivate_page_directory() */
+ mali_group_recovery_reset(group);
+
+ if (mali_group_other_reschedule_needed(group) && (NULL == gp_job_to_return || NULL == pp_job_to_return))
+ {
+ schedule_other = MALI_TRUE;
+ }
+
+ mali_group_unlock(group);
+
+ if (NULL != gp_job_to_return)
+ {
+ mali_gp_scheduler_job_done(group, gp_job_to_return, gp_success);
+ mali_pm_core_event(MALI_CORE_EVENT_GP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+ }
+ else if (schedule_other)
+ {
+ mali_pp_scheduler_do_schedule();
+ }
+
+ if (NULL != pp_job_to_return)
+ {
+ mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, pp_success);
+ mali_pm_core_event(MALI_CORE_EVENT_PP_STOP); /* It is ok to do this after schedule, since START/STOP is simply ++ and -- anyways */
+ }
+ else if (schedule_other)
+ {
+ mali_gp_scheduler_do_schedule();
+ }
+
+ return;
+ }
+
+ mali_group_unlock(group);
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
+ if (group->gp_core)
+ {
+ n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tGP state: %d\n", group->gp_state);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
+ }
+ if (group->pp_core)
+ {
+ n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tPP state: %d\n", group->pp_state);
+ n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d\n",
+ group->pp_running_job, group->pp_running_sub_job);
+ }
+
+ return n;
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/common/mali_group.h b/drivers/media/video/samsung/mali/common/mali_group.h
new file mode 100644
index 0000000..3533d13
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_group.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_GROUP_H__
+#define __MALI_GROUP_H__
+
+#include "linux/jiffies.h"
+#include "mali_osk.h"
+#include "mali_cluster.h"
+#include "mali_mmu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_session.h"
+
+/* max runtime [ms] for a core job - used by timeout timers */
+#define MAX_RUNTIME 5000
+#define MALI_MAX_NUMBER_OF_GROUPS 9
+
+/** @brief A Mali group object represents an MMU together with a GP and/or a PP core.
+ */
+struct mali_group;
+
+enum mali_group_event_t
+{
+ GROUP_EVENT_PP_JOB_COMPLETED, /**< PP job completed successfully */
+ GROUP_EVENT_PP_JOB_FAILED, /**< PP job completed with failure */
+ GROUP_EVENT_PP_JOB_TIMED_OUT, /**< PP job reached max runtime */
+ GROUP_EVENT_GP_JOB_COMPLETED, /**< GP job completed successfully */
+ GROUP_EVENT_GP_JOB_FAILED, /**< GP job completed with failure */
+ GROUP_EVENT_GP_JOB_TIMED_OUT, /**< GP job reached max runtime */
+ GROUP_EVENT_GP_OOM, /**< GP job ran out of heap memory */
+ GROUP_EVENT_MMU_PAGE_FAULT, /**< MMU page fault */
+};
+
+enum mali_group_core_state
+{
+ MALI_GROUP_CORE_STATE_IDLE,
+ MALI_GROUP_CORE_STATE_WORKING,
+ MALI_GROUP_CORE_STATE_OOM
+};
+
+/** @brief Create a new Mali group object
+ *
+ * @param cluster Pointer to the cluster to which the group is connected.
+ * @param mmu Pointer to the MMU that defines this group
+ * @return A pointer to a new group object
+ */
+struct mali_group *mali_group_create(struct mali_cluster *cluster, struct mali_mmu_core *mmu);
+void mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core* gp_core);
+void mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core* pp_core);
+void mali_group_delete(struct mali_group *group);
+
+/** @brief Reset group
+ *
+ * This function will reset the entire group, including all the cores present in the group.
+ *
+ * @param group Pointer to the group to reset
+ */
+void mali_group_reset(struct mali_group *group);
+
+/** @brief Get pointer to GP core object
+ */
+struct mali_gp_core* mali_group_get_gp_core(struct mali_group *group);
+
+/** @brief Get pointer to PP core object
+ */
+struct mali_pp_core* mali_group_get_pp_core(struct mali_group *group);
+
+/** @brief Lock group object
+ *
+ * Most group functions will lock the group object themselves. The exception is
+ * the group_bottom_half which requires the group to be locked on entry.
+ *
+ * @param group Pointer to group to lock
+ */
+void mali_group_lock(struct mali_group *group);
+
+/** @brief Unlock group object
+ *
+ * @param group Pointer to group to unlock
+ */
+void mali_group_unlock(struct mali_group *group);
+#ifdef DEBUG
+void mali_group_assert_locked(struct mali_group *group);
+#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
+#else
+#define MALI_ASSERT_GROUP_LOCKED(group)
+#endif
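+
+/* Illustrative locking pattern (a sketch, not a prescribed call sequence): callers that
+ * need to inspect or modify group state across several operations hold the lock themselves:
+ *
+ *   mali_group_lock(group);
+ *   MALI_ASSERT_GROUP_LOCKED(group);
+ *   ... read or update group state ...
+ *   mali_group_unlock(group);
+ */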
+
+/** @brief Start GP job
+ */
+_mali_osk_errcode_t mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+/** @brief Start fragment of PP job
+ */
+_mali_osk_errcode_t mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
+
+/** @brief Resume GP job that suspended waiting for more heap memory
+ */
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
+/** @brief Abort GP job
+ *
+ * Used to abort suspended OOM jobs when user space failed to allocate more memory.
+ */
+void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
+/** @brief Abort all GP jobs from \a session
+ *
+ * Used on session close when terminating all running and queued jobs from \a session.
+ */
+void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+
+enum mali_group_core_state mali_group_gp_state(struct mali_group *group);
+enum mali_group_core_state mali_group_pp_state(struct mali_group *group);
+
+/** @brief The common group bottom half interrupt handler
+ *
+ * This is only called from the GP and PP bottom halves.
+ *
+ * The action taken is dictated by the \a event.
+ *
+ * @param event The event code
+ */
+void mali_group_bottom_half(struct mali_group *group, enum mali_group_event_t event);
+
+struct mali_mmu_core *mali_group_get_mmu(struct mali_group *group);
+struct mali_session_data *mali_group_get_session(struct mali_group *group);
+
+void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session_data);
+
+void mali_group_power_on(void);
+void mali_group_power_off(void);
+mali_bool mali_group_power_is_on(struct mali_group *group);
+
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
+
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+
+#endif /* __MALI_GROUP_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_hw_core.c b/drivers/media/video/samsung/mali/common/mali_hw_core.c
new file mode 100644
index 0000000..0b08622
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_hw_core.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_hw_core.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size)
+{
+ core->phys_addr = resource->base;
+ core->description = resource->description;
+ core->size = reg_size;
+ if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(core->phys_addr, core->size, core->description))
+ {
+ core->mapped_registers = _mali_osk_mem_mapioregion(core->phys_addr, core->size, core->description);
+ if (NULL != core->mapped_registers)
+ {
+ return _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to map memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to request memory region for core %s at phys_addr 0x%08X\n", core->description, core->phys_addr));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_hw_core_delete(struct mali_hw_core *core)
+{
+ _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+ core->mapped_registers = NULL;
+ _mali_osk_mem_unreqregion(core->phys_addr, core->size);
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_hw_core.h b/drivers/media/video/samsung/mali/common/mali_hw_core.h
new file mode 100644
index 0000000..c797804
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_hw_core.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_HW_CORE_H__
+#define __MALI_HW_CORE_H__
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+/**
+ * The common parts for all Mali HW cores (GP, PP, MMU, L2 and PMU)
+ * This struct is embedded inside all core specific structs.
+ */
+struct mali_hw_core
+{
+ u32 phys_addr; /**< Physical address of the registers */
+ u32 size; /**< Size of registers */
+ mali_io_address mapped_registers; /**< Virtual mapping of the registers */
+ const char* description; /**< Name of unit (as specified in device configuration) */
+};
+
+#define MALI_HW_CORE_NO_COUNTER ((u32)-1)
+#define MALI_HW_CORE_INVALID_VALUE ((u32)-1)
+
+_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
+void mali_hw_core_delete(struct mali_hw_core *core);
+
+MALI_STATIC_INLINE u32 mali_hw_core_register_read(struct mali_hw_core *core, u32 relative_address)
+{
+ u32 read_val;
+ read_val = _mali_osk_mem_ioread32(core->mapped_registers, relative_address);
+ MALI_DEBUG_PRINT(6, ("register_read for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, read_val));
+ return read_val;
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_relaxed(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write_relaxed for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32_relaxed(core->mapped_registers, relative_address, new_val);
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
+{
+ MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
+ core->description, relative_address, new_val));
+ _mali_osk_mem_iowrite32(core->mapped_registers, relative_address, new_val);
+}
+
+MALI_STATIC_INLINE void mali_hw_core_register_write_array_relaxed(struct mali_hw_core *core, u32 relative_address, u32 *write_array, u32 nr_of_regs)
+{
+ u32 i;
+ MALI_DEBUG_PRINT(6, ("register_write_array: for core %s, relative addr=0x%04X, nr of regs=%u\n",
+ core->description,relative_address, nr_of_regs));
+
+ /* Do not use burst writes against the registers */
+ for (i = 0; i < nr_of_regs; i++)
+ {
+ mali_hw_core_register_write_relaxed(core, relative_address + i*4, write_array[i]);
+ }
+}
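+
+/* Example (illustrative only; the member name and register offsets below are hypothetical):
+ * a core-specific driver embeds a struct mali_hw_core and accesses its registers through
+ * these helpers:
+ *
+ *   u32 version = mali_hw_core_register_read(&gp_core->hw_core, 0x6C);
+ *   mali_hw_core_register_write(&gp_core->hw_core, 0x20, 0xFFFFFFFF);
+ */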
+
+#endif /* __MALI_HW_CORE_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_common.h b/drivers/media/video/samsung/mali/common/mali_kernel_common.h
new file mode 100644
index 0000000..b354f92
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_common.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_COMMON_H__
+#define __MALI_KERNEL_COMMON_H__
+
+/* Make sure debug is defined when it should be */
+#ifndef DEBUG
+ #if defined(_DEBUG)
+ #define DEBUG
+ #endif
+#endif
+
+/* Macro for generating a kernel panic.
+ * Turned on or off by compile-time Makefile settings.
+ */
+#if defined(USING_KERNEL_PANIC)
+#include <linux/kernel.h>
+ #define MALI_PANIC(fmt, args...) panic( fmt, ## args );
+#else
+ #define MALI_PANIC(fmt, args...)
+#endif
+
+/* This file includes several useful macros for error checking, debugging and printing.
+ * - MALI_PRINTF(...)                   Do not use this macro directly; it is compiled into Release builds as well.
+ * - MALI_DEBUG_PRINT(nr, (X))          Prints the second argument if nr <= mali_debug_level.
+ * - MALI_DEBUG_PRINT_ERROR((X))        Prints an error text, a source trace, and the given error message.
+ * - MALI_DEBUG_PRINT_ASSERT(exp, (X))  If the asserted expression is false, the error message is printed and a breakpoint is triggered.
+ * - MALI_DEBUG_ASSERT_POINTER(pointer) Triggers if the pointer is NULL.
+ * - MALI_DEBUG_CODE(X)                 The code inside the macro is only compiled in Debug builds.
+ *
+ * The (X) means that you must add an extra set of parentheses around the argument list.
+ *
+ * The printf function MALI_PRINTF(...) is routed to _mali_osk_dbgmsg.
+ *
+ * Suggested range for the debug level is [1:6] where
+ * [1:2] are messages with highest priority, indicating possible errors,
+ * [3:4] are messages with medium priority, outputting important variables,
+ * [5:6] are messages with low priority, used during extensive debugging.
+ */
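+
+/* Example (illustrative; job_id and status are placeholder variables): the extra
+ * parentheses let a full printf-style argument list pass through a single macro parameter:
+ *
+ *   MALI_DEBUG_PRINT(3, ("Mali group: job %u completed with status 0x%08X\n", job_id, status));
+ */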
+
+ /**
+ * Fundamental error macro. Reports an error code. This is abstracted to allow us to
+ * easily switch to a different error reporting method if we want, and also to allow
+ * us to search for error returns easily.
+ *
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_ERROR(MALI_ERROR_OUT_OF_MEMORY);
+ */
+#define MALI_ERROR(error_code) return (error_code)
+
+/**
+ * Basic error macro, to indicate success.
+ * Note no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_SUCCESS;
+ */
+#define MALI_SUCCESS MALI_ERROR(_MALI_OSK_ERR_OK)
+
+/**
+ * Basic error macro. This checks whether the given condition is true, and if not returns
+ * from this function with the supplied error code. This is a macro so that we can override it
+ * for stress testing.
+ *
+ * Note that this uses the do-while-0 wrapping to ensure that we don't get problems with dangling
+ * else clauses. Note also no closing semicolon - this is supplied in typical usage:
+ *
+ * MALI_CHECK((p!=NULL), ERROR_NO_OBJECT);
+ */
+#define MALI_CHECK(condition, error_code) do { if(!(condition)) MALI_ERROR(error_code); } while(0)
+
+/**
+ * Error propagation macro. If the expression given evaluates to anything other than _MALI_OSK_ERR_OK,
+ * then the value is returned from the enclosing function as an error code. This effectively
+ * acts as a guard clause, and propagates error values up the call stack. This uses a
+ * temporary value to ensure that the error expression is not evaluated twice.
+ * If the counter for forcing a failure has been set using _mali_force_error, this error will be
+ * returned without evaluating the expression in MALI_CHECK_NO_ERROR.
+ */
+#define MALI_CHECK_NO_ERROR(expression) \
+ do { _mali_osk_errcode_t _check_no_error_result=(expression); \
+ if(_check_no_error_result != _MALI_OSK_ERR_OK) \
+ MALI_ERROR(_check_no_error_result); \
+ } while(0)
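+
+/* Typical use (a sketch, not taken from a specific call site): propagate a
+ * sub-initialization failure straight to the caller:
+ *
+ *   MALI_CHECK_NO_ERROR(mali_memory_session_begin(session_data));
+ */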
+
+/**
+ * Pointer check macro. Checks non-null pointer.
+ */
+#define MALI_CHECK_NON_NULL(pointer, error_code) MALI_CHECK( ((pointer)!=NULL), (error_code) )
+
+/**
+ * Error macro with goto. This checks whether the given condition is true, and if not jumps
+ * to the specified label using a goto. The label must therefore be local to the function in
+ * which this macro appears. This is most usually used to execute some clean-up code before
+ * exiting with a call to ERROR.
+ *
+ * Like the other macros, this is a macro to allow us to override the condition if we wish,
+ * e.g. to force an error during stress testing.
+ */
+#define MALI_CHECK_GOTO(condition, label) do { if(!(condition)) goto label; } while(0)
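+
+/* Illustrative use (the label name is hypothetical): bail out to local clean-up code
+ * when a check fails:
+ *
+ *   MALI_CHECK_GOTO(NULL != queue, queue_setup_failed);
+ */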
+
+/**
+ * Explicitly ignore a parameter passed into a function, to suppress compiler warnings.
+ * Should only be used with parameter names.
+ */
+#define MALI_IGNORE(x) x=x
+
+#define MALI_PRINTF(args) _mali_osk_dbgmsg args;
+
+#define MALI_PRINT_ERROR(args) do{ \
+ MALI_PRINTF(("Mali: ERR: %s\n" ,__FILE__)); \
+ MALI_PRINTF((" %s()%4d\n ", __FUNCTION__, __LINE__)) ; \
+ MALI_PRINTF(args); \
+ MALI_PRINTF(("\n")); \
+ } while(0)
+
+#define MALI_PRINT(args) do{ \
+ MALI_PRINTF(("Mali: ")); \
+ MALI_PRINTF(args); \
+ } while (0)
+
+#ifdef DEBUG
+#ifndef mali_debug_level
+extern int mali_debug_level;
+#endif
+
+#define MALI_DEBUG_CODE(code) code
+#define MALI_DEBUG_PRINT(level, args) do { \
+ if((level) <= mali_debug_level)\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); } \
+ } while (0)
+
+#define MALI_DEBUG_PRINT_ERROR(args) MALI_PRINT_ERROR(args)
+
+#define MALI_DEBUG_PRINT_IF(level,condition,args) \
+ if((condition)&&((level) <= mali_debug_level))\
+ {MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+#define MALI_DEBUG_PRINT_ELSE(level, args)\
+ else if((level) <= mali_debug_level)\
+ { MALI_PRINTF(("Mali<" #level ">: ")); MALI_PRINTF(args); }
+
+/**
+ * @note these variants of DEBUG ASSERTS will cause a debugger breakpoint
+ * to be entered (see _mali_osk_break() ). An alternative would be to call
+ * _mali_osk_abort(), on OSs that support it.
+ */
+#define MALI_DEBUG_PRINT_ASSERT(condition, args) do {if( !(condition)) { MALI_PRINT_ERROR(args); _mali_osk_break(); } } while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) {MALI_PRINT_ERROR(("NULL pointer " #pointer)); _mali_osk_break();} } while(0)
+#define MALI_DEBUG_ASSERT(condition) do {if( !(condition)) {MALI_PRINT_ERROR(("ASSERT failed: " #condition )); _mali_osk_break();} } while(0)
+
+#else /* DEBUG */
+
+#define MALI_DEBUG_CODE(code)
+#define MALI_DEBUG_PRINT(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ERROR(args) do {} while(0)
+#define MALI_DEBUG_PRINT_IF(level,condition,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ELSE(level,args) do {} while(0)
+#define MALI_DEBUG_PRINT_ASSERT(condition,args) do {} while(0)
+#define MALI_DEBUG_ASSERT_POINTER(pointer) do {} while(0)
+#define MALI_DEBUG_ASSERT(condition) do {} while(0)
+
+#endif /* DEBUG */
+
+/**
+ * variables from user space cannot be dereferenced from kernel space; tagging them
+ * with __user allows the GCC compiler to generate a warning. Other compilers may
+ * not support this so we define it here as an empty macro if the compiler doesn't
+ * define it.
+ */
+#ifndef __user
+#define __user
+#endif
+
+#endif /* __MALI_KERNEL_COMMON_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_core.c b/drivers/media/video/samsung/mali/common/mali_kernel_core.c
new file mode 100644
index 0000000..45e86d2
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_core.c
@@ -0,0 +1,980 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_kernel_core.h"
+#include "mali_memory.h"
+#include "mali_mem_validation.h"
+#include "mali_mmu.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_dlbu.h"
+#include "mali_gp.h"
+#include "mali_pp.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_cluster.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_pmu.h"
+#include "mali_scheduler.h"
+#ifdef CONFIG_MALI400_GPU_UTILIZATION
+#include "mali_kernel_utilization.h"
+#endif
+#include "mali_l2_cache.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_osk_profiling.h"
+#endif
+
+/** Pointer to table of resource definitions available to the Mali driver.
+ * _mali_osk_resources_init() sets up the pointer to this table.
+ */
+static _mali_osk_resource_t *arch_configuration = NULL;
+
+/** Start profiling from module load? */
+int mali_boot_profiling = 0;
+
+/** Number of resources initialized by _mali_osk_resources_init() */
+static u32 num_resources;
+
+static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
+static u32 global_gpu_base_address = 0;
+static u32 global_gpu_major_version = 0;
+static u32 global_gpu_minor_version = 0;
+
+static u32 first_pp_offset = 0;
+
+#define HANG_CHECK_MSECS_DEFAULT 500 /* 500 ms */
+#define WATCHDOG_MSECS_DEFAULT 4000 /* 4 s */
+
+/* timer related */
+int mali_max_job_runtime = WATCHDOG_MSECS_DEFAULT;
+int mali_hang_check_interval = HANG_CHECK_MSECS_DEFAULT;
+
+static _mali_osk_resource_t *mali_find_resource(_mali_osk_resource_type_t type, u32 offset)
+{
+ int i;
+ u32 addr = global_gpu_base_address + offset;
+
+ for (i = 0; i < num_resources; i++)
+ {
+ if (type == arch_configuration[i].type && arch_configuration[i].base == addr)
+ {
+ return &(arch_configuration[i]);
+ }
+ }
+
+ return NULL;
+}
+
+static u32 mali_count_resources(_mali_osk_resource_type_t type)
+{
+ int i;
+ u32 retval = 0;
+
+ for (i = 0; i < num_resources; i++)
+ {
+ if (type == arch_configuration[i].type)
+ {
+ retval++;
+ }
+ }
+
+ return retval;
+}
+
+
+static _mali_osk_errcode_t mali_parse_gpu_base_and_first_pp_offset_address(void)
+{
+ int i;
+ _mali_osk_resource_t *first_gp_resource = NULL;
+ _mali_osk_resource_t *first_pp_resource = NULL;
+
+ for (i = 0; i < num_resources; i++)
+ {
+ if (MALI_GP == arch_configuration[i].type)
+ {
+ if (NULL == first_gp_resource || first_gp_resource->base > arch_configuration[i].base)
+ {
+ first_gp_resource = &(arch_configuration[i]);
+ }
+ }
+ if (MALI_PP == arch_configuration[i].type)
+ {
+ if (NULL == first_pp_resource || first_pp_resource->base > arch_configuration[i].base)
+ {
+ first_pp_resource = &(arch_configuration[i]);
+ }
+ }
+ }
+
+ if (NULL == first_gp_resource || NULL == first_pp_resource)
+ {
+ MALI_PRINT_ERROR(("No GP+PP core specified in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (first_gp_resource->base < first_pp_resource->base)
+ {
+ /* GP is first, so we are dealing with Mali-300, Mali-400 or Mali-450 */
+ global_gpu_base_address = first_gp_resource->base;
+ first_pp_offset = 0x8000;
+ }
+ else
+ {
+ /* PP is first, so we are dealing with Mali-200 */
+ global_gpu_base_address = first_pp_resource->base;
+ first_pp_offset = 0x0;
+ }
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_parse_product_info(void)
+{
+ _mali_osk_resource_t *first_pp_resource = NULL;
+
+ /* Find the first PP core */
+ first_pp_resource = mali_find_resource(MALI_PP, first_pp_offset);
+ if (NULL != first_pp_resource)
+ {
+ /* Create a dummy PP object for this core so that we can read the version register */
+ struct mali_group *group = mali_group_create(NULL, NULL);
+ if (NULL != group)
+ {
+ /*struct mali_pp_core *pp_core = mali_pp_create(first_pp_resource, group, 0);*/
+ struct mali_pp_core *pp_core = mali_pp_create(first_pp_resource, group);
+ if (NULL != pp_core)
+ {
+ u32 pp_version = mali_pp_core_get_version(pp_core);
+ mali_pp_delete(pp_core);
+ mali_group_delete(group);
+
+ global_gpu_major_version = (pp_version >> 8) & 0xFF;
+ global_gpu_minor_version = pp_version & 0xFF;
+
+ switch (pp_version >> 16)
+ {
+ case MALI200_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI200;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-200 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI300_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI300;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-300 r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI400_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI400;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-400 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ case MALI450_PP_PRODUCT_ID:
+ global_product_id = _MALI_PRODUCT_ID_MALI450;
+ MALI_DEBUG_PRINT(2, ("Found Mali GPU Mali-450 MP r%up%u\n", global_gpu_major_version, global_gpu_minor_version));
+ break;
+ default:
+ MALI_DEBUG_PRINT(2, ("Found unknown Mali GPU GPU (r%up%u)\n", global_gpu_major_version, global_gpu_minor_version));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to create initial PP object\n"));
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to create initial group object\n"));
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("First PP core not specified in config file\n"));
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+static void mali_delete_clusters(void)
+{
+ u32 i;
+ u32 number_of_clusters = mali_cluster_get_glob_num_clusters();
+
+ for (i = 0; i < number_of_clusters; i++)
+ {
+ mali_cluster_delete(mali_cluster_get_global_cluster(i));
+ }
+}
+
+static _mali_osk_errcode_t mali_create_cluster(_mali_osk_resource_t *resource)
+{
+ if (NULL != resource)
+ {
+ struct mali_l2_cache_core *l2_cache;
+
+ if (mali_l2_cache_core_get_glob_num_l2_cores() >= mali_l2_cache_core_get_max_num_l2_cores())
+ {
+ MALI_PRINT_ERROR(("Found too many L2 cache core objects, max %u is supported\n", mali_l2_cache_core_get_max_num_l2_cores()));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Found L2 cache %s, starting new cluster\n", resource->description));
+
+ /*l2_cache = mali_l2_cache_create(resource, global_num_l2_cache_cores);*/
+ l2_cache = mali_l2_cache_create(resource);
+ if (NULL == l2_cache)
+ {
+ MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (NULL == mali_cluster_create(l2_cache))
+ {
+ MALI_PRINT_ERROR(("Failed to create cluster object\n"));
+ mali_l2_cache_delete(l2_cache);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ else
+ {
+ mali_cluster_create(NULL);
+ if (NULL == mali_cluster_get_global_cluster(0))
+ {
+ MALI_PRINT_ERROR(("Failed to create cluster object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Created cluster object\n"));
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_cluster(void)
+{
+ if (_MALI_PRODUCT_ID_MALI200 == global_product_id)
+ {
+ /* Create dummy cluster without L2 cache */
+ return mali_create_cluster(NULL);
+ }
+ else if (_MALI_PRODUCT_ID_MALI300 == global_product_id || _MALI_PRODUCT_ID_MALI400 == global_product_id)
+ {
+ _mali_osk_resource_t *l2_resource = mali_find_resource(MALI_L2, 0x1000);
+ if (NULL == l2_resource)
+ {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return mali_create_cluster(l2_resource);
+ }
+ else if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ /*
+ * L2 for GP at 0x10000
+ * L2 for PP0-3 at 0x01000
+ * L2 for PP4-7 at 0x11000 (optional)
+ */
+
+ _mali_osk_resource_t *l2_gp_resource;
+ _mali_osk_resource_t *l2_pp_grp0_resource;
+ _mali_osk_resource_t *l2_pp_grp1_resource;
+
+ /* Make cluster for GP's L2 */
+ l2_gp_resource = mali_find_resource(MALI_L2, 0x10000);
+ if (NULL != l2_gp_resource)
+ {
+ _mali_osk_errcode_t ret;
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 cluster for GP\n"));
+ ret = mali_create_cluster(l2_gp_resource);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ return ret;
+ }
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Make cluster for first PP core group */
+ l2_pp_grp0_resource = mali_find_resource(MALI_L2, 0x1000);
+ if (NULL != l2_pp_grp0_resource)
+ {
+ _mali_osk_errcode_t ret;
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 cluster for PP group 0\n"));
+ ret = mali_create_cluster(l2_pp_grp0_resource);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ return ret;
+ }
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Second PP core group is optional, don't fail if we don't find it */
+ l2_pp_grp1_resource = mali_find_resource(MALI_L2, 0x11000);
+ if (NULL != l2_pp_grp1_resource)
+ {
+ _mali_osk_errcode_t ret;
+ MALI_DEBUG_PRINT(3, ("Creating Mali-450 cluster for PP group 0\n"));
+ ret = mali_create_cluster(l2_pp_grp1_resource);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ return ret;
+ }
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_create_group(struct mali_cluster *cluster,
+ _mali_osk_resource_t *resource_mmu,
+ _mali_osk_resource_t *resource_gp,
+ _mali_osk_resource_t *resource_pp)
+{
+ struct mali_mmu_core *mmu;
+ struct mali_group *group;
+ struct mali_pp_core *pp;
+
+ MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
+
+ /* Create the MMU object */
+ mmu = mali_mmu_create(resource_mmu);
+ if (NULL == mmu)
+ {
+ MALI_PRINT_ERROR(("Failed to create MMU object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Create the group object */
+ group = mali_group_create(cluster, mmu);
+ if (NULL == group)
+ {
+ MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
+ mali_mmu_delete(mmu);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Set pointer back to group in mmu.*/
+ mali_mmu_set_group(mmu, group);
+
+ /* Add this group to current cluster */
+ mali_cluster_add_group(cluster, group);
+
+ if (NULL != resource_gp)
+ {
+ /* Create the GP core object inside this group */
+ /* global_gp_core = mali_gp_create(resource_gp, group); */
+ if (NULL == mali_gp_create(resource_gp, group))
+ {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create GP object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Add GP object to this group */
+ MALI_DEBUG_PRINT(3, ("Adding GP %s to group\n", resource_gp->description));
+ mali_group_add_gp_core(group, mali_gp_get_global_gp_core());
+ }
+
+ if (NULL != resource_pp)
+ {
+ /* Create the PP core object inside this group */
+ pp = mali_pp_create(resource_pp, group);
+
+ if (NULL == pp)
+ {
+ /* No need to clean up now, as we will clean up everything linked in from the cluster when we fail this function */
+ MALI_PRINT_ERROR(("Failed to create PP object\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Add PP object to this group */
+ MALI_DEBUG_PRINT(3, ("Adding PP %s to group\n", resource_pp->description));
+ mali_group_add_pp_core(group, pp);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_groups(void)
+{
+ if (_MALI_PRODUCT_ID_MALI200 == global_product_id)
+ {
+ _mali_osk_resource_t *resource_gp;
+ _mali_osk_resource_t *resource_pp;
+ _mali_osk_resource_t *resource_mmu;
+
+ MALI_DEBUG_ASSERT(1 == mali_cluster_get_glob_num_clusters());
+
+ resource_gp = mali_find_resource(MALI_GP, 0x02000);
+ resource_pp = mali_find_resource(MALI_PP, 0x00000);
+ resource_mmu = mali_find_resource(MMU, 0x03000);
+
+ if (NULL == resource_mmu || NULL == resource_gp || NULL == resource_pp)
+ {
+ /* Missing mandatory core(s) */
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /*return mali_create_group(global_clusters[0], resource_mmu, resource_gp, resource_pp);*/
+ return mali_create_group(mali_cluster_get_global_cluster(0), resource_mmu, resource_gp, resource_pp);
+ }
+ else if (_MALI_PRODUCT_ID_MALI300 == global_product_id ||
+ _MALI_PRODUCT_ID_MALI400 == global_product_id ||
+ _MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ _mali_osk_errcode_t err;
+ int cluster_id_gp = 0;
+ int cluster_id_pp_grp0 = 0;
+ int cluster_id_pp_grp1 = 0;
+ int i;
+ _mali_osk_resource_t *resource_gp;
+ _mali_osk_resource_t *resource_gp_mmu;
+ _mali_osk_resource_t *resource_pp[mali_pp_get_max_num_pp_cores()];
+ _mali_osk_resource_t *resource_pp_mmu[mali_pp_get_max_num_pp_cores()];
+ u32 max_num_pp_cores = mali_pp_get_max_num_pp_cores();
+
+ if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ /* Mali-450 has separate L2s for GP, and PP core group(s) */
+ cluster_id_pp_grp0 = 1;
+ cluster_id_pp_grp1 = 2;
+ }
+
+ resource_gp = mali_find_resource(MALI_GP, 0x00000);
+ resource_gp_mmu = mali_find_resource(MMU, 0x03000);
+ resource_pp[0] = mali_find_resource(MALI_PP, 0x08000);
+ resource_pp[1] = mali_find_resource(MALI_PP, 0x0A000);
+ resource_pp[2] = mali_find_resource(MALI_PP, 0x0C000);
+ resource_pp[3] = mali_find_resource(MALI_PP, 0x0E000);
+ resource_pp[4] = mali_find_resource(MALI_PP, 0x28000);
+ resource_pp[5] = mali_find_resource(MALI_PP, 0x2A000);
+ resource_pp[6] = mali_find_resource(MALI_PP, 0x2C000);
+ resource_pp[7] = mali_find_resource(MALI_PP, 0x2E000);
+ resource_pp_mmu[0] = mali_find_resource(MMU, 0x04000);
+ resource_pp_mmu[1] = mali_find_resource(MMU, 0x05000);
+ resource_pp_mmu[2] = mali_find_resource(MMU, 0x06000);
+ resource_pp_mmu[3] = mali_find_resource(MMU, 0x07000);
+ resource_pp_mmu[4] = mali_find_resource(MMU, 0x1C000);
+ resource_pp_mmu[5] = mali_find_resource(MMU, 0x1D000);
+ resource_pp_mmu[6] = mali_find_resource(MMU, 0x1E000);
+ resource_pp_mmu[7] = mali_find_resource(MMU, 0x1F000);
+
+ if (NULL == resource_gp || NULL == resource_gp_mmu || NULL == resource_pp[0] || NULL == resource_pp_mmu[0])
+ {
+ /* Missing mandatory core(s) */
+ MALI_DEBUG_PRINT(2, ("Missing mandatory resource, need at least one GP and one PP, both with a separate MMU (0x%08X, 0x%08X, 0x%08X, 0x%08X)\n",
+ resource_gp, resource_gp_mmu, resource_pp[0], resource_pp_mmu[0]));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT(1 <= mali_cluster_get_glob_num_clusters());
+ err = mali_create_group(mali_cluster_get_global_cluster(cluster_id_gp), resource_gp_mmu, resource_gp, NULL);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ return err;
+ }
+
+ /* Create group for first (and mandatory) PP core */
+ MALI_DEBUG_ASSERT(mali_cluster_get_glob_num_clusters() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
+ err = mali_create_group(mali_cluster_get_global_cluster(cluster_id_pp_grp0), resource_pp_mmu[0], NULL, resource_pp[0]);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ return err;
+ }
+
+ /* Create groups for rest of the cores in the first PP core group */
+ for (i = 1; i < 4; i++) /* First half of the PP cores belong to first core group */
+ {
+ if (NULL != resource_pp[i])
+ {
+ err = mali_create_group(mali_cluster_get_global_cluster(cluster_id_pp_grp0), resource_pp_mmu[i], NULL, resource_pp[i]);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ return err;
+ }
+ }
+ }
+
+ /* Create groups for cores in the second PP core group */
+ for (i = 4; i < max_num_pp_cores; i++) /* Second half of the PP cores belong to second core group */
+ {
+ if (NULL != resource_pp[i])
+ {
+ MALI_DEBUG_ASSERT(mali_cluster_get_glob_num_clusters() >= 2); /* Only Mali-450 have more than 4 PPs, and these cores belong to second core group */
+ err = mali_create_group(mali_cluster_get_global_cluster(cluster_id_pp_grp1), resource_pp_mmu[i], NULL, resource_pp[i]);
+ if (err != _MALI_OSK_ERR_OK)
+ {
+ return err;
+ }
+ }
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static _mali_osk_errcode_t mali_parse_config_pmu(void)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+ _mali_osk_resource_t *resource_pmu;
+ u32 number_of_pp_cores;
+ u32 number_of_l2_caches;
+
+ resource_pmu = mali_find_resource(PMU, 0x02000);
+ number_of_pp_cores = mali_count_resources(MALI_PP);
+ number_of_l2_caches = mali_count_resources(MALI_L2);
+
+ if (NULL != resource_pmu)
+ {
+ if (NULL == mali_pmu_create(resource_pmu, number_of_pp_cores, number_of_l2_caches))
+ {
+ err = _MALI_OSK_ERR_FAULT;
+ }
+ }
+ return err;
+}
+
+static _mali_osk_errcode_t mali_parse_config_memory(void)
+{
+ int i;
+ _mali_osk_errcode_t ret;
+
+ for(i = 0; i < num_resources; i++)
+ {
+ switch(arch_configuration[i].type)
+ {
+ case OS_MEMORY:
+ ret = mali_memory_core_resource_os_memory(&arch_configuration[i]);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ MALI_PRINT_ERROR(("Failed to register OS_MEMORY\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ break;
+ case MEMORY:
+ ret = mali_memory_core_resource_dedicated_memory(&arch_configuration[i]);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ MALI_PRINT_ERROR(("Failed to register MEMORY\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ break;
+ case MEM_VALIDATION:
+ ret = mali_mem_validation_add_range(&arch_configuration[i]);
+ if (_MALI_OSK_ERR_OK != ret)
+ {
+ MALI_PRINT_ERROR(("Failed to register MEM_VALIDATION\n"));
+ mali_memory_terminate();
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_initialize_subsystems(void)
+{
+ _mali_osk_errcode_t err;
+ mali_bool is_pmu_enabled;
+
+ err = mali_session_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ /* No biggie if we weren't able to initialize the profiling */
+ MALI_PRINT_ERROR(("Failed to initialize profiling, feature will be unavailable\n"));
+ }
+#endif
+
+ /* Get data from config.h */
+ err = _mali_osk_resources_init(&arch_configuration, &num_resources);
+ if (_MALI_OSK_ERR_OK != err) goto osk_resources_init_failed;
+
+ /* Initialize driver subsystems */
+ err = mali_memory_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+
+ /* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+ err = mali_parse_config_memory();
+ if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+
+ /* Parsing the GPU base address and first pp offset */
+ err = mali_parse_gpu_base_and_first_pp_offset_address();
+ if (_MALI_OSK_ERR_OK != err) goto parse_gpu_base_address_failed;
+
+ /* Initialize the MALI PMU */
+ err = mali_parse_config_pmu();
+ if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
+
+ is_pmu_enabled = mali_pmu_get_global_pmu_core() != NULL ? MALI_TRUE : MALI_FALSE;
+
+ /* Initialize the power management module */
+ err = mali_pm_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
+
+ /* Make sure the power stays on for the rest of this function */
+ mali_pm_always_on(MALI_TRUE);
+
+ /* Detect which Mali GPU we are dealing with */
+ err = mali_parse_product_info();
+ if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+
+ /* The global_product_id is now populated with the correct Mali GPU */
+
+ /* Initialize MMU module */
+ err = mali_mmu_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+
+ /* Initialize the DLBU module for Mali-450 */
+ if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ err = mali_dlbu_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
+ }
+
+ /* Start configuring the actual Mali hardware. */
+ err = mali_parse_config_cluster();
+ if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+ err = mali_parse_config_groups();
+ if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+
+ /* Initialize the schedulers */
+ err = mali_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
+ err = mali_gp_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+ err = mali_pp_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+
+#ifdef CONFIG_MALI400_GPU_UTILIZATION
+ /* Initialize the GPU utilization tracking */
+ err = mali_utilization_init();
+ if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
+#endif
+
+ /* We no longer need to force the power to stay on */
+ mali_pm_always_on(MALI_FALSE);
+ MALI_SUCCESS; /* all ok */
+
+ /* Error handling */
+#ifdef CONFIG_MALI400_GPU_UTILIZATION
+utilization_init_failed:
+ mali_pp_scheduler_terminate();
+#endif
+pp_scheduler_init_failed:
+ mali_gp_scheduler_terminate();
+gp_scheduler_init_failed:
+ mali_scheduler_terminate();
+scheduler_init_failed:
+config_parsing_failed:
+ mali_delete_clusters(); /* Delete clusters even if config parsing failed. */
+ if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ mali_dlbu_terminate();
+ }
+dlbu_init_failed:
+ mali_mmu_terminate();
+mmu_init_failed:
+ /* Nothing to roll back */
+product_info_parsing_failed:
+ mali_pm_terminate();
+pm_init_failed:
+ if (is_pmu_enabled)
+ {
+ mali_pmu_delete(mali_pmu_get_global_pmu_core());
+ }
+parse_pmu_config_failed:
+parse_gpu_base_address_failed:
+parse_memory_config_failed:
+ mali_memory_terminate();
+memory_init_failed:
+ _mali_osk_resources_term(&arch_configuration, num_resources);
+osk_resources_init_failed:
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_term();
+#endif
+ mali_session_terminate();
+session_init_failed:
+ return err;
+}
+
+void mali_terminate_subsystems(void)
+{
+ struct mali_pmu_core *pmu;
+
+ MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
+
+ /* shut down subsystems in reverse order from startup */
+
+ mali_pm_always_on(MALI_TRUE); /* Mali will be powered off once PM subsystem terminates */
+
+#ifdef CONFIG_MALI400_GPU_UTILIZATION
+ mali_utilization_term();
+#endif
+
+ mali_pp_scheduler_terminate();
+ mali_gp_scheduler_terminate();
+ mali_scheduler_terminate();
+
+ mali_delete_clusters(); /* Delete clusters even if config parsing failed. */
+
+ if (_MALI_PRODUCT_ID_MALI450 == global_product_id)
+ {
+ mali_dlbu_terminate();
+ }
+
+ mali_mmu_terminate();
+
+ pmu = mali_pmu_get_global_pmu_core();
+ if (NULL != pmu)
+ {
+ mali_pmu_delete(pmu);
+ }
+
+ mali_pm_terminate();
+
+ mali_memory_terminate();
+
+ _mali_osk_resources_term(&arch_configuration, num_resources);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_term();
+#endif
+
+ mali_session_terminate();
+}
+
+_mali_product_id_t mali_kernel_core_get_product_id(void)
+{
+ return global_product_id;
+}
+
+void mali_kernel_core_wakeup(void)
+{
+ u32 i;
+ u32 glob_num_clusters = mali_cluster_get_glob_num_clusters();
+ struct mali_cluster *cluster;
+
+ for (i = 0; i < glob_num_clusters; i++)
+ {
+ cluster = mali_cluster_get_global_cluster(i);
+ mali_cluster_reset(cluster);
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check compatibility */
+ if ( args->version == _MALI_UK_API_VERSION )
+ {
+ args->compatible = 1;
+ }
+ else
+ {
+ args->compatible = 0;
+ }
+
+ args->version = _MALI_UK_API_VERSION; /* report our version */
+
+ /* success regardless of being compatible or not */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args )
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_notification_t *notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
+ MALI_SUCCESS;
+ }
+
+ /* receive a notification, might sleep */
+ err = _mali_osk_notification_queue_receive(queue, &notification);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ MALI_ERROR(err); /* errcode returned, pass on to caller */
+ }
+
+ /* copy the buffer to the user */
+ args->type = (_mali_uk_notification_type)notification->notification_type;
+ _mali_osk_memcpy(&args->data, notification->result_buffer, notification->result_buffer_size);
+
+ /* finished with the notification */
+ _mali_osk_notification_delete( notification );
+
+ MALI_SUCCESS; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args )
+{
+ _mali_osk_notification_t * notification;
+ _mali_osk_notification_queue_t *queue;
+
+ /* check input */
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ queue = ((struct mali_session_data *)args->ctx)->ioctl_queue;
+
+ /* if the queue does not exist we're currently shutting down */
+ if (NULL == queue)
+ {
+ MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
+ MALI_SUCCESS;
+ }
+
+ notification = _mali_osk_notification_create(args->type, 0);
+ if ( NULL == notification)
+ {
+ MALI_PRINT_ERROR( ("Failed to create notification object\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ _mali_osk_notification_queue_send(queue, notification);
+
+ MALI_SUCCESS; /* all ok */
+}
+
+_mali_osk_errcode_t _mali_ukk_open(void **context)
+{
+ struct mali_session_data *session_data;
+
+ /* allocate a struct to track this session */
+ session_data = (struct mali_session_data *)_mali_osk_calloc(1, sizeof(struct mali_session_data));
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_NOMEM);
+
+ MALI_DEBUG_PRINT(2, ("Session starting\n"));
+
+ /* create a response queue for this session */
+ session_data->ioctl_queue = _mali_osk_notification_queue_init();
+ if (NULL == session_data->ioctl_queue)
+ {
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ session_data->page_directory = mali_mmu_pagedir_alloc();
+ if (NULL == session_data->page_directory)
+ {
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_mmu_pagedir_map(session_data->page_directory, MALI_DLB_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE))
+ {
+ MALI_PRINT_ERROR(("Failed to map DLB page into session\n"));
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ if (0 != mali_dlbu_phys_addr)
+ {
+ mali_mmu_pagedir_update(session_data->page_directory, MALI_DLB_VIRT_ADDR, mali_dlbu_phys_addr, _MALI_OSK_MALI_PAGE_SIZE, MALI_CACHE_STANDARD);
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session_data))
+ {
+ mali_mmu_pagedir_free(session_data->page_directory);
+ _mali_osk_notification_queue_term(session_data->ioctl_queue);
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ *context = (void*)session_data;
+
+ /* Add session to the list of all sessions. */
+ mali_session_add(session_data);
+
+ MALI_DEBUG_PRINT(3, ("Session started\n"));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_close(void **context)
+{
+ struct mali_session_data *session;
+ MALI_CHECK_NON_NULL(context, _MALI_OSK_ERR_INVALID_ARGS);
+ session = (struct mali_session_data *)*context;
+
+ MALI_DEBUG_PRINT(3, ("Session ending\n"));
+
+ /* Remove session from list of all sessions. */
+ mali_session_remove(session);
+
+ /* Abort queued and running jobs */
+ mali_gp_scheduler_abort_session(session);
+ mali_pp_scheduler_abort_session(session);
+
+ /* Flush pending work.
+ * Needed to make sure all bottom half processing related to this
+ * session has been completed, before we free internal data structures.
+ */
+ _mali_osk_flush_workqueue(NULL);
+
+ /* Free remaining memory allocated to this session */
+ mali_memory_session_end(session);
+
+ /* Free session data structures */
+ mali_mmu_pagedir_free(session->page_directory);
+ _mali_osk_notification_queue_term(session->ioctl_queue);
+ _mali_osk_free(session);
+
+ *context = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+
+ MALI_SUCCESS;
+}
+
+#if MALI_STATE_TRACKING
+u32 _mali_kernel_core_dump_state(char* buf, u32 size)
+{
+ int n = 0; /* Number of bytes written to buf */
+
+ n += mali_gp_scheduler_dump_state(buf + n, size - n);
+ n += mali_pp_scheduler_dump_state(buf + n, size - n);
+
+ return n;
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_core.h b/drivers/media/video/samsung/mali/common/mali_kernel_core.h
new file mode 100644
index 0000000..d424c48
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_core.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_CORE_H__
+#define __MALI_KERNEL_CORE_H__
+
+#include "mali_osk.h"
+
+extern int mali_hang_check_interval;
+extern int mali_max_job_runtime;
+
+typedef enum
+{
+ _MALI_PRODUCT_ID_UNKNOWN,
+ _MALI_PRODUCT_ID_MALI200,
+ _MALI_PRODUCT_ID_MALI300,
+ _MALI_PRODUCT_ID_MALI400,
+ _MALI_PRODUCT_ID_MALI450,
+} _mali_product_id_t;
+
+_mali_osk_errcode_t mali_initialize_subsystems(void);
+
+void mali_terminate_subsystems(void);
+
+void mali_kernel_core_wakeup(void);
+
+_mali_product_id_t mali_kernel_core_get_product_id(void);
+
+u32 _mali_kernel_core_dump_state(char* buf, u32 size);
+
+#endif /* __MALI_KERNEL_CORE_H__ */
+
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.c b/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.c
new file mode 100644
index 0000000..b9f05ca
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+
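+/* MALI_PAD_INT rounds x up to the next multiple of BITS_PER_LONG, e.g. (assuming a 64-bit
+ * kernel) MALI_PAD_INT(10) == 64, so the usage bitmap always occupies whole longs. */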
+#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
+
+/**
+ * Allocate a descriptor table capable of holding 'count' mappings
+ * @param count Number of mappings in the table
+ * @return Pointer to a new table, NULL on error
+ */
+static mali_descriptor_table * descriptor_table_alloc(int count);
+
+/**
+ * Free a descriptor table
+ * @param table The table to free
+ */
+static void descriptor_table_free(mali_descriptor_table * table);
+
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries)
+{
+ mali_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(mali_descriptor_mapping));
+
+ init_entries = MALI_PAD_INT(init_entries);
+ max_entries = MALI_PAD_INT(max_entries);
+
+ if (NULL != map)
+ {
+ map->table = descriptor_table_alloc(init_entries);
+ if (NULL != map->table)
+ {
+ map->lock = _mali_osk_lock_init( (_mali_osk_lock_flags_t)(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE), 0, _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+ if (NULL != map->lock)
+ {
+ _mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic from kicking in */
+ map->max_nr_mappings_allowed = max_entries;
+ map->current_nr_mappings = init_entries;
+ return map;
+ }
+ descriptor_table_free(map->table);
+ }
+ _mali_osk_free(map);
+ }
+ return NULL;
+}
+
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map)
+{
+ descriptor_table_free(map->table);
+ _mali_osk_lock_term(map->lock);
+ _mali_osk_free(map);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *odescriptor)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ int new_descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(odescriptor);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
+ if (new_descriptor == map->current_nr_mappings)
+ {
+ /* no free descriptor, try to expand the table */
+ mali_descriptor_table * new_table, * old_table;
+ int old_nr_mappings = map->current_nr_mappings;
+
+ if (map->current_nr_mappings >= map->max_nr_mappings_allowed) goto unlock_and_exit;
+
+ map->current_nr_mappings += BITS_PER_LONG;
+ new_table = descriptor_table_alloc(map->current_nr_mappings);
+ if (NULL == new_table) goto unlock_and_exit;
+
+ old_table = map->table;
+ /* Copy only what the old, smaller table actually holds to avoid reading past its end */
+ _mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*old_nr_mappings) / BITS_PER_LONG);
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, old_nr_mappings * sizeof(void*));
+ map->table = new_table;
+ descriptor_table_free(old_table);
+ }
+
+ /* we have found a valid descriptor, set the value and usage bit */
+ _mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
+ map->table->mappings[new_descriptor] = target;
+ *odescriptor = new_descriptor;
+ err = _MALI_OSK_ERR_OK;
+
+unlock_and_exit:
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_ERROR(err);
+}
+
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*))
+{
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(map);
+ MALI_DEBUG_ASSERT_POINTER(callback);
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ /* id 0 is skipped as it's a reserved ID not mapping to anything */
+ for (i = 1; i < map->current_nr_mappings; ++i)
+ {
+ if (_mali_osk_test_bit(i, map->table->usage))
+ {
+ callback(i, map->table->mappings[i]);
+ }
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ MALI_DEBUG_ASSERT_POINTER(map);
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ *target = map->table->mappings[descriptor];
+ result = _MALI_OSK_ERR_OK;
+ }
+ else *target = NULL;
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target)
+{
+ _mali_osk_errcode_t result = _MALI_OSK_ERR_FAULT;
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ map->table->mappings[descriptor] = target;
+ result = _MALI_OSK_ERR_OK;
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+ MALI_ERROR(result);
+}
+
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor)
+{
+ void *old_value = NULL;
+
+ _mali_osk_lock_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+ if ( (descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage) )
+ {
+ old_value = map->table->mappings[descriptor];
+ map->table->mappings[descriptor] = NULL;
+ _mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
+ }
+ _mali_osk_lock_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+ return old_value;
+}
+
+static mali_descriptor_table * descriptor_table_alloc(int count)
+{
+ mali_descriptor_table * table;
+
+ table = _mali_osk_calloc(1, sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count));
+
+ if (NULL != table)
+ {
+ table->usage = (u32*)((u8*)table + sizeof(mali_descriptor_table));
+ table->mappings = (void**)((u8*)table + sizeof(mali_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ }
+
+ return table;
+}
+
+static void descriptor_table_free(mali_descriptor_table * table)
+{
+ _mali_osk_free(table);
+}
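
descriptor_table_alloc() above carves the entire table out of a single allocation. A minimal sketch of the resulting layout, assuming a 32-bit build where sizeof(unsigned long) == 4 and BITS_PER_LONG == 32 (so the bitmap term works out to count/8 bytes), with an illustrative count of 64:

/*
 * One _mali_osk_calloc() covering:
 *   sizeof(mali_descriptor_table)                   - the struct itself
 * + (sizeof(unsigned long) * 64) / 32  ==  8 bytes  - usage bitmap (64 bits)
 * + sizeof(void*) * 64                              - mappings array
 *
 * table->usage    points just past the struct;
 * table->mappings points just past the usage bitmap.
 */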
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.h b/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.h
new file mode 100644
index 0000000..82ed94d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_descriptor_mapping.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_kernel_descriptor_mapping.h
+ */
+
+#ifndef __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+#define __MALI_KERNEL_DESCRIPTOR_MAPPING_H__
+
+#include "mali_osk.h"
+
+/**
+ * The actual descriptor mapping table, never directly accessed by clients
+ */
+typedef struct mali_descriptor_table
+{
+ u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void** mappings; /**< Array of the pointers the descriptors map to */
+} mali_descriptor_table;
+
+/**
+ * The descriptor mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct mali_descriptor_mapping
+{
+ _mali_osk_lock_t *lock; /**< Lock protecting access to the mapping object */
+ int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
+ int current_nr_mappings; /**< Current number of possible mappings */
+ mali_descriptor_table * table; /**< Pointer to the current mapping table */
+} mali_descriptor_mapping;
+
+/**
+ * Create a descriptor mapping object
+ * Create a descriptor mapping capable of holding init_entries, growable up to max_entries
+ * @param init_entries Number of entries to preallocate memory for
+ * @param max_entries Maximum number of entries to support
+ * @return Pointer to a descriptor mapping object, NULL on failure
+ */
+mali_descriptor_mapping * mali_descriptor_mapping_create(int init_entries, int max_entries);
+
+/**
+ * Destroy a descriptor mapping object
+ * @param map The map to free
+ */
+void mali_descriptor_mapping_destroy(mali_descriptor_mapping * map);
+
+/**
+ * Allocate a new mapping entry (descriptor ID)
+ * Allocates a new entry in the map.
+ * @param map The map to allocate a new entry in
+ * @param target The value to map to
+ * @param descriptor Pointer which will receive the allocated descriptor ID
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_allocate_mapping(mali_descriptor_mapping * map, void * target, int *descriptor);
+
+/**
+ * Get the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to a pointer which will receive the stored value
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_get(mali_descriptor_mapping * map, int descriptor, void** target);
+
+/**
+ * Set the value mapped to by a descriptor ID
+ * @param map The map to lookup the descriptor id in
+ * @param descriptor The descriptor ID to lookup
+ * @param target Pointer to replace the current value with
+ * @return 0 on successful lookup, negative on error
+ */
+_mali_osk_errcode_t mali_descriptor_mapping_set(mali_descriptor_mapping * map, int descriptor, void * target);
+
+/**
+ * Call the specified callback function for each descriptor in map.
+ * Entire function is mutex protected.
+ * @param map The map to do callbacks for
+ * @param callback A callback function which will be called for each entry in the map
+ */
+void mali_descriptor_mapping_call_for_each(mali_descriptor_mapping * map, void (*callback)(int, void*));
+
+/**
+ * Free the descriptor ID
+ * For the descriptor to be reused it has to be freed
+ * @param map The map to free the descriptor from
+ * @param descriptor The descriptor ID to free
+ *
+ * @return old value of descriptor mapping
+ */
+void *mali_descriptor_mapping_free(mali_descriptor_mapping * map, int descriptor);
+
+#endif /* __MALI_KERNEL_DESCRIPTOR_MAPPING_H__ */
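
A minimal usage sketch of this API (the function name, the 64/1024 table sizes and the my_object pointer are illustrative only; error handling is abbreviated):

#include "mali_kernel_descriptor_mapping.h"

static _mali_osk_errcode_t descriptor_mapping_example(void *my_object)
{
	mali_descriptor_mapping *map;
	void *looked_up = NULL;
	int id;

	/* Namespace starting with 64 entries, growable to 1024 */
	map = mali_descriptor_mapping_create(64, 1024);
	if (NULL == map) return _MALI_OSK_ERR_NOMEM;

	/* Hand out an integer handle for the pointer */
	if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(map, my_object, &id))
	{
		mali_descriptor_mapping_destroy(map);
		return _MALI_OSK_ERR_FAULT;
	}

	/* Translate the handle back to the pointer (looked_up == my_object) */
	mali_descriptor_mapping_get(map, id, &looked_up);

	/* Release the handle so the ID can be reused, then tear down the map */
	mali_descriptor_mapping_free(map, id);
	mali_descriptor_mapping_destroy(map);
	return _MALI_OSK_ERR_OK;
}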
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.c b/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.c
new file mode 100644
index 0000000..8ff3d37
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+
+typedef struct os_allocation
+{
+ u32 num_pages;
+ u32 offset_start;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+} os_allocation;
+
+typedef struct os_allocator
+{
+ _mali_osk_lock_t *mutex;
+
+ /**
+ * Maximum number of pages to allocate from the OS
+ */
+ u32 num_pages_max;
+
+ /**
+ * Number of pages allocated from the OS
+ */
+ u32 num_pages_allocated;
+
+ /** CPU Usage adjustment (add to mali physical address to get cpu physical address) */
+ u32 cpu_usage_adjust;
+} os_allocator;
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block);
+static void os_allocator_release(void * ctx, void * handle);
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block );
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator);
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator);
+
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name)
+{
+ mali_physical_memory_allocator * allocator;
+ os_allocator * info;
+
+ max_allocation = (max_allocation + _MALI_OSK_CPU_PAGE_SIZE-1) & ~(_MALI_OSK_CPU_PAGE_SIZE-1);
+
+ MALI_DEBUG_PRINT(2, ("Mali OS memory allocator created with max allocation size of 0x%X bytes, cpu_usage_adjust 0x%08X\n", max_allocation, cpu_usage_adjust));
+
+ allocator = _mali_osk_malloc(sizeof(mali_physical_memory_allocator));
+ if (NULL != allocator)
+ {
+ info = _mali_osk_malloc(sizeof(os_allocator));
+ if (NULL != info)
+ {
+ info->num_pages_max = max_allocation / _MALI_OSK_CPU_PAGE_SIZE;
+ info->num_pages_allocated = 0;
+ info->cpu_usage_adjust = cpu_usage_adjust;
+
+ info->mutex = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_MEM_INFO);
+ if (NULL != info->mutex)
+ {
+ allocator->allocate = os_allocator_allocate;
+ allocator->allocate_page_table_block = os_allocator_allocate_page_table_block;
+ allocator->destroy = os_allocator_destroy;
+ allocator->stat = os_allocator_stat;
+ allocator->ctx = info;
+ allocator->name = name;
+
+ return allocator;
+ }
+ _mali_osk_free(info);
+ }
+ _mali_osk_free(allocator);
+ }
+
+ return NULL;
+}
+
+static u32 os_allocator_stat(mali_physical_memory_allocator * allocator)
+{
+ os_allocator * info;
+ info = (os_allocator*)allocator->ctx;
+ return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
+}
+
+static void os_allocator_destroy(mali_physical_memory_allocator * allocator)
+{
+ os_allocator * info;
+ MALI_DEBUG_ASSERT_POINTER(allocator);
+ MALI_DEBUG_ASSERT_POINTER(allocator->ctx);
+ info = (os_allocator*)allocator->ctx;
+ _mali_osk_lock_term(info->mutex);
+ _mali_osk_free(info);
+ _mali_osk_free(allocator);
+}
+
+static mali_physical_memory_allocation_result os_allocator_allocate(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ mali_physical_memory_allocation_result result = MALI_MEM_ALLOC_NONE;
+ u32 left;
+ os_allocator * info;
+ os_allocation * allocation;
+ int pages_allocated = 0;
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(offset);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ info = (os_allocator*)ctx;
+ left = descriptor->size - *offset;
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ /** @note this code may not work on Linux, or may require a more complex Linux implementation */
+ allocation = _mali_osk_malloc(sizeof(os_allocation));
+ if (NULL != allocation)
+ {
+ u32 os_mem_max_usage = info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE;
+ allocation->offset_start = *offset;
+ allocation->num_pages = ((left + _MALI_OSK_CPU_PAGE_SIZE - 1) & ~(_MALI_OSK_CPU_PAGE_SIZE - 1)) >> _MALI_OSK_CPU_PAGE_ORDER;
+ MALI_DEBUG_PRINT(6, ("Allocating page array of size %d bytes\n", allocation->num_pages * sizeof(struct page*)));
+
+ while (left > 0)
+ {
+ err = mali_allocation_engine_map_physical(engine, descriptor, *offset, MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, info->cpu_usage_adjust, _MALI_OSK_CPU_PAGE_SIZE);
+ if ( _MALI_OSK_ERR_OK != err)
+ {
+ if ( _MALI_OSK_ERR_NOMEM == err)
+ {
+ /* 'Partial' allocation (or, out-of-memory on first page) */
+ break;
+ }
+
+ MALI_DEBUG_PRINT(1, ("Mapping of physical memory failed\n"));
+
+ /* Fatal error, cleanup any previous pages allocated. */
+ if ( pages_allocated > 0 )
+ {
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*pages_allocated, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+ /* (*offset) doesn't need to be restored; it will not be used by the caller on failure */
+ }
+
+ pages_allocated = 0;
+
+ result = MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ break;
+ }
+
+ /* Loop iteration */
+ if (left < _MALI_OSK_CPU_PAGE_SIZE) left = 0;
+ else left -= _MALI_OSK_CPU_PAGE_SIZE;
+
+ pages_allocated++;
+
+ *offset += _MALI_OSK_CPU_PAGE_SIZE;
+ }
+
+ if (left) MALI_PRINT(("Out of memory. Mali memory allocated: %d kB Configured maximum OS memory usage: %d kB\n",
+ (info->num_pages_allocated * _MALI_OSK_CPU_PAGE_SIZE)/1024, (info->num_pages_max* _MALI_OSK_CPU_PAGE_SIZE)/1024));
+
+ /* Loop termination; decide on result */
+ if (pages_allocated)
+ {
+ MALI_DEBUG_PRINT(6, ("Allocated %d pages\n", pages_allocated));
+ if (left) result = MALI_MEM_ALLOC_PARTIAL;
+ else result = MALI_MEM_ALLOC_FINISHED;
+
+ /* Some OSes do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+ _mali_osk_cache_ensure_uncached_range_flushed( (void *)descriptor, allocation->offset_start, pages_allocated *_MALI_OSK_CPU_PAGE_SIZE );
+ allocation->num_pages = pages_allocated;
+ allocation->engine = engine; /* Necessary to make the engine's unmap call */
+ allocation->descriptor = descriptor; /* Necessary to make the engine's unmap call */
+ info->num_pages_allocated += pages_allocated;
+
+ MALI_DEBUG_PRINT(6, ("%d out of %d pages now allocated\n", info->num_pages_allocated, info->num_pages_max));
+
+ alloc_info->ctx = info;
+ alloc_info->handle = allocation;
+ alloc_info->release = os_allocator_release;
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(6, ("Releasing pages array due to no pages allocated\n"));
+ _mali_osk_free( allocation );
+ }
+ }
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return result;
+}
+
+static void os_allocator_release(void * ctx, void * handle)
+{
+ os_allocator * info;
+ os_allocation * allocation;
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(handle);
+
+ info = (os_allocator*)ctx;
+ allocation = (os_allocation*)handle;
+ engine = allocation->engine;
+ descriptor = allocation->descriptor;
+
+ MALI_DEBUG_ASSERT_POINTER( engine );
+ MALI_DEBUG_ASSERT_POINTER( descriptor );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_PRINT(6, ("Releasing %d os pages\n", allocation->num_pages));
+
+ MALI_DEBUG_ASSERT( allocation->num_pages <= info->num_pages_allocated);
+ info->num_pages_allocated -= allocation->num_pages;
+
+ mali_allocation_engine_unmap_physical( engine, descriptor, allocation->offset_start, _MALI_OSK_CPU_PAGE_SIZE*allocation->num_pages, _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_free(allocation);
+}
+
+static mali_physical_memory_allocation_result os_allocator_allocate_page_table_block(void * ctx, mali_page_table_block * block)
+{
+#if defined(CONFIG_MACH_KONA)
+#ifndef CONFIG_FORCE_MAX_ZONEORDER
+ int allocation_order = 10;
+#else
+ int allocation_order = CONFIG_FORCE_MAX_ZONEORDER - 1;
+#endif
+#else
+ int allocation_order = 11; /* _MALI_OSK_CPU_PAGE_SIZE << 6 */
+#endif
+ void *virt = NULL;
+ u32 size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+ os_allocator * info;
+
+ u32 cpu_phys_base;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ info = (os_allocator*)ctx;
+
+ /* Ensure we don't allocate more than we're supposed to from the ctx */
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW)) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ /* If the number of pages to be requested would exceed the memory
+ * limit in info->num_pages_max, reduce the size that is requested. */
+ while ( (info->num_pages_allocated + (1 << allocation_order) > info->num_pages_max)
+ && _mali_osk_mem_check_allocated(info->num_pages_max * _MALI_OSK_CPU_PAGE_SIZE) )
+ {
+ if ( allocation_order > 0 ) {
+ --allocation_order;
+ } else {
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+ }
+
+ /* Try to allocate 2^(allocation_order) pages; if that fails, retry with
+ * allocation_order-1 down to allocation_order 0 (inclusive) */
+ while ( allocation_order >= 0 )
+ {
+ size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+ virt = _mali_osk_mem_allocioregion( &cpu_phys_base, size );
+
+ if (NULL != virt) break;
+
+ --allocation_order;
+ }
+
+ if ( NULL == virt )
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate consistent memory. Is CONSISTENT_DMA_SIZE set too low?\n"));
+ /* return OOM */
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+ return MALI_MEM_ALLOC_NONE;
+ }
+
+ MALI_DEBUG_PRINT(5, ("os_allocator_allocate_page_table_block: Allocation of order %i succeeded\n",
+ allocation_order));
+
+ /* we now know the size of the allocation since we know for what
+ * allocation_order the allocation succeeded */
+ size = _MALI_OSK_CPU_PAGE_SIZE << allocation_order;
+
+
+ block->release = os_allocator_page_table_block_release;
+ block->ctx = ctx;
+ block->handle = (void*)allocation_order;
+ block->size = size;
+ block->phys_base = cpu_phys_base - info->cpu_usage_adjust;
+ block->mapping = virt;
+
+ info->num_pages_allocated += (1 << allocation_order);
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void os_allocator_page_table_block_release( mali_page_table_block *page_table_block )
+{
+ os_allocator * info;
+ u32 allocation_order;
+ u32 pages_allocated;
+
+ MALI_DEBUG_ASSERT_POINTER( page_table_block );
+
+ info = (os_allocator*)page_table_block->ctx;
+
+ MALI_DEBUG_ASSERT_POINTER( info );
+
+ allocation_order = (u32)page_table_block->handle;
+
+ pages_allocated = 1 << allocation_order;
+
+ MALI_DEBUG_ASSERT( pages_allocated * _MALI_OSK_CPU_PAGE_SIZE == page_table_block->size );
+
+ if (_MALI_OSK_ERR_OK != _mali_osk_lock_wait(info->mutex, _MALI_OSK_LOCKMODE_RW))
+ {
+ MALI_DEBUG_PRINT(1, ("allocator release: Failed to get mutex\n"));
+ return;
+ }
+
+ MALI_DEBUG_ASSERT( pages_allocated <= info->num_pages_allocated);
+ info->num_pages_allocated -= pages_allocated;
+
+ /* Adjust phys_base from mali physical address to CPU physical address */
+ _mali_osk_mem_freeioregion( page_table_block->phys_base + info->cpu_usage_adjust, page_table_block->size, page_table_block->mapping );
+
+ _mali_osk_lock_signal(info->mutex, _MALI_OSK_LOCKMODE_RW);
+}
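
To make the order-reduction retry in os_allocator_allocate_page_table_block() above concrete, a rough worked example assuming 4 kB CPU pages and the non-KONA default order of 11:

/*
 * allocation_order 11 means a request of 4 kB << 11 = 8 MB.
 * If committing 2^11 more pages would exceed num_pages_max, the order is
 * reduced before allocating; if _mali_osk_mem_allocioregion() then fails,
 * the order keeps dropping: 10 (4 MB), 9 (2 MB), ... down to 0 (one 4 kB
 * page). Only when order 0 also fails is MALI_MEM_ALLOC_NONE returned.
 */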
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.h b/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.h
new file mode 100644
index 0000000..59e6494
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_mem_os.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEM_OS_H__
+#define __MALI_KERNEL_MEM_OS_H__
+
+/**
+ * @brief Creates an object that manages allocating OS memory
+ *
+ * Creates an object that provides an interface to allocate OS memory and
+ * have it mapped into the Mali virtual memory space.
+ *
+ * The object exposes pointers to
+ * - allocate OS memory
+ * - allocate Mali page tables in OS memory
+ * - destroy the object
+ *
+ * Allocations from OS memory are of type mali_physical_memory_allocation
+ * which provides a function to release the allocation.
+ *
+ * @param max_allocation max. number of bytes that can be allocated from OS memory
+ * @param cpu_usage_adjust value to add to mali physical addresses to obtain CPU physical addresses
+ * @param name description of the allocator
+ * @return pointer to mali_physical_memory_allocator object. NULL on failure.
+ **/
+mali_physical_memory_allocator * mali_os_allocator_create(u32 max_allocation, u32 cpu_usage_adjust, const char *name);
+
+#endif /* __MALI_KERNEL_MEM_OS_H__ */
+
+
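
A minimal sketch of creating this allocator and chaining it as a physical memory provider (the 256 MB cap, zero cpu_usage_adjust and the wrapper function are illustrative, not a recommended configuration):

#include "mali_osk.h"
#include "mali_kernel_memory_engine.h"
#include "mali_kernel_mem_os.h"

static mali_physical_memory_allocator *example_setup_os_allocator(void)
{
	/* Allow up to 256 MB of OS memory; CPU and Mali use identical physical addresses */
	mali_physical_memory_allocator *os_alloc =
		mali_os_allocator_create(256 * 1024 * 1024, 0, "OS memory (example)");

	if (NULL != os_alloc)
	{
		os_alloc->alloc_order = 10; /* consulted after lower-numbered dedicated allocators */
		os_alloc->next = NULL;      /* last allocator in the chain */
	}

	return os_alloc;
}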
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.c b/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.c
new file mode 100644
index 0000000..d770e3e
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+typedef struct memory_engine
+{
+ mali_kernel_mem_address_manager * mali_address;
+ mali_kernel_mem_address_manager * process_address;
+} memory_engine;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
+{
+ memory_engine * engine;
+
+ /* Mali Address Manager need not support unmap_physical */
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);
+
+ /* Process Address Manager must support unmap_physical for OS allocation
+ * error path handling */
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
+ MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);
+
+
+ engine = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
+ if (NULL == engine) return NULL;
+
+ engine->mali_address = mali_address_manager;
+ engine->process_address = process_address_manager;
+
+ return (mali_allocation_engine)engine;
+}
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine)
+{
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ _mali_osk_free(engine);
+}
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_allocators);
+ /* ASSERT that the list member has been initialized, even if it won't be
+ * used for tracking. We need it to be initialized to see if we need to
+ * delete it from a list in the release function. */
+ MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );
+
+ if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
+ {
+ _mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ res = engine->process_address->allocate(descriptor);
+ }
+ if ( _MALI_OSK_ERR_OK == res )
+ {
+ /* address space setup OK, commit physical memory to the allocation */
+ mali_physical_memory_allocator * active_allocator = physical_allocators;
+ struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
+ u32 offset = 0;
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ if ( NULL != tracking_list )
+ {
+ /* Insert into the memory session list */
+ /* ASSERT that it is not already part of a list */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
+ _mali_osk_list_add( &descriptor->list, tracking_list );
+ }
+
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* reuse current active_allocation_tracker */
+ MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ if (NULL != active_allocator->next)
+ {
+ /* need a new allocation tracker */
+ active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
+ if (NULL != active_allocation_tracker->next)
+ {
+ active_allocation_tracker = active_allocation_tracker->next;
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_PRINT(("Memory allocate failed, could not allocate size %d kB.\n", descriptor->size/1024));
+
+ /* allocation failure, start cleanup */
+ /* loop over any potential partial allocations */
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ /* handle blank trackers which will show up during failure */
+ if (NULL != active_allocation_tracker->release)
+ {
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ }
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ /* release the address spaces */
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+ }
+ engine->mali_address->release(descriptor);
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+void mali_allocation_engine_release_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ mali_allocation_engine_release_pt1_mali_pagetables_unmap(mem_engine, descriptor);
+ mali_allocation_engine_release_pt2_physical_memory_free(mem_engine, descriptor);
+}
+
+void mali_allocation_engine_release_pt1_mali_pagetables_unmap(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* Calling: mali_address_manager_release() */
+ /* This function is allowed to be called several times, and it only does the release on the first call. */
+ engine->mali_address->release(descriptor);
+}
+
+void mali_allocation_engine_release_pt2_physical_memory_free(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+ mali_physical_memory_allocation * active_allocation_tracker;
+
+ /* Remove this from a tracking list in session_data->memory_head */
+ if ( ! _mali_osk_list_empty( &descriptor->list ) )
+ {
+ _mali_osk_list_del( &descriptor->list );
+ /* Clear the list for debug mode, catch use-after-free */
+ MALI_DEBUG_CODE( descriptor->list.next = descriptor->list.prev = NULL; )
+ }
+
+ active_allocation_tracker = &descriptor->physical_allocation;
+ while (NULL != active_allocation_tracker)
+ {
+ MALI_DEBUG_ASSERT_POINTER(active_allocation_tracker->release);
+ active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
+ active_allocation_tracker = active_allocation_tracker->next;
+ }
+
+ /* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
+ for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
+ {
+ void * buf = active_allocation_tracker;
+ active_allocation_tracker = active_allocation_tracker->next;
+ _mali_osk_free(buf);
+ }
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ engine->process_address->release(descriptor);
+ }
+}
+
+_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size)
+{
+ _mali_osk_errcode_t err;
+ memory_engine * engine = (memory_engine*)mem_engine;
+ _mali_osk_mem_mapregion_flags_t unmap_flags = (_mali_osk_mem_mapregion_flags_t)0;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X\n", phys, size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address->map_physical);
+
+ /* Handle the process address manager first, because we may need it to
+ * allocate the physical page */
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Handle OS-allocated specially, since an adjustment may be required */
+ if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == phys )
+ {
+ MALI_DEBUG_ASSERT( _MALI_OSK_CPU_PAGE_SIZE == size );
+
+ /* Set flags to use on error path */
+ unmap_flags |= _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR;
+
+ err = engine->process_address->map_physical(descriptor, offset, &phys, size);
+ /* Adjust the returned cpu physical address to a mali physical address */
+ phys -= cpu_usage_adjust;
+ }
+ else
+ {
+ u32 cpu_phys;
+ /* Adjust mali physical address to cpu physical address */
+ cpu_phys = phys + cpu_usage_adjust;
+ err = engine->process_address->map_physical(descriptor, offset, &cpu_phys, size);
+ }
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ MALI_DEBUG_PRINT(2, ("Map failed: %s %d\n", __FUNCTION__, __LINE__));
+ MALI_ERROR( err );
+ }
+ }
+
+ MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X to CPUVA 0x%08X\n", phys, size, offset, (u32)(descriptor->mapping) + offset));
+
+ /* Mali address manager must use the physical address - no point in asking
+ * it to allocate another one for us */
+ MALI_DEBUG_ASSERT( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC != phys );
+
+ err = engine->mali_address->map_physical(descriptor, offset, &phys, size);
+
+ if ( _MALI_OSK_ERR_OK != err )
+ {
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ MALI_DEBUG_PRINT( 2, ("Process address manager succeeded, but Mali Address manager failed for phys=0x%08X size=0x%08X, offset=0x%08X. Will unmap.\n", phys, size, offset));
+ engine->process_address->unmap_physical(descriptor, offset, size, unmap_flags);
+ }
+ MALI_DEBUG_PRINT(2, ("Map mali failed: %s %d\n", __FUNCTION__, __LINE__));
+ MALI_ERROR( err );
+ }
+
+ MALI_SUCCESS;
+}
+
+void mali_allocation_engine_unmap_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags )
+{
+ memory_engine * engine = (memory_engine*)mem_engine;
+
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ MALI_DEBUG_PRINT(7, ("UnMapping length 0x%08X at offset 0x%08X\n", size, offset));
+
+ MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
+ MALI_DEBUG_ASSERT_POINTER(engine->process_address);
+
+ if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
+ {
+ /* Mandatory for the process_address manager to have an unmap function */
+ engine->process_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+
+ /* Optional for the mali_address manager to have an unmap function */
+ if ( NULL != engine->mali_address->unmap_physical )
+ {
+ engine->mali_address->unmap_physical( descriptor, offset, size, unmap_flags );
+ }
+}
+
+
+_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider)
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ while ( NULL != active_allocator )
+ {
+ switch (active_allocator->allocate_page_table_block(active_allocator->ctx, descriptor))
+ {
+ case MALI_MEM_ALLOC_FINISHED:
+ MALI_SUCCESS; /* all done */
+ case MALI_MEM_ALLOC_NONE:
+ /* try next */
+ MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate PageTables: No allocation on %s, resorting to %s\n",
+ ( active_allocator->name ) ? active_allocator->name : "UNNAMED",
+ ( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
+ active_allocator = active_allocator->next;
+ break;
+ case MALI_MEM_ALLOC_PARTIAL:
+ MALI_DEBUG_PRINT(1, ("Invalid return value from allocate_page_table_block call: MALI_MEM_ALLOC_PARTIAL\n"));
+ /* FALL THROUGH */
+ case MALI_MEM_ALLOC_INTERNAL_FAILURE:
+ MALI_DEBUG_PRINT(1, ("Aborting due to allocation failure\n"));
+ active_allocator = NULL; /* end the while loop */
+ break;
+ }
+ }
+
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+}
+
+
+void mali_allocation_engine_report_allocators( mali_physical_memory_allocator * physical_provider )
+{
+ mali_physical_memory_allocator * active_allocator = physical_provider;
+ MALI_DEBUG_ASSERT_POINTER(physical_provider);
+
+ MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest numbered first) :\n"));
+ while ( NULL != active_allocator )
+ {
+ if ( NULL != active_allocator->name )
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", active_allocator->alloc_order, active_allocator->name) );
+ }
+ else
+ {
+ MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", active_allocator->alloc_order) );
+ }
+ active_allocator = active_allocator->next;
+ }
+
+}
+
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator)
+{
+ u32 sum = 0;
+ while(NULL != allocator)
+ {
+ /* Only count allocators that have set up a stat function. */
+ if(allocator->stat)
+ sum += allocator->stat(allocator);
+
+ allocator = allocator->next;
+ }
+
+ return sum;
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.h b/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.h
new file mode 100644
index 0000000..3b41cee
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_MEMORY_ENGINE_H__
+#define __MALI_KERNEL_MEMORY_ENGINE_H__
+
+typedef void * mali_allocation_engine;
+
+typedef enum { MALI_MEM_ALLOC_FINISHED, MALI_MEM_ALLOC_PARTIAL, MALI_MEM_ALLOC_NONE, MALI_MEM_ALLOC_INTERNAL_FAILURE } mali_physical_memory_allocation_result;
+
+typedef struct mali_physical_memory_allocation
+{
+ void (*release)(void * ctx, void * handle); /**< Function to call to release the physical memory */
+ void * ctx;
+ void * handle;
+ struct mali_physical_memory_allocation * next;
+} mali_physical_memory_allocation;
+
+struct mali_page_table_block;
+
+typedef struct mali_page_table_block
+{
+ void (*release)(struct mali_page_table_block *page_table_block);
+ void * ctx;
+ void * handle;
+ u32 size; /**< In bytes, should be a multiple of MALI_MMU_PAGE_SIZE to avoid internal fragmentation */
+ u32 phys_base; /**< Mali physical address */
+ mali_io_address mapping;
+} mali_page_table_block;
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+typedef enum
+{
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE = 0x1,
+ MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE = 0x2,
+} mali_memory_allocation_flag;
+
+/**
+ * Supplying this 'magic' physical address requests that the OS allocate the
+ * physical address at page commit time, rather than committing a specific page
+ */
+#define MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC ((u32)(-1))
+
+typedef struct mali_memory_allocation
+{
+ /* Information about the allocation */
+ void * mapping; /**< CPU virtual address where the memory is mapped at */
+ u32 mali_address; /**< The Mali seen address of the memory allocation */
+ u32 size; /**< Size of the allocation */
+ u32 permission; /**< Permission settings */
+ mali_memory_allocation_flag flags;
+ u32 cache_settings; /* type: mali_memory_cache_settings, found in <linux/mali/mali_utgard_uk_types.h>; the UMP DD breaks if we include it here */
+
+ _mali_osk_lock_t * lock;
+
+ /* Manager specific information pointers */
+ void * mali_addr_mapping_info; /**< Mali address allocation specific info */
+ void * process_addr_mapping_info; /**< Mapping manager specific info */
+
+ mali_physical_memory_allocation physical_allocation;
+
+ _mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
+} mali_memory_allocation;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+typedef struct mali_physical_memory_allocator
+{
+ mali_physical_memory_allocation_result (*allocate)(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+ mali_physical_memory_allocation_result (*allocate_page_table_block)(void * ctx, mali_page_table_block * block); /* MALI_MEM_ALLOC_PARTIAL not allowed */
+ void (*destroy)(struct mali_physical_memory_allocator * allocator);
+ u32 (*stat)(struct mali_physical_memory_allocator * allocator);
+ void * ctx;
+ const char * name; /**< Descriptive name for use in mali_allocation_engine_report_allocators, or NULL */
+ u32 alloc_order; /**< Order in which the allocations should happen */
+ struct mali_physical_memory_allocator * next;
+} mali_physical_memory_allocator;
+
+typedef struct mali_kernel_mem_address_manager
+{
+ _mali_osk_errcode_t (*allocate)(mali_memory_allocation *); /**< Function to call to reserve an address */
+ void (*release)(mali_memory_allocation *); /**< Function to call to free the address allocated */
+
+ /**
+ * Function called for each physical sub allocation.
+ * Called for each physical block allocated by the physical memory manager.
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] offset Offset from the start of the range
+ * @param[in,out] phys_addr A pointer to the physical address of the start of the
+ * physical block. When *phys_addr == MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC
+ * is used, this requests the function to allocate the physical page
+ * itself, and return it through the pointer provided.
+ * @param[in] size Length in bytes of the physical block
+ * @return _MALI_OSK_ERR_OK on success.
+ * A value of type _mali_osk_errcode_t other than _MALI_OSK_ERR_OK indicates failure.
+ * Specifically, _MALI_OSK_ERR_UNSUPPORTED indicates that the function
+ * does not support allocating physical pages itself.
+ */
+ _mali_osk_errcode_t (*map_physical)(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+
+ /**
+ * Function called to remove a physical sub allocation.
+ * Called on error paths where one of the address managers fails.
+ *
+ * @note this is optional. For address managers where this is not
+ * implemented, the value of this member is NULL. The memory engine
+ * currently does not require the mali address manager to be able to
+ * unmap individual pages, but the process address manager must have this
+ * capability.
+ *
+ * @param[in] descriptor The memory descriptor in question
+ * @param[in] offset Offset from the start of the range
+ * @param[in] size Length in bytes of the physical block
+ * @param[in] flags flags to use on a per-page basis. For OS-allocated
+ * physical pages, this must include _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR.
+ */
+ void (*unmap_physical)(mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags);
+
+} mali_kernel_mem_address_manager;
+
+mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager);
+
+void mali_allocation_engine_destroy(mali_allocation_engine engine);
+
+int mali_allocation_engine_allocate_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_provider, _mali_osk_list_t *tracking_list );
+void mali_allocation_engine_release_memory(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+void mali_allocation_engine_release_pt1_mali_pagetables_unmap(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+void mali_allocation_engine_release_pt2_physical_memory_free(mali_allocation_engine engine, mali_memory_allocation * descriptor);
+
+int mali_allocation_engine_map_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size);
+void mali_allocation_engine_unmap_physical(mali_allocation_engine engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags);
+
+int mali_allocation_engine_allocate_page_tables(mali_allocation_engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider);
+
+void mali_allocation_engine_report_allocators(mali_physical_memory_allocator * physical_provider);
+
+u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator);
+
+#endif /* __MALI_KERNEL_MEMORY_ENGINE_H__ */
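
A minimal sketch of how the engine ties the address managers and a physical-allocator chain together (the example_ names are hypothetical; the descriptor is assumed to be fully initialised by the caller, including its list member):

#include "mali_osk.h"
#include "mali_kernel_memory_engine.h"

/* Hypothetical, fully populated address managers; the process manager must
 * also provide unmap_physical for the OS-allocation error path. */
extern mali_kernel_mem_address_manager example_mali_address_manager;
extern mali_kernel_mem_address_manager example_process_address_manager;

static _mali_osk_errcode_t example_allocate(mali_memory_allocation *descriptor,
                                            mali_physical_memory_allocator *provider,
                                            _mali_osk_list_t *session_list)
{
	mali_allocation_engine engine;

	engine = mali_allocation_engine_create(&example_mali_address_manager,
	                                       &example_process_address_manager);
	if (NULL == engine) return _MALI_OSK_ERR_NOMEM;

	/* Reserve Mali (and optionally CPU) address space, then commit physical
	 * memory from the provider chain into it */
	if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(engine, descriptor,
	                                                               provider, session_list))
	{
		mali_allocation_engine_destroy(engine);
		return _MALI_OSK_ERR_FAULT;
	}

	/* ... use the allocation ... */

	mali_allocation_engine_release_memory(engine, descriptor);
	mali_allocation_engine_destroy(engine);
	return _MALI_OSK_ERR_OK;
}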
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_utilization.c b/drivers/media/video/samsung/mali/common/mali_kernel_utilization.c
new file mode 100644
index 0000000..a374dbf
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_utilization.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_platform.h"
+
+/* Define how often to calculate and report GPU utilization, in milliseconds */
+#define MALI_GPU_UTILIZATION_TIMEOUT 1000
+
+static _mali_osk_lock_t *time_data_lock;
+
+static _mali_osk_atomic_t num_running_cores;
+
+static u64 period_start_time = 0;
+static u64 work_start_time = 0;
+static u64 accumulated_work_time = 0;
+
+static _mali_osk_timer_t *utilization_timer = NULL;
+static mali_bool timer_running = MALI_FALSE;
+
+
+static void calculate_gpu_utilization(void* arg)
+{
+ u64 time_now;
+ u64 time_period;
+ u32 leading_zeroes;
+ u32 shift_val;
+ u32 work_normalized;
+ u32 period_normalized;
+ u32 utilization;
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (accumulated_work_time == 0 && work_start_time == 0)
+ {
+ /* Don't reschedule timer, this will be started if new work arrives */
+ timer_running = MALI_FALSE;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* No work done for this period, report zero usage */
+ mali_gpu_utilization_handler(0);
+
+ return;
+ }
+
+ time_now = _mali_osk_time_get_ns();
+ time_period = time_now - period_start_time;
+
+ /* If we are currently busy, update working period up to now */
+ if (work_start_time != 0)
+ {
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = time_now;
+ }
+
+ /*
+ * We have two 64-bit values, a dividend and a divisor.
+ * To avoid depending on a 64-bit division routine, we first shift both values
+ * down equally.
+ * We then shift the dividend up or the divisor down so the result ends up
+ * expressed in parts of 256.
+ */
+
+ /* Shift the 64-bit values down so they fit inside a 32-bit integer */
+ leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ shift_val = 32 - leading_zeroes;
+ work_normalized = (u32)(accumulated_work_time >> shift_val);
+ period_normalized = (u32)(time_period >> shift_val);
+
+ /*
+ * Now, we should report the usage in parts of 256
+ * this means we must shift the dividend up or the divisor down by 8
+ * (we could do a combination, but for simplicity we use just one;
+ * the end result is good enough anyway)
+ */
+ if (period_normalized > 0x00FFFFFF)
+ {
+ /* The divisor is so big that it is safe to shift it down */
+ period_normalized >>= 8;
+ }
+ else
+ {
+ /*
+ * The divisor is so small that we can shift up the dividend without losing any data.
+ * (dividend is always smaller than the divisor)
+ */
+ work_normalized <<= 8;
+ }
+
+ utilization = work_normalized / period_normalized;
+
+ accumulated_work_time = 0;
+ period_start_time = time_now; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+
+
+ mali_gpu_utilization_handler(utilization);
+}
+
+_mali_osk_errcode_t mali_utilization_init(void)
+{
+ time_data_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+
+ if (NULL == time_data_lock)
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ _mali_osk_atomic_init(&num_running_cores, 0);
+
+ utilization_timer = _mali_osk_timer_init();
+ if (NULL == utilization_timer)
+ {
+ _mali_osk_lock_term(time_data_lock);
+ return _MALI_OSK_ERR_FAULT;
+ }
+ _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_utilization_suspend(void)
+{
+ if (NULL != utilization_timer)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ }
+}
+
+void mali_utilization_term(void)
+{
+ if (NULL != utilization_timer)
+ {
+ _mali_osk_timer_del(utilization_timer);
+ timer_running = MALI_FALSE;
+ _mali_osk_timer_term(utilization_timer);
+ utilization_timer = NULL;
+ }
+
+ _mali_osk_atomic_term(&num_running_cores);
+
+ _mali_osk_lock_term(time_data_lock);
+}
+
+void mali_utilization_core_start(u64 time_now)
+{
+ if (_mali_osk_atomic_inc_return(&num_running_cores) == 1)
+ {
+ /*
+ * We went from zero cores working, to one core working,
+ * we now consider the entire GPU for being busy
+ */
+
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (time_now < period_start_time)
+ {
+ /*
+ * This might happen if calculate_gpu_utilization() managed to run
+ * between the sampling of time_now and our grabbing of the lock above
+ */
+ time_now = period_start_time;
+ }
+
+ work_start_time = time_now;
+ if (timer_running != MALI_TRUE)
+ {
+ timer_running = MALI_TRUE;
+ period_start_time = work_start_time; /* starting a new period */
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(MALI_GPU_UTILIZATION_TIMEOUT));
+ }
+ else
+ {
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+ }
+}
+
+void mali_utilization_core_end(u64 time_now)
+{
+ if (_mali_osk_atomic_dec_return(&num_running_cores) == 0)
+ {
+ /*
+ * No more cores are working, so accumulate the time we were busy.
+ */
+ _mali_osk_lock_wait(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (time_now < work_start_time)
+ {
+ /*
+ * This might happen if calculate_gpu_utilization() managed to run
+ * between the sampling of time_now and our grabbing of the lock above
+ */
+ time_now = work_start_time;
+ }
+
+ accumulated_work_time += (time_now - work_start_time);
+ work_start_time = 0;
+
+ _mali_osk_lock_signal(time_data_lock, _MALI_OSK_LOCKMODE_RW);
+ }
+}
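
A worked example of the fixed-point normalization in calculate_gpu_utilization() above, with made-up numbers and assuming _mali_osk_clz(0) == 32 so no initial down-shift is needed:

/*
 * accumulated_work_time = 250 ms  = 250,000,000 ns
 * time_period           = 1000 ms = 1,000,000,000 ns  (fits in 32 bits, shift_val = 0)
 *
 * period_normalized (1,000,000,000) is larger than 0x00FFFFFF, so it is
 * shifted down by 8:  1,000,000,000 >> 8 = 3,906,250
 *
 * utilization = 250,000,000 / 3,906,250 = 64, i.e. 64/256 = 25% busy,
 * which is the value passed to mali_gpu_utilization_handler().
 */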
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_utilization.h b/drivers/media/video/samsung/mali/common/mali_kernel_utilization.h
new file mode 100644
index 0000000..1f60517
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_utilization.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_UTILIZATION_H__
+#define __MALI_KERNEL_UTILIZATION_H__
+
+#include "mali_osk.h"
+
+/**
+ * Initialize/start the Mali GPU utilization metrics reporting.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_utilization_init(void);
+
+/**
+ * Terminate the Mali GPU utilization metrics reporting
+ */
+void mali_utilization_term(void);
+
+/**
+ * Should be called when a core is about to start executing a job
+ */
+void mali_utilization_core_start(u64 time_now);
+
+/**
+ * Should be called to stop the utilization timer during system suspend
+ */
+void mali_utilization_suspend(void);
+
+/**
+ * Should be called when a core has finished executing a job
+ */
+void mali_utilization_core_end(u64 time_now);
+
+
+#endif /* __MALI_KERNEL_UTILIZATION_H__ */
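
A minimal sketch of how a job scheduler would be expected to drive this interface (the example_ hooks are illustrative, not actual scheduler entry points):

#include "mali_osk.h"
#include "mali_kernel_utilization.h"

static void example_on_job_started(void)
{
	/* Going from zero to one running core (re)starts the measurement period */
	mali_utilization_core_start(_mali_osk_time_get_ns());
}

static void example_on_job_finished(void)
{
	/* Going from one to zero running cores accumulates the busy time */
	mali_utilization_core_end(_mali_osk_time_get_ns());
}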
diff --git a/drivers/media/video/samsung/mali/common/mali_kernel_vsync.c b/drivers/media/video/samsung/mali/common/mali_kernel_vsync.c
new file mode 100644
index 0000000..63c9f5b
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_kernel_vsync.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_ukk.h"
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_osk_profiling.h"
+#endif
+
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
+{
+ _mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
+ MALI_IGNORE(event); /* event is not used for release code, and that is OK */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ /*
+ * Manually generate user space events in kernel space.
+ * This saves user space from calling kernel space twice in this case.
+ * We just need to remember to add pid and tid manually.
+ */
+ if ( event==_MALI_UK_VSYNC_EVENT_BEGIN_WAIT)
+ {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+
+ if (event==_MALI_UK_VSYNC_EVENT_END_WAIT)
+ {
+
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
+ _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+ }
+#endif
+
+ MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
+ MALI_SUCCESS;
+}
+
diff --git a/drivers/media/video/samsung/mali/common/mali_l2_cache.c b/drivers/media/video/samsung/mali/common/mali_l2_cache.c
new file mode 100644
index 0000000..b7267f1
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_l2_cache.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+#include "mali_l2_cache.h"
+#include "mali_hw_core.h"
+#include "mali_pm.h"
+
+/**
+ * Size of the Mali L2 cache registers in bytes
+ */
+#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
+
+#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3
+
+/**
+ * Mali L2 cache register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_l2_cache_register {
+ MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
+ /*unused = 0x000C */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, /**< Limit of outstanding read requests */
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
+} mali_l2_cache_register;
+
+/**
+ * Mali L2 cache commands
+ * These are the commands that can be sent to the Mali L2 cache unit
+ */
+typedef enum mali_l2_cache_command
+{
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
+ /* Read HW TRM carefully before adding/using other commands than the clear above */
+} mali_l2_cache_command;
+
+/**
+ * Mali L2 cache enable flags
+ * These are the bits that can be set in the Mali L2 cache enable register
+ */
+typedef enum mali_l2_cache_enable
+{
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+} mali_l2_cache_enable;
+
+/**
+ * Mali L2 cache status bits
+ */
+typedef enum mali_l2_cache_status
+{
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+} mali_l2_cache_status;
+
+/**
+ * Definition of the L2 cache core struct
+ * Used to track a L2 cache unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_l2_cache_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ u32 core_id; /**< Unique core ID */
+ _mali_osk_lock_t *command_lock; /**< Serialize all L2 cache commands */
+ _mali_osk_lock_t *counter_lock; /**< Synchronize L2 cache counter access */
+ u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+};
+
+#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+
+static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+static u32 mali_global_num_l2_cache_cores = 0;
+
+int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+
+/* Local helper functions */
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
+
+
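+/* Create and reset an L2 cache core object for the given resource and register it in the global core list */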
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+{
+ struct mali_l2_cache_core *cache = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+
+ if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES)
+ {
+ MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+ return NULL;
+ }
+
+ cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
+ if (NULL != cache)
+ {
+ cache->core_id = mali_global_num_l2_cache_cores;
+ cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE))
+ {
+ cache->command_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE,
+ 0, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
+ if (NULL != cache->command_lock)
+ {
+ cache->counter_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE,
+ 0, _MALI_OSK_LOCK_ORDER_L2_COUNTER);
+ if (NULL != cache->counter_lock)
+ {
+ if (_MALI_OSK_ERR_OK == mali_l2_cache_reset(cache))
+ {
+ mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
+ mali_global_num_l2_cache_cores++;
+
+ return cache;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to reset L2 cache core %s\n", cache->hw_core.description));
+ }
+
+ _mali_osk_lock_term(cache->counter_lock);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
+ }
+
+ _mali_osk_lock_term(cache->command_lock);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
+ }
+
+ mali_hw_core_delete(&cache->hw_core);
+ }
+
+ _mali_osk_free(cache);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ }
+
+ return NULL;
+}
+
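+/* Restore the register defaults, unregister the core from the global list and free all associated resources */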
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
+{
+ u32 i;
+
+ /* reset to defaults */
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
+
+ _mali_osk_lock_term(cache->counter_lock);
+ _mali_osk_lock_term(cache->command_lock);
+ mali_hw_core_delete(&cache->hw_core);
+
+ for (i = 0; i < mali_global_num_l2_cache_cores; i++)
+ {
+ if (mali_global_l2_cache_cores[i] == cache)
+ {
+ mali_global_l2_cache_cores[i] = NULL;
+ mali_global_num_l2_cache_cores--;
+ }
+ }
+
+ _mali_osk_free(cache);
+}
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+{
+ return cache->core_id;
+}
+
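+/* Select the source for performance counter 0; the hardware register is only written while the core is powered on */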
+mali_bool mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+{
+ u32 value = 0; /* disabled src */
+ mali_bool core_is_on;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ core_is_on = mali_l2_cache_lock_power_state(cache);
+
+ _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ cache->counter_src0 = counter;
+
+ if (MALI_HW_CORE_NO_COUNTER != counter)
+ {
+ value = counter;
+ }
+
+ if (MALI_TRUE == core_is_on)
+ {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
+ }
+
+ _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ mali_l2_cache_unlock_power_state(cache);
+
+ return MALI_TRUE;
+}
+
+mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
+{
+ u32 value = 0; /* disabled src */
+ mali_bool core_is_on;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ core_is_on = mali_l2_cache_lock_power_state(cache);
+
+ _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ cache->counter_src1 = counter;
+
+ if (MALI_HW_CORE_NO_COUNTER != counter)
+ {
+ value = counter;
+ }
+
+ if (MALI_TRUE == core_is_on)
+ {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
+ }
+
+ _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ mali_l2_cache_unlock_power_state(cache);
+
+ return MALI_TRUE;
+}
+
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
+{
+ return cache->counter_src0;
+}
+
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
+{
+ return cache->counter_src1;
+}
+
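+/* Report both counter sources and, for enabled counters, their current hardware values; caller must hold the PM lock with the core powered on */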
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+{
+ MALI_DEBUG_ASSERT(NULL != src0);
+ MALI_DEBUG_ASSERT(NULL != value0);
+ MALI_DEBUG_ASSERT(NULL != src1);
+ MALI_DEBUG_ASSERT(NULL != value1);
+
+ /* Caller must hold the PM lock and know that we are powered on */
+
+ _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ *src0 = cache->counter_src0;
+ *src1 = cache->counter_src1;
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
+ {
+ *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
+ {
+ *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ }
+
+ _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
+{
+ if (MALI_MAX_NUMBER_OF_L2_CACHE_CORES > index)
+ {
+ return mali_global_l2_cache_cores[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
+{
+ return mali_global_num_l2_cache_cores;
+}
+
+u32 mali_l2_cache_core_get_max_num_l2_cores(void)
+{
+ return MALI_MAX_NUMBER_OF_L2_CACHE_CORES;
+}
+
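+/* Bring the L2 cache back to a known state: invalidate it, re-enable it and restore any active performance counters */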
+_mali_osk_errcode_t mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+{
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ mali_l2_cache_invalidate_all(cache);
+
+ /* Enable cache */
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
+
+ /* Restart any performance counters (if enabled) */
+ _mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
+ {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
+ {
+ mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
+ }
+
+ _mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_l2_cache_invalidate_all(struct mali_l2_cache_core *cache)
+{
+ return mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+}
+
+_mali_osk_errcode_t mali_l2_cache_invalidate_pages(struct mali_l2_cache_core *cache, u32 *pages, u32 num_pages)
+{
+ u32 i;
+ _mali_osk_errcode_t ret1, ret = _MALI_OSK_ERR_OK;
+
+ for (i = 0; i < num_pages; i++)
+ {
+ ret1 = mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[i]);
+ if (_MALI_OSK_ERR_OK != ret1)
+ {
+ ret = ret1;
+ }
+ }
+
+ return ret;
+}
+
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
+{
+ /*
+ * Take PM lock and check power state.
+ * Returns MALI_TRUE if module is powered on.
+ * Power state will not change until mali_l2_cache_unlock_power_state() is called.
+ */
+ mali_pm_lock();
+ return mali_pm_is_powered_on();
+}
+
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
+{
+ /* Release PM lock */
+ mali_pm_unlock();
+}
+
+/* -------- local helper functions below -------- */
+
+
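+/* Write a command register after a bounded busy-wait for the command handler to go idle; commands are serialized by the command lock */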
+static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
+{
+ int i = 0;
+ const int loop_count = 100000;
+
+ /*
+ * Grab lock in order to send commands to the L2 cache in a serialized fashion.
+ * The L2 cache will ignore commands if it is busy.
+ */
+ _mali_osk_lock_wait(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* First, wait for L2 cache command handler to go idle */
+
+ for (i = 0; i < loop_count; i++)
+ {
+ if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
+ {
+ break;
+ }
+ }
+
+ if (i == loop_count)
+ {
+ _mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
+ MALI_ERROR( _MALI_OSK_ERR_FAULT );
+ }
+
+ /* then issue the command */
+ mali_hw_core_register_write(&cache->hw_core, reg, val);
+
+ _mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
+
+ MALI_SUCCESS;
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_l2_cache.h b/drivers/media/video/samsung/mali/common/mali_l2_cache.h
new file mode 100644
index 0000000..5a8e4da
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_l2_cache.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_KERNEL_L2_CACHE_H__
+#define __MALI_KERNEL_L2_CACHE_H__
+
+#include "mali_osk.h"
+
+struct mali_l2_cache_core;
+
+_mali_osk_errcode_t mali_l2_cache_initialize(void);
+void mali_l2_cache_terminate(void);
+
+struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t * resource);
+void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
+
+u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+
+mali_bool mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
+mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
+u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
+u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
+void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
+struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
+u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
+u32 mali_l2_cache_core_get_max_num_l2_cores(void);
+
+_mali_osk_errcode_t mali_l2_cache_reset(struct mali_l2_cache_core *cache);
+
+_mali_osk_errcode_t mali_l2_cache_invalidate_all(struct mali_l2_cache_core *cache);
+_mali_osk_errcode_t mali_l2_cache_invalidate_pages(struct mali_l2_cache_core *cache, u32 *pages, u32 num_pages);
+
+mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
+void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
+
+#endif /* __MALI_KERNEL_L2_CACHE_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_mem_validation.c b/drivers/media/video/samsung/mali/common/mali_mem_validation.c
new file mode 100644
index 0000000..ea9c428
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_mem_validation.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_mem_validation.h"
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#define MALI_INVALID_MEM_ADDR 0xFFFFFFFF
+
+typedef struct
+{
+ u32 phys_base; /**< Mali physical base of the memory, page aligned */
+ u32 size; /**< size in bytes of the memory, multiple of page size */
+} _mali_mem_validation_t;
+
+static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
+
+_mali_osk_errcode_t mali_mem_validation_add_range(const _mali_osk_resource_t *resource)
+{
+ /* Check that no other MEM_VALIDATION resources exist */
+ if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base)
+ {
+ MALI_PRINT_ERROR(("Failed to add MEM_VALIDATION resource %s; another range is already specified\n", resource->description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ /* Check restrictions on page alignment */
+ if ((0 != (resource->base & (~_MALI_OSK_CPU_PAGE_MASK))) ||
+ (0 != (resource->size & (~_MALI_OSK_CPU_PAGE_MASK))))
+ {
+ MALI_PRINT_ERROR(("Failed to add MEM_VALIDATION resource %s; incorrect alignment\n", resource->description));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ mali_mem_validator.phys_base = resource->base;
+ mali_mem_validator.size = resource->size;
+ MALI_DEBUG_PRINT(2, ("Memory Validator '%s' installed for Mali physical address base=0x%08X, size=0x%08X\n",
+ resource->description, mali_mem_validator.phys_base, mali_mem_validator.size));
+
+ return _MALI_OSK_ERR_OK;
+}
+
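+/* Verify that a page-aligned, non-overflowing physical range lies entirely within the registered MEM_VALIDATION range */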
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size)
+{
+ if (phys_addr < (phys_addr + size)) /* Don't allow overflow (or zero size) */
+ {
+ if ((0 == ( phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+ (0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK))))
+ {
+ if ((phys_addr >= mali_mem_validator.phys_base) &&
+ ((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
+ (phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
+ ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) )
+ {
+ MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
+ return _MALI_OSK_ERR_OK;
+ }
+ }
+ }
+
+ MALI_PRINT_ERROR(("MALI PHYSICAL RANGE VALIDATION ERROR: The range supplied was: phys_base=0x%08X, size=0x%08X\n", phys_addr, size));
+
+ return _MALI_OSK_ERR_FAULT;
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_mem_validation.h b/drivers/media/video/samsung/mali/common/mali_mem_validation.h
new file mode 100644
index 0000000..2043b44
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_mem_validation.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEM_VALIDATION_H__
+#define __MALI_MEM_VALIDATION_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_mem_validation_add_range(const _mali_osk_resource_t * resource);
+_mali_osk_errcode_t mali_mem_validation_check(u32 phys_addr, u32 size);
+
+#endif /* __MALI_MEM_VALIDATION_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_memory.c b/drivers/media/video/samsung/mali/common/mali_memory.c
new file mode 100644
index 0000000..75506ed
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_memory.c
@@ -0,0 +1,1319 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_mem_validation.h"
+#include "mali_memory.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_memory_engine.h"
+#include "mali_block_allocator.h"
+#include "mali_kernel_mem_os.h"
+#include "mali_session.h"
+#include "mali_l2_cache.h"
+#include "mali_cluster.h"
+#include "mali_group.h"
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+#include "ump_kernel_interface.h"
+#endif
+
+/* kernel side OS functions and user-kernel interface */
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_ukk.h"
+#include "mali_osk_list.h"
+#include "mali_osk_bitops.h"
+
+/**
+ * Per-session memory descriptor mapping table sizes
+ */
+#define MALI_MEM_DESCRIPTORS_INIT 64
+#define MALI_MEM_DESCRIPTORS_MAX 65536
+
+typedef struct dedicated_memory_info
+{
+ u32 base;
+ u32 size;
+ struct dedicated_memory_info * next;
+} dedicated_memory_info;
+
+/* types used for external_memory and ump_memory physical memory allocators, which are using the mali_allocation_engine */
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+typedef struct ump_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size_allocated;
+ ump_dd_handle ump_mem;
+} ump_mem_allocation ;
+#endif
+
+typedef struct external_mem_allocation
+{
+ mali_allocation_engine * engine;
+ mali_memory_allocation * descriptor;
+ u32 initial_offset;
+ u32 size;
+} external_mem_allocation;
+
+/**
+ * @brief Internal function for unmapping memory
+ *
+ * Worker function for unmapping memory from a user-process. We assume that the
+ * session/descriptor's lock was obtained before entry. For example, the
+ * wrapper _mali_ukk_mem_munmap() will lock the descriptor, then call this
+ * function to do the actual unmapping. mali_memory_core_session_end() could
+ * also call this directly (depending on compilation options), having locked
+ * the descriptor.
+ *
+ * This function will fail if it is unable to put the MMU in stall mode (which
+ * might be the case if a page fault is also being processed).
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args );
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static void ump_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0*/
+
+
+static void external_memory_release(void * ctx, void * handle);
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info);
+
+
+/* nop functions */
+
+/* mali address manager needs to allocate page tables on allocate, write to page table(s) on map, write to page table(s) and release page tables on release */
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor); /* validates the range, allocates memory for the page tables if needed */
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size);
+static void mali_address_manager_release(mali_memory_allocation * descriptor);
+
+/* MMU variables */
+
+typedef struct mali_mmu_page_table_allocation
+{
+ _mali_osk_list_t list;
+ u32 * usage_map;
+ u32 usage_count;
+ u32 num_pages;
+ mali_page_table_block pages;
+} mali_mmu_page_table_allocation;
+
+typedef struct mali_mmu_page_table_allocations
+{
+ _mali_osk_lock_t *lock;
+ _mali_osk_list_t partial;
+ _mali_osk_list_t full;
+ /* we never hold on to an empty allocation */
+} mali_mmu_page_table_allocations;
+
+static mali_kernel_mem_address_manager mali_address_manager =
+{
+ mali_address_manager_allocate, /* allocate */
+ mali_address_manager_release, /* release */
+ mali_address_manager_map, /* map_physical */
+ NULL /* unmap_physical not present*/
+};
+
+/* the mmu page table cache */
+static struct mali_mmu_page_table_allocations page_table_cache;
+
+
+static mali_kernel_mem_address_manager process_address_manager =
+{
+ _mali_osk_mem_mapregion_init, /* allocate */
+ _mali_osk_mem_mapregion_term, /* release */
+ _mali_osk_mem_mapregion_map, /* map_physical */
+ _mali_osk_mem_mapregion_unmap /* unmap_physical */
+};
+
+static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void);
+static void mali_mmu_page_table_cache_destroy(void);
+
+static mali_allocation_engine memory_engine = NULL;
+static mali_physical_memory_allocator * physical_memory_allocators = NULL;
+
+static dedicated_memory_info * mem_region_registrations = NULL;
+
+mali_allocation_engine mali_mem_get_memory_engine(void)
+{
+ return memory_engine;
+}
+
+/* called during module init */
+_mali_osk_errcode_t mali_memory_initialize(void)
+{
+ _mali_osk_errcode_t err;
+
+ MALI_DEBUG_PRINT(2, ("Memory system initializing\n"));
+
+ err = mali_mmu_page_table_cache_create();
+ if(_MALI_OSK_ERR_OK != err)
+ {
+ MALI_ERROR(err);
+ }
+
+ memory_engine = mali_allocation_engine_create(&mali_address_manager, &process_address_manager);
+ MALI_CHECK_NON_NULL( memory_engine, _MALI_OSK_ERR_FAULT);
+
+ MALI_SUCCESS;
+}
+
+/* called if/when our module is unloaded */
+void mali_memory_terminate(void)
+{
+ MALI_DEBUG_PRINT(2, ("Memory system terminating\n"));
+
+ mali_mmu_page_table_cache_destroy();
+
+ while ( NULL != mem_region_registrations)
+ {
+ dedicated_memory_info * m;
+ m = mem_region_registrations;
+ mem_region_registrations = m->next;
+ _mali_osk_mem_unreqregion(m->base, m->size);
+ _mali_osk_free(m);
+ }
+
+ while ( NULL != physical_memory_allocators)
+ {
+ mali_physical_memory_allocator * m;
+ m = physical_memory_allocators;
+ physical_memory_allocators = m->next;
+ m->destroy(m);
+ }
+
+ if (NULL != memory_engine)
+ {
+ mali_allocation_engine_destroy(memory_engine);
+ memory_engine = NULL;
+ }
+}
+
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data * session_data)
+{
+ MALI_DEBUG_PRINT(5, ("Memory session begin\n"));
+
+ /* create descriptor mapping table */
+ session_data->descriptor_mapping = mali_descriptor_mapping_create(MALI_MEM_DESCRIPTORS_INIT, MALI_MEM_DESCRIPTORS_MAX);
+
+ if (NULL == session_data->descriptor_mapping)
+ {
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ session_data->memory_lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_MEM_SESSION);
+ if (NULL == session_data->memory_lock)
+ {
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* Init the session's memory allocation list */
+ _MALI_OSK_INIT_LIST_HEAD( &session_data->memory_head );
+
+ MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
+ MALI_SUCCESS;
+}
+
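+/* Called for each descriptor still present in the mapping table at session end: release its memory and free the descriptor */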
+static void descriptor_table_cleanup_callback(int descriptor_id, void* map_target)
+{
+ mali_memory_allocation * descriptor;
+
+ descriptor = (mali_memory_allocation*)map_target;
+
+ MALI_DEBUG_PRINT(3, ("Cleanup of descriptor %d mapping to 0x%x in descriptor table\n", descriptor_id, map_target));
+ MALI_DEBUG_ASSERT(descriptor);
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+}
+
+void mali_memory_session_end(struct mali_session_data *session_data)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_BUSY;
+
+ MALI_DEBUG_PRINT(3, ("MMU session end\n"));
+
+ if (NULL == session_data)
+ {
+ MALI_DEBUG_PRINT(1, ("No session data found during session end\n"));
+ return;
+ }
+
+ while (err == _MALI_OSK_ERR_BUSY)
+ {
+ /* Lock the session so we can modify the memory list */
+ _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+ err = _MALI_OSK_ERR_OK;
+
+ /* Free all memory engine allocations */
+ if (0 == _mali_osk_list_empty(&session_data->memory_head))
+ {
+ mali_memory_allocation *descriptor;
+ mali_memory_allocation *temp;
+ _mali_uk_mem_munmap_s unmap_args;
+
+ MALI_DEBUG_PRINT(1, ("Memory found on session usage list during session termination\n"));
+
+ unmap_args.ctx = session_data;
+
+ /* use the 'safe' list iterator, since freeing removes the active block from the list we're iterating */
+ _MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->memory_head, mali_memory_allocation, list)
+ {
+ MALI_DEBUG_PRINT(4, ("Freeing block with mali address 0x%x size %d mapped in user space at 0x%x\n",
+ descriptor->mali_address, descriptor->size, descriptor->mapping)
+ );
+ /* ASSERT that the descriptor's lock references the correct thing */
+ MALI_DEBUG_ASSERT( descriptor->lock == session_data->memory_lock );
+ /* Therefore, we have already locked the descriptor */
+
+ unmap_args.size = descriptor->size;
+ unmap_args.mapping = descriptor->mapping;
+ unmap_args.cookie = (u32)descriptor;
+
+ /*
+ * This removes the descriptor from the list, and frees the descriptor
+ *
+ * Does not handle the _MALI_OSK_SPECIFIC_INDIRECT_MMAP case, since
+ * the only OS we are aware of that requires indirect MMAP also has
+ * implicit mmap cleanup.
+ */
+ err = _mali_ukk_mem_munmap_internal( &unmap_args );
+
+ if (err == _MALI_OSK_ERR_BUSY)
+ {
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+ /*
+ * Reason for this:
+ * We were unable to stall the MMU, probably because we are in page fault handling.
+ * Sleep for a while with the session lock released, then try again.
+ * Abnormal termination of programs with running Mali jobs is a normal reason for this.
+ */
+ _mali_osk_time_ubusydelay(10);
+ break; /* Will jump back into: "while (err == _MALI_OSK_ERR_BUSY)" */
+ }
+ }
+ }
+ }
+ /* Assert that we really did free everything */
+ MALI_DEBUG_ASSERT( _mali_osk_list_empty(&session_data->memory_head) );
+
+ if (NULL != session_data->descriptor_mapping)
+ {
+ mali_descriptor_mapping_call_for_each(session_data->descriptor_mapping, descriptor_table_cleanup_callback);
+ mali_descriptor_mapping_destroy(session_data->descriptor_mapping);
+ session_data->descriptor_mapping = NULL;
+ }
+
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ /**
+ * @note Could the VMA close handler mean that we use the session data after it was freed?
+ * In which case, would need to refcount the session data, and free on VMA close
+ */
+
+ /* Free the lock */
+ _mali_osk_lock_term( session_data->memory_lock );
+
+ return;
+}
+
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+
+ u32 alloc_order = resource->alloc_order;
+
+ allocator = mali_os_allocator_create(resource->size, resource->cpu_usage_adjust, resource->description);
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to create OS memory allocator\n"));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ allocator->alloc_order = alloc_order;
+
+ /* link in the allocator: insertion into ordered list
+ * resources with the same alloc_order are ordered last-in, first-used */
+ next_allocator_list = &physical_memory_allocators;
+
+ while (NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource)
+{
+ mali_physical_memory_allocator * allocator;
+ mali_physical_memory_allocator ** next_allocator_list;
+ dedicated_memory_info * cleanup_data;
+
+ u32 alloc_order = resource->alloc_order;
+
+ /* do the low level linux operation first */
+
+ /* Request ownership of the memory */
+ if (_MALI_OSK_ERR_OK != _mali_osk_mem_reqregion(resource->base, resource->size, resource->description))
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to request memory region %s (0x%08X - 0x%08X)\n", resource->description, resource->base, resource->base + resource->size - 1));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* create generic block allocator object to handle it */
+ allocator = mali_block_allocator_create(resource->base, resource->cpu_usage_adjust, resource->size, resource->description );
+
+ if (NULL == allocator)
+ {
+ MALI_DEBUG_PRINT(1, ("Memory bank registration failed\n"));
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ /* save low level cleanup info */
+ allocator->alloc_order = alloc_order;
+
+ cleanup_data = _mali_osk_malloc(sizeof(dedicated_memory_info));
+
+ if (NULL == cleanup_data)
+ {
+ _mali_osk_mem_unreqregion(resource->base, resource->size);
+ allocator->destroy(allocator);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ cleanup_data->base = resource->base;
+ cleanup_data->size = resource->size;
+
+ cleanup_data->next = mem_region_registrations;
+ mem_region_registrations = cleanup_data;
+
+ /* link in the allocator: insertion into ordered list
+ * resources with the same alloc_order are ordered last-in, first-used */
+ next_allocator_list = &physical_memory_allocators;
+
+ while ( NULL != *next_allocator_list &&
+ (*next_allocator_list)->alloc_order < alloc_order )
+ {
+ next_allocator_list = &((*next_allocator_list)->next);
+ }
+
+ allocator->next = (*next_allocator_list);
+ (*next_allocator_list) = allocator;
+
+ MALI_SUCCESS;
+}
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+static mali_physical_memory_allocation_result ump_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ ump_dd_handle ump_mem;
+ u32 nr_blocks;
+ u32 i;
+ ump_dd_physical_block * ump_blocks;
+ ump_mem_allocation *ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof( ump_mem_allocation ) );
+ if ( NULL==ret_allocation ) return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+
+ ump_mem = (ump_dd_handle)ctx;
+
+ MALI_DEBUG_PRINT(4, ("In ump_memory_commit\n"));
+
+ nr_blocks = ump_dd_phys_block_count_get(ump_mem);
+
+ MALI_DEBUG_PRINT(4, ("Have %d blocks\n", nr_blocks));
+
+ if (nr_blocks == 0)
+ {
+ MALI_DEBUG_PRINT(1, ("No block count\n"));
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ ump_blocks = _mali_osk_malloc(sizeof(*ump_blocks)*nr_blocks );
+ if ( NULL==ump_blocks )
+ {
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ if (UMP_DD_INVALID == ump_dd_phys_blocks_get(ump_mem, ump_blocks, nr_blocks))
+ {
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free( ret_allocation );
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ /* Store away the initial offset for unmapping purposes */
+ ret_allocation->initial_offset = *offset;
+
+ for(i=0; i<nr_blocks; ++i)
+ {
+ MALI_DEBUG_PRINT(4, ("Mapping in 0x%08x size %d\n", ump_blocks[i].addr , ump_blocks[i].size));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[i].addr , 0, ump_blocks[i].size ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += ump_blocks[i].size;
+ }
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, ump_blocks[0].addr , 0, _MALI_OSK_MALI_PAGE_SIZE ))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap all previous blocks (if any) */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+
+ _mali_osk_free(ump_blocks);
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ _mali_osk_free( ump_blocks );
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->ump_mem = ump_mem;
+ ret_allocation->size_allocated = *offset - ret_allocation->initial_offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = ump_memory_release;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void ump_memory_release(void * ctx, void * handle)
+{
+ ump_dd_handle ump_mem;
+ ump_mem_allocation *allocation;
+
+ allocation = (ump_mem_allocation *)handle;
+
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ ump_mem = allocation->ump_mem;
+
+ MALI_DEBUG_ASSERT(UMP_DD_HANDLE_INVALID!=ump_mem);
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size_allocated,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+ _mali_osk_free( allocation );
+
+
+ ump_dd_reference_release(ump_mem) ;
+ return;
+}
+
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args )
+{
+ ump_dd_handle ump_mem;
+ mali_physical_memory_allocator external_memory_allocator;
+ struct mali_session_data *session_data;
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map ump memory with secure id %d into virtual memory 0x%08X, size 0x%08X\n",
+ args->secure_id, args->mali_address, args->size));
+
+ ump_mem = ump_dd_handle_create_from_secure_id( (int)args->secure_id ) ;
+
+ if ( UMP_DD_HANDLE_INVALID==ump_mem ) MALI_ERROR(_MALI_OSK_ERR_FAULT);
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor)
+ {
+ ump_dd_reference_release(ump_mem);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->cache_settings = (u32) MALI_CACHE_STANDARD;
+ descriptor->lock = session_data->memory_lock;
+
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ external_memory_allocator.allocate = ump_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = ump_mem;
+ external_memory_allocator.name = "UMP Memory";
+ external_memory_allocator.next = NULL;
+
+ _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ mali_descriptor_mapping_free(session_data->descriptor_mapping, md);
+ ump_dd_reference_release(ump_mem);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from UMP attach\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to release ump memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ descriptor = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+
+ if (NULL != descriptor)
+ {
+ _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ _mali_osk_free(descriptor);
+ }
+
+ MALI_SUCCESS;
+
+}
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER != 0 */
+
+
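+/* Map an externally managed physical range, passed via ctx as {phys_addr, size}, into the Mali virtual address space */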
+static mali_physical_memory_allocation_result external_memory_commit(void* ctx, mali_allocation_engine * engine, mali_memory_allocation * descriptor, u32* offset, mali_physical_memory_allocation * alloc_info)
+{
+ u32 * data;
+ external_mem_allocation * ret_allocation;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(engine);
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(alloc_info);
+
+ ret_allocation = _mali_osk_malloc( sizeof(external_mem_allocation) );
+
+ if ( NULL == ret_allocation )
+ {
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+
+ data = (u32*)ctx;
+
+ ret_allocation->engine = engine;
+ ret_allocation->descriptor = descriptor;
+ ret_allocation->initial_offset = *offset;
+
+ alloc_info->ctx = NULL;
+ alloc_info->handle = ret_allocation;
+ alloc_info->next = NULL;
+ alloc_info->release = external_memory_release;
+
+ MALI_DEBUG_PRINT(5, ("External map: mapping phys 0x%08X at mali virtual address 0x%08X staring at offset 0x%08X length 0x%08X\n", data[0], descriptor->mali_address, *offset, data[1]));
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, data[1]))
+ {
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory failed\n"));
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += data[1];
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ /* Map in an extra virtual guard page at the end of the VMA */
+ MALI_DEBUG_PRINT(4, ("Mapping in extra guard page\n"));
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_map_physical(engine, descriptor, *offset, data[0], 0, _MALI_OSK_MALI_PAGE_SIZE))
+ {
+ u32 size_allocated = *offset - ret_allocation->initial_offset;
+ MALI_DEBUG_PRINT(1, ("Mapping of external memory (guard page) failed\n"));
+
+ /* unmap what we previously mapped */
+ mali_allocation_engine_unmap_physical(engine, descriptor, ret_allocation->initial_offset, size_allocated, (_mali_osk_mem_mapregion_flags_t)0 );
+ _mali_osk_free(ret_allocation);
+ return MALI_MEM_ALLOC_INTERNAL_FAILURE;
+ }
+ *offset += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ ret_allocation->size = *offset - ret_allocation->initial_offset;
+
+ return MALI_MEM_ALLOC_FINISHED;
+}
+
+static void external_memory_release(void * ctx, void * handle)
+{
+ external_mem_allocation * allocation;
+
+ allocation = (external_mem_allocation *) handle;
+ MALI_DEBUG_ASSERT_POINTER( allocation );
+
+ /* At present, this is a no-op. But, it allows the mali_address_manager to
+ * do unmapping of a subrange in future. */
+
+ mali_allocation_engine_unmap_physical( allocation->engine,
+ allocation->descriptor,
+ allocation->initial_offset,
+ allocation->size,
+ (_mali_osk_mem_mapregion_flags_t)0
+ );
+
+ _mali_osk_free( allocation );
+
+ return;
+}
+
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args )
+{
+ mali_physical_memory_allocator external_memory_allocator;
+ struct mali_session_data *session_data;
+ u32 info[2];
+ mali_memory_allocation * descriptor;
+ int md;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ external_memory_allocator.allocate = external_memory_commit;
+ external_memory_allocator.allocate_page_table_block = NULL;
+ external_memory_allocator.ctx = &info[0];
+ external_memory_allocator.name = "External Memory";
+ external_memory_allocator.next = NULL;
+
+ /* check arguments */
+ /* NULL might be a valid Mali address */
+ if ( ! args->size) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ /* size must be a multiple of the system page size */
+ if ( args->size % _MALI_OSK_MALI_PAGE_SIZE ) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
+
+ MALI_DEBUG_PRINT(3,
+ ("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
+ (void*)args->phys_addr,
+ (void*)(args->phys_addr + args->size -1),
+ (void*)args->mali_address)
+ );
+
+ /* Validate the mali physical range */
+ if (_MALI_OSK_ERR_OK != mali_mem_validation_check(args->phys_addr, args->size))
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ info[0] = args->phys_addr;
+ info[1] = args->size;
+
+ descriptor = _mali_osk_calloc(1, sizeof(mali_memory_allocation));
+ if (NULL == descriptor) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ descriptor->size = args->size;
+ descriptor->mapping = NULL;
+ descriptor->mali_address = args->mali_address;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+ descriptor->process_addr_mapping_info = NULL; /* do not map to process address space */
+ descriptor->cache_settings = (u32)MALI_CACHE_STANDARD;
+ descriptor->lock = session_data->memory_lock;
+ if (args->flags & _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE)
+ {
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE;
+ }
+ _mali_osk_list_init( &descriptor->list );
+
+ _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_memory(memory_engine, descriptor, &external_memory_allocator, NULL))
+ {
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session_data->descriptor_mapping, descriptor, &md))
+ {
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+ _mali_osk_free(descriptor);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ args->cookie = md;
+
+ MALI_DEBUG_PRINT(5,("Returning from range_map_external_memory\n"));
+
+ /* All OK */
+ MALI_SUCCESS;
+}
+
+
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args )
+{
+ mali_memory_allocation * descriptor;
+ void* old_value;
+ struct mali_session_data *session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)args->ctx;
+ MALI_CHECK_NON_NULL(session_data, _MALI_OSK_ERR_INVALID_ARGS);
+
+ if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_get(session_data->descriptor_mapping, args->cookie, (void**)&descriptor))
+ {
+ MALI_DEBUG_PRINT(1, ("Invalid memory descriptor %d used to unmap external memory\n", args->cookie));
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
+ old_value = mali_descriptor_mapping_free(session_data->descriptor_mapping, args->cookie);
+
+ if (NULL != old_value)
+ {
+ _mali_osk_lock_wait( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ mali_allocation_engine_release_memory(memory_engine, descriptor);
+
+ _mali_osk_lock_signal( session_data->memory_lock, _MALI_OSK_LOCKMODE_RW );
+
+ _mali_osk_free(descriptor);
+ }
+
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ args->memory_size = 2 * 1024 * 1024 * 1024UL; /* 2GB address space */
+ args->mali_address_base = 1 * 1024 * 1024 * 1024UL; /* starting at 1GB, giving this layout: (0-1GB unused)(1GB-3GB used by Mali)(3GB-4GB unused) */
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args )
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t mali_address_manager_allocate(mali_memory_allocation * descriptor)
+{
+ struct mali_session_data *session_data;
+ u32 actual_size;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+
+ actual_size = descriptor->size;
+
+ if (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_GUARD_PAGE)
+ {
+ actual_size += _MALI_OSK_MALI_PAGE_SIZE;
+ }
+
+ return mali_mmu_pagedir_map(session_data->page_directory, descriptor->mali_address, actual_size);
+}
+
+static void mali_address_manager_release(mali_memory_allocation * descriptor)
+{
+ const u32 illegal_mali_address = 0xffffffff;
+ struct mali_session_data *session_data;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /* It is allowed to call this function several times on the same descriptor.
+ When memory is released we set the illegal_mali_address so we can early out here. */
+ if ( illegal_mali_address == descriptor->mali_address) return;
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ mali_mmu_pagedir_unmap(session_data->page_directory, descriptor->mali_address, descriptor->size);
+
+ descriptor->mali_address = illegal_mali_address ;
+}
+
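+/* Update the session's page directory so that 'size' bytes at *phys_addr become visible at descriptor->mali_address + offset */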
+static _mali_osk_errcode_t mali_address_manager_map(mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size)
+{
+ struct mali_session_data *session_data;
+ u32 mali_address;
+
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+ MALI_DEBUG_ASSERT_POINTER(phys_addr);
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ mali_address = descriptor->mali_address + offset;
+
+ MALI_DEBUG_PRINT(7, ("Mali map: mapping 0x%08X to Mali address 0x%08X length 0x%08X\n", *phys_addr, mali_address, size));
+
+ mali_mmu_pagedir_update(session_data->page_directory, mali_address, *phys_addr, size, descriptor->cache_settings);
+
+ MALI_SUCCESS;
+}
+
+/* This handler registered to mali_mmap for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args )
+{
+ struct mali_session_data *session_data;
+ mali_memory_allocation * descriptor;
+
+ /* validate input */
+ if (NULL == args) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: args was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS); }
+
+ /* Unpack arguments */
+ session_data = (struct mali_session_data *)args->ctx;
+
+ /* validate input */
+ if (NULL == session_data) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: session data was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_FAULT); }
+
+ descriptor = (mali_memory_allocation*) _mali_osk_calloc( 1, sizeof(mali_memory_allocation) );
+ if (NULL == descriptor) { MALI_DEBUG_PRINT(3,("mali_ukk_mem_mmap: descriptor was NULL\n")); MALI_ERROR(_MALI_OSK_ERR_NOMEM); }
+
+ descriptor->size = args->size;
+ descriptor->mali_address = args->phys_addr;
+ descriptor->mali_addr_mapping_info = (void*)session_data;
+
+ descriptor->process_addr_mapping_info = args->ukk_private; /* save to be used during physical manager callback */
+ descriptor->flags = MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE;
+ descriptor->cache_settings = (u32) args->cache_settings ;
+ descriptor->lock = session_data->memory_lock;
+ _mali_osk_list_init( &descriptor->list );
+
+ _mali_osk_lock_wait(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == mali_allocation_engine_allocate_memory(memory_engine, descriptor, physical_memory_allocators, &session_data->memory_head))
+ {
+ /* We do not FLUSH nor TLB_ZAP on MMAP, since we do both of those on job start*/
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+
+ args->mapping = descriptor->mapping;
+ args->cookie = (u32)descriptor;
+
+ MALI_DEBUG_PRINT(7, ("MMAP OK\n"));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ _mali_osk_lock_signal(session_data->memory_lock, _MALI_OSK_LOCKMODE_RW);
+ /* OOM, but not a fatal error */
+ MALI_DEBUG_PRINT(4, ("Memory allocation failure, OOM\n"));
+ _mali_osk_free(descriptor);
+ /* Linux will free the CPU address allocation, userspace client the Mali address allocation */
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+}
+
+static _mali_osk_errcode_t _mali_ukk_mem_munmap_internal( _mali_uk_mem_munmap_s *args )
+{
+ struct mali_session_data *session_data;
+ mali_memory_allocation * descriptor;
+
+ u32 num_groups = mali_group_get_glob_num_groups();
+ struct mali_group *group;
+ u32 i;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ session_data = (struct mali_session_data *)descriptor->mali_addr_mapping_info;
+ MALI_DEBUG_ASSERT_POINTER(session_data);
+
+ /* Unmapping the memory from the mali virtual address space.
+ It is allowed to call this function several times, which might happen if zapping below fails. */
+ mali_allocation_engine_release_pt1_mali_pagetables_unmap(memory_engine, descriptor);
+
+#ifdef MALI_UNMAP_FLUSH_ALL_MALI_L2
+ {
+ u32 number_of_clusters = mali_cluster_get_glob_num_clusters();
+ for (i = 0; i < number_of_clusters; i++)
+ {
+ struct mali_cluster *cluster;
+ cluster = mali_cluster_get_global_cluster(i);
+ if( mali_cluster_power_is_enabled_get(cluster) )
+ {
+ mali_cluster_l2_cache_invalidate_all_force(cluster);
+ }
+ }
+ }
+#endif
+
+ for (i = 0; i < num_groups; i++)
+ {
+ group = mali_group_get_glob_group(i);
+ mali_group_lock(group);
+ mali_group_remove_session_if_unused(group, session_data);
+ if (mali_group_get_session(group) == session_data)
+ {
+ /* The Zap also does the stall and disable_stall */
+ mali_bool zap_success = mali_mmu_zap_tlb(mali_group_get_mmu(group));
+ if (MALI_TRUE != zap_success)
+ {
+ MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
+ mali_group_bottom_half(group, GROUP_EVENT_MMU_PAGE_FAULT);
+ /* The bottom half will also do the unlock */
+ continue;
+ }
+ }
+ mali_group_unlock(group);
+ }
+
+ /* Removes the descriptor from the session's memory list, releases physical memory, releases descriptor */
+ mali_allocation_engine_release_pt2_physical_memory_free(memory_engine, descriptor);
+
+ _mali_osk_free(descriptor);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+/* Handler for unmapping memory for MMU builds */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args )
+{
+ mali_memory_allocation * descriptor;
+ _mali_osk_lock_t *descriptor_lock;
+ _mali_osk_errcode_t err;
+
+ descriptor = (mali_memory_allocation *)args->cookie;
+ MALI_DEBUG_ASSERT_POINTER(descriptor);
+
+ /** @note args->context unused; we use the memory_session from the cookie */
+ /* args->mapping and args->size are also discarded. They are only necessary
+ for certain do_munmap implementations. However, they could be used to check the
+ descriptor at this point. */
+
+ MALI_DEBUG_ASSERT_POINTER((struct mali_session_data *)descriptor->mali_addr_mapping_info);
+
+ descriptor_lock = descriptor->lock; /* should point to the session data lock... */
+
+ err = _MALI_OSK_ERR_BUSY;
+ while (err == _MALI_OSK_ERR_BUSY)
+ {
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_wait( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+
+ err = _mali_ukk_mem_munmap_internal( args );
+
+ if (descriptor_lock)
+ {
+ _mali_osk_lock_signal( descriptor_lock, _MALI_OSK_LOCKMODE_RW );
+ }
+
+ if (err == _MALI_OSK_ERR_BUSY)
+ {
+ /*
+ * Reason for this:
+ * We were unable to stall the MMU, probably because we are in page fault handling.
+ * Sleep for a while with the session lock released, then try again.
+ * Abnormal termination of programs with running Mali jobs is a normal reason for this.
+ */
+ _mali_osk_time_ubusydelay(10);
+ }
+ }
+
+ return err;
+}
+
+u32 _mali_ukk_report_memory_usage(void)
+{
+ return mali_allocation_engine_memory_usage(physical_memory_allocators);
+}
+
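+/* Hand out one MMU page table page, reusing a partially used cache allocation when possible or growing the cache otherwise */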
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping)
+{
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ if (0 == _mali_osk_list_empty(&page_table_cache.partial))
+ {
+ mali_mmu_page_table_allocation * alloc = _MALI_OSK_LIST_ENTRY(page_table_cache.partial.next, mali_mmu_page_table_allocation, list);
+ int page_number = _mali_osk_find_first_zero_bit(alloc->usage_map, alloc->num_pages);
+ MALI_DEBUG_PRINT(6, ("Partial page table allocation found, using page offset %d\n", page_number));
+ _mali_osk_set_nonatomic_bit(page_number, alloc->usage_map);
+ alloc->usage_count++;
+ if (alloc->num_pages == alloc->usage_count)
+ {
+ /* full, move alloc to full list*/
+ _mali_osk_list_move(&alloc->list, &page_table_cache.full);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ *table_page = (MALI_MMU_PAGE_SIZE * page_number) + alloc->pages.phys_base;
+ *mapping = (mali_io_address)((MALI_MMU_PAGE_SIZE * page_number) + (u32)alloc->pages.mapping);
+ MALI_DEBUG_PRINT(4, ("Page table allocated for VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+ else
+ {
+ mali_mmu_page_table_allocation * alloc;
+ /* no free pages, allocate a new one */
+
+ alloc = (mali_mmu_page_table_allocation *)_mali_osk_calloc(1, sizeof(mali_mmu_page_table_allocation));
+ if (NULL == alloc)
+ {
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _MALI_OSK_INIT_LIST_HEAD(&alloc->list);
+
+ if (_MALI_OSK_ERR_OK != mali_allocation_engine_allocate_page_tables(memory_engine, &alloc->pages, physical_memory_allocators))
+ {
+ MALI_DEBUG_PRINT(1, ("No more memory for page tables\n"));
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ /* create the usage map */
+ alloc->num_pages = alloc->pages.size / MALI_MMU_PAGE_SIZE;
+ alloc->usage_count = 1;
+ MALI_DEBUG_PRINT(3, ("New page table cache expansion, %d pages in new cache allocation\n", alloc->num_pages));
+ alloc->usage_map = _mali_osk_calloc(1, (((alloc->num_pages + BITS_PER_LONG - 1) & ~(BITS_PER_LONG - 1)) / BITS_PER_LONG) * sizeof(unsigned long));
+ if (NULL == alloc->usage_map)
+ {
+ MALI_DEBUG_PRINT(1, ("Failed to allocate memory to describe MMU page table cache usage\n"));
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc);
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = MALI_INVALID_PAGE;
+ *mapping = NULL;
+ MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ }
+
+ _mali_osk_set_nonatomic_bit(0, alloc->usage_map);
+
+ if (alloc->num_pages > 1)
+ {
+ _mali_osk_list_add(&alloc->list, &page_table_cache.partial);
+ }
+ else
+ {
+ _mali_osk_list_add(&alloc->list, &page_table_cache.full);
+ }
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ *table_page = alloc->pages.phys_base; /* return the first page */
+ *mapping = alloc->pages.mapping; /* Mapping for first page */
+ MALI_DEBUG_PRINT(4, ("Page table allocated: VA=0x%08X, MaliPA=0x%08X\n", *mapping, *table_page ));
+ MALI_SUCCESS;
+ }
+}
+
+void mali_mmu_release_table_page(u32 pa)
+{
+ mali_mmu_page_table_allocation * alloc, * temp_alloc;
+
+	MALI_DEBUG_PRINT_IF(1, pa & 4095, ("Bad page address 0x%08X given to mali_mmu_release_table_page\n", pa));
+
+ MALI_DEBUG_PRINT(4, ("Releasing table page 0x%08X to the cache\n", pa));
+
+ _mali_osk_lock_wait(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+
+ /* find the entry this address belongs to */
+ /* first check the partial list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ MALI_DEBUG_ASSERT(0 != _mali_osk_test_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map));
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+ if (0 == alloc->usage_count)
+ {
+ /* empty, release whole page alloc */
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(partial list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+	/* then check the full list */
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp_alloc, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ u32 start = alloc->pages.phys_base;
+ u32 last = start + (alloc->num_pages - 1) * MALI_MMU_PAGE_SIZE;
+ if (pa >= start && pa <= last)
+ {
+ _mali_osk_clear_nonatomic_bit((pa - start)/MALI_MMU_PAGE_SIZE, alloc->usage_map);
+ alloc->usage_count--;
+
+ _mali_osk_memset((void*)( ((u32)alloc->pages.mapping) + (pa - start) ), 0, MALI_MMU_PAGE_SIZE);
+
+
+ if (0 == alloc->usage_count)
+ {
+ /* empty, release whole page alloc */
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+ else
+ {
+ /* transfer to partial list */
+ _mali_osk_list_move(&alloc->list, &page_table_cache.partial);
+ }
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+ MALI_DEBUG_PRINT(4, ("(full list)Released table page 0x%08X to the cache\n", pa));
+ return;
+ }
+ }
+
+	MALI_DEBUG_PRINT(1, ("pa 0x%08X not found in the page table cache\n", pa));
+
+ _mali_osk_lock_signal(page_table_cache.lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+static _mali_osk_errcode_t mali_mmu_page_table_cache_create(void)
+{
+ page_table_cache.lock = _mali_osk_lock_init( _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_ONELOCK
+ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE);
+ MALI_CHECK_NON_NULL( page_table_cache.lock, _MALI_OSK_ERR_FAULT );
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.partial);
+ _MALI_OSK_INIT_LIST_HEAD(&page_table_cache.full);
+ MALI_SUCCESS;
+}
+
+static void mali_mmu_page_table_cache_destroy(void)
+{
+ mali_mmu_page_table_allocation * alloc, *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.partial, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT_IF(1, 0 != alloc->usage_count, ("Destroying page table cache while pages are tagged as in use. %d allocations still marked as in use.\n", alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ MALI_DEBUG_PRINT_IF(1, 0 == _mali_osk_list_empty(&page_table_cache.full), ("Page table cache full list contains one or more elements \n"));
+
+ _MALI_OSK_LIST_FOREACHENTRY(alloc, temp, &page_table_cache.full, mali_mmu_page_table_allocation, list)
+ {
+ MALI_DEBUG_PRINT(1, ("Destroy alloc 0x%08X with usage count %d\n", (u32)alloc, alloc->usage_count));
+ _mali_osk_list_del(&alloc->list);
+ alloc->pages.release(&alloc->pages);
+ _mali_osk_free(alloc->usage_map);
+ _mali_osk_free(alloc);
+ }
+
+ _mali_osk_lock_term(page_table_cache.lock);
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_memory.h b/drivers/media/video/samsung/mali/common/mali_memory.h
new file mode 100644
index 0000000..78e2945
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_memory.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MEMORY_H__
+#define __MALI_MEMORY_H__
+
+#include "mali_osk.h"
+#include "mali_session.h"
+
+struct mali_cluster;
+struct mali_group;
+
+/** @brief Initialize Mali memory subsystem
+ *
+ * Allocate and initialize internal data structures. Must be called before
+ * allocating Mali memory.
+ *
+ * @return On success _MALI_OSK_ERR_OK, otherwise an error code describing the failure.
+ */
+_mali_osk_errcode_t mali_memory_initialize(void);
+
+/** @brief Terminate Mali memory system
+ *
+ * Clean up and release internal data structures.
+ */
+void mali_memory_terminate(void);
+
+/** @brief Start new Mali memory session
+ *
+ * Allocate and prepare session specific memory allocation data. The
+ * session page directory, lock, and descriptor map are set up.
+ *
+ * @param mali_session_data pointer to the session data structure
+ */
+_mali_osk_errcode_t mali_memory_session_begin(struct mali_session_data *mali_session_data);
+
+/** @brief Close a Mali memory session
+ *
+ * Release session specific memory allocation related data.
+ *
+ * @param mali_session_data pointer to the session data structure
+ */
+void mali_memory_session_end(struct mali_session_data *mali_session_data);
+
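+/* Illustrative sketch (not part of the original patch): the expected pairing of
+ * the session hooks above, as driven from the driver's session open/close paths.
+ * The session pointer is assumed to be allocated and freed by the caller.
+ *
+ *     struct mali_session_data *session = ...;
+ *     if (_MALI_OSK_ERR_OK != mali_memory_session_begin(session))
+ *     {
+ *         ... fail the session open ...
+ *     }
+ *     ... session can now be used for Mali memory allocations ...
+ *     mali_memory_session_end(session);
+ */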
+/** @brief Allocate a page table page
+ *
+ * Allocate a page for use as a page directory or page table. The page is
+ * mapped into kernel space.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise an error code
+ * @param table_page GPU pointer to the allocated page
+ * @param mapping CPU pointer to the mapping of the allocated page
+ */
+_mali_osk_errcode_t mali_mmu_get_table_page(u32 *table_page, mali_io_address *mapping);
+
+/** @brief Release a page table page
+ *
+ * Release a page table page allocated through \a mali_mmu_get_table_page
+ *
+ * @param pa the GPU address of the page to release
+ */
+void mali_mmu_release_table_page(u32 pa);
+
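+/* Illustrative sketch (not part of the original patch): typical pairing of the two
+ * calls above. A page table page is returned both as a Mali physical address and
+ * as a kernel mapping, and is handed back by its Mali physical address.
+ *
+ *     u32 pa;
+ *     mali_io_address va;
+ *     if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&pa, &va))
+ *     {
+ *         ... write entries through va, point the MMU at pa ...
+ *         mali_mmu_release_table_page(pa);
+ *     }
+ */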
+
+/** @brief Parse resource and prepare the OS memory allocator
+ */
+_mali_osk_errcode_t mali_memory_core_resource_os_memory(_mali_osk_resource_t * resource);
+
+/** @brief Parse resource and prepare the dedicated memory allocator
+ */
+_mali_osk_errcode_t mali_memory_core_resource_dedicated_memory(_mali_osk_resource_t * resource);
+
+mali_allocation_engine mali_mem_get_memory_engine(void);
+
+#endif /* __MALI_MEMORY_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_mmu.c b/drivers/media/video/samsung/mali/common/mali_mmu.c
new file mode 100644
index 0000000..2f2fa4d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_mmu.c
@@ -0,0 +1,619 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_bitops.h"
+#include "mali_osk_list.h"
+#include "mali_ukk.h"
+
+#include "mali_mmu.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_mmu_page_directory.h"
+
+/**
+ * Size of the MMU registers in bytes
+ */
+#define MALI_MMU_REGISTERS_SIZE 0x24
+
+/**
+ * MMU register numbers
+ * Used in the register read/write routines.
+ * See the hardware documentation for more information about each register
+ */
+typedef enum mali_mmu_register {
+ MALI_MMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
+ MALI_MMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
+ MALI_MMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
+ MALI_MMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
+ MALI_MMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
+ MALI_MMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
+ MALI_MMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
+ MALI_MMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
+ MALI_MMU_REGISTER_INT_STATUS = 0x0020 /**< Interrupt status based on the mask */
+} mali_mmu_register;
+
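+/* Illustrative sketch (not part of the original patch): these values are byte
+ * offsets from the MMU register base and are accessed through the hw_core
+ * helpers used throughout this file, e.g.
+ *
+ *     u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ *     mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR,
+ *                                 MALI_MMU_INTERRUPT_PAGE_FAULT);
+ */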
+/**
+ * MMU interrupt register bits
+ * Each cause of the interrupt is reported
+ * through the (raw) interrupt status registers.
+ * Multiple interrupts can be pending, so multiple bits
+ * can be set at once.
+ */
+typedef enum mali_mmu_interrupt
+{
+	MALI_MMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occurred */
+	MALI_MMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occurred */
+} mali_mmu_interrupt;
+
+/**
+ * MMU commands
+ * These are the commands that can be sent
+ * to the MMU unit.
+ */
+typedef enum mali_mmu_command
+{
+ MALI_MMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
+ MALI_MMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
+ MALI_MMU_COMMAND_ENABLE_STALL = 0x02, /**< Enable stall on page fault */
+ MALI_MMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
+ MALI_MMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
+ MALI_MMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
+ MALI_MMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
+} mali_mmu_command;
+
+typedef enum mali_mmu_status_bits
+{
+ MALI_MMU_STATUS_BIT_PAGING_ENABLED = 1 << 0,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE = 1 << 1,
+ MALI_MMU_STATUS_BIT_STALL_ACTIVE = 1 << 2,
+ MALI_MMU_STATUS_BIT_IDLE = 1 << 3,
+ MALI_MMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
+ MALI_MMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
+} mali_mmu_status_bits;
+
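+/* Illustrative sketch (not part of the original patch): commands are issued by
+ * writing a mali_mmu_command to the COMMAND register and then polling STATUS for
+ * the corresponding bit, which is the pattern used by mali_mmu_enable_paging()
+ * and mali_mmu_enable_stall() below (with a bounded retry count in the real code):
+ *
+ *     mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND,
+ *                                 MALI_MMU_COMMAND_ENABLE_STALL);
+ *     while (0 == (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+ *                  & MALI_MMU_STATUS_BIT_STALL_ACTIVE))
+ *     {
+ *         _mali_osk_time_ubusydelay(1);
+ *     }
+ */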
+/**
+ * Definition of the MMU struct
+ * Used to track a MMU unit in the system.
+ * Contains information about the mapping of the registers
+ */
+struct mali_mmu_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ struct mali_group *group; /**< Parent core group */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+};
+
+/**
+ * The MMU interrupt handler
+ * Upper half of the MMU interrupt processing.
+ * Called by the kernel when the MMU has triggered an interrupt.
+ * The interrupt function supports IRQ sharing, so it probes the MMU in question
+ * to check whether it actually raised the interrupt.
+ * @param data Points to the MMU object being handled
+ * @return _MALI_OSK_ERR_OK if this MMU raised the interrupt and it was handled
+ *         (page fault processing is deferred to the bottom half),
+ *         _MALI_OSK_ERR_FAULT if the interrupt did not come from this device.
+ */
+static _mali_osk_errcode_t mali_mmu_upper_half(void * data);
+
+/**
+ * The MMU interrupt bottom half handler
+ * Bottom half of the MMU interrupt processing for page faults and bus errors
+ * @param data Points to the MMU object being handled
+ */
+static void mali_mmu_bottom_half(void *data);
+
+static void mali_mmu_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data);
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu);
+
+/* page fault queue flush helper pages
+ * note that the mapping pointers are currently unused outside of the initialization functions */
+static u32 mali_page_fault_flush_page_directory = MALI_INVALID_PAGE;
+static u32 mali_page_fault_flush_page_table = MALI_INVALID_PAGE;
+static u32 mali_page_fault_flush_data_page = MALI_INVALID_PAGE;
+
+/* an empty page directory (no address valid) which is active on any MMU not currently marked as in use */
+static u32 mali_empty_page_directory = MALI_INVALID_PAGE;
+
+_mali_osk_errcode_t mali_mmu_initialize(void)
+{
+ /* allocate the helper pages */
+ mali_empty_page_directory = mali_allocate_empty_page();
+ if(0 == mali_empty_page_directory)
+ {
+ mali_empty_page_directory = MALI_INVALID_PAGE;
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_create_fault_flush_pages(&mali_page_fault_flush_page_directory,
+ &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page))
+ {
+ mali_free_empty_page(mali_empty_page_directory);
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_mmu_terminate(void)
+{
+ MALI_DEBUG_PRINT(3, ("Mali MMU: terminating\n"));
+
+ /* Free global helper pages */
+ mali_free_empty_page(mali_empty_page_directory);
+
+ /* Free the page fault flush pages */
+ mali_destroy_fault_flush_pages(&mali_page_fault_flush_page_directory,
+ &mali_page_fault_flush_page_table, &mali_page_fault_flush_data_page);
+}
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource)
+{
+ struct mali_mmu_core* mmu = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(resource);
+
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Creating Mali MMU: %s\n", resource->description));
+
+ mmu = _mali_osk_calloc(1,sizeof(struct mali_mmu_core));
+ if (NULL != mmu)
+ {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&mmu->hw_core, resource, MALI_MMU_REGISTERS_SIZE))
+ {
+ if (_MALI_OSK_ERR_OK == mali_mmu_reset(mmu))
+ {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ mmu->irq = _mali_osk_irq_init(resource->irq,
+ mali_mmu_upper_half,
+ mali_mmu_bottom_half,
+ mali_mmu_probe_trigger,
+ mali_mmu_probe_ack,
+ mmu,
+ "mali_mmu_irq_handlers");
+ if (NULL != mmu->irq)
+ {
+ return mmu;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to setup interrupt handlers for MMU %s\n", mmu->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&mmu->hw_core);
+ }
+
+ _mali_osk_free(mmu);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to allocate memory for MMU\n"));
+ }
+
+ return NULL;
+}
+
+void mali_mmu_delete(struct mali_mmu_core *mmu)
+{
+ _mali_osk_irq_term(mmu->irq);
+ mali_hw_core_delete(&mmu->hw_core);
+ _mali_osk_free(mmu);
+}
+
+void mali_mmu_set_group(struct mali_mmu_core *mmu, struct mali_group *group)
+{
+ mmu->group = group;
+}
+
+static void mali_mmu_enable_paging(struct mali_mmu_core *mmu)
+{
+ const int max_loop_count = 100;
+ const int delay_in_usecs = 1;
+ int i;
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_PAGING);
+
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS) & MALI_MMU_STATUS_BIT_PAGING_ENABLED)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ if (max_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Enable paging request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ }
+}
+
+mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu)
+{
+ const int max_loop_count = 100;
+ const int delay_in_usecs = 999;
+ int i;
+ u32 mmu_status;
+
+	/* There is no group when this is called from mali_mmu_create */
+ if ( mmu->group ) MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+ mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED) )
+ {
+		MALI_DEBUG_PRINT(4, ("MMU stall is implicit when paging is not enabled.\n"));
+ return MALI_TRUE;
+ }
+
+ if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+ {
+ MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
+ return MALI_FALSE;
+ }
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ENABLE_STALL);
+
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ if ( mmu_status & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE))
+ {
+ break;
+ }
+ if ( 0 == (mmu_status & ( MALI_MMU_STATUS_BIT_PAGING_ENABLED )))
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ if (max_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Enable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ return MALI_FALSE;
+ }
+
+ if ( mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+ {
+ MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it has a pagefault.\n"));
+ return MALI_FALSE;
+ }
+
+ return MALI_TRUE;
+}
+
+void mali_mmu_disable_stall(struct mali_mmu_core *mmu)
+{
+ const int max_loop_count = 100;
+ const int delay_in_usecs = 1;
+ int i;
+ u32 mmu_status;
+	/* There is no group when this is called from mali_mmu_create */
+ if ( mmu->group ) MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+ mmu_status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
+ {
+		MALI_DEBUG_PRINT(3, ("MMU disable stall skipped since paging is not enabled.\n"));
+ return;
+ }
+ if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)
+ {
+ MALI_DEBUG_PRINT(2, ("Aborting MMU disable stall request since it is in pagefault state.\n"));
+ return;
+ }
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_DISABLE_STALL);
+
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ u32 status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+ if ( 0 == (status & MALI_MMU_STATUS_BIT_STALL_ACTIVE) )
+ {
+ break;
+ }
+ if ( status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE )
+ {
+ break;
+ }
+ if ( 0 == (mmu_status & MALI_MMU_STATUS_BIT_PAGING_ENABLED ))
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ if (max_loop_count == i) MALI_DEBUG_PRINT(1,("Disable stall request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu)
+{
+ MALI_ASSERT_GROUP_LOCKED(mmu->group);
+ MALI_DEBUG_PRINT(4, ("Mali MMU: %s: Leaving page fault mode\n", mmu->hw_core.description));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_PAGE_FAULT_DONE);
+}
+
+MALI_STATIC_INLINE _mali_osk_errcode_t mali_mmu_raw_reset(struct mali_mmu_core *mmu)
+{
+ const int max_loop_count = 100;
+ const int delay_in_usecs = 1;
+ int i;
+	/* The _if_ is necessary when called from mali_mmu_create and NULL == group */
+	if (mmu->group) MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, 0xCAFEBABE);
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_HARD_RESET);
+
+ for (i = 0; i < max_loop_count; ++i)
+ {
+ if (mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR) == 0)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(delay_in_usecs);
+ }
+ if (max_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Reset request failed, MMU status is 0x%08X\n", mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu)
+{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+	/* The _if_ is necessary when called from mali_mmu_create and NULL == group */
+ if (mmu->group)
+ {
+ MALI_ASSERT_GROUP_LOCKED(mmu->group);
+ }
+
+ stall_success = mali_mmu_enable_stall(mmu);
+
+	/* The stall cannot fail in the current HW state */
+ MALI_DEBUG_ASSERT(stall_success);
+
+ MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
+
+ if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu))
+ {
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* no session is active, so just activate the empty page directory */
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, mali_empty_page_directory);
+ mali_mmu_enable_paging(mmu);
+ err = _MALI_OSK_ERR_OK;
+ }
+ mali_mmu_disable_stall(mmu);
+
+ return err;
+}
+
+
+/* ------------- interrupt handling below ------------------ */
+
+static _mali_osk_errcode_t mali_mmu_upper_half(void * data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ u32 int_stat;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ /* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
+ int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+ if (0 != int_stat)
+ {
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, 0);
+ mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ _mali_osk_irq_schedulework(mmu->irq);
+ }
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ /* clear interrupt flag */
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ /* reenable it */
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK,
+ mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK) | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ MALI_PRINT_ERROR(("Mali MMU: Read bus error\n"));
+ }
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+static void mali_mmu_bottom_half(void * data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core*)data;
+ u32 raw, status, fault_address;
+
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+
+ MALI_DEBUG_PRINT(3, ("Mali MMU: Page fault bottom half: Locking subsystems\n"));
+
+ mali_group_lock(mmu->group); /* Unlocked in mali_group_bottom_half */
+
+ if ( MALI_FALSE == mali_group_power_is_on(mmu->group) )
+ {
+ MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.",mmu->hw_core.description));
+ mali_group_unlock(mmu->group);
+ return;
+ }
+
+ raw = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+ status = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS);
+
+ if ( (0==(raw & MALI_MMU_INTERRUPT_PAGE_FAULT)) && (0==(status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE)) )
+ {
+ MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault bottom half: No Irq found.\n"));
+ mali_group_unlock(mmu->group);
+ /* MALI_DEBUG_ASSERT(0); */
+ return;
+ }
+
+ /* An actual page fault has occurred. */
+
+ fault_address = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_PAGE_FAULT_ADDR);
+
+ MALI_DEBUG_PRINT(2,("Mali MMU: Page fault detected at 0x%x from bus id %d of type %s on %s\n",
+			fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ mmu->hw_core.description));
+
+ mali_group_bottom_half(mmu->group, GROUP_EVENT_MMU_PAGE_FAULT); /* Unlocks the group lock */
+}
+
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu)
+{
+ mali_bool stall_success;
+ MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+ stall_success = mali_mmu_enable_stall(mmu);
+
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+ if (MALI_FALSE == stall_success)
+ {
+ /* False means that it is in Pagefault state. Not possible to disable_stall then */
+ return MALI_FALSE;
+ }
+
+ mali_mmu_disable_stall(mmu);
+ return MALI_TRUE;
+}
+
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu)
+{
+ MALI_ASSERT_GROUP_LOCKED(mmu->group);
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+}
+
+
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address)
+{
+ MALI_ASSERT_GROUP_LOCKED(mmu->group);
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_ZAP_ONE_LINE, MALI_MMU_PDE_ENTRY(mali_address));
+}
+
+static void mali_mmu_activate_address_space(struct mali_mmu_core *mmu, u32 page_directory)
+{
+ MALI_ASSERT_GROUP_LOCKED(mmu->group);
+	/* The MMU must be in stalled or page fault mode for this write to take effect */
+ MALI_DEBUG_ASSERT( 0 != ( mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)
+ & (MALI_MMU_STATUS_BIT_STALL_ACTIVE|MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) ) );
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_DTE_ADDR, page_directory);
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_COMMAND, MALI_MMU_COMMAND_ZAP_CACHE);
+
+}
+
+mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core *mmu, struct mali_page_directory *pagedir)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+ MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+ MALI_DEBUG_PRINT(5, ("Asked to activate page directory 0x%x on MMU %s\n", pagedir, mmu->hw_core.description));
+ stall_success = mali_mmu_enable_stall(mmu);
+
+ if ( MALI_FALSE==stall_success ) return MALI_FALSE;
+ mali_mmu_activate_address_space(mmu, pagedir->page_directory);
+ mali_mmu_disable_stall(mmu);
+ return MALI_TRUE;
+}
+
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+ MALI_ASSERT_GROUP_LOCKED(mmu->group);
+ MALI_DEBUG_PRINT(3, ("Activating the empty page directory on MMU %s\n", mmu->hw_core.description));
+
+ stall_success = mali_mmu_enable_stall(mmu);
+	/* This function can only be called when the core is idle, so the stall request cannot fail. */
+ MALI_DEBUG_ASSERT( stall_success );
+ mali_mmu_activate_address_space(mmu, mali_empty_page_directory);
+ mali_mmu_disable_stall(mmu);
+}
+
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu)
+{
+ mali_bool stall_success;
+ MALI_DEBUG_ASSERT_POINTER(mmu);
+ MALI_ASSERT_GROUP_LOCKED(mmu->group);
+
+ MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+ stall_success = mali_mmu_enable_stall(mmu);
+	/* This function is expected to fail the stall request, since the MMU might be in page fault mode when it is called */
+ mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
+ if ( MALI_TRUE==stall_success ) mali_mmu_disable_stall(mmu);
+}
+
+/* Is called when we want the mmu to give an interrupt */
+static void mali_mmu_probe_trigger(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT, MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+}
+
+/* Is called when the irq probe wants the mmu to acknowledge an interrupt from the hw */
+static _mali_osk_errcode_t mali_mmu_probe_ack(void *data)
+{
+ struct mali_mmu_core *mmu = (struct mali_mmu_core *)data;
+ u32 int_stat;
+
+ int_stat = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
+
+ MALI_DEBUG_PRINT(2, ("mali_mmu_probe_irq_acknowledge: intstat 0x%x\n", int_stat));
+ if (int_stat & MALI_MMU_INTERRUPT_PAGE_FAULT)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Page fault detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_PAGE_FAULT);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("Probe: Page fault detect: FAILED\n"));
+ }
+
+ if (int_stat & MALI_MMU_INTERRUPT_READ_BUS_ERROR)
+ {
+ MALI_DEBUG_PRINT(2, ("Probe: Bus read error detect: PASSED\n"));
+ mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_CLEAR, MALI_MMU_INTERRUPT_READ_BUS_ERROR);
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(1, ("Probe: Bus read error detect: FAILED\n"));
+ }
+
+ if ( (int_stat & (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR)) ==
+ (MALI_MMU_INTERRUPT_PAGE_FAULT|MALI_MMU_INTERRUPT_READ_BUS_ERROR))
+ {
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+#if 0
+void mali_mmu_print_state(struct mali_mmu_core *mmu)
+{
+ MALI_DEBUG_PRINT(2, ("MMU: State of %s is 0x%08x\n", mmu->hw_core.description, mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_STATUS)));
+}
+#endif
diff --git a/drivers/media/video/samsung/mali/common/mali_mmu.h b/drivers/media/video/samsung/mali/common/mali_mmu.h
new file mode 100644
index 0000000..c7274b8
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_mmu.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_H__
+#define __MALI_MMU_H__
+
+#include "mali_osk.h"
+#include "mali_mmu_page_directory.h"
+
+/* Forward declaration from mali_group.h */
+struct mali_group;
+
+struct mali_mmu_core;
+
+_mali_osk_errcode_t mali_mmu_initialize(void);
+
+void mali_mmu_terminate(void);
+
+struct mali_mmu_core *mali_mmu_create(_mali_osk_resource_t *resource);
+void mali_mmu_set_group(struct mali_mmu_core *mmu, struct mali_group *group);
+void mali_mmu_delete(struct mali_mmu_core *mmu);
+
+_mali_osk_errcode_t mali_mmu_reset(struct mali_mmu_core *mmu);
+mali_bool mali_mmu_zap_tlb(struct mali_mmu_core *mmu);
+void mali_mmu_zap_tlb_without_stall(struct mali_mmu_core *mmu);
+void mali_mmu_invalidate_page(struct mali_mmu_core *mmu, u32 mali_address);
+
+mali_bool mali_mmu_activate_page_directory(struct mali_mmu_core* mmu, struct mali_page_directory *pagedir);
+void mali_mmu_activate_empty_page_directory(struct mali_mmu_core* mmu);
+void mali_mmu_activate_fault_flush_page_directory(struct mali_mmu_core* mmu);
+
+/**
+ * Issues the enable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to stall
+ * @return MALI_TRUE if the HW stall was successfully engaged, otherwise MALI_FALSE (the request timed out or the MMU is handling a page fault)
+ */
+mali_bool mali_mmu_enable_stall(struct mali_mmu_core *mmu);
+
+/**
+ * Issues the disable stall command to the MMU and waits for HW to complete the request
+ * @param mmu The MMU to disable the stall on
+ */
+void mali_mmu_disable_stall(struct mali_mmu_core *mmu);
+
+void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
+
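+/* Illustrative sketch (not part of the original patch): the stall helpers above
+ * bracket operations that need the MMU to be quiescent, e.g. switching the active
+ * page directory (see mali_mmu_activate_page_directory() in mali_mmu.c):
+ *
+ *     if (MALI_TRUE == mali_mmu_enable_stall(mmu))
+ *     {
+ *         ... reprogram the page directory address / zap the TLB ...
+ *         mali_mmu_disable_stall(mmu);
+ *     }
+ */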
+
+#endif /* __MALI_MMU_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_mmu_page_directory.c b/drivers/media/video/samsung/mali/common/mali_mmu_page_directory.c
new file mode 100644
index 0000000..cc91ae9
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_mmu_page_directory.c
@@ -0,0 +1,475 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+#include "mali_mmu_page_directory.h"
+#include "mali_memory.h"
+
+#include "mali_cluster.h"
+#include "mali_group.h"
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data);
+
+u32 mali_allocate_empty_page(void)
+{
+ _mali_osk_errcode_t err;
+ mali_io_address mapping;
+ u32 address;
+
+ if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&address, &mapping))
+ {
+ /* Allocation failed */
+ return 0;
+ }
+
+ MALI_DEBUG_ASSERT_POINTER( mapping );
+
+ err = fill_page(mapping, 0);
+ if (_MALI_OSK_ERR_OK != err)
+ {
+ mali_mmu_release_table_page(address);
+ }
+ return address;
+}
+
+void mali_free_empty_page(u32 address)
+{
+ if (MALI_INVALID_PAGE != address)
+ {
+ mali_mmu_release_table_page(address);
+ }
+}
+
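+/* The fault flush pages form a minimal, fully populated three-level mapping: every
+ * entry of the page directory points to the single page table, and every entry of
+ * that page table points to the single data page, so any Mali virtual address
+ * resolves to the same writable page. This directory is activated while flushing
+ * the page fault queue (see mali_mmu_activate_fault_flush_page_directory()).
+ */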
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page)
+{
+ _mali_osk_errcode_t err;
+ mali_io_address page_directory_mapping;
+ mali_io_address page_table_mapping;
+ mali_io_address data_page_mapping;
+
+ err = mali_mmu_get_table_page(data_page, &data_page_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ err = mali_mmu_get_table_page(page_table, &page_table_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ err = mali_mmu_get_table_page(page_directory, &page_directory_mapping);
+ if (_MALI_OSK_ERR_OK == err)
+ {
+ fill_page(data_page_mapping, 0);
+ fill_page(page_table_mapping, *data_page | MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT);
+ fill_page(page_directory_mapping, *page_table | MALI_MMU_FLAGS_PRESENT);
+ MALI_SUCCESS;
+ }
+ mali_mmu_release_table_page(*page_table);
+ *page_table = MALI_INVALID_PAGE;
+ }
+ mali_mmu_release_table_page(*data_page);
+ *data_page = MALI_INVALID_PAGE;
+ }
+ return err;
+}
+
+void mali_destroy_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page)
+{
+ if (MALI_INVALID_PAGE != *page_directory)
+ {
+ mali_mmu_release_table_page(*page_directory);
+ *page_directory = MALI_INVALID_PAGE;
+ }
+
+ if (MALI_INVALID_PAGE != *page_table)
+ {
+ mali_mmu_release_table_page(*page_table);
+ *page_table = MALI_INVALID_PAGE;
+ }
+
+ if (MALI_INVALID_PAGE != *data_page)
+ {
+ mali_mmu_release_table_page(*data_page);
+ *data_page = MALI_INVALID_PAGE;
+ }
+}
+
+static _mali_osk_errcode_t fill_page(mali_io_address mapping, u32 data)
+{
+ int i;
+ MALI_DEBUG_ASSERT_POINTER( mapping );
+
+ for(i = 0; i < MALI_MMU_PAGE_SIZE/4; i++)
+ {
+ _mali_osk_mem_iowrite32_relaxed( mapping, i * sizeof(u32), data);
+ }
+ _mali_osk_mem_barrier();
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ _mali_osk_errcode_t err;
+ mali_io_address pde_mapping;
+ u32 pde_phys;
+ int i;
+
+ for(i = first_pde; i <= last_pde; i++)
+ {
+ if(0 == (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & MALI_MMU_FLAGS_PRESENT))
+ {
+ /* Page table not present */
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ MALI_DEBUG_ASSERT(NULL == pagedir->page_entries_mapped[i]);
+
+ err = mali_mmu_get_table_page(&pde_phys, &pde_mapping);
+ if(_MALI_OSK_ERR_OK != err)
+ {
+ MALI_PRINT_ERROR(("Failed to allocate page table page.\n"));
+ return err;
+ }
+ pagedir->page_entries_mapped[i] = pde_mapping;
+
+ /* Update PDE, mark as present */
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32),
+ pde_phys | MALI_MMU_FLAGS_PRESENT);
+
+ MALI_DEBUG_ASSERT(0 == pagedir->page_entries_usage_count[i]);
+ pagedir->page_entries_usage_count[i] = 1;
+ }
+ else
+ {
+ pagedir->page_entries_usage_count[i]++;
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ MALI_SUCCESS;
+}
+
+MALI_STATIC_INLINE void mali_mmu_zero_pte(mali_io_address page_table, u32 mali_address, u32 size)
+{
+ int i;
+ const int first_pte = MALI_MMU_PTE_ENTRY(mali_address);
+ const int last_pte = MALI_MMU_PTE_ENTRY(mali_address + size - 1);
+
+ for (i = first_pte; i <= last_pte; i++)
+ {
+ _mali_osk_mem_iowrite32_relaxed(page_table, i * sizeof(u32), 0);
+ }
+}
+
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size)
+{
+ const int first_pde = MALI_MMU_PDE_ENTRY(mali_address);
+ const int last_pde = MALI_MMU_PDE_ENTRY(mali_address + size - 1);
+ u32 left = size;
+ int i;
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+ mali_bool pd_changed = MALI_FALSE;
+ u32 pages_to_invalidate[3]; /* hard-coded to 3: max two pages from the PT level plus max one page from PD level */
+ u32 num_pages_inv = 0;
+#endif
+
+ /* For all page directory entries in range. */
+ for (i = first_pde; i <= last_pde; i++)
+ {
+ u32 size_in_pde, offset;
+
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[i]);
+ MALI_DEBUG_ASSERT(0 != pagedir->page_entries_usage_count[i]);
+
+ /* Offset into page table, 0 if mali_address is 4MiB aligned */
+ offset = (mali_address & (MALI_MMU_VIRTUAL_PAGE_SIZE - 1));
+ if (left < MALI_MMU_VIRTUAL_PAGE_SIZE - offset)
+ {
+ size_in_pde = left;
+ }
+ else
+ {
+ size_in_pde = MALI_MMU_VIRTUAL_PAGE_SIZE - offset;
+ }
+
+ pagedir->page_entries_usage_count[i]--;
+
+ /* If entire page table is unused, free it */
+ if (0 == pagedir->page_entries_usage_count[i])
+ {
+ u32 page_address;
+ MALI_DEBUG_PRINT(4, ("Releasing page table as this is the last reference\n"));
+ /* last reference removed, no need to zero out each PTE */
+
+ page_address = MALI_MMU_ENTRY_ADDRESS(_mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)));
+ pagedir->page_entries_mapped[i] = NULL;
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i*sizeof(u32), 0);
+
+ mali_mmu_release_table_page(page_address);
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+ pd_changed = MALI_TRUE;
+#endif
+ }
+ else
+ {
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+ pages_to_invalidate[num_pages_inv] = mali_page_directory_get_phys_address(pagedir, i);
+ num_pages_inv++;
+ MALI_DEBUG_ASSERT(num_pages_inv<3);
+#endif
+
+ /* If part of the page table is still in use, zero the relevant PTEs */
+ mali_mmu_zero_pte(pagedir->page_entries_mapped[i], mali_address, size_in_pde);
+ }
+
+ left -= size_in_pde;
+ mali_address += size_in_pde;
+ }
+ _mali_osk_write_mem_barrier();
+
+#ifndef MALI_UNMAP_FLUSH_ALL_MALI_L2
+ /* L2 pages invalidation */
+ if (MALI_TRUE == pd_changed)
+ {
+ pages_to_invalidate[num_pages_inv] = pagedir->page_directory;
+ num_pages_inv++;
+ MALI_DEBUG_ASSERT(num_pages_inv<3);
+ }
+
+ if (_MALI_PRODUCT_ID_MALI200 != mali_kernel_core_get_product_id())
+ {
+ mali_cluster_invalidate_pages(pages_to_invalidate, num_pages_inv);
+ }
+#endif
+
+ MALI_SUCCESS;
+}
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void)
+{
+ struct mali_page_directory *pagedir;
+
+ pagedir = _mali_osk_calloc(1, sizeof(struct mali_page_directory));
+ if(NULL == pagedir)
+ {
+ return NULL;
+ }
+
+ if(_MALI_OSK_ERR_OK != mali_mmu_get_table_page(&pagedir->page_directory, &pagedir->page_directory_mapped))
+ {
+ _mali_osk_free(pagedir);
+ return NULL;
+ }
+
+ /* Zero page directory */
+ fill_page(pagedir->page_directory_mapped, 0);
+
+ return pagedir;
+}
+
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir)
+{
+ const int num_page_table_entries = sizeof(pagedir->page_entries_mapped) / sizeof(pagedir->page_entries_mapped[0]);
+ int i;
+
+ /* Free referenced page tables and zero PDEs. */
+ for (i = 0; i < num_page_table_entries; i++)
+ {
+ if (pagedir->page_directory_mapped && (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, sizeof(u32)*i) & MALI_MMU_FLAGS_PRESENT))
+ {
+ mali_mmu_release_table_page( _mali_osk_mem_ioread32(pagedir->page_directory_mapped, i*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_directory_mapped, i * sizeof(u32), 0);
+ }
+ }
+ _mali_osk_write_mem_barrier();
+
+ /* Free the page directory page. */
+ mali_mmu_release_table_page(pagedir->page_directory);
+
+ _mali_osk_free(pagedir);
+}
+
+
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, mali_memory_cache_settings cache_settings)
+{
+ u32 end_address = mali_address + size;
+ u32 permission_bits;
+
+ switch ( cache_settings )
+ {
+ case MALI_CACHE_GP_READ_ALLOCATE:
+ MALI_DEBUG_PRINT(3, ("Map L2 GP_Read_allocate\n"));
+ permission_bits = MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE;
+ break;
+
+ case MALI_CACHE_STANDARD:
+ MALI_DEBUG_PRINT(3, ("Map L2 Standard\n"));
+		/* fall through */
+ default:
+ if ( MALI_CACHE_STANDARD != cache_settings) MALI_PRINT_ERROR(("Wrong cache settings\n"));
+ permission_bits = MALI_MMU_FLAGS_WRITE_PERMISSION | MALI_MMU_FLAGS_READ_PERMISSION | MALI_MMU_FLAGS_PRESENT;
+ }
+
+ /* Map physical pages into MMU page tables */
+ for ( ; mali_address < end_address; mali_address += MALI_MMU_PAGE_SIZE, phys_address += MALI_MMU_PAGE_SIZE)
+ {
+ MALI_DEBUG_ASSERT_POINTER(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)]);
+ _mali_osk_mem_iowrite32_relaxed(pagedir->page_entries_mapped[MALI_MMU_PDE_ENTRY(mali_address)],
+ MALI_MMU_PTE_ENTRY(mali_address) * sizeof(u32),
+ phys_address | permission_bits);
+ }
+ _mali_osk_write_mem_barrier();
+}
+
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index)
+{
+ return (_mali_osk_mem_ioread32(pagedir->page_directory_mapped, index*sizeof(u32)) & ~MALI_MMU_FLAGS_MASK);
+}
+
+/* For instrumented */
+struct dump_info
+{
+ u32 buffer_left;
+ u32 register_writes_size;
+ u32 page_table_dump_size;
+ u32 *buffer;
+};
+
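+/* The dump buffer filled in by writereg() and dump_page() below is a flat array of
+ * u32 words: first one (address, value) pair per register write, then one record per
+ * dumped page (the page directory and each mapped page table), consisting of the
+ * page's Mali physical address followed by its 4096 bytes of content.
+ * _mali_ukk_query_mmu_page_table_dump_size() runs the same code with a NULL buffer
+ * to report the required size without copying anything.
+ */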
+static _mali_osk_errcode_t writereg(u32 where, u32 what, const char *comment, struct dump_info *info)
+{
+ if (NULL != info)
+ {
+ info->register_writes_size += sizeof(u32)*2; /* two 32-bit words */
+
+ if (NULL != info->buffer)
+ {
+ /* check that we have enough space */
+ if (info->buffer_left < sizeof(u32)*2) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = where;
+ info->buffer++;
+
+ *info->buffer = what;
+ info->buffer++;
+
+ info->buffer_left -= sizeof(u32)*2;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_page(mali_io_address page, u32 phys_addr, struct dump_info * info)
+{
+ if (NULL != info)
+ {
+ /* 4096 for the page and 4 bytes for the address */
+ const u32 page_size_in_elements = MALI_MMU_PAGE_SIZE / 4;
+ const u32 page_size_in_bytes = MALI_MMU_PAGE_SIZE;
+ const u32 dump_size_in_bytes = MALI_MMU_PAGE_SIZE + 4;
+
+ info->page_table_dump_size += dump_size_in_bytes;
+
+ if (NULL != info->buffer)
+ {
+ if (info->buffer_left < dump_size_in_bytes) MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+
+ *info->buffer = phys_addr;
+ info->buffer++;
+
+ _mali_osk_memcpy(info->buffer, page, page_size_in_bytes);
+ info->buffer += page_size_in_elements;
+
+ info->buffer_left -= dump_size_in_bytes;
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_page_table(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+ MALI_DEBUG_ASSERT_POINTER(pagedir);
+ MALI_DEBUG_ASSERT_POINTER(info);
+
+ if (NULL != pagedir->page_directory_mapped)
+ {
+ int i;
+
+ MALI_CHECK_NO_ERROR(
+ dump_page(pagedir->page_directory_mapped, pagedir->page_directory, info)
+ );
+
+ for (i = 0; i < 1024; i++)
+ {
+ if (NULL != pagedir->page_entries_mapped[i])
+ {
+ MALI_CHECK_NO_ERROR(
+ dump_page(pagedir->page_entries_mapped[i],
+ _mali_osk_mem_ioread32(pagedir->page_directory_mapped,
+ i * sizeof(u32)) & ~MALI_MMU_FLAGS_MASK, info)
+ );
+ }
+ }
+ }
+
+ MALI_SUCCESS;
+}
+
+static _mali_osk_errcode_t dump_mmu_registers(struct mali_page_directory *pagedir, struct dump_info * info)
+{
+ MALI_CHECK_NO_ERROR(writereg(0x00000000, pagedir->page_directory,
+ "set the page directory address", info));
+	MALI_CHECK_NO_ERROR(writereg(0x00000008, 4, "zap the page table cache (MALI_MMU_COMMAND_ZAP_CACHE)", info));
+ MALI_CHECK_NO_ERROR(writereg(0x00000008, 0, "enable paging", info));
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)(args->ctx);
+
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+ args->size = info.register_writes_size + info.page_table_dump_size;
+ MALI_SUCCESS;
+}
+
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args )
+{
+ struct dump_info info = { 0, 0, 0, NULL };
+ struct mali_session_data * session_data;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_CHECK_NON_NULL(args->ctx, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_CHECK_NON_NULL(args->buffer, _MALI_OSK_ERR_INVALID_ARGS);
+
+ session_data = (struct mali_session_data *)(args->ctx);
+
+ info.buffer_left = args->size;
+ info.buffer = args->buffer;
+
+ args->register_writes = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_registers(session_data->page_directory, &info));
+
+ args->page_table_dump = info.buffer;
+ MALI_CHECK_NO_ERROR(dump_mmu_page_table(session_data->page_directory, &info));
+
+ args->register_writes_size = info.register_writes_size;
+ args->page_table_dump_size = info.page_table_dump_size;
+
+ MALI_SUCCESS;
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_mmu_page_directory.h b/drivers/media/video/samsung/mali/common/mali_mmu_page_directory.h
new file mode 100644
index 0000000..628833a
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_mmu_page_directory.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
+#define __MALI_MMU_PAGE_DIRECTORY_H__
+
+#include "mali_osk.h"
+
+/**
+ * Size of an MMU page in bytes
+ */
+#define MALI_MMU_PAGE_SIZE 0x1000
+
+/*
+ * Size of the address space referenced by a page table page
+ */
+#define MALI_MMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
+
+/**
+ * Page directory index from address
+ * Calculates the page directory index from the given address
+ */
+#define MALI_MMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
+
+/**
+ * Page table index from address
+ * Calculates the page table index from the given address
+ */
+#define MALI_MMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
+
+/**
+ * Extract the memory address from an PDE/PTE entry
+ */
+#define MALI_MMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
+
+#define MALI_INVALID_PAGE ((u32)(~0))
+
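+/* Illustrative sketch (not part of the original patch): the two-level lookup splits
+ * a 32-bit Mali virtual address into a 10-bit page directory index, a 10-bit page
+ * table index and a 12-bit page offset. For the (hypothetical) address 0x12345678:
+ *
+ *     MALI_MMU_PDE_ENTRY(0x12345678)          == 0x048  (bits 31..22)
+ *     MALI_MMU_PTE_ENTRY(0x12345678)          == 0x345  (bits 21..12)
+ *     0x12345678 & (MALI_MMU_PAGE_SIZE - 1)   == 0x678  (bits 11..0)
+ */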
+/**
+ * Flags used in Mali MMU page directory and page table entries
+ */
+typedef enum mali_mmu_entry_flags
+{
+ MALI_MMU_FLAGS_PRESENT = 0x01,
+ MALI_MMU_FLAGS_READ_PERMISSION = 0x02,
+ MALI_MMU_FLAGS_WRITE_PERMISSION = 0x04,
+ MALI_MMU_FLAGS_OVERRIDE_CACHE = 0x8,
+ MALI_MMU_FLAGS_WRITE_CACHEABLE = 0x10,
+ MALI_MMU_FLAGS_WRITE_ALLOCATE = 0x20,
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE = 0x40,
+ MALI_MMU_FLAGS_READ_CACHEABLE = 0x80,
+ MALI_MMU_FLAGS_READ_ALLOCATE = 0x100,
+ MALI_MMU_FLAGS_MASK = 0x1FF,
+} mali_mmu_entry_flags;
+
+
+#define MALI_MMU_FLAGS_FORCE_GP_READ_ALLOCATE ( \
+MALI_MMU_FLAGS_PRESENT | \
+ MALI_MMU_FLAGS_READ_PERMISSION | \
+ MALI_MMU_FLAGS_WRITE_PERMISSION | \
+ MALI_MMU_FLAGS_OVERRIDE_CACHE | \
+ MALI_MMU_FLAGS_WRITE_CACHEABLE | \
+ MALI_MMU_FLAGS_WRITE_BUFFERABLE | \
+ MALI_MMU_FLAGS_READ_CACHEABLE | \
+ MALI_MMU_FLAGS_READ_ALLOCATE )
+
+
+struct mali_page_directory
+{
+ u32 page_directory; /**< Physical address of the memory session's page directory */
+ mali_io_address page_directory_mapped; /**< Pointer to the mapped version of the page directory into the kernel's address space */
+
+ mali_io_address page_entries_mapped[1024]; /**< Pointers to the page tables which exists in the page directory mapped into the kernel's address space */
+	u32 page_entries_usage_count[1024]; /**< Tracks usage count of the page table pages, so they can be released when the last reference is removed */
+};
+
+/* Map Mali virtual address space (i.e. ensure page tables exist for the virtual range) */
+_mali_osk_errcode_t mali_mmu_pagedir_map(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+_mali_osk_errcode_t mali_mmu_pagedir_unmap(struct mali_page_directory *pagedir, u32 mali_address, u32 size);
+
+/* Back virtual address space with actual pages. Assumes input is contiguous and 4k aligned. */
+void mali_mmu_pagedir_update(struct mali_page_directory *pagedir, u32 mali_address, u32 phys_address, u32 size, u32 cache_settings);
+
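+/* Illustrative sketch (not part of the original patch): callers first reserve page
+ * tables for a Mali virtual range with mali_mmu_pagedir_map(), then point the
+ * individual PTEs at physical pages with mali_mmu_pagedir_update(), here using the
+ * MALI_CACHE_STANDARD setting seen in mali_mmu_page_directory.c:
+ *
+ *     if (_MALI_OSK_ERR_OK == mali_mmu_pagedir_map(pagedir, mali_addr, size))
+ *     {
+ *         mali_mmu_pagedir_update(pagedir, mali_addr, phys_addr, size, MALI_CACHE_STANDARD);
+ *     }
+ */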
+u32 mali_page_directory_get_phys_address(struct mali_page_directory *pagedir, u32 index);
+
+u32 mali_allocate_empty_page(void);
+void mali_free_empty_page(u32 address);
+_mali_osk_errcode_t mali_create_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page);
+void mali_destroy_fault_flush_pages(u32 *page_directory, u32 *page_table, u32 *data_page);
+
+struct mali_page_directory *mali_mmu_pagedir_alloc(void);
+void mali_mmu_pagedir_free(struct mali_page_directory *pagedir);
+
+#endif /* __MALI_MMU_PAGE_DIRECTORY_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_osk.h b/drivers/media/video/samsung/mali/common/mali_osk.h
new file mode 100644
index 0000000..e32d15d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_osk.h
@@ -0,0 +1,1798 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk.h
+ * Defines the OS abstraction layer for the kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_H__
+#define __MALI_OSK_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup oskapi UDD OS Abstraction for Kernel-side (OSK) APIs
+ *
+ * @{
+ */
+
+/** @defgroup _mali_osk_miscellaneous OSK Miscellaneous functions, constants and types
+ * @{ */
+
+/* Define integer types used by OSK. Note: these currently clash with Linux so we only define them if not defined already */
+#ifndef __KERNEL__
+ typedef unsigned char u8;
+ typedef signed char s8;
+ typedef unsigned short u16;
+ typedef signed short s16;
+ typedef unsigned int u32;
+ typedef signed int s32;
+ typedef unsigned long long u64;
+ #define BITS_PER_LONG (sizeof(long)*8)
+#else
+ /* Ensure Linux types u32, etc. are defined */
+ #include <linux/types.h>
+#endif
+
+/** @brief Mali Boolean type which uses MALI_TRUE and MALI_FALSE
+ */
+ typedef unsigned long mali_bool;
+
+#ifndef MALI_TRUE
+ #define MALI_TRUE ((mali_bool)1)
+#endif
+
+#ifndef MALI_FALSE
+ #define MALI_FALSE ((mali_bool)0)
+#endif
+
+/**
+ * @brief OSK Error codes
+ *
+ * Each OS may use its own set of error codes, and may require that the
+ * User/Kernel interface take certain error code. This means that the common
+ * error codes need to be sufficiently rich to pass the correct error code
+ * through from the OSK to U/K layer, across all OSs.
+ *
+ * The result is that some error codes will appear redundant on some OSs.
+ * Under all OSs, the OSK layer must translate native OS error codes to
+ * _mali_osk_errcode_t codes. Similarly, the U/K layer must translate from
+ * _mali_osk_errcode_t codes to native OS error codes.
+ */
+typedef enum
+{
+ _MALI_OSK_ERR_OK = 0, /**< Success. */
+ _MALI_OSK_ERR_FAULT = -1, /**< General non-success */
+ _MALI_OSK_ERR_INVALID_FUNC = -2, /**< Invalid function requested through User/Kernel interface (e.g. bad IOCTL number) */
+ _MALI_OSK_ERR_INVALID_ARGS = -3, /**< Invalid arguments passed through User/Kernel interface */
+ _MALI_OSK_ERR_NOMEM = -4, /**< Insufficient memory */
+ _MALI_OSK_ERR_TIMEOUT = -5, /**< Timeout occurred */
+ _MALI_OSK_ERR_RESTARTSYSCALL = -6, /**< Special: On certain OSs, must report when an interruptable mutex is interrupted. Ignore otherwise. */
+ _MALI_OSK_ERR_ITEM_NOT_FOUND = -7, /**< Table Lookup failed */
+ _MALI_OSK_ERR_BUSY = -8, /**< Device/operation is busy. Try again later */
+ _MALI_OSK_ERR_UNSUPPORTED = -9, /**< Optional part of the interface used, and is unsupported */
+} _mali_osk_errcode_t;
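+/* Illustrative sketch (not part of the original patch): driver functions either
+ * return these codes directly or use the MALI_SUCCESS / MALI_ERROR() convenience
+ * macros seen throughout this patch (presumably from mali_kernel_common.h), e.g.
+ *
+ *     if (NULL == alloc)
+ *     {
+ *         MALI_ERROR(_MALI_OSK_ERR_NOMEM);
+ *     }
+ *     MALI_SUCCESS;
+ */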
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+
+/** @defgroup _mali_osk_irq OSK IRQ handling
+ * @{ */
+
+/** @brief Private type for IRQ handling objects */
+typedef struct _mali_osk_irq_t_struct _mali_osk_irq_t;
+
+/** @brief Optional function to trigger an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data */
+typedef void (*_mali_osk_irq_trigger_t)( void * arg );
+
+/** @brief Optional function to acknowledge an irq from a resource
+ *
+ * This function is implemented by the common layer to allow probing of a resource's IRQ.
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was successful, or a suitable _mali_osk_errcode_t on failure. */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_ack_t)( void * arg );
+
+/** @brief IRQ 'upper-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the initial handling of a
+ * resource's IRQ. This maps on to the concept of an ISR that does the minimum
+ * work necessary before handing off to an IST.
+ *
+ * The communication of the resource-specific data from the ISR to the IST is
+ * handled by the OSK implementation.
+ *
+ * On most systems, the IRQ upper-half handler executes in IRQ context.
+ * Therefore, the system may have restrictions about what can be done in this
+ * context.
+ *
+ * If an IRQ upper-half handler requires more work to be done than can be
+ * achieved in an IRQ context, then it may defer the work with
+ * _mali_osk_irq_schedulework(). Refer to \ref _mali_osk_irq_schedulework() for
+ * more information.
+ *
+ * @param arg resource-specific data
+ * @return _MALI_OSK_ERR_OK if the IRQ was correctly handled, or a suitable
+ * _mali_osk_errcode_t otherwise.
+ */
+typedef _mali_osk_errcode_t (*_mali_osk_irq_uhandler_t)( void * arg );
+
+/** @brief IRQ 'bottom-half' handler callback.
+ *
+ * This function is implemented by the common layer to do the deferred handling
+ * of a resource's IRQ. Usually, this work cannot be carried out in IRQ context
+ * by the IRQ upper-half handler.
+ *
+ * The IRQ bottom-half handler maps on to the concept of an IST that may
+ * execute some time after the actual IRQ has fired.
+ *
+ * All OSK-registered IRQ bottom-half handlers will be serialized, across all
+ * CPU-cores in the system.
+ *
+ * Refer to \ref _mali_osk_irq_schedulework() for more information on the
+ * IRQ work-queue, and the calling of the IRQ bottom-half handler.
+ *
+ * @param arg resource-specific data
+ */
+typedef void (*_mali_osk_irq_bhandler_t)( void * arg );
+/** @} */ /* end group _mali_osk_irq */
+
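+/* Illustrative sketch (not part of the original patch): a core registers these
+ * callbacks via _mali_osk_irq_init() and defers heavier work from the upper half
+ * to the bottom half, as mali_mmu.c in this patch does. The name my_irq_object is
+ * a placeholder for the _mali_osk_irq_t object returned by _mali_osk_irq_init().
+ *
+ *     static _mali_osk_errcode_t my_upper_half(void *data)
+ *     {
+ *         ... read and mask the HW interrupt source ...
+ *         _mali_osk_irq_schedulework(my_irq_object);
+ *         return _MALI_OSK_ERR_OK;   // or _MALI_OSK_ERR_FAULT if the IRQ was not ours
+ *     }
+ */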
+
+/** @defgroup _mali_osk_atomic OSK Atomic counters
+ * @{ */
+
+/** @brief Public type of atomic counters
+ *
+ * This is public for allocation on stack. On systems that support it, this is just a single 32-bit value.
+ * On others, it could be encapsulating an object stored elsewhere.
+ *
+ * Regardless of implementation, the \ref _mali_osk_atomic functions \b must be used
+ * for all accesses to the variable's value, even if atomicity is not required.
+ * Do not access u.val or u.obj directly.
+ */
+typedef struct
+{
+ union
+ {
+ u32 val;
+ void *obj;
+ } u;
+} _mali_osk_atomic_t;
+/** @} */ /* end group _mali_osk_atomic */
+
+
+/** @defgroup _mali_osk_lock OSK Mutual Exclusion Locks
+ * @{ */
+
+
+/** @brief OSK Mutual Exclusion Lock ordered list
+ *
+ * This lists the various types of locks in the system and is used to check
+ * that locks are taken in the correct order.
+ *
+ * Holding more than one lock of the same order at the same time is not
+ * allowed.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCK_ORDER_LAST = 0,
+
+ _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
+ _MALI_OSK_LOCK_ORDER_UTILIZATION,
+ _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+ _MALI_OSK_LOCK_ORDER_PROFILING,
+ _MALI_OSK_LOCK_ORDER_L2_COMMAND,
+ _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
+ _MALI_OSK_LOCK_ORDER_GROUP,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER,
+
+ _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
+ _MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
+ _MALI_OSK_LOCK_ORDER_MEM_INFO,
+ _MALI_OSK_LOCK_ORDER_MEM_SESSION,
+
+ _MALI_OSK_LOCK_ORDER_SESSIONS,
+
+ _MALI_OSK_LOCK_ORDER_FIRST
+} _mali_osk_lock_order_t;
+
+
+/** @brief OSK Mutual Exclusion Lock flags type
+ *
+ * Flags are supplied at the point where the Lock is initialized. Each flag can
+ * be combined with others using bitwise OR, '|'.
+ *
+ * The flags must be sufficiently rich to cope with all our OSs. This means
+ * that on some OSs, certain flags can be completely ignored. We define a
+ * number of terms that are significant across all OSs:
+ *
+ * - Sleeping/non-sleeping mutexes. Sleeping mutexes can block while waiting, and so
+ * schedule out the current thread. This is significant on OSs where there are
+ * situations in which the current thread must not be put to sleep. On OSs
+ * without this restriction, sleeping and non-sleeping mutexes can be treated
+ * as the same (if that is required).
+ * - Interruptable/non-interruptable mutexes. For sleeping mutexes, it may be
+ * possible for the sleep to be interrupted for a reason other than the thread
+ * being able to obtain the lock. OSs behaving in this way may provide a
+ * mechanism to control whether sleeping mutexes can be interrupted. On OSs
+ * that do not support the concept of interruption, \b or they do not support
+ * control of mutex interruption, then interruptable mutexes may be treated
+ * as non-interruptable.
+ *
+ * Some constraints apply to the lock type flags:
+ *
+ * - Spinlocks are, by nature, non-interruptable. Hence, they must always be
+ * combined with the NONINTERRUPTABLE flag, because it is meaningless to ask
+ * for an interruptable spinlock. For example, on certain OSs they should be
+ * used when the current thread must not sleep.
+ * - Reader/writer is an optimization hint, and any type of lock can be
+ * reader/writer. Since this is an optimization hint, the implementation need
+ * not respect this for any/all types of lock. For example, on certain OSs,
+ * there's no interruptable reader/writer mutex. If such a thing were requested
+ * on that OS, the fact that interruptable was requested takes priority over the
+ * reader/writer-ness, because reader/writer-ness is not necessary for correct
+ * operation.
+ * - Any lock can use the order parameter.
+ * - A onelock is an optimization hint specific to certain OSs. It can be
+ * specified when it is known that only one lock will be held by the thread,
+ * and so can provide faster mutual exclusion. This can be safely ignored if
+ * such optimization is not required/present.
+ *
+ * The absence of any flags (the value 0) results in a sleeping-mutex, which is interruptable.
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKFLAG_SPINLOCK = 0x1, /**< Specifically, don't sleep on those architectures that require it */
+ _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE = 0x2, /**< The mutex cannot be interrupted, e.g. delivery of signals on those architectures where this is required */
+ _MALI_OSK_LOCKFLAG_READERWRITER = 0x4, /**< Optimise for readers/writers */
+ _MALI_OSK_LOCKFLAG_ORDERED = 0x8, /**< Use the order parameter; otherwise use automatic ordering */
+ _MALI_OSK_LOCKFLAG_ONELOCK = 0x10, /**< Each thread can only hold one lock at a time */
+ _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ = 0x20, /**< IRQ version of spinlock */
+ /** @enum _mali_osk_lock_flags_t
+ *
+ * Flags from 0x10000--0x80000000 are RESERVED for User-mode */
+
+} _mali_osk_lock_flags_t;
+
+/** @brief Mutual Exclusion Lock Mode Optimization hint
+ *
+ * The lock mode is used to implement the read/write locking of locks specified
+ * as _MALI_OSK_LOCKFLAG_READERWRITER. In this case, the RO mode can be used
+ * to allow multiple concurrent readers, but no writers. The RW mode is used for
+ * writers, and so will wait for all readers to release the lock (if any present).
+ * Further readers and writers will wait until the writer releases the lock.
+ *
+ * The mode is purely an optimization hint: for example, it is permissible for
+ * all locks to behave in RW mode, regardless of that supplied.
+ *
+ * It is an error to attempt to use locks in anything other than RW mode when
+ * _MALI_OSK_LOCKFLAG_READERWRITER is not supplied.
+ *
+ */
+typedef enum
+{
+ _MALI_OSK_LOCKMODE_UNDEF = -1, /**< Undefined lock mode. For internal use only */
+ _MALI_OSK_LOCKMODE_RW = 0x0, /**< Read-write mode, default. All readers and writers are mutually-exclusive */
+ _MALI_OSK_LOCKMODE_RO, /**< Read-only mode, to support multiple concurrent readers, but mutual exclusion in the presence of writers. */
+ /** @enum _mali_osk_lock_mode_t
+ *
+ * Lock modes 0x40--0x7F are RESERVED for User-mode */
+} _mali_osk_lock_mode_t;
+
+/** @brief Private type for Mutual Exclusion lock objects */
+typedef struct _mali_osk_lock_t_struct _mali_osk_lock_t;
+
+#ifdef DEBUG
+/** @brief Macro for asserting that the current thread holds a given lock
+ */
+#define MALI_DEBUG_ASSERT_LOCK_HELD(l) MALI_DEBUG_ASSERT(_mali_osk_lock_get_owner(l) == _mali_osk_get_tid());
+
+/** @brief returns a lock's owner (thread id) if debugging is enabled
+ */
+u32 _mali_osk_lock_get_owner( _mali_osk_lock_t *lock );
+#endif
+
+/** @} */ /* end group _mali_osk_lock */
+
+/** @defgroup _mali_osk_low_level_memory OSK Low-level Memory Operations
+ * @{ */
+
+/**
+ * @brief Private data type for use in IO accesses to/from devices.
+ *
+ * This represents some range that is accessible from the device. Examples
+ * include:
+ * - Device Registers, which could be readable and/or writeable.
+ * - Memory that the device has access to, for storing configuration structures.
+ *
+ * Access to this range must be made through the _mali_osk_mem_ioread32() and
+ * _mali_osk_mem_iowrite32() functions.
+ */
+typedef struct _mali_io_address * mali_io_address;
+
+/** @defgroup _MALI_OSK_CPU_PAGE CPU Physical page size macros.
+ *
+ * The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The CPU Physical Page Size has been assumed to be the same as the Mali
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** CPU Page Order, as log to base 2 of the Page size. @see _MALI_OSK_CPU_PAGE_SIZE */
+#define _MALI_OSK_CPU_PAGE_ORDER ((u32)12)
+/** CPU Page Size, in bytes. */
+#define _MALI_OSK_CPU_PAGE_SIZE (((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER))
+/** CPU Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_CPU_PAGE_MASK (~((((u32)1) << (_MALI_OSK_CPU_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_CPU_PAGE */
+
+/** @defgroup _MALI_OSK_MALI_PAGE Mali Physical Page size macros
+ *
+ * Mali Physical page size macros. The order of the page size is supplied for
+ * ease of use by algorithms that might require it, since it is easier to know
+ * it ahead of time rather than calculating it.
+ *
+ * The Mali Page Mask macro masks off the lower bits of a physical address to
+ * give the start address of the page for that physical address.
+ *
+ * @note The Mali device driver code is designed for systems with 4KB page size.
+ * Changing these macros will not make the entire Mali device driver work with
+ * page sizes other than 4KB.
+ *
+ * @note The Mali Physical Page Size has been assumed to be the same as the CPU
+ * Physical Page Size.
+ *
+ * @{
+ */
+
+/** Mali Page Order, as log to base 2 of the Page size. @see _MALI_OSK_MALI_PAGE_SIZE */
+#define _MALI_OSK_MALI_PAGE_ORDER ((u32)12)
+/** Mali Page Size, in bytes. */
+#define _MALI_OSK_MALI_PAGE_SIZE (((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER))
+/** Mali Page Mask, which masks off the offset within a page */
+#define _MALI_OSK_MALI_PAGE_MASK (~((((u32)1) << (_MALI_OSK_MALI_PAGE_ORDER)) - ((u32)1)))
+/** @} */ /* end of group _MALI_OSK_MALI_PAGE*/
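+
+/* Usage sketch (added for illustration, not part of the original interface):
+ * splitting a hypothetical physical address into its page base and in-page
+ * offset, and rounding a hypothetical byte count up to whole pages, using the
+ * macros above.
+ *
+ *     u32 phys_addr = 0x80001234;                        hypothetical address
+ *     u32 page_base = phys_addr & _MALI_OSK_MALI_PAGE_MASK;      == 0x80001000
+ *     u32 page_off  = phys_addr - page_base;                     == 0x234
+ *     u32 size      = 4321;                              hypothetical byte count
+ *     u32 num_pages = (size + _MALI_OSK_MALI_PAGE_SIZE - 1)
+ *                     >> _MALI_OSK_MALI_PAGE_ORDER;              == 2
+ */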
+
+/** @brief flags for mapping a user-accessible memory range
+ *
+ * Where a function with prefix '_mali_osk_mem_mapregion' accepts flags as one
+ * of the function parameters, it will use one of these. These allow per-page
+ * control over mappings. Compare with the mali_memory_allocation_flag type,
+ * which acts over an entire range.
+ *
+ * These may be OR'd together with bitwise OR (|), but must be cast back into
+ * the type after OR'ing.
+ */
+typedef enum
+{
+ _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR = 0x1, /**< Physical address is OS Allocated */
+} _mali_osk_mem_mapregion_flags_t;
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+/** @defgroup _mali_osk_notification OSK Notification Queues
+ * @{ */
+
+/** @brief Private type for notification queue objects */
+typedef struct _mali_osk_notification_queue_t_struct _mali_osk_notification_queue_t;
+
+/** @brief Public notification data object type */
+typedef struct _mali_osk_notification_t_struct
+{
+ u32 notification_type; /**< The notification type */
+ u32 result_buffer_size; /**< Size of the result buffer to copy to user space */
+ void * result_buffer; /**< Buffer containing any type specific data */
+} _mali_osk_notification_t;
+
+/** @} */ /* end group _mali_osk_notification */
+
+
+/** @defgroup _mali_osk_timer OSK Timer Callbacks
+ * @{ */
+
+/** @brief Function to call when a timer expires
+ *
+ * When a timer expires, this function is called. Note that on many systems,
+ * a timer callback will be executed in IRQ context. Therefore, restrictions
+ * may apply on what can be done inside the timer callback.
+ *
+ * If a timer requires more work to be done than can be acheived in an IRQ
+ * context, then it may defer the work with a work-queue. For example, it may
+ * use \ref _mali_osk_irq_schedulework() to make use of the IRQ bottom-half handler
+ * to carry out the remaining work.
+ *
+ * Stopping the timer with \ref _mali_osk_timer_del() blocks on completion of
+ * the callback. Therefore, the callback may not obtain any mutexes also held
+ * by any callers of _mali_osk_timer_del(). Otherwise, a deadlock may occur.
+ *
+ * @param arg Function-specific data */
+typedef void (*_mali_osk_timer_callback_t)(void * arg );
+
+/** @brief Private type for Timer Callback Objects */
+typedef struct _mali_osk_timer_t_struct _mali_osk_timer_t;
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @addtogroup _mali_osk_list OSK Doubly-Linked Circular Lists
+ * @{ */
+
+/** @brief Public List objects.
+ *
+ * To use, add a _mali_osk_list_t member to the structure that may become part
+ * of a list. When traversing the _mali_osk_list_t objects, use the
+ * _MALI_OSK_CONTAINER_OF() macro to recover the structure from its
+ * _mali_osk_list_t member.
+ *
+ * Each structure may have multiple _mali_osk_list_t members, so that the
+ * structure is part of multiple lists. When traversing lists, ensure that the
+ * correct _mali_osk_list_t member is used, because type-checking will be
+ * lost by the compiler.
+ */
+typedef struct _mali_osk_list_s
+{
+ struct _mali_osk_list_s *next;
+ struct _mali_osk_list_s *prev;
+} _mali_osk_list_t;
+
+/** @brief Initialize a list to be a head of an empty list
+ * @param exp the list to initialize. */
+#define _MALI_OSK_INIT_LIST_HEAD(exp) _mali_osk_list_init(exp)
+
+/** @brief Define a list variable, which is uninitialized.
+ * @param exp the name of the variable that the list will be defined as. */
+#define _MALI_OSK_LIST_HEAD(exp) _mali_osk_list_t exp
+
+/** @brief Find the containing structure of another structure
+ *
+ * This is the reverse of the operation 'offsetof'. This means that the
+ * following condition is satisfied:
+ *
+ * ptr == _MALI_OSK_CONTAINER_OF( &ptr->member, type, member )
+ *
+ * When ptr is of type 'type'.
+ *
+ * Its purpose is to recover a larger structure that has wrapped a smaller one.
+ *
+ * @note no type or memory checking occurs to ensure that a wrapper structure
+ * does in fact exist, and that it is being recovered with respect to the
+ * correct member.
+ *
+ * @param ptr the pointer to the member that is contained within the larger
+ * structure
+ * @param type the type of the structure that contains the member
+ * @param member the name of the member in the structure that ptr points to.
+ * @return a pointer to a \a type object which contains \a member, as pointed
+ * to by \a ptr.
+ */
+#define _MALI_OSK_CONTAINER_OF(ptr, type, member) \
+ ((type *)( ((char *)ptr) - offsetof(type,member) ))
+
+/** @brief Find the containing structure of a list
+ *
+ * When traversing a list, this is used to recover the containing structure,
+ * given that it contains a _mali_osk_list_t member.
+ *
+ * Each list must be of structures of one type, and must link the same members
+ * together, otherwise it will not be possible to correctly recover the
+ * structures that the lists link.
+ *
+ * @note no type or memory checking occurs to ensure that a structure does in
+ * fact exist for the list entry, and that it is being recovered with respect
+ * to the correct list member.
+ *
+ * @param ptr the pointer to the _mali_osk_list_t member in this structure
+ * @param type the type of the structure that contains the member
+ * @param member the member of the structure that ptr points to.
+ * @return a pointer to a \a type object which contains the _mali_osk_list_t
+ * \a member, as pointed to by the _mali_osk_list_t \a *ptr.
+ */
+#define _MALI_OSK_LIST_ENTRY(ptr, type, member) \
+ _MALI_OSK_CONTAINER_OF(ptr, type, member)
+
+/** @brief Enumerate a list safely
+ *
+ * With this macro, lists can be enumerated in a 'safe' manner. That is,
+ * entries can be deleted from the list without causing an error during
+ * enumeration. To achieve this, a 'temporary' pointer is required, which must
+ * be provided to the macro.
+ *
+ * Use it like a 'for()', 'while()' or 'do()' construct, and so it must be
+ * followed by a statement or compound-statement which will be executed for
+ * each list entry.
+ *
+ * Upon loop completion, providing that an early out was not taken in the
+ * loop body, then it is guaranteed that &ptr->member == list, even if the loop
+ * body never executed.
+ *
+ * @param ptr a pointer to an object of type 'type', which points to the
+ * structure that contains the currently enumerated list entry.
+ * @param tmp a pointer to an object of type 'type', which must not be used
+ * inside the list-execution statement.
+ * @param list a pointer to a _mali_osk_list_t, from which enumeration will
+ * begin
+ * @param type the type of the structure that contains the _mali_osk_list_t
+ * member that is part of the list to be enumerated.
+ * @param member the _mali_osk_list_t member of the structure that is part of
+ * the list to be enumerated.
+ */
+#define _MALI_OSK_LIST_FOREACHENTRY(ptr, tmp, list, type, member) \
+ for (ptr = _MALI_OSK_LIST_ENTRY((list)->next, type, member), \
+ tmp = _MALI_OSK_LIST_ENTRY(ptr->member.next, type, member); \
+ &ptr->member != (list); \
+ ptr = tmp, tmp = _MALI_OSK_LIST_ENTRY(tmp->member.next, type, member))
+/** @} */ /* end group _mali_osk_list */
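+
+/* Usage sketch (added for illustration, not part of the original interface):
+ * a structure that is linked into a list, and a traversal using the 'safe'
+ * enumeration macro, under which the current entry may also be unlinked. The
+ * example_job type and example_count_jobs() helper are hypothetical; the list
+ * is assumed to have been populated elsewhere.
+ *
+ *     typedef struct example_job
+ *     {
+ *         u32 id;
+ *         _mali_osk_list_t list;          links this job into a queue
+ *     } example_job;
+ *
+ *     static u32 example_count_jobs( _mali_osk_list_t *queue )
+ *     {
+ *         example_job *job, *tmp;
+ *         u32 count = 0;
+ *         _MALI_OSK_LIST_FOREACHENTRY(job, tmp, queue, example_job, list)
+ *         {
+ *             count++;                    the entry could also be unlinked here
+ *         }
+ *         return count;
+ *     }
+ */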
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief The known resource types
+ *
+ * @note \b IMPORTANT: these must remain fixed, and only be extended. This is
+ * because not all systems use a header file for reading in their resources.
+ * The resources may instead come from a data file where these resources are
+ * 'hard-coded' in, because there's no easy way of transferring the enum values
+ * into such data files. E.g. the C-Pre-processor does \em not process enums.
+ */
+typedef enum _mali_osk_resource_type
+{
+ RESOURCE_TYPE_FIRST =0, /**< Duplicate resource marker for the first resource*/
+
+ MEMORY =0, /**< Physically contiguous memory block, not managed by the OS */
+ OS_MEMORY =1, /**< Memory managed by and shared with the OS */
+
+ MALI_PP =2, /**< Mali Pixel Processor core */
+ MALI450PP =2, /**< Compatibility option */
+ MALI400PP =2, /**< Compatibility option */
+ MALI300PP =2, /**< Compatibility option */
+ MALI200 =2, /**< Compatibility option */
+
+ MALI_GP =3, /**< Mali Geometry Processor core */
+ MALI450GP =3, /**< Compatibility option */
+ MALI400GP =3, /**< Compatibility option */
+ MALI300GP =3, /**< Compatibility option */
+ MALIGP2 =3, /**< Compatibility option */
+
+ MMU =4, /**< Mali MMU (Memory Management Unit) */
+
+ FPGA_FRAMEWORK =5, /**< Mali registers specific to FPGA implementations */
+
+ MALI_L2 =6, /**< Mali Level 2 cache core */
+ MALI450L2 =6, /**< Compatibility option */
+ MALI400L2 =6, /**< Compatibility option */
+ MALI300L2 =6, /**< Compatibility option */
+
+ MEM_VALIDATION =7, /**< External Memory Validator */
+
+	PMU =8, /**< Power Management Unit */
+
+ RESOURCE_TYPE_COUNT /**< The total number of known resources */
+} _mali_osk_resource_type_t;
+
+/** @brief resource description struct
+ *
+ * _mali_osk_resources_init() will enumerate objects of this type. Not all
+ * members have a valid meaning across all types.
+ *
+ * The mmu_id is used to group resources to a certain MMU, since there may be
+ * more than one MMU in the system, and each resource may be using a different
+ * MMU:
+ * - For MMU resources, the setting of mmu_id is a uniquely identifying number.
+ * - For Other resources, the setting of mmu_id determines which MMU the
+ * resource uses.
+ */
+typedef struct _mali_osk_resource
+{
+ _mali_osk_resource_type_t type; /**< type of the resource */
+ const char * description; /**< short description of the resource */
+ u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ s32 cpu_usage_adjust; /**< Offset added to the base address of the resource to arrive at the CPU physical address of the resource (if different from the Mali physical address) */
+ u32 size; /**< Size in bytes of the resource - either the size of its register range, or the size of the memory block. */
+ u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
+ u32 flags; /**< Resources-specific flags. */
+ u32 mmu_id; /**< Identifier for Mali MMU resources. */
+ u32 alloc_order; /**< Order in which MEMORY/OS_MEMORY resources are used */
+} _mali_osk_resource_t;
+/** @} */ /* end group _mali_osk_miscellaneous */
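+
+/* Usage sketch (added for illustration, not part of the original interface):
+ * what a single entry of a platform resource table might look like. The base
+ * address, IRQ number, description and mmu_id value are hypothetical
+ * placeholders; only the field names and the MALI_GP enum value come from the
+ * definitions above.
+ *
+ *     static _mali_osk_resource_t example_gp_resource =
+ *     {
+ *         .type             = MALI_GP,
+ *         .description      = "Example Mali GP",
+ *         .base             = 0x13001000,      hypothetical register base
+ *         .cpu_usage_adjust = 0,
+ *         .size             = 0x1000,
+ *         .irq              = 42,              hypothetical IRQ line
+ *         .flags            = 0,
+ *         .mmu_id           = 1,               assumes an MMU resource with this id
+ *         .alloc_order      = 0,
+ *     };
+ */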
+
+
+#include "mali_kernel_memory_engine.h" /* include for mali_memory_allocation and mali_physical_memory_allocation type */
+
+/** @addtogroup _mali_osk_irq
+ * @{ */
+
+/** @brief Fake IRQ number for testing purposes
+ */
+#define _MALI_OSK_IRQ_NUMBER_FAKE ((u32)0xFFFFFFF1)
+
+/** @brief PMM Virtual IRQ number
+ */
+#define _MALI_OSK_IRQ_NUMBER_PMM ((u32)0xFFFFFFF2)
+
+
+/** @brief Initialize IRQ handling for a resource
+ *
+ * The _mali_osk_irq_t returned must be written into the resource-specific data
+ * pointed to by data. This is so that the upper and lower handlers can call
+ * _mali_osk_irq_schedulework().
+ *
+ * @note The caller must ensure that the resource does not generate an
+ * interrupt after _mali_osk_irq_init() finishes, and before the
+ * _mali_osk_irq_t is written into the resource-specific data. Otherwise,
+ * the upper-half handler will fail to call _mali_osk_irq_schedulework().
+ *
+ * @param irqnum The IRQ number that the resource uses, as seen by the CPU.
+ * The value -1 has a special meaning which indicates the use of probing, and trigger_func and ack_func must be
+ * non-NULL.
+ * @param uhandler The upper-half handler, corresponding to a ISR handler for
+ * the resource
+ * @param bhandler The lower-half handler, corresponding to an IST handler for
+ * the resource
+ * @param trigger_func Optional: a function to trigger the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param ack_func Optional: a function to acknowledge the resource's irq, to
+ * probe for the interrupt. Use NULL if irqnum != -1.
+ * @param data resource-specific data, which will be passed to uhandler,
+ * bhandler and (if present) trigger_func and ack_func
+ * @param description textual description of the IRQ resource.
+ * @return on success, a pointer to a _mali_osk_irq_t object, which represents
+ * the IRQ handling on this resource. NULL on failure.
+ */
+_mali_osk_irq_t *_mali_osk_irq_init( u32 irqnum, _mali_osk_irq_uhandler_t uhandler, _mali_osk_irq_bhandler_t bhandler, _mali_osk_irq_trigger_t trigger_func, _mali_osk_irq_ack_t ack_func, void *data, const char *description );
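+
+/* Usage sketch (added for illustration, not part of the original interface):
+ * registering IRQ handling for a resource and writing the returned object into
+ * the resource-specific data, as the note above requires. The example_core
+ * type, its handlers and the IRQ number 42 are hypothetical; a real upper-half
+ * handler would normally also acknowledge/mask the interrupt at the device.
+ *
+ *     typedef struct example_core
+ *     {
+ *         _mali_osk_irq_t *irq;         written back after _mali_osk_irq_init()
+ *     } example_core;
+ *
+ *     static _mali_osk_errcode_t example_upper_half( void *arg )
+ *     {
+ *         example_core *core = (example_core *)arg;
+ *         _mali_osk_irq_schedulework(core->irq);      defer the real work
+ *         return _MALI_OSK_ERR_OK;
+ *     }
+ *
+ *     static void example_bottom_half( void *arg )
+ *     {
+ *         process the interrupt here, outside IRQ context
+ *     }
+ *
+ *     static mali_bool example_setup( example_core *core )
+ *     {
+ *         core->irq = _mali_osk_irq_init(42, example_upper_half, example_bottom_half,
+ *                                        NULL, NULL, core, "example core IRQ");
+ *         return (NULL != core->irq) ? MALI_TRUE : MALI_FALSE;
+ *     }
+ */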
+
+/** @brief Cause a queued, deferred call of the IRQ bottom-half.
+ *
+ * _mali_osk_irq_schedulework provides a mechanism for enqueuing deferred calls
+ * to the IRQ bottom-half handler. The queue is known as the IRQ work-queue.
+ * After calling _mali_osk_irq_schedulework(), the IRQ bottom-half handler will
+ * be scheduled to run at some point in the future.
+ *
+ * This is called by the IRQ upper-half to defer further processing of
+ * IRQ-related work to the IRQ bottom-half handler. This is necessary for work
+ * that cannot be done in an IRQ context by the IRQ upper-half handler. Timer
+ * callbacks also use this mechanism, because they are treated as though they
+ * operate in an IRQ context. Refer to \ref _mali_osk_timer_t for more
+ * information.
+ *
+ * Code that operates in a kernel-process context (with no IRQ context
+ * restrictions) may also enqueue deferred calls to the IRQ bottom-half. The
+ * advantage over direct calling is that deferred calling allows the caller and
+ * IRQ bottom half to hold the same mutex, with a guarantee that they will not
+ * deadlock just by using this mechanism.
+ *
+ * _mali_osk_irq_schedulework() places deferred call requests on a queue, to
+ * allow for more than one thread to make a deferred call. Therefore, if it is
+ * called 'K' times, then the IRQ bottom-half will be scheduled 'K' times too.
+ * 'K' is a number that is implementation-specific.
+ *
+ * _mali_osk_irq_schedulework() is guaranteed to not block on:
+ * - enqueuing a deferred call request.
+ * - the completion of the IRQ bottom-half handler.
+ *
+ * This is to prevent deadlock. For example, if _mali_osk_irq_schedulework()
+ * blocked, then it would cause a deadlock when the following two conditions
+ * hold:
+ * - The IRQ bottom-half callback (of type _mali_osk_irq_bhandler_t) locks
+ * a mutex
+ * - And, at the same time, the caller of _mali_osk_irq_schedulework() also
+ * holds the same mutex
+ *
+ * @note care must be taken to not overflow the queue that
+ * _mali_osk_irq_schedulework() operates on. Code must be structured to
+ * ensure that the number of requests made to the queue is bounded. Otherwise,
+ * IRQs will be lost.
+ *
+ * The queue that _mali_osk_irq_schedulework implements is a FIFO of N-writer,
+ * 1-reader type. The writers are the callers of _mali_osk_irq_schedulework
+ * (all OSK-registered IRQ upper-half handlers in the system, watchdog timers,
+ * callers from a Kernel-process context). The reader is a single thread that
+ * handles all OSK-registered IRQs.
+ *
+ * The consequence of the queue being a 1-reader type is that calling
+ * _mali_osk_irq_schedulework() on different _mali_osk_irq_t objects causes
+ * their IRQ bottom-halves to be serialized, across all CPU-cores in the
+ * system.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ bottom-half must begin processing.
+ */
+void _mali_osk_irq_schedulework( _mali_osk_irq_t *irq );
+
+/** @brief Terminate IRQ handling on a resource.
+ *
+ * This will disable the interrupt from the device, and then waits for the
+ * IRQ work-queue to finish the work that is currently in the queue. That is,
+ * for every deferred call currently in the IRQ work-queue, it waits for each
+ * of those to be processed by their respective IRQ bottom-half handler.
+ *
+ * This function is used to ensure that the bottom-half handler of the supplied
+ * IRQ object will not be running at the completion of this function call.
+ * However, the caller must ensure that no other sources could call the
+ * _mali_osk_irq_schedulework() on the same IRQ object. For example, the
+ * relevant timers must be stopped.
+ *
+ * @note While this function is being called, other OSK-registered IRQs in the
+ * system may enqueue work for their respective bottom-half handlers. This
+ * function will not wait for those entries in the work-queue to be flushed.
+ *
+ * Since this blocks on the completion of work in the IRQ work-queue, the
+ * caller of this function \b must \b not hold any mutexes that are taken by
+ * any OSK-registered IRQ bottom-half handler. To do so may cause a deadlock.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose IRQ handling is to be terminated.
+ */
+void _mali_osk_irq_term( _mali_osk_irq_t *irq );
+
+/** @brief Flush the IRQ work-queue.
+ *
+ * This flushes the work-queue used by the given IRQ object, so that work
+ * already queued on it is processed.
+ *
+ * @param irq a pointer to the _mali_osk_irq_t object corresponding to the
+ * resource whose work-queue is to be flushed.
+ */
+void _mali_osk_flush_workqueue( _mali_osk_irq_t *irq );
+
+/** @} */ /* end group _mali_osk_irq */
+
+
+/** @addtogroup _mali_osk_atomic
+ * @{ */
+
+/** @brief Decrement an atomic counter
+ *
+ * @note It is an error to decrement the counter beyond -(1<<23)
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_dec( _mali_osk_atomic_t *atom );
+
+/** @brief Decrement an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after decrement */
+u32 _mali_osk_atomic_dec_return( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter
+ *
+ * @note It is an error to increment the counter beyond (1<<23)-1
+ *
+ * @param atom pointer to an atomic counter */
+void _mali_osk_atomic_inc( _mali_osk_atomic_t *atom );
+
+/** @brief Increment an atomic counter, return new value
+ *
+ * @param atom pointer to an atomic counter
+ * @return The new value, after increment */
+u32 _mali_osk_atomic_inc_return( _mali_osk_atomic_t *atom );
+
+/** @brief Initialize an atomic counter
+ *
+ * @note the parameter required is a u32, and so signed integers should be
+ * cast to u32.
+ *
+ * @param atom pointer to an atomic counter
+ * @param val the value to initialize the atomic counter.
+ * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_atomic_init( _mali_osk_atomic_t *atom, u32 val );
+
+/** @brief Read a value from an atomic counter
+ *
+ * This can only be safely used to determine the value of the counter when it
+ * is guaranteed that other threads will not be modifying the counter. This
+ * makes its usefulness limited.
+ *
+ * @param atom pointer to an atomic counter
+ * @return the current value of the atomic counter
+ */
+u32 _mali_osk_atomic_read( _mali_osk_atomic_t *atom );
+
+/** @brief Terminate an atomic counter
+ *
+ * @param atom pointer to an atomic counter
+ */
+void _mali_osk_atomic_term( _mali_osk_atomic_t *atom );
+/** @} */ /* end group _mali_osk_atomic */
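+
+/* Usage sketch (added for illustration, not part of the original interface):
+ * a simple reference count built on the atomic counter API above. The
+ * example_* names are hypothetical.
+ *
+ *     static _mali_osk_atomic_t example_refcount;
+ *
+ *     static mali_bool example_refcount_setup( void )
+ *     {
+ *         return (_MALI_OSK_ERR_OK == _mali_osk_atomic_init(&example_refcount, 1))
+ *                ? MALI_TRUE : MALI_FALSE;
+ *     }
+ *
+ *     static void example_get( void )
+ *     {
+ *         _mali_osk_atomic_inc(&example_refcount);
+ *     }
+ *
+ *     static void example_put( void )
+ *     {
+ *         if (0 == _mali_osk_atomic_dec_return(&example_refcount))
+ *         {
+ *             release the protected object here, then:
+ *             _mali_osk_atomic_term(&example_refcount);
+ *         }
+ *     }
+ */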
+
+
+/** @defgroup _mali_osk_memory OSK Memory Allocation
+ * @{ */
+
+/** @brief Allocate zero-initialized memory.
+ *
+ * Returns a buffer capable of containing at least \a n elements of \a size
+ * bytes each. The buffer is initialized to zero.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * @param n Number of elements to allocate
+ * @param size Size of each element
+ * @return On success, the zero-initialized buffer allocated. NULL on failure
+ */
+void *_mali_osk_calloc( u32 n, u32 size );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * If there is a need for a bigger block of memory (16KB or bigger), then
+ * consider using _mali_osk_valloc() instead, as this function might
+ * map down to an OS function with size limitations.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_malloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_malloc() and _mali_osk_calloc()
+ * must be freed before the application exits. Otherwise,
+ * a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_free( void *ptr );
+
+/** @brief Allocate memory.
+ *
+ * Returns a buffer capable of containing at least \a size bytes. The
+ * contents of the buffer are undefined.
+ *
+ * This function is potentially slower than _mali_osk_malloc() and _mali_osk_calloc(),
+ * but supports bigger sizes.
+ *
+ * The buffer is suitably aligned for storage and subsequent access of every
+ * type that the compiler supports. Therefore, the pointer to the start of the
+ * buffer may be cast into any pointer type, and be subsequently accessed from
+ * such a pointer, without loss of information.
+ *
+ * When the buffer is no longer in use, it must be freed with _mali_osk_free().
+ * Failure to do so will cause a memory leak.
+ *
+ * @note Most toolchains supply memory allocation functions that meet the
+ * compiler's alignment requirements.
+ *
+ * Remember to free memory using _mali_osk_free().
+ * @param size Number of bytes to allocate
+ * @return On success, the buffer allocated. NULL on failure.
+ */
+void *_mali_osk_valloc( u32 size );
+
+/** @brief Free memory.
+ *
+ * Reclaims the buffer pointed to by the parameter \a ptr for the system.
+ * All memory returned from _mali_osk_valloc() must be freed before the
+ * application exits. Otherwise a memory leak will occur.
+ *
+ * Memory must be freed once. It is an error to free the same non-NULL pointer
+ * more than once.
+ *
+ * It is legal to free the NULL pointer.
+ *
+ * @param ptr Pointer to buffer to free
+ */
+void _mali_osk_vfree( void *ptr );
+
+/** @brief Copies memory.
+ *
+ * Copies the \a len bytes from the buffer pointed by the parameter \a src
+ * directly to the buffer pointed by \a dst.
+ *
+ * It is an error for \a src to overlap \a dst anywhere in \a len bytes.
+ *
+ * @param dst Pointer to the destination array where the content is to be
+ * copied.
+ * @param src Pointer to the source of data to be copied.
+ * @param len Number of bytes to copy.
+ * @return \a dst is always passed through unmodified.
+ */
+void *_mali_osk_memcpy( void *dst, const void *src, u32 len );
+
+/** @brief Fills memory.
+ *
+ * Sets the first \a n bytes of the block of memory pointed to by \a s to
+ * the specified value.
+ * @param s Pointer to the block of memory to fill.
+ * @param c Value to be set, passed as u32. Only the 8 Least Significant Bits (LSB)
+ * are used.
+ * @param n Number of bytes to be set to the value.
+ * @return \a s is always passed through unmodified
+ */
+void *_mali_osk_memset( void *s, u32 c, u32 n );
+/** @} */ /* end group _mali_osk_memory */
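+
+/* Usage sketch (added for illustration, not part of the original interface):
+ * typical pairing of the allocation calls above. The element count and the
+ * 64KB size are hypothetical; _mali_osk_valloc() is chosen for the large
+ * buffer following the size guidance in the function documentation.
+ *
+ *     u32 *table = _mali_osk_calloc(16, sizeof(u32));     small, zero-filled
+ *     void *big  = _mali_osk_valloc(64 * 1024);           large (>16KB) buffer
+ *
+ *     if (NULL != table && NULL != big)
+ *     {
+ *         _mali_osk_memset(big, 0, 64 * 1024);
+ *         use the buffers here
+ *     }
+ *
+ *     _mali_osk_vfree(big);          freeing NULL is legal, so no extra checks
+ *     _mali_osk_free(table);
+ */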
+
+
+/** @brief Checks the amount of memory allocated
+ *
+ * Checks that not more than \a max_allocated bytes are allocated.
+ *
+ * Some OSs bring up an interactive out-of-memory dialogue when the
+ * system runs out of memory. This can stall non-interactive
+ * apps (e.g. automated test runs). This function can be used to
+ * avoid triggering the OOM dialogue by keeping allocations
+ * within a certain limit.
+ *
+ * @return MALI_TRUE when \a max_allocated bytes are not in use yet. MALI_FALSE
+ * when at least \a max_allocated bytes are in use.
+ */
+mali_bool _mali_osk_mem_check_allocated( u32 max_allocated );
+
+/** @addtogroup _mali_osk_lock
+ * @{ */
+
+/** @brief Initialize a Mutual Exclusion Lock
+ *
+ * Locks are created in the signalled (unlocked) state.
+ *
+ * initial must be zero, since there is currently no means of expressing
+ * whether a reader/writer lock should be initially locked as a reader or
+ * writer. This would require some encoding to be used.
+ *
+ * 'Automatic' ordering means that locks must be obtained in the order that
+ * they were created. For all locks that can be held at the same time, they must
+ * either all provide the order parameter, or they all must use 'automatic'
+ * ordering - because there is no way of mixing 'automatic' and 'manual'
+ * ordering.
+ *
+ * @param flags flags combined with bitwise OR ('|'), or zero. There are
+ * restrictions on which flags can be combined, @see _mali_osk_lock_flags_t.
+ * @param initial For future expansion into semaphores. SBZ.
+ * @param order The locking order of the mutex. That is, locks obtained by the
+ * same thread must have been created with an increasing order parameter, for
+ * deadlock prevention. Setting to zero causes 'automatic' ordering to be used.
+ * @return On success, a pointer to a _mali_osk_lock_t object. NULL on failure.
+ */
+_mali_osk_lock_t *_mali_osk_lock_init( _mali_osk_lock_flags_t flags, u32 initial, u32 order );
+
+/** @brief Wait for a lock to be signalled (obtained)
+ *
+ * After a thread has successfully waited on the lock, the lock is obtained by
+ * the thread, and is marked as unsignalled. The thread releases the lock by
+ * signalling it.
+ *
+ * In the case of Reader/Writer locks, multiple readers can obtain a lock in
+ * the absence of writers, which is a performance optimization (providing that
+ * the readers never write to the protected resource).
+ *
+ * To prevent deadlock, locks must always be obtained in the same order.
+ *
+ * For locks marked as _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, it is a
+ * programming error for the function to exit without obtaining the lock. This
+ * means that the error code must only be checked for interruptible locks.
+ *
+ * @param lock the lock to wait upon (obtain).
+ * @param mode the mode in which the lock should be obtained. Unless the lock
+ * was created with _MALI_OSK_LOCKFLAG_READERWRITER, this must be
+ * _MALI_OSK_LOCKMODE_RW.
+ * @return On success, _MALI_OSK_ERR_OK. For interruptible locks, a suitable
+ * _mali_osk_errcode_t will be returned on failure, and the lock will not be
+ * obtained. In this case, the error code must be propagated up to the U/K
+ * interface.
+ */
+_mali_osk_errcode_t _mali_osk_lock_wait( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode);
+
+
+/** @brief Signal (release) a lock
+ *
+ * Locks may only be signalled by the thread that originally waited upon the
+ * lock.
+ *
+ * @note In the OSU, a flag exists to allow any thread to signal a
+ * lock. Such functionality is not present in the OSK.
+ *
+ * @param lock the lock to signal (release).
+ * @param mode the mode in which the lock should be obtained. This must match
+ * the mode in which the lock was waited upon.
+ */
+void _mali_osk_lock_signal( _mali_osk_lock_t *lock, _mali_osk_lock_mode_t mode );
+
+/** @brief Terminate a lock
+ *
+ * This terminates a lock and frees all associated resources.
+ *
+ * It is a programming error to terminate the lock when it is held (unsignalled)
+ * by a thread.
+ *
+ * @param lock the lock to terminate.
+ */
+void _mali_osk_lock_term( _mali_osk_lock_t *lock );
+/** @} */ /* end group _mali_osk_lock */
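+
+/* Usage sketch (added for illustration, not part of the original interface):
+ * creating and using a non-interruptable, ordered lock. The use of
+ * _MALI_OSK_LOCK_ORDER_SCHEDULER is only an example of passing an order value;
+ * a real caller picks the order matching its place in _mali_osk_lock_order_t.
+ *
+ *     _mali_osk_lock_t *lock = _mali_osk_lock_init(
+ *         _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE,
+ *         0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ *
+ *     if (NULL != lock)
+ *     {
+ *         the return value need not be checked for non-interruptable locks:
+ *         _mali_osk_lock_wait(lock, _MALI_OSK_LOCKMODE_RW);
+ *         touch the protected state here
+ *         _mali_osk_lock_signal(lock, _MALI_OSK_LOCKMODE_RW);
+ *
+ *         _mali_osk_lock_term(lock);
+ *     }
+ */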
+
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Issue a memory barrier
+ *
+ * This defines an arbitrary memory barrier operation, which forces an ordering constraint
+ * on memory read and write operations.
+ */
+void _mali_osk_mem_barrier( void );
+
+/** @brief Issue a write memory barrier
+ *
+ * This defines a write memory barrier operation which forces an ordering constraint
+ * on memory write operations.
+ */
+void _mali_osk_write_mem_barrier( void );
+
+/** @brief Map a physically contiguous region into kernel space
+ *
+ * This is primarily used for mapping in registers from resources, and Mali-MMU
+ * page tables. The mapping is only visible from kernel-space.
+ *
+ * Access has to go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @param phys CPU-physical base address of the memory to map in. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * map in
+ * @param description A textual description of the memory being mapped in.
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure.
+ */
+mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description );
+
+/** @brief Unmap a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_mapioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt an unmap twice
+ * - unmap only part of a range obtained through _mali_osk_mem_mapioregion
+ * - unmap more than the range obtained through _mali_osk_mem_mapioregion
+ * - unmap an address range that was not successfully mapped using
+ * _mali_osk_mem_mapioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in. This must be aligned to the system's page size, which is assumed
+ * to be 4K
+ * @param size The number of bytes that were originally mapped in.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Allocate and Map a physically contiguous region into kernel space
+ *
+ * This is used for allocating physically contiguous regions (such as Mali-MMU
+ * page tables) and mapping them into kernel space. The mapping is only
+ * visible from kernel-space.
+ *
+ * The alignment of the returned memory is guaranteed to be at least
+ * _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * Access must go through _mali_osk_mem_ioread32 and _mali_osk_mem_iowrite32
+ *
+ * @note This function is primarily to provide support for OSs that are
+ * incapable of separating the tasks 'allocate physically contiguous memory'
+ * and 'map it into kernel space'.
+ *
+ * @param[out] phys CPU-physical base address of memory that was allocated.
+ * (*phys) will be guaranteed to be aligned to at least
+ * _MALI_OSK_CPU_PAGE_SIZE on success.
+ *
+ * @param[in] size the number of bytes of physically contiguous memory to
+ * allocate. This must be a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return On success, a Mali IO address through which the mapped-in
+ * memory/registers can be accessed. NULL on failure, and (*phys) is unmodified.
+ */
+mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size );
+
+/** @brief Free a physically contiguous address range from kernel space.
+ *
+ * The address range should be one previously mapped in through
+ * _mali_osk_mem_allocioregion.
+ *
+ * It is a programming error to do (but not limited to) the following:
+ * - attempt a free twice on the same ioregion
+ * - free only part of a range obtained through _mali_osk_mem_allocioregion
+ * - free more than the range obtained through _mali_osk_mem_allocioregion
+ * - free an address range that was not successfully mapped using
+ * _mali_osk_mem_allocioregion
+ * - provide a mapping that does not map to phys.
+ *
+ * @param phys CPU-physical base address of the memory that was originally
+ * mapped in, which was aligned to _MALI_OSK_CPU_PAGE_SIZE.
+ * @param size The number of bytes that were originally mapped in, which was
+ * a multiple of _MALI_OSK_CPU_PAGE_SIZE.
+ * @param mapping The Mali IO address through which the mapping is
+ * accessed.
+ */
+void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address mapping );
+
+/** @brief Request a region of physically contiguous memory
+ *
+ * This is used to ensure exclusive access to a region of physically contiguous
+ * memory.
+ *
+ * It is acceptable to implement this as a stub. However, it is then the job
+ * of the System Integrator to ensure that no other device driver will be using
+ * the physical address ranges used by Mali, while the Mali device driver is
+ * loaded.
+ *
+ * @param phys CPU-physical base address of the memory to request. This must
+ * be aligned to the system's page size, which is assumed to be 4K.
+ * @param size the number of bytes of physically contiguous address space to
+ * request.
+ * @param description A textual description of the memory being requested.
+ * @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description );
+
+/** @brief Un-request a region of physically contiguous memory
+ *
+ * This is used to release a region of physically contiguous memory previously
+ * requested through _mali_osk_mem_reqregion, so that other device drivers may
+ * use it. This will be called at time of Mali device driver termination.
+ *
+ * It is a programming error to attempt to:
+ * - unrequest a region twice
+ * - unrequest only part of a range obtained through _mali_osk_mem_reqregion
+ * - unrequest more than the range obtained through _mali_osk_mem_reqregion
+ * - unrequest an address range that was not successfully requested using
+ * _mali_osk_mem_reqregion
+ *
+ * @param phys CPU-physical base address of the memory to un-request. This must
+ * be aligned to the system's page size, which is assumed to be 4K
+ * @param size the number of bytes of physically contiguous address space to
+ * un-request.
+ */
+void _mali_osk_mem_unreqregion( u32 phys, u32 size );
+
+/** @brief Read from a location currently mapped in through
+ * _mali_osk_mem_mapioregion
+ *
+ * This reads a 32-bit word from a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to read from memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to read from
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @return the 32-bit word from the specified location.
+ */
+u32 _mali_osk_mem_ioread32( volatile mali_io_address mapping, u32 offset );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion without memory barriers
+ *
+ * This writes a 32-bit word to a 32-bit aligned location without using a memory barrier.
+ * It is a programming error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param addr Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val );
+
+/** @brief Write to a location currently mapped in through
+ * _mali_osk_mem_mapioregion with write memory barrier
+ *
+ * This writes a 32-bit word to a 32-bit aligned location. It is a programming
+ * error to provide unaligned locations, or to write to memory that is not
+ * mapped in, or not mapped through either _mali_osk_mem_mapioregion() or
+ * _mali_osk_mem_allocioregion().
+ *
+ * @param mapping Mali IO address to write to
+ * @param offset Byte offset from the given IO address to operate on, must be a multiple of 4
+ * @param val the 32-bit word to write.
+ */
+void _mali_osk_mem_iowrite32( volatile mali_io_address mapping, u32 offset, u32 val );
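+
+/* Usage sketch (added for illustration, not part of the original interface):
+ * requesting and mapping a register region, then accessing it through the
+ * 32-bit accessors above. The base address, size and register offsets are
+ * hypothetical placeholders.
+ *
+ *     u32 base = 0x13001000;          hypothetical register base
+ *     u32 size = 0x1000;              hypothetical register range size
+ *
+ *     if (_MALI_OSK_ERR_OK == _mali_osk_mem_reqregion(base, size, "example regs"))
+ *     {
+ *         mali_io_address regs = _mali_osk_mem_mapioregion(base, size, "example regs");
+ *         if (NULL != regs)
+ *         {
+ *             _mali_osk_mem_iowrite32(regs, 0x10, 0x1);     write register at offset 0x10
+ *             (void)_mali_osk_mem_ioread32(regs, 0x14);     read register at offset 0x14
+ *             _mali_osk_mem_unmapioregion(base, size, regs);
+ *         }
+ *         _mali_osk_mem_unreqregion(base, size);
+ *     }
+ */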
+
+/** @brief Flush all CPU caches
+ *
+ * This should only be implemented if flushing of the cache is required for
+ * memory mapped in through _mali_osk_mem_mapregion.
+ */
+void _mali_osk_cache_flushall( void );
+
+/** @brief Flush any caches necessary for the CPU and MALI to have the same view of a range of uncached mapped memory
+ *
+ * This should only be implemented if your OS doesn't do a full cache flush (inner & outer)
+ * after allocating uncached mapped memory.
+ *
+ * Some OSs do not perform a full cache flush (including all outer caches) for uncached mapped memory.
+ * They zero the memory through a cached mapping, then flush the inner caches but not the outer caches.
+ * This is required for MALI to have the correct view of the memory.
+ */
+void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size );
+
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+/** @addtogroup _mali_osk_notification
+ *
+ * User space notification framework
+ *
+ * Communication with user space of asynchronous events is performed through a
+ * synchronous call to the \ref u_k_api.
+ *
+ * Since the events are asynchronous, the events have to be queued until a
+ * synchronous U/K API call can be made by user-space. A U/K API call might also
+ * be received before any event has happened. Therefore the notifications that the
+ * different subsystems want to send to user space have to be queued for later
+ * reception, or a U/K API call has to be blocked until an event has occurred.
+ *
+ * Typical uses of notifications are after jobs have run on the hardware, or
+ * when changes to the system are detected that need to be relayed to user
+ * space.
+ *
+ * After an event has occurred, user space has to be notified using some kind of
+ * message. The notification framework supports sending messages to waiting
+ * threads or queueing of messages until a U/K API call is made.
+ *
+ * The notification queue is a FIFO. There are no restrictions on the numbers
+ * of readers or writers in the queue.
+ *
+ * A message contains what user space needs to identify how to handle an
+ * event. This includes a type field and a possible type specific payload.
+ *
+ * A notification to user space is represented by a
+ * \ref _mali_osk_notification_t object. A sender gets hold of such an object
+ * using _mali_osk_notification_create(). The buffer given by the
+ * _mali_osk_notification_t::result_buffer field in the object is used to store
+ * any type specific data. The other fields are internal to the queue system
+ * and should not be touched.
+ *
+ * @{ */
+
+/** @brief Create a notification object
+ *
+ * Returns a notification object which can be added to the queue of
+ * notifications pending for user space transfer.
+ *
+ * The implementation will initialize all members of the
+ * \ref _mali_osk_notification_t object. In particular, the
+ * _mali_osk_notification_t::result_buffer member will be initialized to point
+ * to \a size bytes of storage, and that storage will be suitably aligned for
+ * storage of any structure. That is, the created buffer meets the same
+ * requirements as _mali_osk_malloc().
+ *
+ * The notification object must be deleted when not in use. Use
+ * _mali_osk_notification_delete() for deleting it.
+ *
+ * @note You \b must \b not call _mali_osk_free() on a \ref _mali_osk_notification_t
+ * object, or on a _mali_osk_notification_t::result_buffer. You must only use
+ * _mali_osk_notification_delete() to free the resources associated with a
+ * \ref _mali_osk_notification_t object.
+ *
+ * @param type The notification type
+ * @param size The size of the type specific buffer to send
+ * @return Pointer to a notification object with a suitable buffer, or NULL on error.
+ */
+_mali_osk_notification_t *_mali_osk_notification_create( u32 type, u32 size );
+
+/** @brief Delete a notification object
+ *
+ * This must be called to reclaim the resources of a notification object. This
+ * includes:
+ * - The _mali_osk_notification_t::result_buffer
+ * - The \ref _mali_osk_notification_t itself.
+ *
+ * A notification object \b must \b not be used after it has been deleted by
+ * _mali_osk_notification_delete().
+ *
+ * In addition, the notification object may not be deleted while it is in a
+ * queue. That is, if it has been placed on a queue with
+ * _mali_osk_notification_queue_send(), then it must not be deleted until
+ * it has been received by a call to _mali_osk_notification_queue_receive().
+ * Otherwise, the queue may be corrupted.
+ *
+ * @param object the notification object to delete.
+ */
+void _mali_osk_notification_delete( _mali_osk_notification_t *object );
+
+/** @brief Create a notification queue
+ *
+ * Creates a notification queue, which can be used to queue messages for user
+ * delivery and from which queued messages can be retrieved.
+ *
+ * The queue is a FIFO, and has no restrictions on the numbers of readers or
+ * writers.
+ *
+ * When the queue is no longer in use, it must be terminated with
+ * \ref _mali_osk_notification_queue_term(). Failure to do so will result in a
+ * memory leak.
+ *
+ * @return Pointer to a new notification queue or NULL on error.
+ */
+_mali_osk_notification_queue_t *_mali_osk_notification_queue_init( void );
+
+/** @brief Destroy a notification queue
+ *
+ * Destroys a notification queue and frees associated resources from the queue.
+ *
+ * A notification queue \b must \b not be destroyed in the following cases:
+ * - while there are \ref _mali_osk_notification_t objects in the queue.
+ * - while there are writers currently acting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_send() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_send() on the queue in the future.
+ * - while there are readers currently waiting upon the queue. That is, while
+ * a thread is currently calling \ref _mali_osk_notification_queue_receive() on
+ * the queue, or while a thread may call
+ * \ref _mali_osk_notification_queue_receive() on the queue in the future.
+ *
+ * Therefore, all \ref _mali_osk_notification_t objects must be flushed and
+ * deleted by the code that makes use of the notification queues, since only
+ * that code knows the structure of the _mali_osk_notification_t::result_buffer
+ * (even if it may only be a flat structure).
+ *
+ * @note Since the queue is a FIFO, the code using notification queues may
+ * create its own 'flush' type of notification, to assist in flushing the
+ * queue.
+ *
+ * Once the queue has been destroyed, it must not be used again.
+ *
+ * @param queue The queue to destroy
+ */
+void _mali_osk_notification_queue_term( _mali_osk_notification_queue_t *queue );
+
+/** @brief Schedule notification for delivery
+ *
+ * When a \ref _mali_osk_notification_t object has been created successfully
+ * and set up, it may be added to the queue of objects waiting for user space
+ * transfer.
+ *
+ * The sending will not block if the queue is full.
+ *
+ * A \ref _mali_osk_notification_t object \b must \b not be put on two different
+ * queues at the same time, or enqueued twice onto a single queue before
+ * reception. However, it is acceptable for it to be requeued \em after reception
+ * from a call to _mali_osk_notification_queue_receive(), even onto the same queue.
+ *
+ * Again, requeuing must also not enqueue onto two different queues at the same
+ * time, or enqueue onto the same queue twice before reception.
+ *
+ * @param queue The notification queue to add this notification to
+ * @param object The entry to add
+ */
+void _mali_osk_notification_queue_send( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t *object );
+
+#if MALI_STATE_TRACKING
+/** @brief Check whether a notification queue is empty
+ *
+ * Check if a notification queue is empty.
+ *
+ * @param queue The queue to check.
+ * @return MALI_TRUE if queue is empty, otherwise MALI_FALSE.
+ */
+mali_bool _mali_osk_notification_queue_is_empty( _mali_osk_notification_queue_t *queue );
+#endif
+
+/** @brief Receive a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the thread will sleep until one becomes ready.
+ * Therefore, notifications may not be received into an
+ * IRQ or 'atomic' context (that is, a context where sleeping is disallowed).
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_RESTARTSYSCALL if the sleep was interrupted.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_receive( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @brief Dequeues a notification from a queue
+ *
+ * Receives a single notification from the given queue.
+ *
+ * If no notifications are ready, the function call will return an error code.
+ *
+ * @param queue The queue to receive from
+ * @param result Pointer to storage of a pointer of type
+ * \ref _mali_osk_notification_t*. \a result will be written to such that the
+ * expression \a (*result) will evaluate to a pointer to a valid
+ * \ref _mali_osk_notification_t object, or NULL if none were received.
+ * @return _MALI_OSK_ERR_OK on success, _MALI_OSK_ERR_ITEM_NOT_FOUND if queue was empty.
+ */
+_mali_osk_errcode_t _mali_osk_notification_queue_dequeue( _mali_osk_notification_queue_t *queue, _mali_osk_notification_t **result );
+
+/** @} */ /* end group _mali_osk_notification */
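+
+/* Usage sketch (added for illustration, not part of the original interface):
+ * queueing an event for user space and consuming it. The notification type
+ * value (1) and the example_payload type are hypothetical; 'queue' is assumed
+ * to have been created earlier with _mali_osk_notification_queue_init().
+ *
+ *     typedef struct example_payload { u32 job_id; } example_payload;
+ *
+ *     Sender side:
+ *         _mali_osk_notification_t *n =
+ *             _mali_osk_notification_create(1, sizeof(example_payload));
+ *         if (NULL != n)
+ *         {
+ *             ((example_payload *)n->result_buffer)->job_id = 7;
+ *             _mali_osk_notification_queue_send(queue, n);
+ *         }
+ *
+ *     Receiver side (may sleep, so not usable from IRQ context):
+ *         _mali_osk_notification_t *out = NULL;
+ *         if (_MALI_OSK_ERR_OK == _mali_osk_notification_queue_receive(queue, &out))
+ *         {
+ *             copy out->result_buffer to user space here, then:
+ *             _mali_osk_notification_delete(out);
+ *         }
+ */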
+
+
+/** @addtogroup _mali_osk_timer
+ *
+ * Timers use the OS's representation of time, which is 'ticks'. This is to
+ * prevent aliasing problems between the internal timer time, and the time
+ * asked for.
+ *
+ * @{ */
+
+/** @brief Initialize a timer
+ *
+ * Allocates resources for a new timer, and initializes them. This does not
+ * start the timer.
+ *
+ * @return a pointer to the allocated timer object, or NULL on failure.
+ */
+_mali_osk_timer_t *_mali_osk_timer_init(void);
+
+/** @brief Start a timer
+ *
+ * It is an error to start a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * It is an error to use this to start an already started timer.
+ *
+ * The timer will expire in \a ticks_to_expire ticks, at which point, the
+ * callback function will be invoked with the callback-specific data,
+ * as registered by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to start
+ * @param ticks_to_expire the amount of time in ticks for the timer to run
+ * before triggering.
+ */
+void _mali_osk_timer_add( _mali_osk_timer_t *tim, u32 ticks_to_expire );
+
+/** @brief Modify a timer
+ *
+ * Set the absolute time at which a timer will expire, and start it if it is
+ * stopped. If \a expiry_tick is in the past (determined by
+ * _mali_osk_time_after() ), the timer fires immediately.
+ *
+ * It is an error to modify a timer without setting the callback via
+ * _mali_osk_timer_setcallback().
+ *
+ * The timer will expire at absolute time \a expiry_tick, at which point, the
+ * callback function will be invoked with the callback-specific data, as set
+ * by _mali_osk_timer_setcallback().
+ *
+ * @param tim the timer to modify, and start if necessary
+ * @param expiry_tick the \em absolute time in ticks at which this timer should
+ * trigger.
+ *
+ */
+void _mali_osk_timer_mod( _mali_osk_timer_t *tim, u32 expiry_tick);
+
+/** @brief Stop a timer, and block on its completion.
+ *
+ * Stop the timer. When the function returns, it is guaranteed that the timer's
+ * callback will not be running on any CPU core.
+ *
+ * Since stopping the timer blocks on completion of the callback, the callback
+ * must not acquire any mutexes that the caller holds. Otherwise, a deadlock
+ * will occur.
+ *
+ * @note While the callback itself is guaranteed to not be running, work
+ * enqueued on the IRQ work-queue by the timer (with
+ * \ref _mali_osk_irq_schedulework()) may still run. The timer callback and IRQ
+ * bottom-half handler must take this into account.
+ *
+ * It is legal to stop an already stopped timer.
+ *
+ * @param tim the timer to stop.
+ *
+ */
+void _mali_osk_timer_del( _mali_osk_timer_t *tim );
+
+/** @brief Set a timer's callback parameters.
+ *
+ * This must be called at least once before a timer is started/modified.
+ *
+ * After a timer has been stopped or expires, the callback remains set. This
+ * means that restarting the timer will call the same function with the same
+ * parameters on expiry.
+ *
+ * @param tim the timer to set callback on.
+ * @param callback Function to call when timer expires
+ * @param data Function-specific data to supply to the function on expiry.
+ */
+void _mali_osk_timer_setcallback( _mali_osk_timer_t *tim, _mali_osk_timer_callback_t callback, void *data );
+
+/** @brief Terminate a timer, and deallocate resources.
+ *
+ * The timer must first be stopped by calling _mali_osk_timer_del().
+ *
+ * It is a programming error for _mali_osk_timer_term() to be called on:
+ * - a timer that is currently running
+ * - a timer that is currently executing its callback.
+ *
+ * @param tim the timer to deallocate.
+ */
+void _mali_osk_timer_term( _mali_osk_timer_t *tim );
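+
+/* Illustrative usage sketch only (the callback name is hypothetical, and the
+ * callback is assumed to take a single void * argument as registered via
+ * _mali_osk_timer_setcallback()): a one-shot timeout built on the API above.
+ *
+ *   static void my_timeout(void *data)
+ *   {
+ *       // runs in timer (atomic) context; defer heavy work elsewhere
+ *   }
+ *
+ *   _mali_osk_timer_t *tim = _mali_osk_timer_init();
+ *   if (NULL != tim)
+ *   {
+ *       _mali_osk_timer_setcallback(tim, my_timeout, NULL);
+ *       _mali_osk_timer_add(tim, _mali_osk_time_mstoticks(100));
+ *       // ...
+ *       _mali_osk_timer_del(tim);   // stop and wait for the callback to finish
+ *       _mali_osk_timer_term(tim);  // only after the timer has been stopped
+ *   }
+ */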
+/** @} */ /* end group _mali_osk_timer */
+
+
+/** @defgroup _mali_osk_time OSK Time functions
+ *
+ * \ref _mali_osk_time functions use the OS's representation of time, which is
+ * 'ticks'. This is to prevent aliasing problems between the internal timer
+ * time and the time asked for.
+ *
+ * OS tick time is measured as a u32. The time stored in a u32 may either be
+ * an absolute time, or a time delta between two events. Whilst it is valid to
+ * use math operators to \em change the tick value represented as a u32, it
+ * is often only meaningful to do such operations on time deltas, rather than
+ * on absolute time. However, it is meaningful to add/subtract time deltas to
+ * absolute times.
+ *
+ * Conversion between tick time and milliseconds (ms) may not be lossless,
+ * and is \em implementation \em dependent.
+ *
+ * Code using OS time must take this into account, since:
+ * - a small OS time may (or may not) be rounded
+ * - a large time may (or may not) overflow
+ *
+ * @{ */
+
+/** @brief Return whether ticka occurs after tickb
+ *
+ * Some OSs handle tick 'rollover' specially, and so can be more robust against
+ * tick counters rolling-over. This function must therefore be called to
+ * determine if a time (in ticks) really occurs after another time (in ticks).
+ *
+ * @param ticka ticka
+ * @param tickb tickb
+ * @return non-zero if ticka represents a time that occurs after tickb.
+ * Zero otherwise.
+ */
+int _mali_osk_time_after( u32 ticka, u32 tickb );
+
+/** @brief Convert milliseconds to OS 'ticks'
+ *
+ * @param ms time interval in milliseconds
+ * @return the corresponding time interval in OS ticks.
+ */
+u32 _mali_osk_time_mstoticks( u32 ms );
+
+/** @brief Convert OS 'ticks' to milliseconds
+ *
+ * @param ticks time interval in OS ticks.
+ * @return the corresponding time interval in milliseconds
+ */
+u32 _mali_osk_time_tickstoms( u32 ticks );
+
+
+/** @brief Get the current time in OS 'ticks'.
+ * @return the current time in OS 'ticks'.
+ */
+u32 _mali_osk_time_tickcount( void );
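+
+/* Illustrative usage sketch only: building a timeout deadline from the
+ * conversion and comparison helpers above. work_done() is a hypothetical
+ * condition supplied by the caller.
+ *
+ *   u32 deadline = _mali_osk_time_tickcount() + _mali_osk_time_mstoticks(100);
+ *
+ *   while (!work_done())
+ *   {
+ *       if (_mali_osk_time_after(_mali_osk_time_tickcount(), deadline))
+ *       {
+ *           // timed out after roughly 100 ms
+ *           break;
+ *       }
+ *   }
+ */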
+
+/** @brief Cause a microsecond delay
+ *
+ * The delay will have microsecond resolution, and is necessary for correct
+ * operation of the driver. The delay is guaranteed to be \b at least \a usecs
+ * microseconds, and so may be (significantly) longer.
+ *
+ * This function may be implemented as a busy-wait, which is the most sensible
+ * implementation. On OSs where there are situations in which a thread must not
+ * sleep, it must be implemented as a busy-wait.
+ *
+ * @param usecs the number of microseconds to wait for.
+ */
+void _mali_osk_time_ubusydelay( u32 usecs );
+
+/** @brief Return the time in nanoseconds, since an arbitrary reference point.
+ *
+ * @return Time in nanoseconds
+ */
+u64 _mali_osk_time_get_ns( void );
+
+
+/** @} */ /* end group _mali_osk_time */
+
+/** @defgroup _mali_osk_math OSK Math
+ * @{ */
+
+/** @brief Count Leading Zeros (Little-endian)
+ *
+ * @note This function must be implemented to support the reference
+ * implementation of _mali_osk_find_first_zero_bit, as defined in
+ * mali_osk_bitops.h.
+ *
+ * @param val 32-bit word to count leading zeros on
+ * @return the number of leading zeros.
+ */
+u32 _mali_osk_clz( u32 val );
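+
+/* Illustrative fallback sketch only: a portable, loop-based implementation
+ * for platforms without a hardware count-leading-zeros instruction. Real
+ * ports will typically map this onto a compiler builtin or CPU instruction.
+ *
+ *   u32 _mali_osk_clz( u32 val )
+ *   {
+ *       u32 n = 32;
+ *       while (0 != val)
+ *       {
+ *           val >>= 1;
+ *           n--;    // one fewer leading zero for each set bit shifted out
+ *       }
+ *       return n;   // returns 32 for val == 0
+ *   }
+ */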
+/** @} */ /* end group _mali_osk_math */
+
+/** @defgroup _mali_osk_wait_queue OSK Wait Queue functionality
+ * @{ */
+/** @brief Private type for wait queue objects */
+typedef struct _mali_osk_wait_queue_t_struct _mali_osk_wait_queue_t;
+
+/** @brief Initialize an empty Wait Queue */
+_mali_osk_wait_queue_t* _mali_osk_wait_queue_init( void );
+
+/** @brief Sleep if condition is false
+ *
+ * @param queue the queue to use
+ * @param condition function pointer to a boolean function
+ *
+ * Puts the thread to sleep if the given \a condition function returns false. When
+ * asked to wake up again, the condition will be re-checked and the
+ * thread only woken up if the condition is now true.
+ */
+void _mali_osk_wait_queue_wait_event( _mali_osk_wait_queue_t *queue, mali_bool (*condition)(void) );
+
+/** @brief Wake up all threads in wait queue if their respective conditions are
+ * true
+ *
+ * @param queue the queue whose threads should be woken up
+ *
+ * Wake up all threads in wait queue \a queue whose condition is now true.
+ */
+void _mali_osk_wait_queue_wake_up( _mali_osk_wait_queue_t *queue );
+
+/** @brief terminate a wait queue
+ *
+ * @param queue the queue to terminate.
+ */
+void _mali_osk_wait_queue_term( _mali_osk_wait_queue_t *queue );
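+
+/* Illustrative usage sketch only (the condition function and the flag it
+ * reads are hypothetical):
+ *
+ *   static mali_bool job_is_done(void)
+ *   {
+ *       return job_done_flag;   // hypothetical flag set by another thread
+ *   }
+ *
+ *   // Waiting side: sleeps until job_is_done() returns MALI_TRUE
+ *   _mali_osk_wait_queue_wait_event(queue, job_is_done);
+ *
+ *   // Signalling side: set the flag first, then wake the waiters
+ *   job_done_flag = MALI_TRUE;
+ *   _mali_osk_wait_queue_wake_up(queue);
+ */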
+/** @} */ /* end group _mali_osk_wait_queue */
+
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Output a device driver debug message.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ */
+void _mali_osk_dbgmsg( const char *fmt, ... );
+
+/** @brief Print fmt into buf.
+ *
+ * The interpretation of \a fmt is the same as the \c format parameter in
+ * _mali_osu_vsnprintf().
+ *
+ * @param buf a pointer to the result buffer
+ * @param size the total number of bytes allowed to write to \a buf
+ * @param fmt a _mali_osu_vsnprintf() style format string
+ * @param ... a variable-number of parameters suitable for \a fmt
+ * @return The number of bytes written to \a buf
+ */
+u32 _mali_osk_snprintf( char *buf, u32 size, const char *fmt, ... );
+
+/** @brief Abnormal process abort.
+ *
+ * Terminates the calling process if this function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h.
+ *
+ * This function will never return - because to continue from a Debug assert
+ * could cause even more problems, and hinder debugging of the initial problem.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_abort(void);
+
+/** @brief Sets breakpoint at point where function is called.
+ *
+ * This function will be called from Debug assert-macros in mali_kernel_common.h,
+ * to assist in debugging. If debugging at this level is not required, then this
+ * function may be implemented as a stub.
+ *
+ * This function is only used in Debug builds, and is not used in Release builds.
+ */
+void _mali_osk_break(void);
+
+/** @brief Return an identifier for the calling process.
+ *
+ * @return Identifier for the calling process.
+ */
+u32 _mali_osk_get_pid(void);
+
+/** @brief Return an identifier for the calling thread.
+ *
+ * @return Identifier for the calling thread.
+ */
+u32 _mali_osk_get_tid(void);
+
+/** @brief Enable OS controlled runtime power management
+ */
+void _mali_osk_pm_dev_enable(void);
+
+/** @brief Tells the OS that the device is now idle
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_idle(void);
+
+/** @brief Tells the OS that the device is about to become active
+ */
+_mali_osk_errcode_t _mali_osk_pm_dev_activate(void);
+
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @} */ /* end group osuapi */
+
+/** @} */ /* end group uddapi */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#include "mali_osk_specific.h" /* include any per-os specifics */
+
+/* Check standard inlines */
+#ifndef MALI_STATIC_INLINE
+ #error MALI_STATIC_INLINE not defined on your OS
+#endif
+
+#ifndef MALI_NON_STATIC_INLINE
+ #error MALI_NON_STATIC_INLINE not defined on your OS
+#endif
+
+#endif /* __MALI_OSK_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_osk_bitops.h b/drivers/media/video/samsung/mali/common/mali_osk_bitops.h
new file mode 100644
index 0000000..ada1488
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_osk_bitops.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2010, 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_bitops.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_BITOPS_H__
+#define __MALI_OSK_BITOPS_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void _mali_internal_clear_bit( u32 bit, u32 *addr )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ MALI_DEBUG_ASSERT( NULL != addr );
+
+ (*addr) &= ~(1 << bit);
+}
+
+MALI_STATIC_INLINE void _mali_internal_set_bit( u32 bit, u32 *addr )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ MALI_DEBUG_ASSERT( NULL != addr );
+
+ (*addr) |= (1 << bit);
+}
+
+MALI_STATIC_INLINE u32 _mali_internal_test_bit( u32 bit, u32 value )
+{
+ MALI_DEBUG_ASSERT( bit < 32 );
+ return value & (1 << bit);
+}
+
+MALI_STATIC_INLINE int _mali_internal_find_first_zero_bit( u32 value )
+{
+ u32 inverted;
+ u32 negated;
+ u32 isolated;
+ u32 leading_zeros;
+
+ /* Begin with xxx...x0yyy...y, where ys are 1, number of ys is in range 0..31 */
+ inverted = ~value; /* zzz...z1000...0 */
+ /* Using count_trailing_zeros on inverted value -
+ * See ARM System Developers Guide for details of count_trailing_zeros */
+
+	/* Isolate the zero: it is preceded by a run of 1s, so add 1 to it */
+ negated = (u32)-inverted ; /* -a == ~a + 1 (mod 2^n) for n-bit numbers */
+ /* negated = xxx...x1000...0 */
+
+ isolated = negated & inverted ; /* xxx...x1000...0 & zzz...z1000...0, zs are ~xs */
+	/* And so the first zero bit is in the same position as the 1 == number of 1s that preceded it
+ * Note that the output is zero if value was all 1s */
+
+ leading_zeros = _mali_osk_clz( isolated );
+
+ return 31 - leading_zeros;
+}
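+
+/* Worked example (illustrative): value = 0x00000007 (bits 0..2 set).
+ *   inverted = 0xFFFFFFF8
+ *   negated  = 0x00000008
+ *   isolated = 0x00000008   (only the first zero bit remains set)
+ *   _mali_osk_clz(0x00000008) == 28, so the function returns 31 - 28 = 3,
+ *   which is the index of the first zero bit.
+ * If value is all 1s, isolated is 0, _mali_osk_clz returns 32 and the
+ * function returns -1, which callers treat as 'not found'.
+ */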
+
+
+/** @defgroup _mali_osk_bitops OSK Non-atomic Bit-operations
+ * @{ */
+
+/**
+ * These bit-operations do not work atomically, and so locks must be used if
+ * atomicity is required.
+ *
+ * Reference implementations for Little Endian are provided, and so it should
+ * not normally be necessary to re-implement these. Efficient bit-twiddling
+ * techniques are used where possible, implemented in portable C.
+ *
+ * Note that these reference implementations rely on _mali_osk_clz() being
+ * implemented.
+ */
+
+/** @brief Clear a bit in a sequence of 32-bit words
+ * @param nr bit number to clear, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_clear_nonatomic_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ _mali_internal_clear_bit( nr, addr );
+}
+
+/** @brief Set a bit in a sequence of 32-bit words
+ * @param nr bit number to set, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ */
+MALI_STATIC_INLINE void _mali_osk_set_nonatomic_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ _mali_internal_set_bit( nr, addr );
+}
+
+/** @brief Test a bit in a sequence of 32-bit words
+ * @param nr bit number to test, starting from the (Little-endian) least
+ * significant bit
+ * @param addr starting point for counting.
+ * @return zero if bit was clear, non-zero if set. Do not rely on the return
+ * value being related to the actual word under test.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_test_bit( u32 nr, u32 *addr )
+{
+ addr += nr >> 5; /* find the correct word */
+ nr = nr & ((1 << 5)-1); /* The bit number within the word */
+
+ return _mali_internal_test_bit( nr, *addr );
+}
+
+/* Return maxbit if not found */
+/** @brief Find the first zero bit in a sequence of 32-bit words
+ * @param addr starting point for search.
+ * @param maxbit the maximum number of bits to search
+ * @return the number of the first zero bit found, or maxbit if none were found
+ * in the specified range.
+ */
+MALI_STATIC_INLINE u32 _mali_osk_find_first_zero_bit( const u32 *addr, u32 maxbit )
+{
+ u32 total;
+
+ for ( total = 0; total < maxbit; total += 32, ++addr )
+ {
+ int result;
+ result = _mali_internal_find_first_zero_bit( *addr );
+
+ /* non-negative signifies the bit was found */
+ if ( result >= 0 )
+ {
+ total += (u32)result;
+ break;
+ }
+ }
+
+ /* Now check if we reached maxbit or above */
+ if ( total >= maxbit )
+ {
+ total = maxbit;
+ }
+
+ return total; /* either the found bit nr, or maxbit if not found */
+}
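+
+/* Illustrative usage sketch only: managing a small bitmap of 64 entries with
+ * the non-atomic helpers above (callers must provide their own locking).
+ *
+ *   u32 bitmap[2] = { 0, 0 };                      // 64 bits, all clear
+ *   u32 free_slot;
+ *
+ *   free_slot = _mali_osk_find_first_zero_bit(bitmap, 64);
+ *   if (free_slot < 64)
+ *   {
+ *       _mali_osk_set_nonatomic_bit(free_slot, bitmap);    // claim the slot
+ *   }
+ *
+ *   if (_mali_osk_test_bit(free_slot, bitmap))
+ *   {
+ *       _mali_osk_clear_nonatomic_bit(free_slot, bitmap);  // release it again
+ *   }
+ */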
+/** @} */ /* end group _mali_osk_bitops */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_BITOPS_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_osk_list.h b/drivers/media/video/samsung/mali/common/mali_osk_list.h
new file mode 100644
index 0000000..5987b0a
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_osk_list.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_list.h
+ * Implementation of the OS abstraction layer for the kernel device driver
+ */
+
+#ifndef __MALI_OSK_LIST_H__
+#define __MALI_OSK_LIST_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+MALI_STATIC_INLINE void __mali_osk_list_add(_mali_osk_list_t *new_entry, _mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = new_entry;
+ new_entry->next = next;
+ new_entry->prev = prev;
+ prev->next = new_entry;
+}
+
+MALI_STATIC_INLINE void __mali_osk_list_del(_mali_osk_list_t *prev, _mali_osk_list_t *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/** @addtogroup _mali_osk_list
+ * @{ */
+
+/** Reference implementations of Doubly-linked Circular Lists are provided.
+ * There is often no need to re-implement these.
+ *
+ * @note The implementation may differ subtly from any lists the OS provides.
+ * For this reason, these lists should not be mixed with OS-specific lists
+ * inside the OSK/UKK implementation. */
+
+/** @brief Initialize a list element.
+ *
+ * All list elements must be initialized before use.
+ *
+ * Do not use on any list element that is present in a list without using
+ * _mali_osk_list_del first, otherwise this will break the list.
+ *
+ * @param list the list element to initialize
+ */
+MALI_STATIC_INLINE void _mali_osk_list_init( _mali_osk_list_t *list )
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/** @brief Insert a single list element after an entry in a list
+ *
+ * As an example, if this is inserted at the head of a list, then this becomes
+ * the first element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the next
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_add( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list, list->next);
+}
+
+/** @brief Insert a single list element before an entry in a list
+ *
+ * As an example, if this is inserted at the head of a list, then this becomes
+ * the last element of the list.
+ *
+ * Do not use to move list elements from one list to another, as it will break
+ * the originating list.
+ *
+ * @param new_entry the list element to insert
+ * @param list the list in which to insert. The new element will be the previous
+ * entry in this list
+ */
+MALI_STATIC_INLINE void _mali_osk_list_addtail( _mali_osk_list_t *new_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_add(new_entry, list->prev, list);
+}
+
+/** @brief Remove a single element from a list
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be uninitialized, and so should not be traversed. It must be
+ * initialized before further use.
+ *
+ * @param list the list element to remove.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_del( _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(list->prev, list->next);
+}
+
+/** @brief Remove a single element from a list, and re-initialize it
+ *
+ * The element will no longer be present in the list. The removed list element
+ * will be re-initialized, and so can be used as normal.
+ *
+ * @param list the list element to remove and initialize.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_delinit( _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(list->prev, list->next);
+ _mali_osk_list_init(list);
+}
+
+/** @brief Determine whether a list is empty.
+ *
+ * An empty list is one that contains a single element that points to itself.
+ *
+ * @param list the list to check.
+ * @return non-zero if the list is empty, and zero otherwise.
+ */
+MALI_STATIC_INLINE int _mali_osk_list_empty( _mali_osk_list_t *list )
+{
+ return list->next == list;
+}
+
+/** @brief Move a list element from one list to another.
+ *
+ * The list element must be initialized.
+ *
+ * As an example, moving a list item to the head of a new list causes this item
+ * to be the first element in the new list.
+ *
+ * @param move_entry the list element to move
+ * @param list the new list into which the element will be inserted, as the next
+ * element in the list.
+ */
+MALI_STATIC_INLINE void _mali_osk_list_move( _mali_osk_list_t *move_entry, _mali_osk_list_t *list )
+{
+ __mali_osk_list_del(move_entry->prev, move_entry->next);
+ _mali_osk_list_add(move_entry, list);
+}
+
+/** @brief Join two lists
+ *
+ * Both lists must be initialized.
+ *
+ * Allows you to join a list into another list at a specific location
+ *
+ * @param list the new list to add
+ * @param at the location in a list to add the new list into
+ */
+MALI_STATIC_INLINE void _mali_osk_list_splice( _mali_osk_list_t *list, _mali_osk_list_t *at )
+{
+ if (!_mali_osk_list_empty(list))
+ {
+ /* insert all items from 'list' after 'at' */
+ _mali_osk_list_t *first = list->next;
+ _mali_osk_list_t *last = list->prev;
+ _mali_osk_list_t *split = at->next;
+
+ first->prev = at;
+ at->next = first;
+
+ last->next = split;
+ split->prev = last;
+ }
+}
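+
+/* Illustrative usage sketch only (the embedding struct is hypothetical):
+ * these lists are intrusive, so the _mali_osk_list_t is embedded in the
+ * element being queued.
+ *
+ *   struct my_job
+ *   {
+ *       _mali_osk_list_t list;   // link used to queue the job
+ *       u32 id;
+ *   };
+ *
+ *   _mali_osk_list_t queue;
+ *   struct my_job job;
+ *
+ *   _mali_osk_list_init(&queue);
+ *   _mali_osk_list_init(&job.list);
+ *   _mali_osk_list_addtail(&job.list, &queue);    // enqueue at the tail
+ *
+ *   if (!_mali_osk_list_empty(&queue))
+ *   {
+ *       _mali_osk_list_del(queue.next);           // dequeue from the head
+ *   }
+ */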
+/** @} */ /* end group _mali_osk_list */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_LIST_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_osk_mali.h b/drivers/media/video/samsung/mali/common/mali_osk_mali.h
new file mode 100644
index 0000000..427fcc8
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_osk_mali.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_osk_mali.h
+ * Defines the OS abstraction layer which is specific for the Mali kernel device driver (OSK)
+ */
+
+#ifndef __MALI_OSK_MALI_H__
+#define __MALI_OSK_MALI_H__
+
+#include <mali_osk.h>
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/** @addtogroup _mali_osk_miscellaneous
+ * @{ */
+
+/** @brief Read the Mali Resource configuration
+ *
+ * Populates a _mali_arch_resource_t array from configuration settings, which
+ * are stored in an OS-specific way.
+ *
+ * For example, these may be compiled in to a static structure, or read from
+ * the filesystem at startup.
+ *
+ * On failure, do not call _mali_osk_resources_term.
+ *
+ * @param arch_config a pointer to where the pointer to the resources will be stored
+ * @param num_resources the number of resources read
+ * @return _MALI_OSK_ERR_OK on success. _MALI_OSK_ERR_NOMEM on allocation
+ * error. For other failures, a suitable _mali_osk_errcode_t is returned.
+ */
+_mali_osk_errcode_t _mali_osk_resources_init( _mali_osk_resource_t **arch_config, u32 *num_resources );
+
+/** @brief Free resources allocated by _mali_osk_resources_init.
+ *
+ * Frees the _mali_arch_resource_t array allocated by _mali_osk_resources_init
+ *
+ * @param arch_config a pointer to the stored pointer to the resources
+ * @param num_resources the number of resources in the array
+ */
+void _mali_osk_resources_term( _mali_osk_resource_t **arch_config, u32 num_resources);
+/** @} */ /* end group _mali_osk_miscellaneous */
+
+/** @addtogroup _mali_osk_low_level_memory
+ * @{ */
+
+/** @brief Initialize a user-space accessible memory range
+ *
+ * This initializes a virtual address range such that it is reserved for the
+ * current process, but does not map any physical pages into this range.
+ *
+ * This function may initialize or adjust any members of the
+ * mali_memory_allocation \a descriptor supplied, before the physical pages are
+ * mapped in with _mali_osk_mem_mapregion_map().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The \a descriptor's process_addr_mapping_info member can be modified to
+ * allocate OS-specific information. Note that on input, this will be a
+ * ukk_private word from the U/K interface, as inserted by _mali_ukk_mem_mmap().
+ * This is used to pass information from the U/K interface to the OSK interface,
+ * if necessary. The precise usage of the process_addr_mapping_info member
+ * depends on the U/K implementation of _mali_ukk_mem_mmap().
+ *
+ * Therefore, the U/K implementation of _mali_ukk_mem_mmap() and the OSK
+ * implementation of _mali_osk_mem_mapregion_init() must agree on the meaning and
+ * usage of the ukk_private word and process_addr_mapping_info member.
+ *
+ * Refer to \ref u_k_api for more information on the U/K interface.
+ *
+ * On successful return, \a descriptor's mapping member will be correct for
+ * use with _mali_osk_mem_mapregion_term() and _mali_osk_mem_mapregion_map().
+ *
+ * @param descriptor the mali_memory_allocation to initialize.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor );
+
+/** @brief Terminate a user-space accessible memory range
+ *
+ * This terminates a virtual address range reserved in the current user process,
+ * where none, some or all of the virtual address ranges have mappings to
+ * physical pages.
+ *
+ * It will unmap any physical pages that had been mapped into a reserved
+ * virtual address range for the current process, and then releases the virtual
+ * address range. Any extra book-keeping information or resources allocated
+ * during _mali_osk_mem_mapregion_init() will also be released.
+ *
+ * The \a descriptor itself is not freed - this must be handled by the caller of
+ * _mali_osk_mem_mapregion_term().
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param descriptor the mali_memory_allocation to terminate.
+ */
+void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor );
+
+/** @brief Map physical pages into a user process's virtual address range
+ *
+ * This is used to map a number of physically contiguous pages into a
+ * user-process's virtual address range, which was previously reserved by a
+ * call to _mali_osk_mem_mapregion_init().
+ *
+ * This need not provide a mapping for the entire virtual address range
+ * reserved for \a descriptor - it may be used to map single pages per call.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * The function may supply \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC.
+ * In this case, \a size must be set to \ref _MALI_OSK_CPU_PAGE_SIZE, and the function
+ * will allocate the physical page itself. The physical address of the
+ * allocated page will be returned through \a phys_addr.
+ *
+ * It is an error to set \a size != \ref _MALI_OSK_CPU_PAGE_SIZE while
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC,
+ * since it is not always possible for OSs to support such a setting through this
+ * interface.
+ *
+ * @note \b IMPORTANT: This code must validate the input parameters. If the
+ * range defined by \a offset and \a size is outside the range allocated in
+ * \a descriptor, then this function \b MUST not attempt any mapping, and must
+ * instead return a suitable \ref _mali_osk_errcode_t \b failure code.
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor, and not the \a phys_addr parameter.
+ * It must be a multiple of \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in,out] phys_addr a pointer to the physical base address to begin the
+ * mapping from. If \a size == \ref _MALI_OSK_CPU_PAGE_SIZE and
+ * \a *phys_addr == \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, then this
+ * function will allocate the physical page itself, and return the
+ * physical address of the page through \a phys_addr, which will be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE. Otherwise, \a *phys_addr must be aligned to
+ * \ref _MALI_OSK_CPU_PAGE_SIZE, and is unmodified after the call.
+ * \a phys_addr is unaffected by the \a offset parameter.
+ *
+ * @param[in] size the number of bytes to map in. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise a _mali_osk_errcode_t value
+ * on failure
+ *
+ * @note could expand to use _mali_osk_mem_mapregion_flags_t instead of
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC, but note that we must
+ * also modify the mali process address manager in the mmu/memory engine code.
+ */
+_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size );
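+
+/* Illustrative call pattern only: asking the OS to allocate and map a single
+ * page at offset 0 of a previously initialized descriptor. The descriptor
+ * variable is assumed to come from earlier allocation code.
+ *
+ *   u32 phys_addr = MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC;
+ *
+ *   if (_MALI_OSK_ERR_OK == _mali_osk_mem_mapregion_map(descriptor, 0, &phys_addr, _MALI_OSK_CPU_PAGE_SIZE))
+ *   {
+ *       // phys_addr now holds the page-aligned physical address of the new page
+ *   }
+ */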
+
+
+/** @brief Unmap physical pages from a user process's virtual address range
+ *
+ * This is used to unmap a number of physically contiguous pages from a
+ * user-process's virtual address range, which were previously mapped by a
+ * call to _mali_osk_mem_mapregion_map(). If the range specified was allocated
+ * from OS memory, then that memory will be returned to the OS. Whilst pages
+ * will be mapped out, the Virtual address range remains reserved, and at the
+ * same base address.
+ *
+ * When this function is used to unmap pages from OS memory
+ * (_mali_osk_mem_mapregion_map() was called with *phys_addr ==
+ * \ref MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC), then the \a flags must
+ * include \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR. This is because
+ * it is not always easy for an OS implementation to discover whether the
+ * memory was OS allocated or not (and so, how it should release the memory).
+ *
+ * For this reason, only a range of pages of the same allocation type (all OS
+ * allocated, or none OS allocated) may be unmapped in one call. Multiple
+ * calls must be made if allocations of these different types exist across the
+ * entire region described by the \a descriptor.
+ *
+ * The function will always be called with MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE
+ * set in \a descriptor->flags. It is an error to call this function without
+ * setting this flag. Otherwise, \a descriptor->flags bits are reserved for
+ * future expansion
+ *
+ * @param[in,out] descriptor the mali_memory_allocation representing the
+ * user-process's virtual address range to map into.
+ *
+ * @param[in] offset the offset into the virtual address range. This is only added
+ * to the mapping member of the \a descriptor. \a offset must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] size the number of bytes to unmap. This must be a multiple of
+ * \ref _MALI_OSK_CPU_PAGE_SIZE.
+ *
+ * @param[in] flags specifies how the memory should be unmapped. For a range
+ * of pages that were originally OS allocated, this must have
+ * \ref _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR set.
+ */
+void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags );
+/** @} */ /* end group _mali_osk_low_level_memory */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_OSK_MALI_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_osk_profiling.h b/drivers/media/video/samsung/mali/common/mali_osk_profiling.h
new file mode 100644
index 0000000..fd9a8fb
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_osk_profiling.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_OSK_PROFILING_H__
+#define __MALI_OSK_PROFILING_H__
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+
+#if defined (CONFIG_TRACEPOINTS) && !MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+#include "mali_linux_trace.h"
+#endif /* CONFIG_TRACEPOINTS && !MALI_INTERNAL_TIMELINE_PROFILING_ENABLED */
+
+#include "mali_profiling_events.h"
+
+#define MALI_PROFILING_MAX_BUFFER_ENTRIES 1048576
+
+#define MALI_PROFILING_NO_HW_COUNTER ((u32)-1)
+
+/** @defgroup _mali_osk_profiling External profiling connectivity
+ * @{ */
+
+/**
+ * Initialize the profiling module.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start);
+
+/*
+ * Terminate the profiling module.
+ */
+void _mali_osk_profiling_term(void);
+
+/**
+ * Start recording profiling data
+ *
+ * The specified limit will determine how large the capture buffer is.
+ * MALI_PROFILING_MAX_BUFFER_ENTRIES determines the maximum size allowed by the device driver.
+ *
+ * @param limit The desired maximum number of events to record on input, the actual maximum on output.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit);
+
+/**
+ * Add a profiling event
+ *
+ * @param event_id The event identifier.
+ * @param data0 First data parameter, depending on event_id specified.
+ * @param data1 Second data parameter, depending on event_id specified.
+ * @param data2 Third data parameter, depending on event_id specified.
+ * @param data3 Fourth data parameter, depending on event_id specified.
+ * @param data4 Fifth data parameter, depending on event_id specified.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+#if defined (CONFIG_TRACEPOINTS) && !MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_add_event(event_id, data0, data1, data2, data3, data4) trace_mali_timeline_event((event_id), (data0), (data1), (data2), (data3), (data4))
+#else
+/* Internal profiling is handled like a plain function call */
+void _mali_osk_profiling_add_event(u32 event_id, u32 data0, u32 data1, u32 data2, u32 data3, u32 data4);
+#endif
+
+/**
+ * Report a hardware counter event.
+ *
+ * @param counter_id The ID of the counter.
+ * @param value The value of the counter.
+ */
+
+#if defined (CONFIG_TRACEPOINTS) && !MALI_INTERNAL_TIMELINE_PROFILING_ENABLED
+/* Call Linux tracepoint directly */
+#define _mali_osk_profiling_report_hw_counter(counter_id, value) trace_mali_hw_counter(counter_id, value)
+#else
+/* Internal profiling is handled like a plain function call */
+void _mali_osk_profiling_report_hw_counter(u32 counter_id, u32 value);
+#endif
+
+/**
+ * Report SW counters
+ *
+ * @param counters array of counter values
+ */
+void _mali_osk_profiling_report_sw_counters(u32 *counters);
+
+/**
+ * Stop recording profiling data
+ *
+ * @param count Returns the number of recorded events.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_stop(u32 * count);
+
+/**
+ * Retrieves the number of events that can be retrieved
+ *
+ * @return The number of recorded events that can be retrieved.
+ */
+u32 _mali_osk_profiling_get_count(void);
+
+/**
+ * Retrieve an event
+ *
+ * @param index Event index (start with 0 and continue until this function fails to retrieve all events)
+ * @param timestamp The timestamp for the retrieved event will be stored here.
+ * @param event_id The event ID for the retrieved event will be stored here.
+ * @param data The 5 data values for the retrieved event will be stored here.
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5]);
+
+/**
+ * Clear the recorded buffer.
+ *
+ * This is needed in order to start another recording.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_profiling_clear(void);
+
+/**
+ * Checks if a recording of profiling data is in progress
+ *
+ * @return MALI_TRUE if recording of profiling data is in progress, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_is_recording(void);
+
+/**
+ * Checks if profiling data is available for retrieval
+ *
+ * @return MALI_TRUE if profiling data is available, MALI_FALSE if not
+ */
+mali_bool _mali_osk_profiling_have_recording(void);
+
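+/* Illustrative usage sketch only: a minimal capture session using the API
+ * above. The event_id value below is hypothetical; real IDs come from
+ * mali_profiling_events.h.
+ *
+ *   u32 limit = MALI_PROFILING_MAX_BUFFER_ENTRIES;
+ *   u32 count = 0;
+ *
+ *   if (_MALI_OSK_ERR_OK == _mali_osk_profiling_start(&limit))
+ *   {
+ *       _mali_osk_profiling_add_event(event_id, 0, 0, 0, 0, 0);
+ *       _mali_osk_profiling_stop(&count);
+ *
+ *       // count events can now be read back with _mali_osk_profiling_get_event()
+ *       _mali_osk_profiling_clear();   // required before starting a new capture
+ *   }
+ */
+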
+/** @} */ /* end group _mali_osk_profiling */
+
+#endif /* MALI_TIMELINE_PROFILING_ENABLED */
+
+#endif /* __MALI_OSK_PROFILING_H__ */
+
+
diff --git a/drivers/media/video/samsung/mali/common/mali_pm.c b/drivers/media/video/samsung/mali/common/mali_pm.c
new file mode 100644
index 0000000..933e54e
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pm.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pm.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_gp_scheduler.h"
+#include "mali_pp_scheduler.h"
+#include "mali_platform.h"
+#include "mali_kernel_utilization.h"
+#include "mali_kernel_core.h"
+#include "mali_group.h"
+
+#define MALI_PM_LIGHT_SLEEP_TIMEOUT 1000
+
+enum mali_pm_scheme
+{
+ MALI_PM_SCHEME_DYNAMIC,
+ MALI_PM_SCHEME_OS_SUSPENDED,
+ MALI_PM_SCHEME_ALWAYS_ON
+};
+
+enum mali_pm_level
+{
+ MALI_PM_LEVEL_1_ON,
+ MALI_PM_LEVEL_2_STANDBY,
+ MALI_PM_LEVEL_3_LIGHT_SLEEP,
+ MALI_PM_LEVEL_4_DEEP_SLEEP
+};
+static _mali_osk_lock_t *mali_pm_lock_set_next_state;
+static _mali_osk_lock_t *mali_pm_lock_set_core_states;
+static _mali_osk_lock_t *mali_pm_lock_execute_state_change;
+static _mali_osk_irq_t *wq_irq;
+
+static _mali_osk_timer_t *idle_timer = NULL;
+static mali_bool idle_timer_running = MALI_FALSE;
+static u32 mali_pm_event_number = 0;
+
+static u32 num_active_gps = 0;
+static u32 num_active_pps = 0;
+
+static enum mali_pm_scheme current_scheme = MALI_PM_SCHEME_DYNAMIC;
+static enum mali_pm_level current_level = MALI_PM_LEVEL_1_ON;
+static enum mali_pm_level next_level_dynamic = MALI_PM_LEVEL_2_STANDBY; /* Should be the state we go to when we go out of MALI_PM_SCHEME_ALWAYS_ON during init */
+
+
+
+static _mali_osk_errcode_t mali_pm_upper_half(void *data);
+static void mali_pm_bottom_half(void *data);
+static void mali_pm_powerup(void);
+static void mali_pm_powerdown(mali_power_mode power_mode);
+
+static void timeout_light_sleep(void* arg);
+#if 0
+/* Deep sleep timeout not supported */
+static void timeout_deep_sleep(void* arg);
+#endif
+static u32 mali_pm_event_number_get(void);
+static void mali_pm_event(enum mali_pm_event pm_event, mali_bool schedule_work, u32 timer_time );
+
+_mali_osk_errcode_t mali_pm_initialize(void)
+{
+	mali_pm_lock_execute_state_change = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_PM_EXECUTE);
+
+ if (NULL != mali_pm_lock_execute_state_change )
+ {
+ mali_pm_lock_set_next_state = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ONELOCK| _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_LAST);
+
+ if (NULL != mali_pm_lock_set_next_state)
+ {
+ mali_pm_lock_set_core_states = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_PM_CORE_STATE);
+
+ if (NULL != mali_pm_lock_set_core_states)
+ {
+ idle_timer = _mali_osk_timer_init();
+ if (NULL != idle_timer)
+ {
+ wq_irq = _mali_osk_irq_init(_MALI_OSK_IRQ_NUMBER_PMM,
+ mali_pm_upper_half,
+ mali_pm_bottom_half,
+ NULL,
+ NULL,
+ (void *)NULL,
+ "Mali PM deferred work");
+ if (NULL != wq_irq)
+ {
+ if (_MALI_OSK_ERR_OK == mali_platform_init())
+ {
+#if MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ _mali_osk_pm_dev_enable();
+ mali_pm_powerup();
+#endif
+ return _MALI_OSK_ERR_OK;
+ }
+
+ _mali_osk_irq_term(wq_irq);
+ }
+
+ _mali_osk_timer_del(idle_timer);
+ _mali_osk_timer_term(idle_timer);
+ }
+ _mali_osk_lock_term(mali_pm_lock_set_core_states);
+ }
+ _mali_osk_lock_term(mali_pm_lock_set_next_state);
+ }
+ _mali_osk_lock_term(mali_pm_lock_execute_state_change);
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+void mali_pm_terminate(void)
+{
+ mali_platform_deinit();
+ _mali_osk_irq_term(wq_irq);
+ _mali_osk_timer_del(idle_timer);
+ _mali_osk_timer_term(idle_timer);
+ _mali_osk_lock_term(mali_pm_lock_execute_state_change);
+ _mali_osk_lock_term(mali_pm_lock_set_next_state);
+ _mali_osk_lock_term(mali_pm_lock_set_core_states);
+}
+
+
+inline void mali_pm_lock(void)
+{
+ _mali_osk_lock_wait(mali_pm_lock_set_next_state, _MALI_OSK_LOCKMODE_RW);
+}
+
+inline void mali_pm_unlock(void)
+{
+ _mali_osk_lock_signal(mali_pm_lock_set_next_state, _MALI_OSK_LOCKMODE_RW);
+}
+
+inline void mali_pm_execute_state_change_lock(void)
+{
+ _mali_osk_lock_wait(mali_pm_lock_execute_state_change,_MALI_OSK_LOCKMODE_RW);
+}
+
+inline void mali_pm_execute_state_change_unlock(void)
+{
+ _mali_osk_lock_signal(mali_pm_lock_execute_state_change, _MALI_OSK_LOCKMODE_RW);
+}
+
+static void mali_pm_powerup(void)
+{
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ MALI_DEBUG_PRINT(3, ("Mali PM: Setting GPU power mode to MALI_POWER_MODE_ON\n"));
+ mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+#else
+	/* Acquire our reference */
+ _mali_osk_pm_dev_activate();
+#endif
+ mali_group_power_on();
+}
+
+static void mali_pm_powerdown(mali_power_mode power_mode)
+{
+ if ( (MALI_PM_LEVEL_1_ON == current_level) || (MALI_PM_LEVEL_2_STANDBY == current_level) )
+ {
+ mali_group_power_off();
+ }
+
+#if !MALI_PMM_RUNTIME_JOB_CONTROL_ON
+ mali_platform_power_mode_change(power_mode);
+#else
+ _mali_osk_pm_dev_idle();
+
+ if (MALI_POWER_MODE_DEEP_SLEEP == power_mode)
+ {
+ mali_platform_power_mode_change(power_mode);
+ }
+#endif
+}
+
+mali_bool mali_pm_is_powered_on(void)
+{
+ mali_bool is_on = MALI_TRUE;
+
+ if( ! (MALI_PM_SCHEME_ALWAYS_ON == current_scheme || MALI_PM_SCHEME_DYNAMIC == current_scheme) )
+ {
+ is_on = MALI_FALSE;
+ }
+ else if ( ! (MALI_PM_LEVEL_1_ON == current_level || MALI_PM_LEVEL_2_STANDBY == current_level))
+ {
+ is_on = MALI_FALSE;
+ }
+ else if ( ! (MALI_PM_LEVEL_1_ON == next_level_dynamic || MALI_PM_LEVEL_2_STANDBY == next_level_dynamic))
+ {
+ is_on = MALI_FALSE;
+ }
+
+ return is_on;
+}
+
+MALI_DEBUG_CODE(
+static const char *state_as_string(enum mali_pm_level level)
+{
+ switch(level)
+ {
+ case MALI_PM_LEVEL_1_ON:
+ return "MALI_PM_LEVEL_1_ON";
+ case MALI_PM_LEVEL_2_STANDBY:
+ return "MALI_PM_LEVEL_2_STANDBY";
+ case MALI_PM_LEVEL_3_LIGHT_SLEEP:
+ return "MALI_PM_LEVEL_3_LIGHT_SLEEP";
+ case MALI_PM_LEVEL_4_DEEP_SLEEP:
+ return "MALI_PM_LEVEL_4_DEEP_SLEEP";
+ default:
+ return "UNKNOWN LEVEL";
+ }
+});
+
+/* This could be used from another thread (work queue), if we need that */
+static void mali_pm_process_next(void)
+{
+ enum mali_pm_level pm_level_to_set;
+
+ _mali_osk_lock_wait(mali_pm_lock_execute_state_change, _MALI_OSK_LOCKMODE_RW);
+
+ pm_level_to_set = current_level;
+
+ if (MALI_PM_SCHEME_DYNAMIC == current_scheme)
+ {
+ pm_level_to_set = next_level_dynamic;
+
+ MALI_DEBUG_PRINT(4, ("Mali PM: Dynamic scheme; Changing Mali GPU power state from %s to: %s\n", state_as_string(current_level), state_as_string(pm_level_to_set)));
+
+ if (current_level == pm_level_to_set)
+ {
+ goto end_function; /* early out, no change in power level */
+ }
+
+ /* Start timers according to new state, so we get STANDBY -> LIGHT_SLEEP -> DEEP_SLEEP */
+
+ if (MALI_TRUE == idle_timer_running)
+ {
+ /* There is an existing timeout, so delete it */
+ _mali_osk_timer_del(idle_timer);
+ idle_timer_running = MALI_FALSE;
+ }
+
+		/* Make sure that we turn on through the platform file,
+		   since it was turned OFF directly through the platform file.
+		   This might lead to a double turn-on, but the platform file supports that. */
+ if ( current_level == MALI_PM_LEVEL_4_DEEP_SLEEP)
+ {
+ mali_pm_powerup();
+ mali_kernel_core_wakeup();
+
+ }
+ if (MALI_PM_LEVEL_1_ON == pm_level_to_set)
+ {
+ if (MALI_PM_LEVEL_2_STANDBY != current_level)
+ {
+ /* We only need to do anything if we came from one of the sleeping states */
+ mali_pm_powerup();
+
+ /* Wake up Mali cores since we came from a sleep state */
+ mali_kernel_core_wakeup();
+ }
+ }
+ else if (MALI_PM_LEVEL_2_STANDBY == pm_level_to_set)
+ {
+ /* This is just an internal state, so we don't bother to report it to the platform file */
+ idle_timer_running = MALI_TRUE;
+ _mali_osk_timer_setcallback(idle_timer, timeout_light_sleep, (void*) mali_pm_event_number_get());
+ _mali_osk_timer_add(idle_timer, _mali_osk_time_mstoticks(MALI_PM_LIGHT_SLEEP_TIMEOUT));
+ }
+ else if (MALI_PM_LEVEL_3_LIGHT_SLEEP == pm_level_to_set)
+ {
+ mali_pm_powerdown(MALI_POWER_MODE_LIGHT_SLEEP);
+ }
+ else if (MALI_PM_LEVEL_4_DEEP_SLEEP == pm_level_to_set)
+ {
+ MALI_DEBUG_PRINT(2, ("Mali PM: Setting GPU power mode to MALI_POWER_MODE_DEEP_SLEEP\n"));
+ mali_pm_powerdown(MALI_POWER_MODE_DEEP_SLEEP);
+ }
+ }
+ else if (MALI_PM_SCHEME_OS_SUSPENDED == current_scheme)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PM: OS scheme; Changing Mali GPU power state from %s to: %s\n", state_as_string(current_level), state_as_string(MALI_PM_LEVEL_4_DEEP_SLEEP)));
+
+ pm_level_to_set = MALI_PM_LEVEL_4_DEEP_SLEEP;
+
+ if (current_level == pm_level_to_set)
+ {
+ goto end_function; /* early out, no change in power level */
+ }
+
+ /* Cancel any timers */
+ if (MALI_TRUE == idle_timer_running)
+ {
+ /* There is an existing timeout, so delete it */
+ _mali_osk_timer_del(idle_timer);
+ idle_timer_running = MALI_FALSE;
+ }
+
+ MALI_DEBUG_PRINT(2, ("Mali PM: Setting GPU power mode to MALI_POWER_MODE_DEEP_SLEEP\n"));
+ mali_pm_powerdown(MALI_POWER_MODE_DEEP_SLEEP);
+ next_level_dynamic = current_level;
+ }
+ else if (MALI_PM_SCHEME_ALWAYS_ON == current_scheme)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PM: Always on scheme; Changing Mali GPU power state from %s to: %s\n", state_as_string(current_level), state_as_string(MALI_PM_LEVEL_1_ON)));
+
+ pm_level_to_set = MALI_PM_LEVEL_1_ON;
+ if (current_level == pm_level_to_set)
+ {
+ goto end_function; /* early out, no change in power level */
+ }
+
+ MALI_DEBUG_PRINT(2, ("Mali PM: Setting GPU power mode to MALI_POWER_MODE_ON\n"));
+ mali_pm_powerup();
+ if (MALI_PM_LEVEL_2_STANDBY != current_level)
+ {
+ /* Wake up Mali cores since we came from a sleep state */
+ mali_kernel_core_wakeup();
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("MALI PM: Illegal scheme"));
+ }
+
+ current_level = pm_level_to_set;
+
+end_function:
+ _mali_osk_lock_signal(mali_pm_lock_execute_state_change, _MALI_OSK_LOCKMODE_RW);
+
+}
+
+void mali_pm_always_on(mali_bool enable)
+{
+ if (MALI_TRUE == enable)
+ {
+ /* The event is processed in current thread synchronously */
+ mali_pm_event(MALI_PM_EVENT_SCHEME_ALWAYS_ON, MALI_FALSE, 0 );
+ }
+ else
+ {
+ /* The event is processed in current thread synchronously */
+ mali_pm_event(MALI_PM_EVENT_SCHEME_DYNAMIC_CONTROLL, MALI_FALSE, 0 );
+ }
+}
+
+static _mali_osk_errcode_t mali_pm_upper_half(void *data)
+{
+ /* not used */
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_pm_bottom_half(void *data)
+{
+ mali_pm_process_next();
+}
+
+static u32 mali_pm_event_number_get(void)
+{
+ u32 retval;
+
+ mali_pm_lock(); /* spinlock: mali_pm_lock_set_next_state */
+ retval = ++mali_pm_event_number;
+ if (0==retval ) retval = ++mali_pm_event_number;
+ mali_pm_unlock();
+
+ return retval;
+}
+
+static void mali_pm_event(enum mali_pm_event pm_event, mali_bool schedule_work, u32 timer_time )
+{
+ mali_pm_lock(); /* spinlock: mali_pm_lock_set_next_state */
+ /* Only timer events should set this variable, all other events must set it to zero. */
+ if ( 0 != timer_time )
+ {
+ MALI_DEBUG_ASSERT( (pm_event==MALI_PM_EVENT_TIMER_LIGHT_SLEEP) || (pm_event==MALI_PM_EVENT_TIMER_DEEP_SLEEP) );
+ if ( mali_pm_event_number != timer_time )
+ {
+			/* In this case newer events have been processed since the timer event was set up,
+			   so we always ignore the timer event. */
+ mali_pm_unlock();
+ return;
+ }
+ }
+ else
+ {
+ /* Delete possible ongoing timers
+ if ( (MALI_PM_LEVEL_2_STANDBY==current_level) || (MALI_PM_LEVEL_3_LIGHT_SLEEP==current_level) )
+ {
+ _mali_osk_timer_del(idle_timer);
+ }
+ */
+ }
+ mali_pm_event_number++;
+ switch (pm_event)
+ {
+ case MALI_PM_EVENT_CORES_WORKING:
+ next_level_dynamic = MALI_PM_LEVEL_1_ON;
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ break;
+ case MALI_PM_EVENT_CORES_IDLE:
+ next_level_dynamic = MALI_PM_LEVEL_2_STANDBY;
+ /*MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );*/
+ break;
+ case MALI_PM_EVENT_TIMER_LIGHT_SLEEP:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_ALWAYS_ON != current_scheme );
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ next_level_dynamic = MALI_PM_LEVEL_3_LIGHT_SLEEP;
+ break;
+ case MALI_PM_EVENT_TIMER_DEEP_SLEEP:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_ALWAYS_ON != current_scheme );
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ next_level_dynamic = MALI_PM_LEVEL_4_DEEP_SLEEP;
+ break;
+ case MALI_PM_EVENT_OS_SUSPEND:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_ALWAYS_ON != current_scheme );
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ current_scheme = MALI_PM_SCHEME_OS_SUSPENDED;
+ next_level_dynamic = MALI_PM_LEVEL_4_DEEP_SLEEP; /* Dynamic scheme will go into level when we are resumed */
+ break;
+ case MALI_PM_EVENT_OS_RESUME:
+ MALI_DEBUG_ASSERT(MALI_PM_SCHEME_OS_SUSPENDED == current_scheme );
+ current_scheme = MALI_PM_SCHEME_DYNAMIC;
+ break;
+ case MALI_PM_EVENT_SCHEME_ALWAYS_ON:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_OS_SUSPENDED != current_scheme );
+ current_scheme = MALI_PM_SCHEME_ALWAYS_ON;
+ break;
+ case MALI_PM_EVENT_SCHEME_DYNAMIC_CONTROLL:
+ MALI_DEBUG_ASSERT( MALI_PM_SCHEME_ALWAYS_ON == current_scheme || MALI_PM_SCHEME_DYNAMIC == current_scheme );
+ current_scheme = MALI_PM_SCHEME_DYNAMIC;
+ break;
+ default:
+ MALI_DEBUG_PRINT_ERROR(("Unknown next state."));
+ mali_pm_unlock();
+ return;
+ }
+ mali_pm_unlock();
+
+ if (MALI_TRUE == schedule_work)
+ {
+ _mali_osk_irq_schedulework(wq_irq);
+ }
+ else
+ {
+ mali_pm_process_next();
+ }
+}
+
+static void timeout_light_sleep(void* arg)
+{
+	/* State change only if no newer power events have happened since the time in arg.
+	   Actual work will be scheduled on the worker thread. */
+ mali_pm_event(MALI_PM_EVENT_TIMER_LIGHT_SLEEP, MALI_TRUE, (u32) arg);
+}
+
+void mali_pm_core_event(enum mali_core_event core_event)
+{
+ mali_bool transition_working = MALI_FALSE;
+ mali_bool transition_idle = MALI_FALSE;
+
+ _mali_osk_lock_wait(mali_pm_lock_set_core_states, _MALI_OSK_LOCKMODE_RW);
+
+ switch (core_event)
+ {
+ case MALI_CORE_EVENT_GP_START:
+ if (num_active_pps + num_active_gps == 0)
+ {
+ transition_working = MALI_TRUE;
+ }
+ num_active_gps++;
+ break;
+ case MALI_CORE_EVENT_GP_STOP:
+ if (num_active_pps + num_active_gps == 1)
+ {
+ transition_idle = MALI_TRUE;
+ }
+ num_active_gps--;
+ break;
+ case MALI_CORE_EVENT_PP_START:
+ if (num_active_pps + num_active_gps == 0)
+ {
+ transition_working = MALI_TRUE;
+ }
+ num_active_pps++;
+ break;
+ case MALI_CORE_EVENT_PP_STOP:
+ if (num_active_pps + num_active_gps == 1)
+ {
+ transition_idle = MALI_TRUE;
+ }
+ num_active_pps--;
+ break;
+ }
+
+ if (transition_working == MALI_TRUE)
+ {
+#ifdef CONFIG_MALI400_GPU_UTILIZATION
+ mali_utilization_core_start(_mali_osk_time_get_ns());
+#endif
+ mali_pm_event(MALI_PM_EVENT_CORES_WORKING, MALI_FALSE, 0); /* process event in same thread */
+ }
+ else if (transition_idle == MALI_TRUE)
+ {
+#ifdef CONFIG_MALI400_GPU_UTILIZATION
+ mali_utilization_core_end(_mali_osk_time_get_ns());
+#endif
+ mali_pm_event(MALI_PM_EVENT_CORES_IDLE, MALI_FALSE, 0); /* process event in same thread */
+ }
+
+ _mali_osk_lock_signal(mali_pm_lock_set_core_states, _MALI_OSK_LOCKMODE_RW);
+}
+
+void mali_pm_os_suspend(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS suspending...\n"));
+
+ mali_gp_scheduler_suspend();
+ mali_pp_scheduler_suspend();
+ mali_pm_event(MALI_PM_EVENT_OS_SUSPEND, MALI_FALSE, 0); /* process event in same thread */
+
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS suspend completed\n"));
+}
+
+void mali_pm_os_resume(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS resuming...\n"));
+
+ mali_pm_event(MALI_PM_EVENT_OS_RESUME, MALI_FALSE, 0); /* process event in same thread */
+ mali_gp_scheduler_resume();
+ mali_pp_scheduler_resume();
+
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS resume completed\n"));
+}
+
+void mali_pm_runtime_suspend(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS runtime suspended\n"));
+ mali_platform_power_mode_change(MALI_POWER_MODE_LIGHT_SLEEP);
+}
+
+void mali_pm_runtime_resume(void)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PM: OS runtime resumed\n"));
+ mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_pm.h b/drivers/media/video/samsung/mali/common/mali_pm.h
new file mode 100644
index 0000000..d4ccfde
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pm.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PM_H__
+#define __MALI_PM_H__
+
+#include "mali_osk.h"
+
+enum mali_core_event
+{
+ MALI_CORE_EVENT_GP_START,
+ MALI_CORE_EVENT_GP_STOP,
+ MALI_CORE_EVENT_PP_START,
+ MALI_CORE_EVENT_PP_STOP
+};
+
+enum mali_pm_event
+{
+ MALI_PM_EVENT_CORES_WORKING,
+ MALI_PM_EVENT_CORES_IDLE,
+ MALI_PM_EVENT_TIMER_LIGHT_SLEEP,
+ MALI_PM_EVENT_TIMER_DEEP_SLEEP,
+ MALI_PM_EVENT_OS_SUSPEND,
+ MALI_PM_EVENT_OS_RESUME,
+ MALI_PM_EVENT_SCHEME_ALWAYS_ON,
+ MALI_PM_EVENT_SCHEME_DYNAMIC_CONTROLL,
+};
+
+_mali_osk_errcode_t mali_pm_initialize(void);
+void mali_pm_terminate(void);
+void mali_pm_always_on(mali_bool enable);
+
+void mali_pm_lock(void);
+void mali_pm_unlock(void);
+void mali_pm_execute_state_change_lock(void);
+
+void mali_pm_execute_state_change_unlock(void);
+
+mali_bool mali_pm_is_powered_on(void);
+
+void mali_pm_core_event(enum mali_core_event core_event);
+
+void mali_pm_os_suspend(void);
+void mali_pm_os_resume(void);
+void mali_pm_runtime_suspend(void);
+void mali_pm_runtime_resume(void);
+
+
+#endif /* __MALI_PM_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_pmu.c b/drivers/media/video/samsung/mali/common/mali_pmu.c
new file mode 100644
index 0000000..348b5dc
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pmu.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.c
+ * Mali driver functions for Mali 400 PMU hardware
+ */
+#include "mali_hw_core.h"
+#include "mali_pmu.h"
+#include "mali_pp.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static u32 mali_pmu_detect_mask(u32 number_of_pp_cores, u32 number_of_l2_caches);
+
+/** @brief Mali built-in PMU hardware info; the PMU hardware has knowledge of the cores' power mask
+ */
+struct mali_pmu_core
+{
+ struct mali_hw_core hw_core;
+ u32 mali_registered_cores_power_mask;
+};
+
+static struct mali_pmu_core *mali_global_pmu_core = NULL;
+
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x10, /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
+
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource, u32 number_of_pp_cores, u32 number_of_l2_caches)
+{
+ struct mali_pmu_core* pmu;
+
+ MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
+ MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
+
+ pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
+ if (NULL != pmu)
+ {
+ pmu->mali_registered_cores_power_mask = mali_pmu_detect_mask(number_of_pp_cores, number_of_l2_caches);
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE))
+ {
+ if (_MALI_OSK_ERR_OK == mali_pmu_reset(pmu))
+ {
+ mali_global_pmu_core = pmu;
+ return pmu;
+ }
+ mali_hw_core_delete(&pmu->hw_core);
+ }
+ _mali_osk_free(pmu);
+ }
+
+ return NULL;
+}
+
+void mali_pmu_delete(struct mali_pmu_core *pmu)
+{
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+
+ mali_hw_core_delete(&pmu->hw_core);
+ _mali_osk_free(pmu);
+ pmu = NULL;
+}
+
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
+{
+ /* Don't use interrupts - just poll status */
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_powerdown_all(struct mali_pmu_core *pmu)
+{
+ u32 stat;
+ u32 timeout;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT( pmu->mali_registered_cores_power_mask != 0 );
+ MALI_DEBUG_PRINT( 4, ("Mali PMU: power down (0x%08X)\n", pmu->mali_registered_cores_power_mask) );
+
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_DOWN, pmu->mali_registered_cores_power_mask);
+
+ /* Wait for cores to be powered down (100 x 100us = 100ms) */
+ timeout = 100;
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->mali_registered_cores_power_mask;
+ if( stat == pmu->mali_registered_cores_power_mask ) break; /* All cores we wanted are now asleep */
+ _mali_osk_time_ubusydelay(100);
+ timeout--;
+ } while( timeout > 0 );
+
+ if( timeout == 0 )
+ {
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pmu_powerup_all(struct mali_pmu_core *pmu)
+{
+ u32 stat;
+ u32 timeout;
+
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT( pmu->mali_registered_cores_power_mask != 0 ); /* Shouldn't be zero */
+ MALI_DEBUG_PRINT( 4, ("Mali PMU: power up (0x%08X)\n", pmu->mali_registered_cores_power_mask) );
+
+ mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, pmu->mali_registered_cores_power_mask);
+
+ /* Wait for cores to be powered up (100 x 100us = 100ms) */
+ timeout = 100;
+ do
+ {
+ /* Get status of sleeping cores */
+ stat = mali_hw_core_register_read(&pmu->hw_core,PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->mali_registered_cores_power_mask;
+ if( stat == 0 ) break; /* All cores we wanted are now awake */
+ _mali_osk_time_ubusydelay(100);
+ timeout--;
+ } while( timeout > 0 );
+
+ if( timeout == 0 )
+ {
+ return _MALI_OSK_ERR_TIMEOUT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+ return mali_global_pmu_core;
+}
+
+static u32 mali_pmu_detect_mask(u32 number_of_pp_cores, u32 number_of_l2_caches)
+{
+ u32 mask = 0;
+
+ if (number_of_l2_caches == 1)
+ {
+ /* Mali-300 or Mali-400 */
+ u32 i;
+
+ /* GP */
+ mask = 0x01;
+
+ /* L2 cache */
+ mask |= 0x01<<1;
+
+ /* Set bit for each PP core */
+ for (i = 0; i < number_of_pp_cores; i++)
+ {
+ mask |= 0x01<<(i+2);
+ }
+ }
+ else if (number_of_l2_caches > 1)
+ {
+ /* Mali-450 */
+
+ /* GP (including its L2 cache) */
+ mask = 0x01;
+
+ /* There is always at least one PP (including its L2 cache) */
+ mask |= 0x01<<1;
+
+ /* Additional PP cores in same L2 cache */
+ if (number_of_pp_cores >= 2)
+ {
+ mask |= 0x01<<2;
+ }
+
+ /* Additional PP cores in a third L2 cache */
+ if (number_of_pp_cores >= 5)
+ {
+ mask |= 0x01<<3;
+ }
+ }
+
+ MALI_DEBUG_PRINT(4, ("Mali PMU: Power mask is 0x%08X (%u + %u)\n", mask, number_of_pp_cores, number_of_l2_caches));
+
+ return mask;
+}
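As a worked example of the mask layout computed by mali_pmu_detect_mask() above, a single-L2 configuration with four PP cores (e.g. a Mali-400 MP4) yields:

/*
 * number_of_l2_caches == 1, number_of_pp_cores == 4:
 *   bit 0        GP core          -> 0x01
 *   bit 1        L2 cache         -> 0x02
 *   bits 2..5    PP cores 0..3    -> 0x3C
 *   resulting power mask           = 0x3F
 */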
diff --git a/drivers/media/video/samsung/mali/common/mali_pmu.h b/drivers/media/video/samsung/mali/common/mali_pmu.h
new file mode 100644
index 0000000..fd10c08
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pmu.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_pmu.h
+ * Mali driver functions for the Mali in-built PMU hardware
+ */
+
+#include "mali_osk.h"
+
+struct mali_pmu_core;
+
+/** @brief Initialisation of MALI PMU
+ *
+ * This is called from the entry point of the driver in order to create and initialise the PMU resource
+ *
+ * @param resource it will be a pointer to a PMU resource
+ * @param number_of_pp_cores Number of found PP resources in configuration
+ * @param number_of_l2_caches Number of found L2 cache resources in configuration
+ * @return The created PMU object, or NULL in case of failure.
+ */
+struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource, u32 number_of_pp_cores, u32 number_of_l2_caches);
+
+/** @brief It deallocates the PMU resource
+ *
+ * This is called on the exit of the driver to terminate the PMU resource
+ *
+ * @param pmu Pointer to PMU core object to delete
+ */
+void mali_pmu_delete(struct mali_pmu_core *pmu);
+
+/** @brief Reset PMU core
+ *
+ * @param pmu Pointer to PMU core object to reset
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+
+/** @brief MALI GPU power down using MALI in-built PMU
+ *
+ * called to power down all cores
+ *
+ * @param pmu Pointer to PMU core object to power down
+ * @return _MALI_OSK_ERR_OK on success; otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_powerdown_all(struct mali_pmu_core *pmu);
+
+
+/** @brief MALI GPU power up using MALI in-built PMU
+ *
+ * called to power up all cores
+ *
+ * @param pmu Pointer to PMU core object to power up
+ * @return _MALI_OSK_ERR_OK on success; otherwise, a suitable _mali_osk_errcode_t error.
+ */
+_mali_osk_errcode_t mali_pmu_powerup_all(struct mali_pmu_core *pmu);
+
+
+/** @brief Retrieves the Mali PMU core object (if any)
+ *
+ * @return The Mali PMU object, or NULL if no PMU exists.
+ */
+struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
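A minimal sketch of the lifecycle these declarations describe, assuming a PMU resource and core counts already discovered by the configuration code (the variable names are placeholders):

/* Sketch: pmu_resource, num_pp and num_l2 come from the resource configuration. */
struct mali_pmu_core *pmu = mali_pmu_create(&pmu_resource, num_pp, num_l2);
if (NULL != pmu)
{
	if (_MALI_OSK_ERR_OK == mali_pmu_powerup_all(pmu))
	{
		/* ... GPU cores are powered; run jobs ... */
		mali_pmu_powerdown_all(pmu);
	}
	mali_pmu_delete(pmu);
}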
diff --git a/drivers/media/video/samsung/mali/common/mali_pp.c b/drivers/media/video/samsung/mali/common/mali_pp.c
new file mode 100644
index 0000000..5549f82
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pp.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp.h"
+#include "mali_hw_core.h"
+#include "mali_group.h"
+#include "mali_osk.h"
+#include "regs/mali_200_regs.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#if MALI_TIMELINE_PROFILING_ENABLED
+#include "mali_osk_profiling.h"
+#endif
+
+/* See mali_gp.c file for description on how to handle the interrupt mask.
+ * This is how to do it on PP: mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ */
+
+#define MALI_MAX_NUMBER_OF_PP_CORES 8
+
+/**
+ * Definition of the PP core struct
+ * Used to track a PP core in the system.
+ */
+struct mali_pp_core
+{
+ struct mali_hw_core hw_core; /**< Common for all HW cores */
+ struct mali_group *group; /**< Parent group for this core */
+ _mali_osk_irq_t *irq; /**< IRQ handler */
+ u32 core_id; /**< Unique core ID */
+ struct mali_pp_job *running_job; /**< Current running (super) job */
+ u32 running_sub_job; /**< Current running sub job */
+ _mali_osk_timer_t *timeout_timer; /**< timeout timer for this core */
+ u32 timeout_job_id; /**< job id for the timed out job - relevant only if core_timed_out == MALI_TRUE */
+ mali_bool core_timed_out; /**< if MALI_TRUE, this pp core has timed out; if MALI_FALSE, no timeout on this pp core */
+ u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src0_used; /**< The selected performance counter 0 when a job is running */
+ u32 counter_src1_used; /**< The selected performance counter 1 when a job is running */
+};
+
+static struct mali_pp_core* mali_global_pp_cores[MALI_MAX_NUMBER_OF_PP_CORES];
+static u32 mali_global_num_pp_cores = 0;
+
+/* Interrupt handlers */
+static _mali_osk_errcode_t mali_pp_upper_half(void *data);
+static void mali_pp_bottom_half(void *data);
+static void mali_pp_irq_probe_trigger(void *data);
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data);
+static void mali_pp_post_process_job(struct mali_pp_core *core);
+static void mali_pp_timeout(void *data);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t *resource, struct mali_group *group)
+{
+ struct mali_pp_core* core = NULL;
+
+ MALI_DEBUG_PRINT(2, ("Mali PP: Creating Mali PP core: %s\n", resource->description));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Base address of PP core: 0x%x\n", resource->base));
+
+ if (mali_global_num_pp_cores >= MALI_MAX_NUMBER_OF_PP_CORES)
+ {
+ MALI_PRINT_ERROR(("Mali PP: Too many PP core objects created\n"));
+ return NULL;
+ }
+
+ core = _mali_osk_malloc(sizeof(struct mali_pp_core));
+ if (NULL != core)
+ {
+ core->group = group;
+ core->core_id = mali_global_num_pp_cores;
+ core->running_job = NULL;
+ core->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ core->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ core->counter_src0_used = MALI_HW_CORE_NO_COUNTER;
+ core->counter_src1_used = MALI_HW_CORE_NO_COUNTER;
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&core->hw_core, resource, MALI200_REG_SIZEOF_REGISTER_BANK))
+ {
+ _mali_osk_errcode_t ret;
+
+ mali_group_lock(group);
+ ret = mali_pp_reset(core);
+ mali_group_unlock(group);
+
+ if (_MALI_OSK_ERR_OK == ret)
+ {
+ /* Setup IRQ handlers (which will do IRQ probing if needed) */
+ core->irq = _mali_osk_irq_init(resource->irq,
+ mali_pp_upper_half,
+ mali_pp_bottom_half,
+ mali_pp_irq_probe_trigger,
+ mali_pp_irq_probe_ack,
+ core,
+ "mali_pp_irq_handlers");
+ if (NULL != core->irq)
+ {
+ /* Initialise the timeout timer */
+ core->timeout_timer = _mali_osk_timer_init();
+ if(NULL != core->timeout_timer)
+ {
+ _mali_osk_timer_setcallback(core->timeout_timer, mali_pp_timeout, (void *)core);
+
+ mali_global_pp_cores[mali_global_num_pp_cores] = core;
+ mali_global_num_pp_cores++;
+
+ return core;
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Failed to setup timeout timer for PP core %s\n", core->hw_core.description));
+ /* Release IRQ handlers */
+ _mali_osk_irq_term(core->irq);
+ }
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali PP: Failed to setup interrupt handlers for PP core %s\n", core->hw_core.description));
+ }
+ }
+ mali_hw_core_delete(&core->hw_core);
+ }
+
+ _mali_osk_free(core);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali PP: Failed to allocate memory for PP core\n"));
+ }
+
+ return NULL;
+}
+
+void mali_pp_delete(struct mali_pp_core *core)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ _mali_osk_timer_term(core->timeout_timer);
+ _mali_osk_irq_term(core->irq);
+ mali_hw_core_delete(&core->hw_core);
+
+ /* Remove core from global list */
+ for (i = 0; i < mali_global_num_pp_cores; i++)
+ {
+ if (mali_global_pp_cores[i] == core)
+ {
+ mali_global_pp_cores[i] = NULL;
+ mali_global_num_pp_cores--;
+ break;
+ }
+ }
+
+ _mali_osk_free(core);
+}
+
+void mali_pp_stop_bus(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ /* Will only send the stop bus command, and not wait for it to complete */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+}
+
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core)
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ /* Send the stop bus command. */
+ mali_pp_stop_bus(core);
+
+ /* Wait for bus to be stopped */
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+ break;
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Mali PP: Failed to stop bus on %s. Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core)
+{
+ /* Bus must be stopped before calling this function */
+ const int reset_finished_loop_count = 15;
+ const u32 reset_invalid_value = 0xC0FFE000;
+ const u32 reset_check_value = 0xC01A0000;
+ int i;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(2, ("Mali PP: Hard reset of core %s\n", core->hw_core.description));
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ mali_pp_post_process_job(core); /* @@@@ are there cases where it is unsafe to post process the job here? */
+
+ /* Set register to a bogus value. The register will be used to detect when reset is complete */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_invalid_value);
+
+ /* Force core to reset */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_FORCE_RESET);
+
+ /* Wait for reset to be complete */
+ for (i = 0; i < reset_finished_loop_count; i++)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, reset_check_value);
+ if (reset_check_value == mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW))
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (i == reset_finished_loop_count)
+ {
+ MALI_PRINT_ERROR(("Mali PP: The hard reset loop didn't work, unable to recover\n"));
+ }
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_WRITE_BOUNDARY_LOW, 0x00000000); /* set it back to the default */
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core)
+{
+ int i;
+ const int request_loop_count = 20;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_DEBUG_PRINT(4, ("Mali PP: Reset of core %s\n", core->hw_core.description));
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ mali_pp_post_process_job(core); /* @@@@ are there cases where it is unsafe to post process the job here? */
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, 0); /* disable the IRQs */
+
+#if defined(USING_MALI200)
+
+ /* On Mali-200, stop the bus, then do a hard reset of the core */
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_STOP_BUS);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) & MALI200_REG_VAL_STATUS_BUS_STOPPED)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count == i)
+ {
+ MALI_PRINT_ERROR(("Mali PP: Failed to stop bus for core %s, unable to recover\n", core->hw_core.description));
+ return _MALI_OSK_ERR_FAULT ;
+ }
+
+ /* the bus was stopped OK, do the hard reset */
+ mali_pp_hard_reset(core);
+
+#elif defined(USING_MALI400)
+
+ /* Mali-300 and Mali-400 have a safe reset command which we use */
+
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI400PP_REG_VAL_IRQ_RESET_COMPLETED);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI400PP_REG_VAL_CTRL_MGMT_SOFT_RESET);
+
+ for (i = 0; i < request_loop_count; i++)
+ {
+ if (mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI400PP_REG_VAL_IRQ_RESET_COMPLETED)
+ {
+ break;
+ }
+ _mali_osk_time_ubusydelay(10);
+ }
+
+ if (request_loop_count == i)
+ {
+ MALI_DEBUG_PRINT(2, ("Mali PP: Failed to reset core %s, Status: 0x%08x\n", core->hw_core.description, mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+#error "no supported mali core defined"
+#endif
+
+ /* Re-enable interrupts */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_MASK_ALL);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
+{
+ u32 *frame_registers = mali_pp_job_get_frame_registers(job);
+ u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
+ u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
+ u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
+ core->counter_src0_used = core->counter_src0;
+ core->counter_src1_used = core->counter_src1;
+
+ MALI_DEBUG_ASSERT_POINTER(core);
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, frame_registers, MALI200_NUM_REGS_FRAME);
+ if (0 != sub_job)
+ {
+ /*
+ * There are two frame registers which are different for each sub job.
+ * For the first sub job, these are correctly represented in the frame register array,
+ * but we need to patch these for all other sub jobs
+ */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job));
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job));
+ }
+
+ if (wb0_registers[0]) /* M200_WB0_REG_SOURCE_SELECT register */
+ {
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, MALI200_NUM_REGS_WBx);
+ }
+
+ if (wb1_registers[0]) /* M200_WB1_REG_SOURCE_SELECT register */
+ {
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, MALI200_NUM_REGS_WBx);
+ }
+
+ if (wb2_registers[0]) /* M200_WB2_REG_SOURCE_SELECT register */
+ {
+ mali_hw_core_register_write_array_relaxed(&core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, MALI200_NUM_REGS_WBx);
+ }
+
+ /* This selects which performance counters we are reading */
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used || MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+ {
+ /* global_config has enabled HW counters, this will override anything specified by user space */
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
+ }
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
+ }
+ }
+ else
+ {
+ /* Use HW counters from job object, if any */
+ u32 perf_counter_flag = mali_pp_job_get_perf_counter_flag(job);
+ if (0 != perf_counter_flag)
+ {
+ if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE)
+ {
+ core->counter_src0_used = mali_pp_job_get_perf_counter_src0(job);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, core->counter_src0_used);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
+ }
+
+ if (perf_counter_flag & _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE)
+ {
+ core->counter_src1_used = mali_pp_job_get_perf_counter_src1(job);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, core->counter_src1_used);
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE);
+ }
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: Starting job 0x%08X part %u/%u on PP core %s\n", job, sub_job + 1, mali_pp_job_get_sub_job_count(job), core->hw_core.description));
+
+ /* Add a barrier to make sure all register writes are finished */
+ _mali_osk_write_mem_barrier();
+
+ /* This is the command that starts the core. */
+ mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
+
+ /* Add a barrier to make sure the previous register writes are finished */
+ _mali_osk_write_mem_barrier();
+
+ /* Setup the timeout timer value and save the job id for the job running on the pp core */
+ _mali_osk_timer_add(core->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+ core->timeout_job_id = mali_pp_job_get_id(job);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH, job->frame_builder_id, job->flush_id, 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id), job->pid, job->tid, 0, 0, 0);
+#endif
+
+ core->running_job = job;
+ core->running_sub_job = sub_job;
+}
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION);
+}
+
+u32 mali_pp_core_get_id(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->core_id;
+}
+
+mali_bool mali_pp_core_set_counter_src0(struct mali_pp_core *core, u32 counter)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ core->counter_src0 = counter;
+ return MALI_TRUE;
+}
+
+mali_bool mali_pp_core_set_counter_src1(struct mali_pp_core *core, u32 counter)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+
+ core->counter_src1 = counter;
+ return MALI_TRUE;
+}
+
+u32 mali_pp_core_get_counter_src0(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->counter_src0;
+}
+
+u32 mali_pp_core_get_counter_src1(struct mali_pp_core *core)
+{
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return core->counter_src1;
+}
+
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index)
+{
+ if (MALI_MAX_NUMBER_OF_PP_CORES > index)
+ {
+ return mali_global_pp_cores[index];
+ }
+
+ return NULL;
+}
+
+u32 mali_pp_get_glob_num_pp_cores(void)
+{
+ return mali_global_num_pp_cores;
+}
+
+u32 mali_pp_get_max_num_pp_cores(void)
+{
+ return MALI_MAX_NUMBER_OF_PP_CORES;
+}
+
+/* ------------- interrupt handling below ------------------ */
+static _mali_osk_errcode_t mali_pp_upper_half(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+ if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout)
+ {
+ /* Mask out all IRQs from this core until IRQ is handled */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id)|MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+#endif
+
+ /* We do need to handle this in a bottom half */
+ _mali_osk_irq_schedulework(core->irq);
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+static void mali_pp_bottom_half(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ u32 irq_readout;
+ u32 irq_errors;
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#if 0 /* Bottom half TLP logging is currently not supported */
+ _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_START| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+#endif
+#endif
+
+ mali_group_lock(core->group); /* Group lock grabbed in core handlers, but released in common group handler */
+
+ if ( MALI_FALSE == mali_group_power_is_on(core->group) )
+ {
+ MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", core->hw_core.description));
+ mali_group_unlock(core->group);
+ return;
+ }
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+
+ MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, core->hw_core.description));
+
+ if (irq_readout & MALI200_REG_VAL_IRQ_END_OF_FRAME)
+ {
+ mali_pp_post_process_job(core);
+ MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
+ mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_COMPLETED); /* Will release group lock */
+ return;
+ }
+
+ /*
+ * Now let's look at the possible error cases (IRQ indicating error or timeout)
+ * END_OF_FRAME and HANG interrupts are not considered error.
+ */
+ irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME|MALI200_REG_VAL_IRQ_HANG);
+ if (0 != irq_errors)
+ {
+ mali_pp_post_process_job(core);
+ MALI_PRINT_ERROR(("Mali PP: Unknown interrupt 0x%08X from core %s, aborting job\n",
+ irq_readout, core->hw_core.description));
+ mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_FAILED); /* Will release group lock */
+ return;
+ }
+ else if (MALI_TRUE == core->core_timed_out) /* SW timeout */
+ {
+ if (core->timeout_job_id == mali_pp_job_get_id(core->running_job))
+ {
+ mali_pp_post_process_job(core);
+ MALI_DEBUG_PRINT(2, ("Mali PP: Job %d timed out on core %s\n",
+ mali_pp_job_get_id(core->running_job), core->hw_core.description));
+ mali_group_bottom_half(core->group, GROUP_EVENT_PP_JOB_TIMED_OUT); /* Will release group lock */
+ }
+ else
+ {
+ mali_group_unlock(core->group);
+ }
+ core->core_timed_out = MALI_FALSE;
+ return;
+ }
+ else if (irq_readout & MALI200_REG_VAL_IRQ_HANG)
+ {
+ /* Just ignore hang interrupts, the job timer will detect hanging jobs anyways */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
+ }
+
+ /*
+ * The only way to get here is if we got a HANG interrupt, which we ignore.
+ * Re-enable interrupts and let core continue to run
+ */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
+ mali_group_unlock(core->group);
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+#if 0 /* Bottom half TLP logging is currently not supported */
+ _mali_osk_profiling_add_event( MALI_PROFILING_EVENT_TYPE_STOP| MALI_PROFILING_EVENT_CHANNEL_SOFTWARE , _mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
+#endif
+#endif
+}
+
+static void mali_pp_irq_probe_trigger(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED); /* @@@@ This should not be needed */
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+}
+
+static _mali_osk_errcode_t mali_pp_irq_probe_ack(void *data)
+{
+ struct mali_pp_core *core = (struct mali_pp_core *)data;
+ u32 irq_readout;
+
+ irq_readout = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+ if (MALI200_REG_VAL_IRQ_FORCE_HANG & irq_readout)
+ {
+ mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_FORCE_HANG);
+ _mali_osk_mem_barrier();
+ return _MALI_OSK_ERR_OK;
+ }
+
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+/* ------ local helper functions below --------- */
+static void mali_pp_post_process_job(struct mali_pp_core *core)
+{
+ MALI_ASSERT_GROUP_LOCKED(core->group);
+
+ if (NULL != core->running_job)
+ {
+ u32 val0 = 0;
+ u32 val1 = 0;
+#if MALI_TIMELINE_PROFILING_ENABLED
+ int counter_index = COUNTER_FP0_C0 + (2 * core->core_id);
+#endif
+
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src0_used)
+ {
+ val0 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE);
+ if ((mali_pp_job_get_perf_counter_flag(core->running_job) &
+ _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE) && mali_pp_job_get_perf_counter_src0(core->running_job) == core->counter_src0_used)
+ {
+ /* We retrieved the counter that user space asked for, so return the value through the job object */
+ mali_pp_job_set_perf_counter_value0(core->running_job, core->running_sub_job, val0);
+ }
+ else
+ {
+ /* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
+ mali_pp_job_set_perf_counter_value0(core->running_job, core->running_sub_job, MALI_HW_CORE_INVALID_VALUE);
+ }
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_report_hw_counter(counter_index, val0);
+#endif
+ }
+
+ if (MALI_HW_CORE_NO_COUNTER != core->counter_src1_used)
+ {
+ val1 = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE);
+ if ((mali_pp_job_get_perf_counter_flag(core->running_job) &
+ _MALI_PERFORMANCE_COUNTER_FLAG_SRC1_ENABLE) && mali_pp_job_get_perf_counter_src1(core->running_job) == core->counter_src1_used)
+ {
+ /* We retrieved the counter that user space asked for, so return the value through the job object */
+ mali_pp_job_set_perf_counter_value1(core->running_job, core->running_sub_job, val1);
+ }
+ else
+ {
+ /* User space asked for a counter, but this is not what we retrieved (overridden by counter src set on core) */
+ mali_pp_job_set_perf_counter_value1(core->running_job, core->running_sub_job, MALI_HW_CORE_INVALID_VALUE);
+ }
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_report_hw_counter(counter_index + 1, val1);
+#endif
+ }
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP|MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id),
+ val0, val1, core->counter_src0_used | (core->counter_src1_used << 8), 0, 0);
+#endif
+
+ /* We are no longer running a job... */
+ core->running_job = NULL;
+ _mali_osk_timer_del(core->timeout_timer);
+ }
+}
+
+/* callback function for pp core timeout */
+static void mali_pp_timeout(void *data)
+{
+ struct mali_pp_core * core = ((struct mali_pp_core *)data);
+
+ MALI_DEBUG_PRINT(3, ("Mali PP: TIMEOUT callback \n"));
+ core->core_timed_out = MALI_TRUE;
+ _mali_osk_irq_schedulework(core->irq);
+}
+
+#if 0
+static void mali_pp_print_registers(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_VERSION = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_VERSION)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_CURRENT_REND_LIST_ADDR)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_RAWSTAT = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_MASK = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_INT_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_BUS_ERROR_STATUS)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_VALUE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC)));
+ MALI_DEBUG_PRINT(2, ("Mali PP: Register MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE = 0x%08X\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_VALUE)));
+}
+#endif
+
+#if 0
+void mali_pp_print_state(struct mali_pp_core *core)
+{
+ MALI_DEBUG_PRINT(2, ("Mali PP: State: 0x%08x\n", mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS) ));
+}
+#endif
+
+#if MALI_STATE_TRACKING
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\tPP #%d: %s\n", core->core_id, core->hw_core.description);
+
+ return n;
+}
+#endif
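The counter handling in mali_pp_job_start() above gives core-wide counter sources (set through mali_pp_core_set_counter_src0/1) precedence over whatever the job requested. A sketch of forcing such a core-wide source, assuming a caller that already has a valid core index; the source id 0x14 is an arbitrary placeholder, not taken from this patch:

/* Sketch: force performance counter 0 on every job run by this core. */
struct mali_pp_core *core = mali_pp_get_global_pp_core(0);
if (NULL != core)
{
	mali_pp_core_set_counter_src0(core, 0x14); /* placeholder source id */
	/* Subsequent mali_pp_job_start() calls will use this source; jobs that
	 * asked for a different source get MALI_HW_CORE_INVALID_VALUE back. */
}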
diff --git a/drivers/media/video/samsung/mali/common/mali_pp.h b/drivers/media/video/samsung/mali/common/mali_pp.h
new file mode 100644
index 0000000..9b425a0
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pp.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_H__
+#define __MALI_PP_H__
+
+#include "mali_osk.h"
+#include "mali_pp_job.h"
+
+struct mali_pp_core;
+struct mali_group;
+
+_mali_osk_errcode_t mali_pp_initialize(void);
+void mali_pp_terminate(void);
+
+struct mali_pp_core *mali_pp_create(const _mali_osk_resource_t * resource, struct mali_group *group);
+void mali_pp_delete(struct mali_pp_core *core);
+
+void mali_pp_stop_bus(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_stop_bus_wait(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
+_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
+
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job);
+
+u32 mali_pp_core_get_version(struct mali_pp_core *core);
+
+u32 mali_pp_core_get_id(struct mali_pp_core *core);
+
+mali_bool mali_pp_core_set_counter_src0(struct mali_pp_core *core, u32 counter);
+mali_bool mali_pp_core_set_counter_src1(struct mali_pp_core *core, u32 counter);
+u32 mali_pp_core_get_counter_src0(struct mali_pp_core *core);
+u32 mali_pp_core_get_counter_src1(struct mali_pp_core *core);
+struct mali_pp_core* mali_pp_get_global_pp_core(u32 index);
+u32 mali_pp_get_glob_num_pp_cores(void);
+u32 mali_pp_get_max_num_pp_cores(void);
+/* Debug */
+u32 mali_pp_dump_state(struct mali_pp_core *core, char *buf, u32 size);
+
+#endif /* __MALI_PP_H__ */
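For illustration, a sketch of walking the global core list exposed above, iterating up to the maximum and skipping empty slots (deletion can leave holes in the array); reading the version register assumes the cores are powered:

/* Sketch: print the id and version of every registered PP core. */
u32 i;
for (i = 0; i < mali_pp_get_max_num_pp_cores(); i++)
{
	struct mali_pp_core *core = mali_pp_get_global_pp_core(i);
	if (NULL != core)
	{
		MALI_DEBUG_PRINT(2, ("PP core %u has version 0x%08X\n",
		                     mali_pp_core_get_id(core),
		                     mali_pp_core_get_version(core)));
	}
}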
diff --git a/drivers/media/video/samsung/mali/common/mali_pp_job.c b/drivers/media/video/samsung/mali/common/mali_pp_job.c
new file mode 100644
index 0000000..47b8a0a
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pp_job.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_job.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *args, u32 id)
+{
+ struct mali_pp_job *job;
+
+ if (args->num_cores > _MALI_PP_MAX_SUB_JOBS)
+ {
+ MALI_PRINT_ERROR(("Mali PP job: Too many sub jobs specified in job object\n"));
+ return NULL;
+ }
+
+ job = _mali_osk_malloc(sizeof(struct mali_pp_job));
+ if (NULL != job)
+ {
+ u32 i;
+ _mali_osk_list_init(&job->list);
+ job->session = session;
+ job->id = id;
+ job->user_id = args->user_job_ptr;
+ job->barrier = args->flags & _MALI_PP_JOB_FLAG_BARRIER ? MALI_TRUE : MALI_FALSE;
+ job->active_barrier = job->barrier;
+ job->no_notification = args->flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION ? MALI_TRUE : MALI_FALSE;
+ _mali_osk_memcpy(job->frame_registers, args->frame_registers, sizeof(job->frame_registers));
+ _mali_osk_memcpy(job->frame_registers_addr_frame, args->frame_registers_addr_frame, sizeof(job->frame_registers_addr_frame));
+ _mali_osk_memcpy(job->frame_registers_addr_stack, args->frame_registers_addr_stack, sizeof(job->frame_registers_addr_stack));
+
+ /* Only copy write back registers for the units that are enabled */
+ job->wb0_registers[0] = 0;
+ job->wb1_registers[0] = 0;
+ job->wb2_registers[0] = 0;
+ if (args->wb0_registers[0]) /* M200_WB0_REG_SOURCE_SELECT register */
+ {
+ _mali_osk_memcpy(job->wb0_registers, args->wb0_registers, sizeof(job->wb0_registers));
+ }
+ if (args->wb1_registers[0]) /* M200_WB1_REG_SOURCE_SELECT register */
+ {
+ _mali_osk_memcpy(job->wb1_registers, args->wb1_registers, sizeof(job->wb1_registers));
+ }
+ if (args->wb2_registers[0]) /* M200_WB2_REG_SOURCE_SELECT register */
+ {
+ _mali_osk_memcpy(job->wb2_registers, args->wb2_registers, sizeof(job->wb2_registers));
+ }
+
+ job->perf_counter_flag = args->perf_counter_flag;
+ job->perf_counter_src0 = args->perf_counter_src0;
+ job->perf_counter_src1 = args->perf_counter_src1;
+ for (i = 0; i < args->num_cores; i++)
+ {
+ job->perf_counter_value0[i] = 0;
+ job->perf_counter_value1[i] = 0;
+ }
+ job->sub_job_count = args->num_cores;
+ job->sub_jobs_started = 0;
+ job->sub_jobs_completed = 0;
+ job->sub_job_errors = 0;
+
+ job->pid = _mali_osk_get_pid();
+ job->tid = _mali_osk_get_tid();
+ job->frame_builder_id = args->frame_builder_id;
+ job->flush_id = args->flush_id;
+
+ return job;
+ }
+
+ return NULL;
+}
+
+void mali_pp_job_delete(struct mali_pp_job *job)
+{
+ _mali_osk_free(job);
+}
+
+_mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
+{
+ if ((0 == job->frame_registers[0]) || (0 == job->frame_registers[1]))
+ {
+ return _MALI_OSK_ERR_FAULT;
+ }
+ return _MALI_OSK_ERR_OK;
+}
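A sketch of the create/validate path as a caller would drive it; session, args and the id counter are placeholders supplied by the layer that receives the user-space request:

/* Sketch: reject jobs whose mandatory frame registers are zero. */
struct mali_pp_job *job = mali_pp_job_create(session, &args, next_job_id++);
if (NULL != job && _MALI_OSK_ERR_OK != mali_pp_job_check(job))
{
	mali_pp_job_delete(job);
	job = NULL;
}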
diff --git a/drivers/media/video/samsung/mali/common/mali_pp_job.h b/drivers/media/video/samsung/mali/common/mali_pp_job.h
new file mode 100644
index 0000000..4399c1d
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pp_job.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2011-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_JOB_H__
+#define __MALI_PP_JOB_H__
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_uk_types.h"
+#include "mali_session.h"
+#include "mali_kernel_common.h"
+#include "regs/mali_200_regs.h"
+
+/**
+ * This structure represents a PP job, including all sub-jobs.
+ * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
+ * mechanism works)
+ */
+struct mali_pp_job
+{
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ u32 id; /**< identifier for this job in kernel space (sequential numbering) */
+ u32 user_id; /**< identifier for the job in user space */
+ u32 frame_registers[_MALI_PP_MAX_FRAME_REGISTERS]; /**< core specific registers associated with this job, see ARM DDI0415A */
+ u32 frame_registers_addr_frame[_MALI_PP_MAX_SUB_JOBS - 1]; /**< ADDR_FRAME registers for sub job 1-7 */
+ u32 frame_registers_addr_stack[_MALI_PP_MAX_SUB_JOBS - 1]; /**< ADDR_STACK registers for sub job 1-7 */
+ u32 wb0_registers[_MALI_PP_MAX_WB_REGISTERS]; /**< Write back unit 0 registers */
+ u32 wb1_registers[_MALI_PP_MAX_WB_REGISTERS]; /**< Write back unit 1 registers */
+ u32 wb2_registers[_MALI_PP_MAX_WB_REGISTERS]; /**< Write back unit 2 registers */
+ u32 perf_counter_flag; /**< bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
+ u32 perf_counter_src0; /**< Source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_src1; /**< Source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
+ u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+ u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+ u32 sub_job_count; /**< Total number of sub-jobs in this superjob */
+ u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
+ u32 sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
+ u32 sub_job_errors; /**< Bitfield with errors (the errors for the individual sub-jobs are OR'ed together) */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
+ u32 frame_builder_id; /**< id of the originating frame builder */
+ u32 flush_id; /**< flush id within the originating frame builder */
+ mali_bool barrier; /**< [in] MALI_TRUE means wait for all my previous jobs to complete before scheduling this one */
+ mali_bool active_barrier; /**< [in] Changes from MALI_TRUE to MALI_FALSE when barrier has been resolved */
+ mali_bool no_notification; /**< [in] MALI_TRUE means do not notify user space when this job has completed */
+};
+
+struct mali_pp_job *mali_pp_job_create(struct mali_session_data *session, _mali_uk_pp_start_job_s *args, u32 id);
+void mali_pp_job_delete(struct mali_pp_job *job);
+
+_mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job);
+
+/******************************************************
+ * simple utility functions for dealing with pp jobs:
+ *****************************************************/
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
+{
+ return (NULL == job) ? 0 : job->id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_user_id(struct mali_pp_job *job)
+{
+ return job->user_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
+{
+ return job->frame_builder_id;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
+{
+ return job->flush_id;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_frame_registers(struct mali_pp_job *job)
+{
+ return job->frame_registers;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
+{
+ if (sub_job == 0)
+ {
+ return job->frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
+ }
+ else if (sub_job < _MALI_PP_MAX_SUB_JOBS)
+ {
+ return job->frame_registers_addr_frame[sub_job - 1];
+ }
+
+ return 0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
+{
+ if (sub_job == 0)
+ {
+ return job->frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
+ }
+ else if (sub_job < _MALI_PP_MAX_SUB_JOBS)
+ {
+ return job->frame_registers_addr_stack[sub_job - 1];
+ }
+
+ return 0;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
+{
+ return job->wb0_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
+{
+ return job->wb1_registers;
+}
+
+MALI_STATIC_INLINE u32* mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
+{
+ return job->wb2_registers;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
+{
+ job->wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
+{
+ job->wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
+{
+ job->wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
+}
+
+MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
+{
+ return job->session;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
+{
+ return (job->sub_jobs_started < job->sub_job_count) ? MALI_TRUE : MALI_FALSE;
+}
+
+/* Function used when we are terminating a session with jobs. Returns TRUE if the job is currently rendering.
+ Makes sure that no new sub-jobs are started. */
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_currently_rendering_and_if_so_abort_new_starts(struct mali_pp_job *job)
+{
+ /* Not all sub-jobs can have been started, since then the job would not be in the job queue */
+ MALI_DEBUG_ASSERT( job->sub_jobs_started != job->sub_job_count );
+
+ /* If at least one job is started */
+ if ( (job->sub_jobs_started > 0) )
+ {
+ /* If at least one job is currently being rendered, and thus assigned to a group and core */
+ if (job->sub_jobs_started > job->sub_jobs_completed )
+ {
+ u32 jobs_remaining = job->sub_job_count - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+ job->sub_jobs_completed += jobs_remaining;
+ job->sub_job_errors += jobs_remaining;
+ /* Return TRUE to indicate that we can not delete this job while it is being rendered */
+ return MALI_TRUE;
+ }
+ }
+ /* The job is not being rendered at the moment and can therefore safely be deleted */
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
+{
+ return (job->sub_job_count == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
+{
+ return job->sub_jobs_started;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
+{
+ return job->sub_job_count;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
+{
+ /* Assert that we are marking the "first unstarted sub job" as started */
+ MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
+ job->sub_jobs_started++;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
+{
+ job->sub_jobs_completed++;
+ if ( MALI_FALSE == success )
+ {
+ job->sub_job_errors++;
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
+{
+ if ( 0 == job->sub_job_errors )
+ {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_active_barrier(struct mali_pp_job *job)
+{
+ return job->active_barrier;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_barrier_enforced(struct mali_pp_job *job)
+{
+ job->active_barrier = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(struct mali_pp_job *job)
+{
+ return job->no_notification;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
+{
+ return job->perf_counter_flag;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job)
+{
+ return job->perf_counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job)
+{
+ return job->perf_counter_src1;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
+{
+ return job->perf_counter_value0[sub_job];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
+{
+ return job->perf_counter_value1[sub_job];
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ job->perf_counter_value0[sub_job] = value;
+}
+
+MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
+{
+ job->perf_counter_value1[sub_job] = value;
+}
+
+#endif /* __MALI_PP_JOB_H__ */
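The inline helpers above carry the sub-job accounting that the scheduler relies on; a condensed sketch of the intended call pattern, with locking and the actual hand-off to a core omitted:

/* Sketch: start every sub-job in order, then account for completions. */
while (mali_pp_job_has_unstarted_sub_jobs(job))
{
	u32 sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
	/* ... hand (job, sub_job) to an idle core ... */
	mali_pp_job_mark_sub_job_started(job, sub_job);
}

/* In the per-core completion handler: */
mali_pp_job_mark_sub_job_completed(job, MALI_TRUE);
if (mali_pp_job_is_complete(job) && mali_pp_job_was_success(job))
{
	/* all sub-jobs done without errors; return the job to user space */
}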
diff --git a/drivers/media/video/samsung/mali/common/mali_pp_scheduler.c b/drivers/media/video/samsung/mali/common/mali_pp_scheduler.c
new file mode 100644
index 0000000..a944055
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pp_scheduler.c
@@ -0,0 +1,594 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_pp_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_scheduler.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_cluster.h"
+
+/* Maximum of 8 PP cores (a group can only have maximum of 1 PP core) */
+#define MALI_MAX_NUMBER_OF_PP_GROUPS 8
+
+static mali_bool mali_pp_scheduler_is_suspended(void);
+
+enum mali_pp_slot_state
+{
+ MALI_PP_SLOT_STATE_IDLE,
+ MALI_PP_SLOT_STATE_WORKING,
+};
+
+/* A render slot is an entity which jobs can be scheduled onto */
+struct mali_pp_slot
+{
+ struct mali_group *group;
+ /*
+ * We keep track of the state here as well as in the group object
+ * so we don't need to take the group lock so often (and also avoid clutter with the working lock)
+ */
+ enum mali_pp_slot_state state;
+ struct mali_session_data *session;
+};
+
+static u32 pp_version = 0;
+static _MALI_OSK_LIST_HEAD(job_queue); /* List of jobs with some unscheduled work */
+static struct mali_pp_slot slots[MALI_MAX_NUMBER_OF_PP_GROUPS];
+static u32 num_slots = 0;
+static u32 num_slots_idle = 0;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *pp_scheduler_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+static _mali_osk_lock_t *pp_scheduler_lock = NULL;
+/* Contains tid of thread that locked the scheduler or 0, if not locked */
+MALI_DEBUG_CODE(static u32 pp_scheduler_lock_owner = 0);
+
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void)
+{
+ u32 i;
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue);
+
+ pp_scheduler_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_ORDERED |_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE, 0, _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == pp_scheduler_lock)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_scheduler_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == pp_scheduler_working_wait_queue)
+ {
+ _mali_osk_lock_term(pp_scheduler_lock);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Find all the available PP cores */
+ for (i = 0; i < mali_cluster_get_glob_num_clusters(); i++)
+ {
+ u32 group_id = 0;
+ struct mali_cluster *curr_cluster = mali_cluster_get_global_cluster(i);
+ struct mali_group *group = mali_cluster_get_group(curr_cluster, group_id);
+ while (NULL != group)
+ {
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ if (NULL != pp_core)
+ {
+ if (0 == pp_version)
+ {
+ /* Retrieve PP version from first available PP core */
+ pp_version = mali_pp_core_get_version(pp_core);
+ }
+ slots[num_slots].group = group;
+ slots[num_slots].state = MALI_PP_SLOT_STATE_IDLE;
+ slots[num_slots].session = NULL;
+ num_slots++;
+ num_slots_idle++;
+ }
+ group_id++;
+ group = mali_cluster_get_group(curr_cluster, group_id);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_pp_scheduler_terminate(void)
+{
+ _mali_osk_wait_queue_term(pp_scheduler_working_wait_queue);
+ _mali_osk_lock_term(pp_scheduler_lock);
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_lock(void)
+{
+ if(_MALI_OSK_ERR_OK != _mali_osk_lock_wait(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW))
+ {
+ /* Non-interruptable lock failed: this should never happen. */
+ MALI_DEBUG_ASSERT(0);
+ }
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: PP scheduler lock taken\n"));
+ MALI_DEBUG_ASSERT(0 == pp_scheduler_lock_owner);
+ MALI_DEBUG_CODE(pp_scheduler_lock_owner = _mali_osk_get_tid());
+}
+
+MALI_STATIC_INLINE void mali_pp_scheduler_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Mali PP scheduler: Releasing PP scheduler lock\n"));
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == pp_scheduler_lock_owner);
+ MALI_DEBUG_CODE(pp_scheduler_lock_owner = 0);
+ _mali_osk_lock_signal(pp_scheduler_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+#ifdef DEBUG
+MALI_STATIC_INLINE void mali_pp_scheduler_assert_locked(void)
+{
+ MALI_DEBUG_ASSERT(_mali_osk_get_tid() == pp_scheduler_lock_owner);
+}
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED() mali_pp_scheduler_assert_locked()
+#else
+#define MALI_ASSERT_PP_SCHEDULER_LOCKED()
+#endif
+
+static mali_bool mali_pp_scheduler_session_has_running_jobs(struct mali_session_data *session)
+{
+ u32 i;
+
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+ if (num_slots_idle == num_slots)
+ {
+ return MALI_FALSE;
+ }
+
+ for (i = 0; i < num_slots; i++)
+ {
+ if (MALI_PP_SLOT_STATE_WORKING == slots[i].state)
+ {
+ if (slots[i].session == session)
+ {
+ return MALI_TRUE;
+ }
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+static void mali_pp_scheduler_schedule(void)
+{
+ u32 i;
+ struct mali_pp_job *job;
+#if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS
+ struct mali_session_data * session;
+#endif
+
+ MALI_ASSERT_PP_SCHEDULER_LOCKED();
+
+ if (0 < pause_count || 0 == num_slots_idle || _mali_osk_list_empty(&job_queue))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Nothing to schedule (paused=%u, idle slots=%u)\n",
+ pause_count, num_slots_idle));
+ return; /* Nothing to do, so early out */
+ }
+
+
+#if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP
+ if ( num_slots_idle < num_slots )
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Job not started, since only %d/%d cores are available\n", num_slots_idle,num_slots));
+ return;
+ }
+#endif
+
+#if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS
+ /* Finding initial session for the PP cores */
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_pp_job, list);
+ session = job->session;
+ if ( num_slots != num_slots_idle )
+ {
+ for (i = 0; (i < num_slots) ; i++)
+ {
+ if ( slots[i].state == MALI_PP_SLOT_STATE_IDLE )
+ {
+ continue;
+ }
+ session = mali_group_get_session(slots[i].group);
+ break;
+ }
+ }
+#endif
+
+ for (i = 0; (i < num_slots) && (0 < num_slots_idle); i++)
+ {
+ u32 sub_job;
+
+ if (_mali_osk_list_empty(&job_queue)) /* move this check down to where we know we have started all sub jobs for this job??? */
+ {
+ break; /* No more jobs to schedule, so early out */
+ }
+
+ if (MALI_PP_SLOT_STATE_IDLE != slots[i].state)
+ {
+ continue;
+ }
+
+ job = _MALI_OSK_LIST_ENTRY(job_queue.next, struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(mali_pp_job_has_unstarted_sub_jobs(job)); /* All jobs on the job_queue should have unstarted sub jobs */
+
+ if (MALI_TRUE == mali_pp_job_has_active_barrier(job))
+ {
+ if (MALI_TRUE == mali_pp_scheduler_session_has_running_jobs(mali_pp_job_get_session(job)))
+ {
+ /* There is already a running job from this session, so we need to enforce the barrier */
+ return;
+ }
+ else
+ {
+ /* Barrier is now enforced, update job object so we don't delay execution of sub-jobs */
+ mali_pp_job_barrier_enforced(job);
+ }
+ }
+
+ #if MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED
+ if ( (0==job->sub_jobs_started) && (num_slots_idle < num_slots) && (job->sub_job_count > num_slots_idle))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Job with %d subjobs not started, since only %d/%d cores are available\n", job->sub_job_count, num_slots_idle,num_slots));
+ return;
+ }
+ #endif
+
+ #if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS
+ if ( job->session != session )
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Job not started since existing job is from another application\n"));
+ return;
+ }
+ #endif
+
+ sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Starting job %u (0x%08X) part %u/%u\n", mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));
+ if (_MALI_OSK_ERR_OK == mali_group_start_pp_job(slots[i].group, job, sub_job))
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Job %u (0x%08X) part %u/%u started\n", mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job)));
+
+ /* Mark this sub job as started */
+ mali_pp_job_mark_sub_job_started(job, sub_job);
+
+ /* Mark slot as busy */
+ slots[i].state = MALI_PP_SLOT_STATE_WORKING;
+ slots[i].session = mali_pp_job_get_session(job);
+ num_slots_idle--;
+
+ if (!mali_pp_job_has_unstarted_sub_jobs(job))
+ {
+ /*
+ * All sub jobs have now started for this job, remove this job from the job queue.
+ * The job will now only be referred to by the slots which are running it.
+ * The last slot to complete will make sure it is returned to user space.
+ */
+ _mali_osk_list_del(&job->list);
+#if MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP
+ MALI_DEBUG_PRINT(6, ("Mali PP scheduler: Skip scheduling more jobs when MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP is set.\n"));
+ return;
+#endif
+ }
+ }
+ else
+ {
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Failed to start PP job\n"));
+ return;
+ }
+ }
+}
+
+static void mali_pp_scheduler_return_job_to_user(struct mali_pp_job *job)
+{
+ if (MALI_FALSE == mali_pp_job_use_no_notification(job))
+ {
+ _mali_osk_notification_t *notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_FINISHED, sizeof(_mali_uk_pp_job_finished_s));
+ if (NULL != notobj)
+ {
+ u32 i;
+ u32 sub_jobs = mali_pp_job_get_sub_job_count(job);
+ mali_bool success = mali_pp_job_was_success(job);
+
+ _mali_uk_pp_job_finished_s *jobres = notobj->result_buffer;
+ _mali_osk_memset(jobres, 0, sizeof(_mali_uk_pp_job_finished_s)); /* @@@@ can be removed once we initialize all members in this struct */
+ jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+ if (MALI_TRUE == success)
+ {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ }
+ else
+ {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ for (i = 0; i < sub_jobs; i++)
+ {
+ jobres->perf_counter0[i] = mali_pp_job_get_perf_counter_value0(job, i);
+ jobres->perf_counter1[i] = mali_pp_job_get_perf_counter_value1(job, i);
+ }
+
+ mali_session_send_notification(mali_pp_job_get_session(job), notobj);
+ }
+ else
+ {
+ MALI_PRINT_ERROR(("Mali PP scheduler: Unable to allocate notification object\n"));
+ }
+ }
+
+ mali_pp_job_delete(job);
+}
+
+void mali_pp_scheduler_do_schedule(void)
+{
+ mali_pp_scheduler_lock();
+
+ mali_pp_scheduler_schedule();
+
+ mali_pp_scheduler_unlock();
+}
+
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success)
+{
+ u32 i;
+ mali_bool job_is_done;
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) part %u/%u completed (%s)\n", mali_pp_job_get_id(job), job, sub_job + 1, mali_pp_job_get_sub_job_count(job), success ? "success" : "failure"));
+
+ mali_pp_scheduler_lock();
+
+ /* Find slot which was running this job */
+ for (i = 0; i < num_slots; i++)
+ {
+ if (slots[i].group == group)
+ {
+ MALI_DEBUG_ASSERT(MALI_PP_SLOT_STATE_WORKING == slots[i].state);
+ slots[i].state = MALI_PP_SLOT_STATE_IDLE;
+ slots[i].session = NULL;
+ num_slots_idle++;
+ mali_pp_job_mark_sub_job_completed(job, success);
+ }
+ }
+
+ /* If paused, then this was the last job, so wake up sleeping workers */
+ if (pause_count > 0)
+ {
+ /* Wake up sleeping workers. Their wake-up condition is that
+ * num_slots == num_slots_idle, so unless we are done working, no
+ * threads will actually be woken up.
+ */
+ _mali_osk_wait_queue_wake_up(pp_scheduler_working_wait_queue);
+ }
+ else
+ {
+ mali_pp_scheduler_schedule();
+ }
+
+ job_is_done = mali_pp_job_is_complete(job);
+
+ mali_pp_scheduler_unlock();
+
+ if (job_is_done)
+ {
+ /* Send notification back to user space */
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: All parts completed for job %u (0x%08X)\n", mali_pp_job_get_id(job), job));
+ mali_pp_scheduler_return_job_to_user(job);
+ }
+}
+
+void mali_pp_scheduler_suspend(void)
+{
+ mali_pp_scheduler_lock();
+ pause_count++; /* Increment the pause_count so that no more jobs will be scheduled */
+ mali_pp_scheduler_unlock();
+
+ /*mali_pp_scheduler_working_lock();*/
+ /* We have now acquired the working lock, which means that we have successfully paused the scheduler */
+ /*mali_pp_scheduler_working_unlock();*/
+
+ /* go to sleep. When woken up again (in mali_pp_scheduler_job_done), the
+ * mali_pp_scheduler_is_suspended() function will be called. This will return true
+ * iff state is idle and pause_count > 0, so if the core is active this
+ * will not do anything.
+ */
+ _mali_osk_wait_queue_wait_event(pp_scheduler_working_wait_queue, mali_pp_scheduler_is_suspended);
+}
+
+void mali_pp_scheduler_resume(void)
+{
+ mali_pp_scheduler_lock();
+ pause_count--; /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ if (0 == pause_count)
+ {
+ mali_pp_scheduler_schedule();
+ }
+ mali_pp_scheduler_unlock();
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job(_mali_uk_pp_start_job_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+ session = (struct mali_session_data*)args->ctx;
+
+ job = mali_pp_job_create(session, args, mali_scheduler_get_new_id());
+ if (NULL == job)
+ {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ if (_MALI_OSK_ERR_OK != mali_pp_job_check(job))
+ {
+ /* Not a valid job, return to user immediately */
+ mali_pp_job_mark_sub_job_completed(job, MALI_FALSE); /* Flagging the job as failed. */
+ mali_pp_scheduler_return_job_to_user(job); /* This will also delete the job object */
+ return _MALI_OSK_ERR_OK; /* User is notified via a notification, so this call is ok */
+ }
+
+#if PROFILING_SKIP_PP_JOBS || PROFILING_SKIP_PP_AND_GP_JOBS
+#warning PP jobs will not be executed
+ mali_pp_scheduler_return_job_to_user(job);
+ return _MALI_OSK_ERR_OK;
+#endif
+
+ mali_pp_scheduler_lock();
+
+ _mali_osk_list_addtail(&job->list, &job_queue);
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) with %u parts queued\n", mali_pp_job_get_id(job), job, mali_pp_job_get_sub_job_count(job)));
+
+ mali_pp_scheduler_schedule();
+
+ mali_pp_scheduler_unlock();
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER(args->ctx);
+ args->number_of_cores = num_slots;
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER(args->ctx);
+ args->version = pp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT_POINTER(args->ctx);
+
+ session = (struct mali_session_data*)args->ctx;
+
+ mali_pp_scheduler_lock();
+
+ /* Check queue for jobs that match */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_pp_job, list)
+ {
+ if (mali_pp_job_get_session(job) == session &&
+ mali_pp_job_get_frame_builder_id(job) == (u32)args->fb_id &&
+ mali_pp_job_get_flush_id(job) == (u32)args->flush_id)
+ {
+ if (args->wbx & _MALI_UK_PP_JOB_WB0)
+ {
+ mali_pp_job_disable_wb0(job);
+ }
+ if (args->wbx & _MALI_UK_PP_JOB_WB1)
+ {
+ mali_pp_job_disable_wb1(job);
+ }
+ if (args->wbx & _MALI_UK_PP_JOB_WB2)
+ {
+ mali_pp_job_disable_wb2(job);
+ }
+ break;
+ }
+ }
+
+ mali_pp_scheduler_unlock();
+}
+
+void mali_pp_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_pp_job *job, *tmp;
+ int i;
+
+ mali_pp_scheduler_lock();
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Aborting all jobs from session 0x%08x\n", session));
+
+ /* Check queue for jobs and remove */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &job_queue, struct mali_pp_job, list)
+ {
+ if (mali_pp_job_get_session(job) == session)
+ {
+ _mali_osk_list_del(&(job->list));
+
+ if ( mali_pp_job_is_currently_rendering_and_if_so_abort_new_starts(job) )
+ {
+ /* The job is in the render pipeline, we can not delete it yet. */
+ /* It will be deleted in the mali_group_abort_session() call below */
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Keeping partially started PP job 0x%08x in queue\n", job));
+ continue;
+ }
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Removing PP job 0x%08x from queue\n", job));
+ mali_pp_job_delete(job);
+ }
+ }
+
+ mali_pp_scheduler_unlock();
+
+ /* Abort running jobs from this session */
+ for (i = 0; i < num_slots; i++)
+ {
+ struct mali_group *group = slots[i].group;
+
+ MALI_DEBUG_PRINT(5, ("PP sched abort: Looking at group 0x%08x\n", group));
+
+ if (MALI_PP_SLOT_STATE_WORKING == slots[i].state)
+ {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Aborting session 0x%08x from group 0x%08x\n", session, group));
+
+ mali_group_abort_session(group, session);
+ }
+ }
+}
+
+static mali_bool mali_pp_scheduler_is_suspended(void)
+{
+ mali_bool ret;
+
+ mali_pp_scheduler_lock();
+ ret = pause_count > 0 && num_slots == num_slots_idle;
+ mali_pp_scheduler_unlock();
+
+ return ret;
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+ int i;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "PP:\n");
+ n += _mali_osk_snprintf(buf + n, size - n, "\tQueue is %s\n", _mali_osk_list_empty(&job_queue) ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ for (i = 0; i < num_slots; i++)
+ {
+ n += mali_group_dump_state(slots[i].group, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n, "\t\tState: %d\n", slots[i].state);
+ }
+
+ return n;
+}
+#endif
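The scheduler code above relies on num_slots_idle staying in sync with the per-slot state field. A hypothetical debug-only helper (not part of this patch) that verifies the invariant under the scheduler lock could look like the following sketch; it assumes the file-local slots, num_slots and num_slots_idle statics defined earlier in mali_pp_scheduler.c.

#if defined(DEBUG)
/* Sketch only: verify that the cached idle counter matches the number of
 * slots actually marked MALI_PP_SLOT_STATE_IDLE. Caller must hold the
 * PP scheduler lock. */
static void mali_pp_scheduler_check_idle_count(void)
{
	u32 i;
	u32 idle = 0;

	MALI_ASSERT_PP_SCHEDULER_LOCKED();

	for (i = 0; i < num_slots; i++)
	{
		if (MALI_PP_SLOT_STATE_IDLE == slots[i].state)
		{
			idle++;
		}
	}

	MALI_DEBUG_ASSERT(idle == num_slots_idle);
}
#endif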
diff --git a/drivers/media/video/samsung/mali/common/mali_pp_scheduler.h b/drivers/media/video/samsung/mali/common/mali_pp_scheduler.h
new file mode 100644
index 0000000..48eb3bd
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_pp_scheduler.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_PP_SCHEDULER_H__
+#define __MALI_PP_SCHEDULER_H__
+
+#include "mali_osk.h"
+#include "mali_cluster.h"
+#include "mali_pp_job.h"
+
+_mali_osk_errcode_t mali_pp_scheduler_initialize(void);
+void mali_pp_scheduler_terminate(void);
+
+void mali_pp_scheduler_do_schedule(void);
+void mali_pp_scheduler_job_done(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool success);
+
+void mali_pp_scheduler_suspend(void);
+void mali_pp_scheduler_resume(void);
+
+/** @brief Abort all running or queued PP jobs from a session
+ *
+ * This function aborts all PP jobs from the specified session. Queued jobs are removed from the queue and jobs
+ * currently running on a core will be aborted.
+ *
+ * @param session Pointer to session whose jobs should be aborted
+ */
+void mali_pp_scheduler_abort_session(struct mali_session_data *session);
+
+u32 mali_pp_scheduler_dump_state(char *buf, u32 size);
+
+#endif /* __MALI_PP_SCHEDULER_H__ */
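mali_pp_scheduler_suspend() raises pause_count and sleeps until every slot is idle, while mali_pp_scheduler_resume() lowers the count and re-runs the scheduler. A minimal usage sketch, assuming only the two declarations above (the helper name and callback are illustrative, not part of the driver):

#include "mali_pp_scheduler.h"

/* Sketch: run 'work' while the fragment processors are guaranteed idle,
 * e.g. from a power-management or reconfiguration path. */
static void example_with_pp_idle(void (*work)(void))
{
	mali_pp_scheduler_suspend();   /* blocks until all PP slots are idle */
	work();                        /* cores are idle while this runs */
	mali_pp_scheduler_resume();    /* queued jobs may be scheduled again */
}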
diff --git a/drivers/media/video/samsung/mali/common/mali_scheduler.c b/drivers/media/video/samsung/mali/common/mali_scheduler.c
new file mode 100644
index 0000000..f360209
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_scheduler.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+
+static _mali_osk_atomic_t mali_job_autonumber;
+
+_mali_osk_errcode_t mali_scheduler_initialize(void)
+{
+ if ( _MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_autonumber, 0))
+ {
+ MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_scheduler_terminate(void)
+{
+ _mali_osk_atomic_term(&mali_job_autonumber);
+}
+
+u32 mali_scheduler_get_new_id(void)
+{
+ u32 job_id = _mali_osk_atomic_inc_return(&mali_job_autonumber);
+ return job_id;
+}
+
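mali_scheduler_get_new_id() is just a shared atomic counter, so GP and PP jobs draw their ids from a single monotonically increasing sequence without taking a lock. For illustration only, the same pattern in plain C11 atomics (outside the Mali OSK abstractions) would be:

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint_fast32_t job_autonumber = ATOMIC_VAR_INIT(0);

/* Equivalent of mali_scheduler_get_new_id(): atomically increment the
 * counter and return the incremented value. */
static uint32_t example_get_new_id(void)
{
	return (uint32_t)atomic_fetch_add(&job_autonumber, 1) + 1;
}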
diff --git a/drivers/media/video/samsung/mali/common/mali_scheduler.h b/drivers/media/video/samsung/mali/common/mali_scheduler.h
new file mode 100644
index 0000000..74f0947
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_scheduler.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SCHEDULER_H__
+#define __MALI_SCHEDULER_H__
+
+#include "mali_osk.h"
+
+_mali_osk_errcode_t mali_scheduler_initialize(void);
+void mali_scheduler_terminate(void);
+
+u32 mali_scheduler_get_new_id(void);
+
+#endif /* __MALI_SCHEDULER_H__ */
diff --git a/drivers/media/video/samsung/mali/common/mali_session.c b/drivers/media/video/samsung/mali/common/mali_session.c
new file mode 100644
index 0000000..2394bb9
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_session.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_session.h"
+
+_MALI_OSK_LIST_HEAD(mali_sessions);
+
+_mali_osk_lock_t *mali_sessions_lock;
+
+_mali_osk_errcode_t mali_session_initialize(void)
+{
+ _MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
+
+ mali_sessions_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_READERWRITER | _MALI_OSK_LOCKFLAG_ORDERED, 0, _MALI_OSK_LOCK_ORDER_SESSIONS);
+
+ if (NULL == mali_sessions_lock) return _MALI_OSK_ERR_NOMEM;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_session_terminate(void)
+{
+ _mali_osk_lock_term(mali_sessions_lock);
+}
+
+void mali_session_add(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_add(&session->link, &mali_sessions);
+ mali_session_unlock();
+}
+
+void mali_session_remove(struct mali_session_data *session)
+{
+ mali_session_lock();
+ _mali_osk_list_delinit(&session->link);
+ mali_session_unlock();
+}
diff --git a/drivers/media/video/samsung/mali/common/mali_session.h b/drivers/media/video/samsung/mali/common/mali_session.h
new file mode 100644
index 0000000..c8640b5
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_session.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_SESSION_H__
+#define __MALI_SESSION_H__
+
+#include "mali_mmu_page_directory.h"
+#include "mali_kernel_descriptor_mapping.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+
+struct mali_session_data
+{
+ _mali_osk_notification_queue_t * ioctl_queue;
+
+ _mali_osk_lock_t *memory_lock; /**< Lock protecting the vm manipulation */
+ mali_descriptor_mapping * descriptor_mapping; /**< Mapping between userspace descriptors and our pointers */
+ _mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
+
+ struct mali_page_directory *page_directory; /**< MMU page directory for this session */
+
+ _MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
+};
+
+_mali_osk_errcode_t mali_session_initialize(void);
+void mali_session_terminate(void);
+
+/* List of all sessions. Actual list head in mali_session.c */
+extern _mali_osk_list_t mali_sessions;
+/* Lock to protect modification and access to the mali_sessions list */
+extern _mali_osk_lock_t *mali_sessions_lock;
+
+MALI_STATIC_INLINE void mali_session_lock(void)
+{
+ _mali_osk_lock_wait(mali_sessions_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+MALI_STATIC_INLINE void mali_session_unlock(void)
+{
+ _mali_osk_lock_signal(mali_sessions_lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+void mali_session_add(struct mali_session_data *session);
+void mali_session_remove(struct mali_session_data *session);
+#define MALI_SESSION_FOREACH(session, tmp, link) \
+ _MALI_OSK_LIST_FOREACHENTRY(session, tmp, &mali_sessions, struct mali_session_data, link)
+
+MALI_STATIC_INLINE struct mali_page_directory *mali_session_get_page_directory(struct mali_session_data *session)
+{
+ return session->page_directory;
+}
+
+MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
+{
+ _mali_osk_notification_queue_send(session->ioctl_queue, object);
+}
+
+#endif /* __MALI_SESSION_H__ */
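Every walk of the global session list has to be bracketed by mali_session_lock()/mali_session_unlock() and should use the MALI_SESSION_FOREACH macro declared above. A hypothetical helper (illustrative, not part of this patch) that counts registered sessions shows the pattern:

#include "mali_session.h"

/* Sketch: count the sessions currently on the global list. The session
 * lock protects the list while it is traversed. */
static u32 example_session_count(void)
{
	struct mali_session_data *session, *tmp;
	u32 count = 0;

	mali_session_lock();
	MALI_SESSION_FOREACH(session, tmp, link)
	{
		count++;
	}
	mali_session_unlock();

	return count;
}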
diff --git a/drivers/media/video/samsung/mali/common/mali_ukk.h b/drivers/media/video/samsung/mali/common/mali_ukk.h
new file mode 100644
index 0000000..6b018d0
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_ukk.h
@@ -0,0 +1,612 @@
+/*
+ * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_ukk.h
+ * Defines the kernel-side interface of the user-kernel interface
+ */
+
+#ifndef __MALI_UKK_H__
+#define __MALI_UKK_H__
+
+#include "mali_osk.h"
+#include "mali_uk_types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * @addtogroup uddapi Unified Device Driver (UDD) APIs
+ *
+ * @{
+ */
+
+/**
+ * @addtogroup u_k_api UDD User/Kernel Interface (U/K) APIs
+ *
+ * - The _mali_uk functions are an abstraction of the interface to the device
+ * driver. On certain OSs, this would be implemented via the IOCTL interface.
+ * On other OSs, it could be via extension of some Device Driver Class, or
+ * direct function call for Bare metal/RTOSs.
+ * - It is important to note that:
+ * - The Device Driver has implemented the _mali_ukk set of functions
+ * - The Base Driver calls the corresponding set of _mali_uku functions.
+ * - What requires porting is solely the calling mechanism from User-side to
+ * Kernel-side, and propagating back the results.
+ * - Each U/K function is associated with a (group, number) pair from
+ * \ref _mali_uk_functions to make it possible for a common function in the
+ * Base Driver and Device Driver to route User/Kernel calls from/to the
+ * correct _mali_uk function. For example, in an IOCTL system, the IOCTL number
+ * would be formed based on the group and number assigned to the _mali_uk
+ * function, as listed in \ref _mali_uk_functions. On the user-side, each
+ * _mali_uku function would just make an IOCTL with the IOCTL-code being an
+ * encoded form of the (group, number) pair. On the kernel-side, the Device
+ * Driver's IOCTL handler decodes the IOCTL-code back into a (group, number)
+ * pair, and uses this to determine which corresponding _mali_ukk should be
+ * called.
+ * - Refer to \ref _mali_uk_functions for more information about this
+ * (group, number) pairing.
+ * - In a system where there is no distinction between user and kernel-side,
+ * the U/K interface may be implemented as:@code
+ * MALI_STATIC_INLINE _mali_osk_errcode_t _mali_uku_examplefunction( _mali_uk_examplefunction_s *args )
+ * {
+ * return mali_ukk_examplefunction( args );
+ * }
+ * @endcode
+ * - Therefore, all U/K calls behave \em as \em though they were direct
+ * function calls (but the \b implementation \em need \em not be a direct
+ * function call)
+ *
+ * @note Naming the _mali_uk functions the same on both User and Kernel sides
+ * on non-RTOS systems causes debugging issues when setting breakpoints. In
+ * this case, it is not clear which function the breakpoint is put on.
+ * Therefore the _mali_uk functions in user space are prefixed with \c _mali_uku
+ * and in kernel space with \c _mali_ukk. The naming for the argument
+ * structures is unaffected.
+ *
+ * - The _mali_uk functions are synchronous.
+ * - Arguments to the _mali_uk functions are passed in a structure. The only
+ * parameter passed to the _mali_uk functions is a pointer to this structure.
+ * The first member of this structure, ctx, is a pointer to a context returned
+ * by _mali_uku_open(). For example:@code
+ * typedef struct
+ * {
+ * void *ctx;
+ * u32 number_of_cores;
+ * } _mali_uk_get_gp_number_of_cores_s;
+ * @endcode
+ *
+ * - Each _mali_uk function has its own argument structure named after the
+ * function. The argument is distinguished by the _s suffix.
+ * - The argument types are defined by the base driver and user-kernel
+ * interface.
+ * - All _mali_uk functions return a standard \ref _mali_osk_errcode_t.
+ * - Only arguments of type input or input/output need be initialized before
+ * calling a _mali_uk function.
+ * - Arguments of type output and input/output are only valid when the
+ * _mali_uk function returns \ref _MALI_OSK_ERR_OK.
+ * - The \c ctx member is always invalid after it has been used by a
+ * _mali_uk function, except for the context management functions
+ *
+ *
+ * \b Interface \b restrictions
+ *
+ * The requirements of the interface mean that an implementation of the
+ * User-kernel interface may do no 'real' work. For example, the following are
+ * illegal in the User-kernel implementation:
+ * - Calling functions necessary for operation on all systems, which would
+ * not otherwise get called on RTOS systems.
+ * - For example, a U/K interface that calls multiple _mali_ukk functions
+ * during one particular U/K call. This could not be achieved by the same code
+ * which uses direct function calls for the U/K interface.
+ * - Writing in values to the args members, when otherwise these members would
+ * not hold a useful value for a direct function call U/K interface.
+ * - For example, U/K interface implementations that take NULL members in
+ * their arguments structure from the user side, but those members are
+ * replaced with non-NULL values in the kernel-side of the U/K interface
+ * implementation. A scratch area for writing data is one such example. In this
+ * case, a direct function call U/K interface would segfault, because no code
+ * would be present to replace the NULL pointer with a meaningful pointer.
+ * - Note that we discourage the case where the U/K implementation changes
+ * a NULL argument member to non-NULL, and then the Device Driver code (outside
+ * of the U/K layer) re-checks this member for NULL, and corrects it when
+ * necessary. Whilst such code works even on direct function call U/K
+ * interfaces, it reduces the testing coverage of the Device Driver code. This
+ * is because we have no way of testing the NULL == value path on an OS
+ * implementation.
+ *
+ * A number of allowable examples exist where U/K interfaces do 'real' work:
+ * - The 'pointer switching' technique for \ref _mali_ukk_get_system_info
+ * - In this case, without the pointer switching on direct function call
+ * U/K interface, the Device Driver code still sees the same thing: a pointer
+ * to which it can write memory. This is because such a system has no
+ * distinction between a user and kernel pointer.
+ * - Writing an OS-specific value into the ukk_private member for
+ * _mali_ukk_mem_mmap().
+ * - In this case, this value is passed around by Device Driver code, but
+ * its actual value is never checked. Device Driver code simply passes it from
+ * the U/K layer to the OSK layer, where it can be acted upon. In this case,
+ * \em some OS implementations of the U/K (_mali_ukk_mem_mmap()) and OSK
+ * (_mali_osk_mem_mapregion_init()) functions will collaborate on the
+ * meaning of ukk_private member. On other OSs, it may be unused by both
+ * U/K and OSK layers
+ * - Therefore, on error inside the U/K interface implementation itself,
+ * it will be as though the _mali_ukk function itself had failed, and cleaned
+ * up after itself.
+ * - Compare this to a direct function call U/K implementation, where all
+ * error cleanup is handled by the _mali_ukk function itself. The direct
+ * function call U/K interface implementation is automatically atomic.
+ *
+ * The last example highlights a consequence of all U/K interface
+ * implementations: they must be atomic with respect to the Device Driver code.
+ * And therefore, should Device Driver code succeed but the U/K implementation
+ * fail afterwards (but before return to user-space), then the U/K
+ * implementation must cause appropriate cleanup actions to preserve the
+ * atomicity of the interface.
+ *
+ * @{
+ */
+
+
+/** @defgroup _mali_uk_context U/K Context management
+ *
+ * These functions allow for initialisation of the user-kernel interface once per process.
+ *
+ * Generally the context will store the OS specific object to communicate with the kernel device driver and further
+ * state information required by the specific implementation. The context is shareable among all threads in the caller process.
+ *
+ * On IOCTL systems, this is likely to be a file descriptor as a result of opening the kernel device driver.
+ *
+ * On a bare-metal/RTOS system with no distinction between kernel and
+ * user-space, the U/K interface simply calls the _mali_ukk variant of the
+ * function by direct function call. In this case, the context returned is the
+ * mali_session_data from _mali_ukk_open().
+ *
+ * The kernel side implementations of the U/K interface expect the first member of the argument structure to
+ * be the context created by _mali_uku_open(). On some OS implementations, the meaning of this context
+ * will be different between user-side and kernel-side. In which case, the kernel-side will need to replace this context
+ * with the kernel-side equivalent, because user-side will not have access to kernel-side data. The context parameter
+ * in the argument structure therefore has to be of type input/output.
+ *
+ * It should be noted that the caller cannot reuse the \c ctx member of U/K
+ * argument structure after a U/K call, because it may be overwritten. Instead,
+ * the context handle must always be stored elsewhere, and copied into
+ * the appropriate U/K argument structure for each user-side call to
+ * the U/K interface. This is not usually a problem, since U/K argument
+ * structures are usually placed on the stack.
+ *
+ * @{ */
+
+/** @brief Begin a new Mali Device Driver session
+ *
+ * This is used to obtain a per-process context handle for all future U/K calls.
+ *
+ * @param context pointer to storage to return a (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_open( void **context );
+
+/** @brief End a Mali Device Driver session
+ *
+ * This should be called when the process no longer requires use of the Mali Device Driver.
+ *
+ * The context handle must not be used after it has been closed.
+ *
+ * @param context pointer to a stored (void*)context handle.
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_close( void **context );
+
+/** @} */ /* end group _mali_uk_context */
+
+
+/** @addtogroup _mali_uk_core U/K Core
+ *
+ * The core functions provide the following functionality:
+ * - verify that the user and kernel API are compatible
+ * - retrieve information about the cores and memory banks in the system
+ * - wait for the result of jobs started on a core
+ *
+ * @{ */
+
+/** @brief Waits for a job notification.
+ *
+ * Sleeps until notified or a timeout occurs. Returns information about the notification.
+ *
+ * @param args see _mali_uk_wait_for_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_wait_for_notification( _mali_uk_wait_for_notification_s *args );
+
+/** @brief Post a notification to the notification queue of this application.
+ *
+ * @param args see _mali_uk_post_notification_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_post_notification( _mali_uk_post_notification_s *args );
+
+/** @brief Verifies if the user and kernel side of this API are compatible.
+ *
+ * @param args see _mali_uk_get_api_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_api_version( _mali_uk_get_api_version_s *args );
+
+/** @brief Get the user space settings applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_settings_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args);
+
+/** @brief Get a user space setting applicable for calling process.
+ *
+ * @param args see _mali_uk_get_user_setting_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args);
+
+/** @} */ /* end group _mali_uk_core */
+
+
+/** @addtogroup _mali_uk_memory U/K Memory
+ *
+ * The memory functions provide functionality with and without a Mali-MMU present.
+ *
+ * For Mali-MMU based systems, the following functionality is provided:
+ * - Initialize and terminate MALI virtual address space
+ * - Allocate/deallocate physical memory to a MALI virtual address range and map into/unmap from the
+ * current process address space
+ * - Map/unmap external physical memory into the MALI virtual address range
+ *
+ * For Mali-nonMMU based systems:
+ * - Allocate/deallocate MALI memory
+ *
+ * @{ */
+
+/**
+ * @brief Initialize the Mali-MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called before any
+ * other functions in the \ref _mali_uk_memory group are called.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_init_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_init_mem( _mali_uk_init_mem_s *args );
+
+/**
+ * @brief Terminate the MMU Memory system
+ *
+ * For Mali-MMU builds of the drivers, this function must be called when
+ * functions in the \ref _mali_uk_memory group will no longer be called. This
+ * function must be called before the application terminates.
+ *
+ * @note This function is for Mali-MMU builds \b only. It should not be called
+ * when the drivers are built without Mali-MMU support.
+ *
+ * @param args see \ref _mali_uk_term_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable
+ * _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_term_mem( _mali_uk_term_mem_s *args );
+
+/** @brief Map Mali Memory into the current user process
+ *
+ * Maps Mali memory into the current user process in a generic way.
+ *
+ * This function is to be used for Mali-MMU mode. The function is available in both Mali-MMU and Mali-nonMMU modes,
+ * but should not be called by a user process in Mali-nonMMU mode.
+ *
+ * The implementation and operation of _mali_ukk_mem_mmap() is dependent on whether the driver is built for Mali-MMU
+ * or Mali-nonMMU:
+ * - In the nonMMU case, _mali_ukk_mem_mmap() requires a physical address to be specified. For this reason, an OS U/K
+ * implementation should not allow this to be called from user-space. In any case, nonMMU implementations are
+ * inherently insecure, and so the overall impact is minimal. Mali-MMU mode should be used if security is desired.
+ * - In the MMU case, _mali_ukk_mem_mmap() uses the _mali_uk_mem_mmap_s::phys_addr
+ * member for the \em Mali-virtual address desired for the mapping. The
+ * implementation of _mali_ukk_mem_mmap() will allocate both the CPU-virtual
+ * and CPU-physical addresses, and can cope with mapping a contiguous virtual
+ * address range to a sequence of non-contiguous physical pages. In this case,
+ * the CPU-physical addresses are not communicated back to the user-side, as
+ * they are unnecessary; the \em Mali-virtual address range must be used for
+ * programming Mali structures.
+ *
+ * In the second (MMU) case, _mali_ukk_mem_mmap() handles management of
+ * CPU-virtual and CPU-physical ranges, but the \em caller must manage the
+ * \em Mali-virtual address range from the user-side.
+ *
+ * @note Mali-virtual address ranges are entirely separate between processes.
+ * It is not possible for a process to accidentally corrupt another process'
+ * \em Mali-virtual address space.
+ *
+ * @param args see _mali_uk_mem_mmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_mmap( _mali_uk_mem_mmap_s *args );
+
+/** @brief Unmap Mali Memory from the current user process
+ *
+ * Unmaps Mali memory from the current user process in a generic way. This only operates on Mali memory supplied
+ * from _mali_ukk_mem_mmap().
+ *
+ * @param args see _mali_uk_mem_munmap_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_mem_munmap( _mali_uk_mem_munmap_s *args );
+
+/** @brief Determine the buffer size necessary for an MMU page table dump.
+ * @param args see _mali_uk_query_mmu_page_table_dump_size_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_query_mmu_page_table_dump_size( _mali_uk_query_mmu_page_table_dump_size_s *args );
+/** @brief Dump MMU Page tables.
+ * @param args see _mali_uk_dump_mmu_page_table_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_dump_mmu_page_table( _mali_uk_dump_mmu_page_table_s * args );
+
+/** @brief Map a physically contiguous range of memory into Mali
+ * @param args see _mali_uk_map_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_map_external_mem( _mali_uk_map_external_mem_s *args );
+
+/** @brief Unmap a physically contiguous range of memory from Mali
+ * @param args see _mali_uk_unmap_external_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_unmap_external_mem( _mali_uk_unmap_external_mem_s *args );
+
+#if MALI_USE_UNIFIED_MEMORY_PROVIDER != 0
+/** @brief Map UMP memory into Mali
+ * @param args see _mali_uk_attach_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_attach_ump_mem( _mali_uk_attach_ump_mem_s *args );
+/** @brief Unmap UMP memory from Mali
+ * @param args see _mali_uk_release_ump_mem_s in mali_utgard_uk_types.h
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_release_ump_mem( _mali_uk_release_ump_mem_s *args );
+#endif /* MALI_USE_UNIFIED_MEMORY_PROVIDER */
+
+/** @brief Determine virtual-to-physical mapping of a contiguous memory range
+ * (optional)
+ *
+ * This allows the user-side to do a virtual-to-physical address translation.
+ * In conjunction with _mali_uku_map_external_mem, this can be used to do
+ * direct rendering.
+ *
+ * This function will only succeed on a virtual range that is mapped into the
+ * current process, and that is contiguous.
+ *
+ * If va is not page-aligned, then it is rounded down to the next page
+ * boundary. The remainder is added to size, such that ((u32)va)+size before
+ * rounding is equal to ((u32)va)+size after rounding. The rounded modified
+ * va and size will be written out into args on success.
+ *
+ * If the supplied size is zero, or not a multiple of the system's PAGE_SIZE,
+ * then size will be rounded up to the next multiple of PAGE_SIZE before
+ * translation occurs. The rounded up size will be written out into args on
+ * success.
+ *
+ * On most OSs, virtual-to-physical address translation is a privileged
+ * function. Therefore, the implementer must validate the range supplied, to
+ * ensure they are not providing arbitrary virtual-to-physical address
+ * translations. While it is unlikely such a mechanism could be used to
+ * compromise the security of a system on its own, it is possible it could be
+ * combined with another small security risk to cause a much larger security
+ * risk.
+ *
+ * @note This is an optional part of the interface, and is only used by certain
+ * implementations of libEGL. If the platform layer in your libEGL
+ * implementation does not require Virtual-to-Physical address translation,
+ * then this function need not be implemented. A stub implementation should not
+ * be required either, as it would only be removed by the compiler's dead code
+ * elimination.
+ *
+ * @note if implemented, this function is entirely platform-dependant, and does
+ * not exist in common code.
+ *
+ * @param args see _mali_uk_va_to_mali_pa_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_va_to_mali_pa( _mali_uk_va_to_mali_pa_s * args );
+
+/** @} */ /* end group _mali_uk_memory */
+
+
+/** @addtogroup _mali_uk_pp U/K Fragment Processor
+ *
+ * The Fragment Processor (aka PP (Pixel Processor)) functions provide the following functionality:
+ * - retrieving version of the fragment processors
+ * - determine number of fragment processors
+ * - starting a job on a fragment processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Fragment Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started instead and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_pp_start_job_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_pp_start_job( _mali_uk_pp_start_job_s *args );
+
+/** @brief Returns the number of Fragment Processors in the system
+ *
+ * @param args see _mali_uk_get_pp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores( _mali_uk_get_pp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Fragment Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_pp_number_of_cores() indicated at least one Fragment
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_pp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version( _mali_uk_get_pp_core_version_s *args );
+
+/** @brief Disable Write-back unit(s) on specified job
+ *
+ * @param args see _mali_uk_pp_disable_wb_s in "mali_utgard_uk_types.h"
+ */
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args);
+
+
+/** @} */ /* end group _mali_uk_pp */
+
+
+/** @addtogroup _mali_uk_gp U/K Vertex Processor
+ *
+ * The Vertex Processor (aka GP (Geometry Processor)) functions provide the following functionality:
+ * - retrieving version of the Vertex Processors
+ * - determine number of Vertex Processors available
+ * - starting a job on a Vertex Processor
+ *
+ * @{ */
+
+/** @brief Issue a request to start a new job on a Vertex Processor.
+ *
+ * If the request fails args->status is set to _MALI_UK_START_JOB_NOT_STARTED_DO_REQUEUE and you can
+ * try to start the job again.
+ *
+ * An existing job could be returned for requeueing if the new job has a higher priority than a previously started job
+ * which the hardware hasn't actually started processing yet. In this case the new job will be started and the
+ * existing one returned, otherwise the new job is started and the status field args->status is set to
+ * _MALI_UK_START_JOB_STARTED.
+ *
+ * Job completion can be awaited with _mali_ukk_wait_for_notification().
+ *
+ * @param args see _mali_uk_gp_start_job_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_start_job( _mali_uk_gp_start_job_s *args );
+
+/** @brief Returns the number of Vertex Processors in the system.
+ *
+ * @param args see _mali_uk_get_gp_number_of_cores_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores( _mali_uk_get_gp_number_of_cores_s *args );
+
+/** @brief Returns the version that all Vertex Processor cores are compatible with.
+ *
+ * This function may only be called when _mali_ukk_get_gp_number_of_cores() indicated at least one Vertex
+ * Processor core is available.
+ *
+ * @param args see _mali_uk_get_gp_core_version_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version( _mali_uk_get_gp_core_version_s *args );
+
+/** @brief Resume or abort suspended Vertex Processor jobs.
+ *
+ * After receiving notification that a Vertex Processor job was suspended from
+ * _mali_ukk_wait_for_notification() you can use this function to resume or abort the job.
+ *
+ * @param args see _mali_uk_gp_suspend_response_s in "mali_utgard_uk_types.h"
+ * @return _MALI_OSK_ERR_OK on success, otherwise a suitable _mali_osk_errcode_t on failure.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response( _mali_uk_gp_suspend_response_s *args );
+
+/** @} */ /* end group _mali_uk_gp */
+
+#if MALI_TIMELINE_PROFILING_ENABLED
+/** @addtogroup _mali_uk_profiling U/K Timeline profiling module
+ * @{ */
+
+/** @brief Start recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_start_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args);
+
+/** @brief Add event to profiling buffer.
+ *
+ * @param args see _mali_uk_profiling_add_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args);
+
+/** @brief Stop recording profiling events.
+ *
+ * @param args see _mali_uk_profiling_stop_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args);
+
+/** @brief Retrieve a recorded profiling event.
+ *
+ * @param args see _mali_uk_profiling_get_event_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args);
+
+/** @brief Clear recorded profiling events.
+ *
+ * @param args see _mali_uk_profiling_clear_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args);
+
+/** @} */ /* end group _mali_uk_profiling */
+#endif
+
+/** @addtogroup _mali_uk_vsync U/K VSYNC reporting module
+ * @{ */
+
+/** @brief Report events related to vsync.
+ *
+ * @note Events should be reported when starting to wait for vsync and when the
+ * waiting is finished. This information can then be used in kernel space to
+ * complement the GPU utilization metric.
+ *
+ * @param args see _mali_uk_vsync_event_report_s in "mali_utgard_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args);
+
+/** @} */ /* end group _mali_uk_vsync */
+
+/** @addtogroup _mali_sw_counters_report U/K Software counter reporting
+ * @{ */
+
+/** @brief Report software counters.
+ *
+ * @param args see _mali_uk_sw_counters_report_s in "mali_uk_types.h"
+ */
+_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args);
+
+/** @} */ /* end group _mali_sw_counters_report */
+
+/** @} */ /* end group u_k_api */
+
+/** @} */ /* end group uddapi */
+
+u32 _mali_ukk_report_memory_usage(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __MALI_UKK_H__ */
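Every _mali_uk call follows the same convention spelled out above: fill in the argument structure, set its first member ctx to the handle obtained at open time, make the call, and only read output members if _MALI_OSK_ERR_OK is returned. A hedged sketch of that pattern using the PP core-count query (the helper name and stored_ctx parameter are illustrative):

#include "mali_ukk.h"

/* Sketch of the standard U/K calling convention. 'stored_ctx' stands in
 * for the context handle the caller saved when the session was opened. */
static _mali_osk_errcode_t example_query_pp_cores(void *stored_ctx, u32 *cores)
{
	_mali_uk_get_pp_number_of_cores_s args = { 0 };
	_mali_osk_errcode_t err;

	args.ctx = stored_ctx;                         /* first member: session context */
	err = _mali_ukk_get_pp_number_of_cores(&args);
	if (_MALI_OSK_ERR_OK != err)
	{
		return err;                            /* outputs are undefined on failure */
	}

	*cores = args.number_of_cores;                 /* output member, valid on success */
	return _MALI_OSK_ERR_OK;
}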
diff --git a/drivers/media/video/samsung/mali/common/mali_user_settings_db.c b/drivers/media/video/samsung/mali/common/mali_user_settings_db.c
new file mode 100644
index 0000000..d3f1e50
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_user_settings_db.c
@@ -0,0 +1,88 @@
+/**
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_uk_types.h"
+#include "mali_user_settings_db.h"
+#include "mali_session.h"
+
+static u32 mali_user_settings[_MALI_UK_USER_SETTING_MAX];
+const char *_mali_uk_user_setting_descriptions[] = _MALI_UK_USER_SETTING_DESCRIPTIONS;
+
+static void mali_user_settings_notify(_mali_uk_user_setting_t setting, u32 value)
+{
+ struct mali_session_data *session, *tmp;
+
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link)
+ {
+ _mali_osk_notification_t *notobj = _mali_osk_notification_create(_MALI_NOTIFICATION_SETTINGS_CHANGED, sizeof(_mali_uk_settings_changed_s));
+ _mali_uk_settings_changed_s *data;
+ if (NULL == notobj)
+ {
+ continue; /* out of memory: skip notifying this session */
+ }
+ data = notobj->result_buffer;
+ data->setting = setting;
+ data->value = value;
+
+ mali_session_send_notification(session, notobj);
+ }
+ mali_session_unlock();
+}
+
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value)
+{
+ mali_bool notify = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT(setting < _MALI_UK_USER_SETTING_MAX && setting >= 0);
+
+ if (mali_user_settings[setting] != value)
+ {
+ notify = MALI_TRUE;
+ }
+
+ mali_user_settings[setting] = value;
+
+ if (notify)
+ {
+ mali_user_settings_notify(setting, value);
+ }
+}
+
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting)
+{
+ MALI_DEBUG_ASSERT(setting < _MALI_UK_USER_SETTING_MAX && setting >= 0);
+
+ return mali_user_settings[setting];
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_setting(_mali_uk_get_user_setting_s *args)
+{
+ _mali_uk_user_setting_t setting;
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ setting = args->setting;
+
+ if (0 <= setting && _MALI_UK_USER_SETTING_MAX > setting)
+ {
+ args->value = mali_user_settings[setting];
+ return _MALI_OSK_ERR_OK;
+ }
+ else
+ {
+ return _MALI_OSK_ERR_INVALID_ARGS;
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_get_user_settings(_mali_uk_get_user_settings_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+
+ _mali_osk_memcpy(args->settings, mali_user_settings, sizeof(mali_user_settings));
+
+ return _MALI_OSK_ERR_OK;
+}
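mali_set_user_setting() only broadcasts a _MALI_NOTIFICATION_SETTINGS_CHANGED event when the stored value actually changes, so redundant writes are cheap. A small usage sketch (the wrapper is hypothetical; setting ids come from _mali_uk_user_setting_t):

#include "mali_user_settings_db.h"

/* Sketch: toggle a boolean-style user setting and return the new value.
 * Sessions are only notified if the value really changed. */
static u32 example_toggle_setting(_mali_uk_user_setting_t setting)
{
	u32 new_value = mali_get_user_setting(setting) ? 0 : 1;

	mali_set_user_setting(setting, new_value);
	return new_value;
}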
diff --git a/drivers/media/video/samsung/mali/common/mali_user_settings_db.h b/drivers/media/video/samsung/mali/common/mali_user_settings_db.h
new file mode 100644
index 0000000..fbb9415
--- /dev/null
+++ b/drivers/media/video/samsung/mali/common/mali_user_settings_db.h
@@ -0,0 +1,40 @@
+/**
+ * Copyright (C) 2012 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_USER_SETTINGS_DB_H__
+#define __MALI_USER_SETTINGS_DB_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include "mali_uk_types.h"
+
+/** @brief Set Mali user setting in DB
+ *
+ * Update the DB with a new value for \a setting. If the value is different from the previously set value, running sessions will be notified of the change.
+ *
+ * @param setting the setting to be changed
+ * @param value the new value to set
+ */
+void mali_set_user_setting(_mali_uk_user_setting_t setting, u32 value);
+
+/** @brief Get current Mali user setting value from DB
+ *
+ * @param setting the setting to extract
+ * @return the value of the selected setting
+ */
+u32 mali_get_user_setting(_mali_uk_user_setting_t setting);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __MALI_USER_SETTINGS_DB_H__ */