path: root/drivers/gpu/mali400/r3p2/ump/linux/ump_osk_low_level_mem.c
/*
 * Copyright (C) 2010-2012 ARM Limited. All rights reserved.
 * 
 * This program is free software and is provided to you under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
 * 
 * A copy of the licence is included with the program, and can also be obtained from Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

/**
 * @file ump_osk_low_level_mem.c
 * Implementation of the OS abstraction layer for the kernel device driver
 */

/* needed to detect kernel version specific code */
#include <linux/version.h>

#include "ump_osk.h"
#include "ump_uk_types.h"
#include "ump_ukk.h"
#include "ump_kernel_common.h"
#include <linux/module.h>            /* kernel module definitions */
#include <linux/kernel.h>
#include <linux/mm.h>
/* MALI_SEC */
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/memory.h>
#include <asm/uaccess.h>			/* to verify pointers from user space */
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>

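/* Per-VMA bookkeeping attached to vma->vm_private_data: counts how many live
 * mappings (including fork() copies, see ump_vma_open/ump_vma_close) still
 * refer to the UMP allocation described by 'descriptor'. */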
typedef struct ump_vma_usage_tracker
{
	atomic_t references;
	ump_memory_allocation *descriptor;
} ump_vma_usage_tracker;

static void ump_vma_open(struct vm_area_struct * vma);
static void ump_vma_close(struct vm_area_struct * vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
#endif

static struct vm_operations_struct ump_vm_ops =
{
	.open = ump_vma_open,
	.close = ump_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	.fault = ump_cpu_page_fault_handler
#else
	.nopfn = ump_cpu_page_fault_handler
#endif
};

/*
 * Page fault for VMA region
 * This should never happen since we always map in the entire virtual memory range.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	void __user * address;
	address = vmf->virtual_address;
#endif
	MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
	MSG_ERR(("VMA: 0x%08lx, virtual address: 0x%08lx\n", (unsigned long)vma, (unsigned long)address));

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	return VM_FAULT_SIGBUS;
#else
	return NOPFN_SIGBUS;
#endif
}

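/* VMA open callback: a new reference to the mapping appears (for example when
 * the process forks), so increase the reference count. */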
static void ump_vma_open(struct vm_area_struct * vma)
{
	ump_vma_usage_tracker * vma_usage_tracker;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_inc_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
}

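/* VMA close callback: drop one reference; when the last one is gone, ask the
 * UMP core to unmap the memory via _ump_ukk_unmap_mem(). */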
static void ump_vma_close(struct vm_area_struct * vma)
{
	ump_vma_usage_tracker * vma_usage_tracker;
	_ump_uk_unmap_mem_s args;
	int new_val;

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	BUG_ON(NULL == vma_usage_tracker);

	new_val = atomic_dec_return(&vma_usage_tracker->references);

	DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));

	if (0 == new_val)
	{
		ump_memory_allocation * descriptor;

		descriptor = vma_usage_tracker->descriptor;

		args.ctx = descriptor->ump_session;
		args.cookie = descriptor->cookie;
		args.mapping = descriptor->mapping;
		args.size = descriptor->size;

		args._ukk_private = NULL; /** @note unused */

		DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
		_ump_ukk_unmap_mem( & args );

		/* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
	}
}

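/* Prepare a newly created VMA for a UMP mapping: mark it as an IO mapping,
 * select write-combined page protection for uncached allocations, install
 * ump_vm_ops and attach a usage tracker holding the initial reference. */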
_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
{
	ump_vma_usage_tracker * vma_usage_tracker;
	struct vm_area_struct *vma;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma_usage_tracker = kmalloc(sizeof(ump_vma_usage_tracker), GFP_KERNEL);
	if (NULL == vma_usage_tracker)
	{
		DBG_MSG(1, ("Failed to allocate memory for ump_vma_usage_tracker in _mali_osk_mem_mapregion_init\n"));
		return _MALI_OSK_ERR_FAULT;
	}

	vma = (struct vm_area_struct*)descriptor->process_mapping_info;
	if (NULL == vma )
	{
		kfree(vma_usage_tracker);
		return _MALI_OSK_ERR_FAULT;
	}

	vma->vm_private_data = vma_usage_tracker;
	vma->vm_flags |= VM_IO;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)
	vma->vm_flags |= VM_RESERVED;
#else
	vma->vm_flags |= VM_DONTDUMP;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_flags |= VM_PFNMAP;
#endif


	if (0==descriptor->is_cached)
	{
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}
	DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));

	/* Setup the functions which handle further VMA handling */
	vma->vm_ops = &ump_vm_ops;

	/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
	descriptor->mapping = (void __user*)vma->vm_start;

	atomic_set(&vma_usage_tracker->references, 1); /*this can later be increased if process is forked, see ump_vma_open() */
	vma_usage_tracker->descriptor = descriptor;

	return _MALI_OSK_ERR_OK;
}

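/* Undo _ump_osk_mem_mapregion_init(): free the usage tracker attached to the
 * VMA. The mapping itself is torn down by munmap. */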
void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
{
	struct vm_area_struct* vma;
	ump_vma_usage_tracker * vma_usage_tracker;

	if (NULL == descriptor) return;

	/* Linux does the right thing as part of munmap to remove the mapping.
	 * All that remains is to remove the vma_usage_tracker set up in init() */
	vma = (struct vm_area_struct*)descriptor->process_mapping_info;

	vma_usage_tracker = vma->vm_private_data;

	/* We only get called if mem_mapregion_init succeeded */
	kfree(vma_usage_tracker);
	return;
}

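/* Map 'size' bytes of physical memory starting at '*phys_addr' into the
 * process VMA at 'descriptor->mapping' + 'offset' using remap_pfn_range(). */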
_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
{
	struct vm_area_struct *vma;
	_mali_osk_errcode_t retval;

	if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;

	vma = (struct vm_area_struct*)descriptor->process_mapping_info;

	if (NULL == vma ) return _MALI_OSK_ERR_FAULT;

	retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;

		DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
		        ump_dd_secure_id_get(descriptor->handle),
		        (unsigned long)vma,
		        (unsigned long)(vma->vm_start + offset),
		        (unsigned long)*phys_addr,
		        size,
		        (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));

	return retval;
}

/* MALI_SEC */
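/* Translate a user virtual address inside a mapping that starts at 'start'
 * into a physical address by walking the allocation's physical block list.
 * '*index' is set to the block the address falls in. Because of 'sum > offset',
 * an address on a block boundary resolves to the start of the following block. */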
static u32 _ump_osk_virt_to_phys_start(ump_dd_mem * mem, u32 start, u32 address, int *index)
{
	int i;
	u32 offset = address - start;
	ump_dd_physical_block *block;
	u32 sum = 0;

	for (i=0; i<mem->nr_blocks; i++) {
		block = &mem->block_array[i];
		sum += block->size;
		if (sum > offset) {
			*index = i;
			DBG_MSG(3, ("_ump_osk_virt_to_phys : index : %d, virtual 0x%x, phys 0x%x\n", i, address, (u32)block->addr + offset - (sum - block->size)));
			return (u32)block->addr + offset - (sum - block->size);
		}
	}

	return _MALI_OSK_ERR_FAULT;
}

/* MALI_SEC */
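/* End-address variant of the lookup above: 'sum >= offset' makes an address on
 * a block boundary resolve to the end of the preceding block, which is what the
 * range-end calculation in _ump_osk_msync_with_virt() needs. Note that an
 * address outside the allocation yields _MALI_OSK_ERR_FAULT cast to u32, and
 * the caller does not check for this. */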
static u32 _ump_osk_virt_to_phys_end(ump_dd_mem * mem, u32 start, u32 address, int *index)
{
	int i;
	u32 offset = address - start;
	ump_dd_physical_block *block;
	u32 sum = 0;

	for (i=0; i<mem->nr_blocks; i++) {
		block = &mem->block_array[i];
		sum += block->size;
		if (sum >= offset) {
			*index = i;
			DBG_MSG(3, ("_ump_osk_virt_to_phys : index : %d, virtual 0x%x, phys 0x%x\n", i, address, (u32)block->addr + offset - (sum - block->size)));
			return (u32)block->addr + offset - (sum - block->size);
		}
	}

	return _MALI_OSK_ERR_FAULT;
}

/* MALI_SEC */
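/* Outer (L2) cache maintenance over a user virtual range that may span several
 * physical blocks: the first and last blocks are handled partially, any blocks
 * in between are cleaned or flushed in full. */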
static void _ump_osk_msync_with_virt(ump_dd_mem * mem, ump_uk_msync_op op, u32 start, u32 address, u32 size)
{
	int start_index, end_index;
	u32 start_p, end_p;

	DBG_MSG(3, ("Cache flush with user virtual address. start : 0x%x, end : 0x%x, address 0x%x, size 0x%x\n", start, start+mem->size_bytes, address, size));

	start_p = _ump_osk_virt_to_phys_start(mem, start, address, &start_index);
	end_p = _ump_osk_virt_to_phys_end(mem, start, address+size, &end_index);

	if (start_index==end_index) {
		if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE)
			outer_flush_range(start_p, end_p);
		else
			outer_clean_range(start_p, end_p);
	} else {
		ump_dd_physical_block *block;
		int i;

		for (i=start_index; i<=end_index; i++) {
			block = &mem->block_array[i];

			if (i == start_index) {
				if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
					outer_flush_range(start_p, block->addr+block->size);
				} else {
					outer_clean_range(start_p, block->addr+block->size);
				}
			}
			else if (i == end_index) {
				if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
					outer_flush_range(block->addr, end_p);
				} else {
					outer_clean_range(block->addr, end_p);
				}
				break;
			}
			else {
				if (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE) {
					outer_flush_range(block->addr, block->addr+block->size);
				} else {
					outer_clean_range(block->addr, block->addr+block->size);
				}
			}
		}
	}
	return;
}
/* The end of MALI_SEC */

static void level1_cache_flush_all(void)
{
	DBG_MSG(4, ("UMP[xx] Flushing complete L1 cache\n"));
	__cpuc_flush_kern_all();
}

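/* Cache maintenance entry point. L1 is handled by virtual address: the whole
 * range is flushed when the caller holds a valid writable user mapping (falling
 * back to a full cache flush for ranges of 64 KiB or more); otherwise a full L1
 * flush is performed, or deferred while session_data->cache_operations_ongoing
 * is set. L2 is then handled by physical address, block by block, with a
 * whole-outer-cache shortcut when a virtual address was supplied and the
 * allocation is 1 MiB or larger. */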
void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data )
{
	int i;
	/* MALI_SEC */
	const void *start_v, *end_v;

	/* Flush L1 using the virtual address, the entire range in one go.
	 * Only flush if the user space process has a valid write mapping for the given address. */
	if( (mem) && (virt!=NULL) && (access_ok(VERIFY_WRITE, virt, size)) )
	{
		/* MALI_SEC */
		start_v = (void *)virt;
		end_v   = (void *)(start_v + size - 1);
		/*  There is no dmac_clean_range, so the L1 is always flushed,
		 *  even for UMP_MSYNC_CLEAN. */
		if (size >= SZ_64K)
			flush_all_cpu_caches();
		else
			dmac_flush_range(start_v, end_v);

		/* MALI ORIGINAL CODE */
		//__cpuc_flush_dcache_area(virt, size);

		DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. CPU address: %p-%p\n", mem->secure_id, start_v, end_v));
	}
	else
	{
		if (session_data)
		{
			if (op == _UMP_UK_MSYNC_FLUSH_L1  )
			{
				DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
				session_data->has_pending_level1_cache_flush = 0;
				level1_cache_flush_all();
				return;
			}
			else
			{
				if (session_data->cache_operations_ongoing)
				{
					session_data->has_pending_level1_cache_flush++;
					DBG_MSG(4, ("UMP[%02u] Deferring the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush) );
				}
				else
				{
					/* Flushing the L1 cache for each switch_user() if ump_cache_operations_control(START) is not called */
					level1_cache_flush_all();
				}
			}
		}
		else
		{
			DBG_MSG(4, ("Unknown state %s %d\n", __FUNCTION__, __LINE__));
			level1_cache_flush_all();
		}
	}

	if ( NULL == mem ) return;

	if ( mem->size_bytes==size)
	{
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n",mem->secure_id));
	}
	else
	{
		DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
	            mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
	}


	/* Flush L2 using physical addresses, block for block. */
	/* MALI_SEC */
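	/* For mappings of 1 MiB or more, operate on the whole outer cache rather
	 * than walking every block (presumably cheaper for large ranges). */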
	if ((virt!=NULL) && (mem->size_bytes >= SZ_1M)) {
		if (op == _UMP_UK_MSYNC_CLEAN)
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0)
			outer_clean_all();
#else
			outer_sync();
#endif
		else if ((op == _UMP_UK_MSYNC_INVALIDATE) || (op == _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE))
			outer_flush_all();
		return;
	}

	for (i=0 ; i < mem->nr_blocks; i++)
	{
		u32 start_p, end_p;
		ump_dd_physical_block *block;
		block = &mem->block_array[i];

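		/* Skip whole blocks that lie before the requested offset; once inside a
		 * block, maintain the range from the adjusted start to the end of the
		 * block or of the remaining size, whichever comes first. */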
		if(offset >= block->size)
		{
			offset -= block->size;
			continue;
		}

		if(offset)
		{
			start_p = (u32)block->addr + offset;
			/* We'll zero the offset later, after using it to calculate end_p. */
		}
		else
		{
			start_p = (u32)block->addr;
		}

		if(size < block->size - offset)
		{
			end_p = start_p + size - 1;
			size = 0;
		}
		else
		{
			if(offset)
			{
				end_p = start_p + (block->size - offset - 1);
				size -= block->size - offset;
				offset = 0;
			}
			else
			{
				end_p = start_p + block->size - 1;
				size -= block->size;
			}
		}

		switch(op)
		{
		case _UMP_UK_MSYNC_CLEAN:
			outer_clean_range(start_p, end_p);
			break;
		case _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE:
			outer_flush_range(start_p, end_p);
			break;
		case _UMP_UK_MSYNC_INVALIDATE:
			outer_inv_range(start_p, end_p);
			break;
		default:
			break;
		}

		if(0 == size)
		{
			/* Nothing left to flush. */
			break;
		}
	}

	return;
}

/* MALI_SEC */
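/* Resolve a user virtual address back to its UMP allocation: find the VMA that
 * covers it, then follow vm_private_data to the usage tracker and its
 * descriptor. '*mem' is set to NULL when no VMA or no tracker is found. */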
void _ump_osk_mem_mapregion_get( ump_dd_mem ** mem, unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	ump_vma_usage_tracker * vma_usage_tracker;
	ump_memory_allocation *descriptor;
	ump_dd_handle handle;

	DBG_MSG(3, ("_ump_osk_mem_mapregion_get: vaddr 0x%08lx\n", vaddr));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, vaddr);
	up_read(&mm->mmap_sem);
	if(!vma)
	{
		DBG_MSG(3, ("VMA not found\n"));
		*mem = NULL;
		return;
	}
	DBG_MSG(4, ("Get vma: 0x%08lx vma->vm_start: 0x%08lx\n", (unsigned long)vma, vma->vm_start));

	vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
	if(vma_usage_tracker == NULL)
	{
		DBG_MSG(3, ("vma_usage_tracker not found\n"));
		*mem = NULL;
		return;
	}

	descriptor = (ump_memory_allocation*)vma_usage_tracker->descriptor;
	handle = (ump_dd_handle)descriptor->handle;

	DBG_MSG(3, ("Get handle: 0x%08lx\n", (unsigned long)handle));
	*mem = (ump_dd_mem*)handle;
}