aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/exynos_tmp/exynos_drm_iommu.c
blob: f2ffa6817482e6e5bb9524a85c2a195295710531 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
/* exynos_drm_iommu.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <drm/exynos_drm.h>

#include <plat/s5p-iovmm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

static DEFINE_MUTEX(iommu_mutex);

/*
 * Indirection table over the s5p-iovmm backend.
 *
 * All entry points below are optional at the call sites: every caller
 * checks the function pointer for NULL first, so leaving a slot unset
 * turns that operation into a no-op (used to run without an IOMMU).
 */
struct exynos_iommu_ops {
	/* allocate/register a device address space for @dev. */
	int (*setup)(struct device *dev);
	/* release the address space created by setup(). */
	void (*cleanup)(struct device *dev);
	/* enable address translation for @dev. */
	int (*activate)(struct device *dev);
	/* disable address translation for @dev. */
	void (*deactivate)(struct device *dev);
	/* map @sg into the device address space; returns the iova or 0. */
	dma_addr_t (*map)(struct device *dev, struct scatterlist *sg,
				off_t offset, size_t size);
	/* unmap a region previously returned by map(). */
	void (*unmap)(struct device *dev, dma_addr_t iova);
};

/* wire every slot to the s5p-iovmm implementation (see plat/s5p-iovmm.h). */
static const struct exynos_iommu_ops iommu_ops = {
	.setup		= iovmm_setup,
	.cleanup	= iovmm_cleanup,
	.activate	= iovmm_activate,
	.deactivate	= iovmm_deactivate,
	.map		= iovmm_map,
	.unmap		= iovmm_unmap
};

static bool check_iommu_map_params(struct iommu_gem_map_params *params)
{
	if (!params) {
		DRM_ERROR("params is null.\n");
		return false;
	}

	if (!params->dev || !params->drm_dev || !params->file) {
		DRM_ERROR("invalid params.\n");
		return false;
	}

	return true;
}

/*
 * Unlink and free the node on @iommu_list that refers to @gem_obj.
 *
 * Uses the _safe iterator so the matching entry can be deleted while
 * walking; only the first match is removed.
 */
void exynos_drm_remove_iommu_list(struct list_head *iommu_list,
					void *gem_obj)
{
	struct iommu_info_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, iommu_list, list) {
		if (node->gem_obj != gem_obj)
			continue;

		list_del(&node->list);
		kfree(node);
		break;
	}
}

/*
 * Map a GEM object's pages into a device's IOMMU address space.
 *
 * @params:     validated caller context (dev, drm_dev, file); on success
 *              params->gem_obj is set to the resolved GEM object.
 * @iommu_list: per-driver list that tracks live mappings.
 * @gem_handle: userspace handle identifying the GEM object.
 * @type:       which IOMMU slot (per-subdriver) to record the mapping in.
 *
 * Returns the device (iova) address on success, the physical base
 * address when no IOMMU backend is registered, or 0 on error.
 *
 * On success a GEM reference is held on behalf of the caller; it is
 * dropped again by exynos_drm_iommu_unmap_gem().
 */
dma_addr_t exynos_drm_iommu_map_gem(struct iommu_gem_map_params *params,
					struct list_head *iommu_list,
					unsigned int gem_handle,
					enum iommu_types type)
{
	struct sg_table *sgt;
	struct iommu_info_node *node;
	struct exynos_drm_gem_obj *obj;
	dma_addr_t dma_addr;

	if (!is_iommu_type_valid(type)) {
		DRM_ERROR("invalid iommu type.\n");
		return 0;
	}

	if (!check_iommu_map_params(params))
		return 0;

	/* get gem object from specific gem framework. */
	obj = exynos_drm_gem_get_obj(params->drm_dev, gem_handle,
					params->file);
	if (IS_ERR(obj))
		return 0;

	mutex_lock(&iommu_mutex);

	/*
	 * if this gem object had already been mapped to iommu then
	 * return dma address mapped before this time.
	 */
	if (obj->iommu_info.mapped & (1 << type)) {
		DRM_DEBUG_KMS("already mapped to iommu");
		mutex_unlock(&iommu_mutex);
		return obj->iommu_info.dma_addrs[type];
	}

	sgt = obj->buffer->sgt;

	/*
	 * if not using iommu, just return base address to physical
	 * memory region of the gem.
	 */
	if (!iommu_ops.map) {
		mutex_unlock(&iommu_mutex);
		return sg_dma_address(&sgt->sgl[0]);
	}
	mutex_unlock(&iommu_mutex);

	/*
	 * allocate device address space for this driver and then
	 * map all pages contained in sg list to iommu table.
	 *
	 * NOTE(review): the mutex is dropped across this call (iovmm_map
	 * may sleep), so a concurrent mapper could race here — TODO
	 * confirm callers serialize per-object mapping.
	 */
	dma_addr = iommu_ops.map(params->dev, sgt->sgl, (off_t)0,
					(size_t)obj->size);
	if (!dma_addr) {
		/* mutex is NOT held here; unlocking again would be a bug. */
		return 0;
	}

	mutex_lock(&iommu_mutex);

	/*
	 * check map flag bit and device address mapped to iommu.
	 * this data would be used to avoid duplicated mapping.
	 */
	obj->iommu_info.mapped |= (1 << type);
	obj->iommu_info.dma_addrs[type] = dma_addr;
	obj->iommu_info.devs[type] = params->dev;
	obj->iommu_info.iommu_lists[type] = iommu_list;

	params->gem_obj = obj;

	/*
	 * this gem object is referenced by this driver so
	 * the object refcount should be increased.
	 */
	drm_gem_object_reference(&obj->base);

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		DRM_ERROR("failed to allocate iommu node.\n");
		goto err;
	}

	node->gem_obj = obj;
	node->dma_addr = dma_addr;
	mutex_unlock(&iommu_mutex);

	list_add_tail(&node->list, iommu_list);

	return dma_addr;
err:
	/*
	 * roll back the bookkeeping done above: clear the mapped bit and
	 * cached address so the object can be mapped again later.
	 */
	obj->iommu_info.mapped &= ~(1 << type);
	obj->iommu_info.dma_addrs[type] = 0;
	mutex_unlock(&iommu_mutex);
	/* unmap the real iova (dma_addr must not be zeroed before this). */
	iommu_ops.unmap(params->dev, dma_addr);
	/* drop the reference taken for this (now abandoned) mapping. */
	drm_gem_object_unreference_unlocked(&obj->base);
	return 0;
}

/*
 * Undo a mapping created by exynos_drm_iommu_map_gem().
 *
 * @params:   caller context; params->gem_obj must be the object that was
 *            stored there by the map call.
 * @dma_addr: device address to release from the IOMMU.
 * @type:     which IOMMU slot the mapping was recorded under.
 *
 * Clears the per-type bookkeeping under iommu_mutex, unmaps the iova,
 * and drops the GEM reference taken at map time. Silently returns if
 * the parameters are invalid or the object was never mapped.
 */
void exynos_drm_iommu_unmap_gem(struct iommu_gem_map_params *params,
				dma_addr_t dma_addr,
				enum iommu_types type)
{
	struct exynos_drm_gem_obj *obj;

	/* no IOMMU backend registered: nothing was mapped. */
	if (!iommu_ops.unmap)
		return;

	if (!is_iommu_type_valid(type)) {
		DRM_ERROR("invalid iommu type.\n");
		return;
	}

	if (!check_iommu_map_params(params))
		return;

	/* map_gem() stores the object here on success; bail if absent. */
	if (!params->gem_obj)
		return;

	obj = (struct exynos_drm_gem_obj *)params->gem_obj;

	mutex_lock(&iommu_mutex);
	if (!(obj->iommu_info.mapped & (1 << type))) {
		DRM_DEBUG_KMS("not already mapped to iommu so just return\n");
		mutex_unlock(&iommu_mutex);
		return;
	}

	/* uncheck map flag bit. */
	obj->iommu_info.mapped &= ~(1 << type);
	obj->iommu_info.dma_addrs[type] = 0;
	mutex_unlock(&iommu_mutex);

	/* unmap outside the lock: iovmm_unmap may sleep. */
	iommu_ops.unmap(params->dev, dma_addr);

	/*
	 * drop this gem object refcount to release allocated buffer
	 * and resources.
	 */
	drm_gem_object_unreference_unlocked(&obj->base);
}

/*
 * Map a physically contiguous region into a device's IOMMU space.
 *
 * @dev:   device whose address space the region is mapped into.
 * @paddr: physical base address of the region (assumed page-aligned —
 *         size is split into PAGE_SIZE chunks; TODO confirm callers
 *         only pass page-aligned, page-multiple regions).
 * @size:  region size in bytes.
 *
 * Builds a temporary scatterlist describing the pages of the region,
 * maps it through the IOMMU backend, and frees the temporary table.
 * Returns the device address, @paddr itself when no IOMMU backend is
 * registered, or 0 on failure.
 */
dma_addr_t exynos_drm_iommu_map(struct device *dev, dma_addr_t paddr,
				size_t size)
{
	struct sg_table *sgt;
	struct scatterlist *sgl;
	dma_addr_t dma_addr = 0, tmp_addr;
	unsigned int npages, i;
	int ret;

	 /* if not using iommu, just return paddr. */
	if (!iommu_ops.map)
		return paddr;

	npages = size >> PAGE_SHIFT;

	/*
	 * only one sg_table header is needed; the per-page entries live
	 * inside the table allocated by sg_alloc_table() below.
	 * (previously this over-allocated npages headers.)
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(dev, "failed to allocate sg table.\n");
		return dma_addr;
	}

	ret = sg_alloc_table(sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "failed to initialize sg table.\n");
		goto err;
	}

	/* fill one PAGE_SIZE entry per page of the contiguous region. */
	sgl = sgt->sgl;
	tmp_addr = paddr;

	for (i = 0; i < npages; i++) {
		struct page *page = phys_to_page(tmp_addr);

		sg_set_page(sgl, page, PAGE_SIZE, 0);
		sg_dma_len(sgl) = PAGE_SIZE;
		tmp_addr += PAGE_SIZE;
		sgl = sg_next(sgl);
	}

	/*
	 * allocate device address space for this driver and then
	 * map all pages contained in sg list to iommu table.
	 */
	dma_addr = iommu_ops.map(dev, sgt->sgl, (off_t)0, (size_t)size);
	if (!dma_addr)
		dev_err(dev, "failed to map cmdlist pool.\n");

	/* the scatterlist was only needed for the map call itself. */
	sg_free_table(sgt);
err:
	kfree(sgt);

	return dma_addr;
}


/*
 * Release a device address obtained from exynos_drm_iommu_map().
 * No-op when no IOMMU backend is registered.
 */
void exynos_drm_iommu_unmap(struct device *dev, dma_addr_t dma_addr)
{
	if (!iommu_ops.unmap)
		return;

	iommu_ops.unmap(dev, dma_addr);
}

/*
 * Create a device address space for @dev via the IOMMU backend.
 *
 * Returns 0 when no backend is registered, otherwise the backend's
 * result.
 *
 * allocate device address space to this driver and add vmm object
 * to s5p_iovmm_list. please know that each iommu will use
 * 1GB as its own device address apace.
 *
 * the device address space : 0x80000000 ~ 0xA0000000
 */
int exynos_drm_iommu_setup(struct device *dev)
{
	if (!iommu_ops.setup)
		return 0;

	return iommu_ops.setup(dev);
}

/*
 * Enable IOMMU translation for @dev.
 *
 * Returns 0 when no backend is registered, otherwise the backend's
 * result. Dispatches through iommu_ops.activate (previously this
 * bypassed the ops table and called iovmm_activate() directly, which
 * defeated the indirection every sibling wrapper uses).
 */
int exynos_drm_iommu_activate(struct device *dev)
{
	if (iommu_ops.activate)
		return iommu_ops.activate(dev);

	return 0;
}

/*
 * Disable IOMMU translation for @dev.
 * No-op when no backend is registered.
 */
void exynos_drm_iommu_deactivate(struct device *dev)
{
	if (!iommu_ops.deactivate)
		return;

	iommu_ops.deactivate(dev);
}

/*
 * Tear down the device address space created by exynos_drm_iommu_setup().
 * No-op when no backend is registered.
 */
void exynos_drm_iommu_cleanup(struct device *dev)
{
	if (!iommu_ops.cleanup)
		return;

	iommu_ops.cleanup(dev);
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM IOMMU Framework");
MODULE_LICENSE("GPL");