aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/exynos/exynos_drm_iommu.c
blob: b0a8e1cf4709ac8e7e7091e1638eb18f96ea295f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
/* exynos_drm_iommu.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <drm/exynos_drm.h>

#include <plat/s5p-iovmm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

/* serializes all iommu map/unmap bookkeeping in this file */
static DEFINE_MUTEX(iommu_mutex);

/*
 * Indirection table over the s5p-iovmm backend. All exported helpers in
 * this file go through these callbacks; a NULL callback means "iommu not
 * available" and the helpers fall back to physical addresses or no-ops.
 */
struct exynos_iommu_ops {
	/* reserve a device address space: s_iova .. s_iova + size */
	void *(*setup)(unsigned long s_iova, unsigned long size);
	/* release the address space created by setup() */
	void (*cleanup)(void *in_vmm);
	/* attach/detach the given device to/from the address space */
	int (*activate)(void *in_vmm, struct device *dev);
	void (*deactivate)(void *in_vmm, struct device *dev);
	/* map an sg list into the iommu; returns device (io virtual) address */
	dma_addr_t (*map)(void *in_vmm, struct scatterlist *sg,
				off_t offset, size_t size);
	/* unmap a device address previously returned by map() */
	void (*unmap)(void *in_vmm, dma_addr_t iova);
};

/* bind the ops table to the s5p-iovmm implementation (plat/s5p-iovmm.h) */
static const struct exynos_iommu_ops iommu_ops = {
	.setup		= iovmm_setup,
	.cleanup	= iovmm_cleanup,
	.activate	= iovmm_activate,
	.deactivate	= iovmm_deactivate,
	.map		= iovmm_map,
	.unmap		= iovmm_unmap
};

/*
 * exynos_drm_iommu_map_gem - map a gem object's pages into the iommu.
 *
 * Returns the device address of @obj's buffer:
 *  - physical base address when no iommu backend is present,
 *  - the cached buf->dev_addr when the buffer was already mapped,
 *  - otherwise a fresh mapping from iommu_ops.map() (0 on failure).
 *
 * Serialized against unmap by iommu_mutex.
 */
dma_addr_t exynos_drm_iommu_map_gem(struct drm_device *drm_dev,
					struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	struct sg_table *sgt;
	dma_addr_t dev_addr;

	mutex_lock(&iommu_mutex);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;
	sgt = buf->sgt;

	if (!iommu_ops.map) {
		/*
		 * not using iommu: just return the base address of the
		 * physical memory region of the gem.
		 */
		dev_addr = sg_dma_address(&sgt->sgl[0]);
	} else if (buf->dev_addr) {
		/*
		 * gem buffer already mapped into the iommu table; the
		 * device address is unique system-wide, so reuse it.
		 */
		dev_addr = buf->dev_addr;
	} else {
		/*
		 * allocate device address space for this driver and map
		 * all pages contained in the sg list to the iommu table.
		 * iommu_ops.map() returns 0 on failure, which we pass
		 * straight back to the caller.
		 *
		 * NOTE(review): the result is not cached into
		 * buf->dev_addr here even though this function checks it
		 * above — presumably the caller stores it; confirm.
		 */
		dev_addr = iommu_ops.map(exynos_gem_obj->vmm, sgt->sgl,
					(off_t)0, (size_t)obj->size);
	}

	mutex_unlock(&iommu_mutex);

	return dev_addr;
}

/*
 * exynos_drm_iommu_unmap_gem - release the iommu mapping of a gem object.
 *
 * No-op when @obj is NULL, when no iommu backend is present, or when the
 * buffer was never mapped (buf->dev_addr == 0). Clears buf->dev_addr so
 * a later map_gem() creates a fresh mapping.
 */
void exynos_drm_iommu_unmap_gem(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_buf *buffer;
	struct exynos_drm_gem_obj *gem;

	if (!obj || !iommu_ops.unmap)
		return;

	gem = to_exynos_gem_obj(obj);
	buffer = gem->buffer;

	/* workaround: brief delay before tearing down the mapping */
	usleep_range(15000, 20000);

	mutex_lock(&iommu_mutex);

	if (!buffer->dev_addr) {
		mutex_unlock(&iommu_mutex);
		DRM_DEBUG_KMS("not mapped with iommu table.\n");
		return;
	}

	if (gem->vmm)
		iommu_ops.unmap(gem->vmm, buffer->dev_addr);

	buffer->dev_addr = 0;
	mutex_unlock(&iommu_mutex);
}

/*
 * exynos_drm_iommu_map - map a physically contiguous region into the iommu.
 *
 * Builds a temporary sg table describing the @size bytes starting at
 * @paddr (one PAGE_SIZE entry per page) and maps it through
 * iommu_ops.map(). The sg table is only a transport for the call and is
 * freed before returning.
 *
 * Returns the device address, @paddr when no iommu backend is present,
 * or 0 on failure. Assumes @size is page-aligned (the remainder of a
 * partial page is dropped by the shift) — TODO confirm callers guarantee
 * this.
 */
dma_addr_t exynos_drm_iommu_map(void *in_vmm, dma_addr_t paddr,
				size_t size)
{
	struct sg_table *sgt;
	struct scatterlist *sgl;
	dma_addr_t dma_addr = 0, tmp_addr;
	unsigned int npages, i;
	int ret;

	 /* if not using iommu, just return paddr. */
	if (!iommu_ops.map)
		return paddr;

	npages = size >> PAGE_SHIFT;

	/*
	 * only one sg_table is needed; the previous code allocated
	 * npages copies of it (sizeof(struct sg_table) * npages) and
	 * leaked that extra memory until kfree.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		return dma_addr;
	}

	ret = sg_alloc_table(sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		goto err;
	}

	/* fill one PAGE_SIZE entry per page of the contiguous region */
	sgl = sgt->sgl;
	tmp_addr = paddr;

	for (i = 0; i < npages; i++) {
		struct page *page = phys_to_page(tmp_addr);

		sg_set_page(sgl, page, PAGE_SIZE, 0);
		sg_dma_len(sgl) = PAGE_SIZE;
		tmp_addr += PAGE_SIZE;
		sgl = sg_next(sgl);
	}

	/*
	 * allocate device address space for this driver and then
	 * map all pages contained in sg list to iommu table.
	 */
	dma_addr = iommu_ops.map(in_vmm, sgt->sgl, (off_t)0, (size_t)size);
	if (!dma_addr)
		DRM_ERROR("failed to map cmdlist pool.\n");

	sg_free_table(sgt);
err:
	kfree(sgt);

	return dma_addr;
}


/*
 * exynos_drm_iommu_unmap - release a mapping made by exynos_drm_iommu_map.
 * No-op when no iommu backend is present.
 */
void exynos_drm_iommu_unmap(void *in_vmm, dma_addr_t dma_addr)
{
	if (!iommu_ops.unmap)
		return;

	iommu_ops.unmap(in_vmm, dma_addr);
}

/*
 * exynos_drm_iommu_setup - reserve a device address space for this driver.
 *
 * Allocates the range s_iova .. s_iova + size and registers the vmm
 * object with the backend (each iommu gets 1GB of its own device
 * address space). Returns the vmm handle, or ERR_PTR(-EINVAL) when no
 * iommu backend is present.
 */
void *exynos_drm_iommu_setup(unsigned long s_iova, unsigned long size)
{
	if (!iommu_ops.setup)
		return ERR_PTR(-EINVAL);

	return iommu_ops.setup(s_iova, size);
}

/*
 * exynos_drm_iommu_activate - attach @dev to the device address space.
 *
 * Returns the backend's result, or 0 when no iommu backend is present.
 * Fix: call through iommu_ops.activate like every other wrapper in this
 * file, instead of calling iovmm_activate() directly and bypassing the
 * indirection table it just tested.
 */
int exynos_drm_iommu_activate(void *in_vmm, struct device *dev)
{
	if (iommu_ops.activate)
		return iommu_ops.activate(in_vmm, dev);

	return 0;
}

/*
 * exynos_drm_iommu_deactivate - detach @dev from the device address space.
 * No-op when no iommu backend is present.
 */
void exynos_drm_iommu_deactivate(void *in_vmm, struct device *dev)
{
	if (!iommu_ops.deactivate)
		return;

	iommu_ops.deactivate(in_vmm, dev);
}

/*
 * exynos_drm_iommu_cleanup - release the device address space created by
 * exynos_drm_iommu_setup. No-op when no iommu backend is present.
 */
void exynos_drm_iommu_cleanup(void *in_vmm)
{
	if (!iommu_ops.cleanup)
		return;

	iommu_ops.cleanup(in_vmm);
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM IOMMU Framework");
MODULE_LICENSE("GPL");