/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <asm/barrier.h>

#include <linux/msm_dma_iommu_mapping.h>

/**
 * struct msm_iommu_map - represents a mapping of an ion buffer to an iommu
 * @lnode - list node to exist in the buffer's list of iommu mappings
 * @dev - Device this is mapped to. Used as key
 * @sgl - The scatterlist for this mapping
 * @nents - Number of entries in sgl
 * @dir - The direction for the map.
 * @meta - Backpointer to the meta this mapping belongs to.
 * @ref - for reference counting this mapping
 * @map_attrs - dma mapping attributes
 * @buf_start_addr - address of start of buffer
 *
 * Represents a mapping of one dma_buf buffer to a particular device
 * and address range. There may exist other mappings of this buffer in
 * different devices. All mappings will have the same cacheability and
 * security.
 */
struct msm_iommu_map {
	struct list_head lnode;
	struct rb_node node;
	struct device *dev;
	struct scatterlist sgl;
	unsigned int nents;
	enum dma_data_direction dir;
	struct msm_iommu_meta *meta;
	struct kref ref;
	unsigned long map_attrs;
	dma_addr_t buf_start_addr;
};

struct msm_iommu_meta {
	struct rb_node node;
	struct list_head iommu_maps;
	struct kref ref;
	struct mutex lock;
	void *buffer;
};

static struct rb_root iommu_root;
static DEFINE_MUTEX(msm_iommu_map_mutex);
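
/*
 * Book-keeping layout: iommu_root is a global red-black tree with one
 * msm_iommu_meta per dma_buf, keyed by the buffer's private data pointer
 * (dma_buf->priv). Each meta carries a list of msm_iommu_map entries, one
 * per device the buffer is currently mapped to.
 *
 * Lock ordering: msm_iommu_map_mutex protects the tree and the meta
 * reference counts; meta->lock protects that buffer's iommu_maps list.
 * Where both are needed, the global mutex is always taken first and is
 * never acquired while a meta->lock is held.
 */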

static void msm_iommu_meta_add(struct msm_iommu_meta *meta)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (meta->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (meta->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			pr_err("%s: dma_buf %p already exists\n", __func__,
			       entry->buffer);
	}

	rb_link_node(&meta->node, parent, p);
	rb_insert_color(&meta->node, root);
}

static struct msm_iommu_meta *msm_iommu_meta_lookup(void *buffer)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry = NULL;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static void msm_iommu_add(struct msm_iommu_meta *meta,
			  struct msm_iommu_map *iommu)
{
	INIT_LIST_HEAD(&iommu->lnode);
	list_add(&iommu->lnode, &meta->iommu_maps);
}

static struct msm_iommu_map *msm_iommu_lookup(struct msm_iommu_meta *meta,
					      struct device *dev)
{
	struct msm_iommu_map *entry;

	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
		if (entry->dev == dev)
			return entry;
	}

	return NULL;
}

static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)
{
	struct msm_iommu_meta *meta;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&meta->iommu_maps);
	meta->buffer = dma_buf->priv;
	kref_init(&meta->ref);
	mutex_init(&meta->lock);
	msm_iommu_meta_add(meta);

	return meta;
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta);

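/*
 * Reference-counting overview (summary of the logic below):
 *
 * Every successful map call holds one reference on the buffer's meta and
 * one on the per-device msm_iommu_map; msm_dma_unmap_sg() drops both.
 * Unless the caller passes DMA_ATTR_NO_DELAYED_UNMAP, an extra reference
 * is taken on each object when it is first created, so the hardware
 * mapping is kept around ("lazy"/delayed unmap) and re-used by later map
 * calls from the same device. Those extra references are only dropped by
 * msm_dma_buf_freed() when the buffer itself is released.
 */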
static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_buf *dma_buf,
				   unsigned long attrs)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *iommu_meta = NULL;
	int ret = 0;
	bool extra_meta_ref_taken = false;
	int late_unmap = !(attrs & DMA_ATTR_NO_DELAYED_UNMAP);

	mutex_lock(&msm_iommu_map_mutex);
	iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);

	if (!iommu_meta) {
		iommu_meta = msm_iommu_meta_create(dma_buf);

		if (IS_ERR(iommu_meta)) {
			mutex_unlock(&msm_iommu_map_mutex);
			ret = PTR_ERR(iommu_meta);
			goto out;
		}
		if (late_unmap) {
			kref_get(&iommu_meta->ref);
			extra_meta_ref_taken = true;
		}
	} else {
		kref_get(&iommu_meta->ref);
	}

	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&iommu_meta->lock);
	iommu_map = msm_iommu_lookup(iommu_meta, dev);
	if (!iommu_map) {
		iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);

		if (!iommu_map) {
			ret = -ENOMEM;
			goto out_unlock;
		}

		ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
		if (ret != nents) {
			kfree(iommu_map);
			goto out_unlock;
		}

		kref_init(&iommu_map->ref);
		if (late_unmap)
			kref_get(&iommu_map->ref);
		iommu_map->meta = iommu_meta;
		iommu_map->sgl.dma_address = sg->dma_address;
		iommu_map->sgl.dma_length = sg->dma_length;
		iommu_map->dev = dev;
		iommu_map->dir = dir;
		iommu_map->nents = nents;
		iommu_map->map_attrs = attrs;
		iommu_map->buf_start_addr = sg_phys(sg);
		msm_iommu_add(iommu_meta, iommu_map);
	} else {
		if (nents == iommu_map->nents &&
		    dir == iommu_map->dir &&
		    attrs == iommu_map->map_attrs &&
		    sg_phys(sg) == iommu_map->buf_start_addr) {
			sg->dma_address = iommu_map->sgl.dma_address;
			sg->dma_length = iommu_map->sgl.dma_length;

			kref_get(&iommu_map->ref);
			if (is_device_dma_coherent(dev))
				/*
				 * Ensure all outstanding changes for coherent
				 * buffers are applied to the cache before any
				 * DMA occurs.
				 */
				dmb(ish);
			ret = nents;
		} else {
			bool start_diff = (sg_phys(sg) !=
					   iommu_map->buf_start_addr);

			dev_err(dev, "lazy map request differs:\n"
				"req dir:%d, original dir:%d\n"
				"req nents:%d, original nents:%d\n"
				"req map attrs:%lu, original map attrs:%lu\n"
				"req buffer start address differs:%d\n",
				dir, iommu_map->dir, nents,
				iommu_map->nents, attrs, iommu_map->map_attrs,
				start_diff);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&iommu_meta->lock);
	return ret;

out_unlock:
	mutex_unlock(&iommu_meta->lock);
out:
	if (!IS_ERR(iommu_meta)) {
		if (extra_meta_ref_taken)
			msm_iommu_meta_put(iommu_meta);
		msm_iommu_meta_put(iommu_meta);
	}
	return ret;
}

/*
 * We are not taking a reference to the dma_buf here. It is expected that
 * clients hold a reference to the dma_buf until they are done with mapping
 * and unmapping.
 */
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_buf *dma_buf,
			 unsigned long attrs)
{
	int ret;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: dev pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(sg)) {
		pr_err("%s: sg table pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(dma_buf)) {
		pr_err("%s: dma_buf pointer is invalid\n", __func__);
		return -EINVAL;
	}

	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, attrs);

	return ret;
}
EXPORT_SYMBOL(msm_dma_map_sg_attrs);
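
/*
 * Usage sketch (illustration only, not part of this driver): a hypothetical
 * client mapping a dma_buf for DMA through the SMMU. The names my_dev,
 * my_dmabuf and my_sgt are invented for the example; the scatterlist is
 * assumed to have been obtained from the buffer's exporter. As noted above,
 * the client must keep its own dma_buf reference alive across these calls.
 *
 *	static int my_do_dma(struct device *my_dev, struct dma_buf *my_dmabuf,
 *			     struct sg_table *my_sgt)
 *	{
 *		int ret;
 *
 *		// Map the buffer for my_dev, or re-use the cached lazy mapping
 *		// if this device already mapped it with the same parameters.
 *		ret = msm_dma_map_sg_attrs(my_dev, my_sgt->sgl, my_sgt->nents,
 *					   DMA_BIDIRECTIONAL, my_dmabuf, 0);
 *		if (ret != my_sgt->nents)
 *			return ret < 0 ? ret : -ENOMEM;
 *
 *		// ... program the hardware with sg_dma_address(my_sgt->sgl) ...
 *
 *		// Drop this caller's reference. With delayed unmap (the default,
 *		// i.e. DMA_ATTR_NO_DELAYED_UNMAP not set), the SMMU mapping
 *		// stays cached until msm_dma_buf_freed() runs for the buffer.
 *		msm_dma_unmap_sg(my_dev, my_sgt->sgl, my_sgt->nents,
 *				 DMA_BIDIRECTIONAL, my_dmabuf);
 *		return 0;
 *	}
 */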

static void msm_iommu_meta_destroy(struct kref *kref)
{
	struct msm_iommu_meta *meta = container_of(kref, struct msm_iommu_meta,
						   ref);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA Buffer %p being destroyed with outstanding iommu mappings!\n",
		     __func__, meta->buffer);
	}
	rb_erase(&meta->node, &iommu_root);
	kfree(meta);
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta)
{
	/*
	 * Need to lock here to prevent race against map/unmap
	 */
	mutex_lock(&msm_iommu_map_mutex);
	kref_put(&meta->ref, msm_iommu_meta_destroy);
	mutex_unlock(&msm_iommu_map_mutex);
}

static void msm_iommu_map_release(struct kref *kref)
{
	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
						 ref);

	list_del(&map->lnode);
	dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);
	kfree(map);
}

void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      enum dma_data_direction dir, struct dma_buf *dma_buf)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(dma_buf->priv);
	if (!meta) {
		WARN(1, "%s: (%p) was never mapped\n", __func__, dma_buf);
		mutex_unlock(&msm_iommu_map_mutex);
		goto out;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);
	iommu_map = msm_iommu_lookup(meta, dev);

	if (!iommu_map) {
		WARN(1, "%s: (%p) was never mapped for device %p\n", __func__,
		     dma_buf, dev);
		mutex_unlock(&meta->lock);
		goto out;
	}

	if (dir != iommu_map->dir)
		WARN(1, "%s: (%pK) dir:%d differs from original dir:%d\n",
		     __func__, dma_buf, dir, iommu_map->dir);

	kref_put(&iommu_map->ref, msm_iommu_map_release);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);

out:
	return;
}
EXPORT_SYMBOL(msm_dma_unmap_sg);

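/*
 * Drop a reference on every mapping @dev still holds on any buffer. This
 * releases mappings whose only remaining reference is the cached
 * delayed-unmap ("lazy") one; a driver might call it, for example, on its
 * remove path before its SMMU context is torn down. Returns -EINVAL if a
 * mapping was still referenced elsewhere and could not be released.
 */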
int msm_dma_unmap_all_for_dev(struct device *dev)
{
	int ret = 0;
	struct msm_iommu_meta *meta;
	struct rb_root *root;
	struct rb_node *meta_node;

	mutex_lock(&msm_iommu_map_mutex);
	root = &iommu_root;
	meta_node = rb_first(root);
	while (meta_node) {
		struct msm_iommu_map *iommu_map;
		struct msm_iommu_map *iommu_map_next;

		meta = rb_entry(meta_node, struct msm_iommu_meta, node);
		mutex_lock(&meta->lock);
		list_for_each_entry_safe(iommu_map, iommu_map_next,
					 &meta->iommu_maps, lnode)
			if (iommu_map->dev == dev)
				if (!kref_put(&iommu_map->ref,
					      msm_iommu_map_release))
					ret = -EINVAL;

		mutex_unlock(&meta->lock);
		meta_node = rb_next(meta_node);
	}
	mutex_unlock(&msm_iommu_map_mutex);

	return ret;
}

/*
 * Only to be called by ION code when a buffer is freed
 */
void msm_dma_buf_freed(void *buffer)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_map *iommu_map_next;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(buffer);
	if (!meta) {
		/* Already unmapped (assuming no late unmapping) */
		mutex_unlock(&msm_iommu_map_mutex);
		return;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);

	list_for_each_entry_safe(iommu_map, iommu_map_next, &meta->iommu_maps,
				 lnode)
		kref_put(&iommu_map->ref, msm_iommu_map_release);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA buffer %p destroyed with outstanding iommu mappings\n",
		     __func__, meta->buffer);
	}

	INIT_LIST_HEAD(&meta->iommu_maps);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);
}