/* Copyright (c) 2015-2018, 2020 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <asm/barrier.h>

#include <linux/msm_dma_iommu_mapping.h>

/**
 * struct msm_iommu_map - represents a mapping of an ion buffer to an iommu
 * @lnode - list node to exist in the buffer's list of iommu mappings
 * @dev - Device this is mapped to. Used as key
 * @sgl - The scatterlist for this mapping
 * @nents - Number of entries in sgl
 * @dir - The direction for the map.
 * @meta - Backpointer to the meta this mapping belongs to.
 * @ref - for reference counting this mapping
 * @map_attrs - dma mapping attributes
 * @buf_start_addr - physical address of the start of the buffer
 *
 * Represents a mapping of one dma_buf buffer to a particular device
 * and address range. There may exist other mappings of this buffer in
 * different devices. All mappings will have the same cacheability and security.
 */
struct msm_iommu_map {
	struct list_head lnode;
	struct rb_node node;
	struct device *dev;
	struct scatterlist *sgl;
	unsigned int nents;
	enum dma_data_direction dir;
	struct msm_iommu_meta *meta;
	struct kref ref;
	unsigned long map_attrs;
	dma_addr_t buf_start_addr;
};

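/**
 * struct msm_iommu_meta - per-dma_buf bookkeeping for lazy iommu mappings
 * @node - rb-tree node linking this meta into iommu_root, keyed by @buffer
 * @iommu_maps - list of msm_iommu_map entries, one per device mapping
 * @ref - reference count for this meta
 * @lock - protects @iommu_maps against concurrent map/unmap
 * @buffer - the dma_buf's priv pointer, used as the lookup key
 */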
struct msm_iommu_meta {
	struct rb_node node;
	struct list_head iommu_maps;
	struct kref ref;
	struct mutex lock;
	void *buffer;
};

static struct rb_root iommu_root;
static DEFINE_MUTEX(msm_iommu_map_mutex);

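/*
 * Insert @meta into the global rb-tree, ordered by its buffer pointer.
 * Callers hold msm_iommu_map_mutex across lookup and creation, so a
 * duplicate buffer should never be seen here; one is reported via pr_err().
 */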
static void msm_iommu_meta_add(struct msm_iommu_meta *meta)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (meta->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (meta->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			pr_err("%s: dma_buf %p already exists\n", __func__,
			       entry->buffer);
	}

	rb_link_node(&meta->node, parent, p);
	rb_insert_color(&meta->node, root);
}

static struct msm_iommu_meta *msm_iommu_meta_lookup(void *buffer)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry = NULL;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static void msm_iommu_add(struct msm_iommu_meta *meta,
			  struct msm_iommu_map *iommu)
{
	INIT_LIST_HEAD(&iommu->lnode);
	list_add(&iommu->lnode, &meta->iommu_maps);
}

static struct msm_iommu_map *msm_iommu_lookup(struct msm_iommu_meta *meta,
					      struct device *dev)
{
	struct msm_iommu_map *entry;

	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
		if (entry->dev == dev)
			return entry;
	}

	return NULL;
}

static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)
{
	struct msm_iommu_meta *meta;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&meta->iommu_maps);
	meta->buffer = dma_buf->priv;
	kref_init(&meta->ref);
	mutex_init(&meta->lock);
	msm_iommu_meta_add(meta);

	return meta;
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta);
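
/*
 * Duplicate the first @nents entries of @sg so the mapping keeps its own
 * copy of the dma-mapped scatterlist, independent of the caller's sg table.
 * Returns the head of the cloned list, or NULL on allocation failure.
 */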
static struct scatterlist *clone_sgl(struct scatterlist *sg, int nents)
{
	struct scatterlist *next, *s;
	int i;
	struct sg_table table;

	if (sg_alloc_table(&table, nents, GFP_KERNEL))
		return NULL;
	next = table.sgl;
	for_each_sg(sg, s, nents, i) {
		*next = *s;
		next = sg_next(next);
	}
	return table.sgl;
}

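/*
 * Core of the lazy-mapping scheme: the first map of a dma_buf for a given
 * device performs the real dma_map_sg_attrs() and caches a clone of the
 * mapped scatterlist. Later requests with the same device, direction,
 * nents, attrs and buffer start address copy the cached dma_address and
 * dma_length into the caller's sgl and take another reference; mismatched
 * parameters fail with -EINVAL. Unless DMA_ATTR_NO_DELAYED_UNMAP is set,
 * extra references keep the mapping and its meta alive until the buffer
 * itself is freed.
 */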
static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_buf *dma_buf,
				   unsigned long attrs)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *iommu_meta = NULL;
	int ret = 0;
	bool extra_meta_ref_taken = false;
	int late_unmap = !(attrs & DMA_ATTR_NO_DELAYED_UNMAP);

	mutex_lock(&msm_iommu_map_mutex);
	iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);

	if (!iommu_meta) {
		iommu_meta = msm_iommu_meta_create(dma_buf);

		if (IS_ERR(iommu_meta)) {
			mutex_unlock(&msm_iommu_map_mutex);
			ret = PTR_ERR(iommu_meta);
			goto out;
		}
		if (late_unmap) {
			kref_get(&iommu_meta->ref);
			extra_meta_ref_taken = true;
		}
	} else {
		kref_get(&iommu_meta->ref);
	}

	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&iommu_meta->lock);
	iommu_map = msm_iommu_lookup(iommu_meta, dev);
	if (!iommu_map) {
		iommu_map = kmalloc(sizeof(*iommu_map), GFP_KERNEL);

		if (!iommu_map) {
			ret = -ENOMEM;
			goto out_unlock;
		}

		ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
		if (!ret) {
			kfree(iommu_map);
			goto out_unlock;
		}

		iommu_map->sgl = clone_sgl(sg, nents);
		if (!iommu_map->sgl) {
			kfree(iommu_map);
			ret = -ENOMEM;
			goto out_unlock;
		}
		iommu_map->nents = nents;
		iommu_map->dev = dev;

		kref_init(&iommu_map->ref);
		if (late_unmap)
			kref_get(&iommu_map->ref);
		iommu_map->meta = iommu_meta;
		iommu_map->dir = dir;
		iommu_map->map_attrs = attrs;
		iommu_map->buf_start_addr = sg_phys(sg);
		msm_iommu_add(iommu_meta, iommu_map);

	} else {
		if (nents == iommu_map->nents &&
		    dir == iommu_map->dir &&
		    attrs == iommu_map->map_attrs &&
		    sg_phys(sg) == iommu_map->buf_start_addr) {
			sg->dma_address = iommu_map->sgl->dma_address;
			sg->dma_length = iommu_map->sgl->dma_length;

			kref_get(&iommu_map->ref);
			if (is_device_dma_coherent(dev))
				/*
				 * Ensure all outstanding changes for coherent
				 * buffers are applied to the cache before any
				 * DMA occurs.
				 */
				dmb(ish);
			ret = nents;
		} else {
			bool start_diff = (sg_phys(sg) !=
					   iommu_map->buf_start_addr);

			dev_err(dev, "lazy map request differs:\n"
				"req dir:%d, original dir:%d\n"
				"req nents:%d, original nents:%d\n"
				"req map attrs:%lu, original map attrs:%lu\n"
				"req buffer start address differs:%d\n",
				dir, iommu_map->dir, nents,
				iommu_map->nents, attrs, iommu_map->map_attrs,
				start_diff);
			ret = -EINVAL;
		}
	}
	mutex_unlock(&iommu_meta->lock);
	return ret;

out_unlock:
	mutex_unlock(&iommu_meta->lock);
out:
	if (!IS_ERR(iommu_meta)) {
		if (extra_meta_ref_taken)
			msm_iommu_meta_put(iommu_meta);
		msm_iommu_meta_put(iommu_meta);
	}
	return ret;
}

/*
 * We are not taking a reference to the dma_buf here. It is expected that
 * clients hold a reference to the dma_buf until they are done with mapping
 * and unmapping.
 */
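/*
 * Illustrative usage only (the names below are placeholders, not taken
 * from this file): a client maps once per device, lets later calls reuse
 * the cached mapping, and balances each map with an unmap that passes the
 * same device, direction and scatterlist parameters:
 *
 *	nents = msm_dma_map_sg_attrs(dev, sgt->sgl, sgt->nents,
 *				     DMA_BIDIRECTIONAL, dmabuf, 0);
 *	if (nents <= 0)
 *		goto fail;
 *	...
 *	msm_dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL,
 *			 dmabuf);
 *
 * Returns the number of mapped entries on success, 0 if the underlying
 * dma_map_sg_attrs() fails, or a negative errno.
 */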
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_buf *dma_buf,
			 unsigned long attrs)
{
	int ret;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: dev pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(sg)) {
		pr_err("%s: sg table pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(dma_buf)) {
		pr_err("%s: dma_buf pointer is invalid\n", __func__);
		return -EINVAL;
	}

	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, attrs);

	return ret;
}
EXPORT_SYMBOL(msm_dma_map_sg_attrs);

static void msm_iommu_meta_destroy(struct kref *kref)
{
	struct msm_iommu_meta *meta = container_of(kref, struct msm_iommu_meta,
						   ref);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA Buffer %p being destroyed with outstanding iommu mappings!\n",
		     __func__, meta->buffer);
	}
	rb_erase(&meta->node, &iommu_root);
	kfree(meta);
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta)
{
	/*
	 * Need to lock here to prevent race against map/unmap
	 */
	mutex_lock(&msm_iommu_map_mutex);
	kref_put(&meta->ref, msm_iommu_meta_destroy);
	mutex_unlock(&msm_iommu_map_mutex);
}

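/*
 * kref release callback for a per-device mapping: removes it from the
 * meta's list, performs the deferred dma_unmap_sg() on the cloned
 * scatterlist, then frees the clone and the map. Called with the owning
 * meta's lock held.
 */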
static void msm_iommu_map_release(struct kref *kref)
{
	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
						 ref);
	struct sg_table table;

	table.nents = table.orig_nents = map->nents;
	table.sgl = map->sgl;
	list_del(&map->lnode);

	dma_unmap_sg(map->dev, map->sgl, map->nents, map->dir);
	sg_free_table(&table);
	kfree(map);
}

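/*
 * Drop one reference on the mapping of @dma_buf for @dev. The actual
 * dma_unmap_sg() happens only when the last reference goes away, which,
 * with delayed unmap, is not until msm_dma_buf_freed() runs.
 */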
void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      enum dma_data_direction dir, struct dma_buf *dma_buf)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(dma_buf->priv);
	if (!meta) {
		WARN(1, "%s: (%p) was never mapped\n", __func__, dma_buf);
		mutex_unlock(&msm_iommu_map_mutex);
		goto out;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);
	iommu_map = msm_iommu_lookup(meta, dev);

	if (!iommu_map) {
		WARN(1, "%s: (%p) was never mapped for device %p\n", __func__,
		     dma_buf, dev);
		mutex_unlock(&meta->lock);
		goto out;
	}

	if (dir != iommu_map->dir)
		WARN(1, "%s: (%pK) dir:%d differs from original dir:%d\n",
		     __func__, dma_buf, dir, iommu_map->dir);

	kref_put(&iommu_map->ref, msm_iommu_map_release);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);

out:
	return;
}
EXPORT_SYMBOL(msm_dma_unmap_sg);

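/*
 * Tear down every mapping that belongs to @dev across all buffers.
 * Returns -EINVAL if any such mapping still had outstanding references,
 * i.e. the kref_put() here was not the one that released it.
 */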
int msm_dma_unmap_all_for_dev(struct device *dev)
{
	int ret = 0;
	struct msm_iommu_meta *meta;
	struct rb_root *root;
	struct rb_node *meta_node;

	mutex_lock(&msm_iommu_map_mutex);
	root = &iommu_root;
	meta_node = rb_first(root);
	while (meta_node) {
		struct msm_iommu_map *iommu_map;
		struct msm_iommu_map *iommu_map_next;

		meta = rb_entry(meta_node, struct msm_iommu_meta, node);
		mutex_lock(&meta->lock);
		list_for_each_entry_safe(iommu_map, iommu_map_next,
					 &meta->iommu_maps, lnode)
			if (iommu_map->dev == dev)
				if (!kref_put(&iommu_map->ref,
					      msm_iommu_map_release))
					ret = -EINVAL;

		mutex_unlock(&meta->lock);
		meta_node = rb_next(meta_node);
	}
	mutex_unlock(&msm_iommu_map_mutex);

	return ret;
}

/*
 * Only to be called by ION code when a buffer is freed
 */
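/*
 * Releases whatever mappings remain for the buffer (normally just the
 * delayed-unmap references taken in __msm_dma_map_sg()) and drops the
 * buffer's reference on the meta, which removes it from the rb-tree once
 * the last user is gone.
 */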
void msm_dma_buf_freed(void *buffer)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_map *iommu_map_next;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(buffer);
	if (!meta) {
		/* Already unmapped (assuming no late unmapping) */
		mutex_unlock(&msm_iommu_map_mutex);
		return;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);

	list_for_each_entry_safe(iommu_map, iommu_map_next, &meta->iommu_maps,
				 lnode)
		kref_put(&iommu_map->ref, msm_iommu_map_release);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA buffer %p destroyed with outstanding iommu mappings\n",
		     __func__, meta->buffer);
	}

	INIT_LIST_HEAD(&meta->iommu_maps);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);
}