/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <asm/barrier.h>

#include <linux/msm_dma_iommu_mapping.h>

/**
 * struct msm_iommu_map - represents a mapping of an ion buffer to an iommu
 * @lnode: list node in the buffer's list of iommu mappings
 * @node: rb-tree node (not used here; mappings are tracked on @lnode lists)
 * @dev: the device this buffer is mapped to; used as the lookup key
 * @sgl: the scatterlist for this mapping
 * @nents: number of entries in @sgl
 * @dir: the direction used for the unmap
 * @meta: back pointer to the msm_iommu_meta this mapping belongs to
 * @ref: reference count for this mapping
 *
 * Represents a mapping of one dma_buf buffer to a particular device
 * and address range. Other mappings of the same buffer may exist for
 * different devices. All mappings have the same cacheability and security.
 */
struct msm_iommu_map {
	struct list_head lnode;
	struct rb_node node;
	struct device *dev;
	struct scatterlist sgl;
	unsigned int nents;
	enum dma_data_direction dir;
	struct msm_iommu_meta *meta;
	struct kref ref;
};

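/**
 * struct msm_iommu_meta - per-dma_buf bookkeeping
 * @node: rb-tree node, keyed by @buffer, in the global iommu_root tree
 * @iommu_maps: list of per-device msm_iommu_map mappings of this buffer
 * @ref: reference count for this meta
 * @lock: protects @iommu_maps
 * @buffer: the dma_buf's priv pointer, used as the lookup key
 */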
struct msm_iommu_meta {
	struct rb_node node;
	struct list_head iommu_maps;
	struct kref ref;
	struct mutex lock;
	void *buffer;
};

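/*
 * Global rb-tree of msm_iommu_meta nodes, keyed by the dma_buf's priv
 * pointer. msm_iommu_map_mutex protects the tree and the meta refcounts;
 * each meta's own lock protects its list of per-device mappings.
 */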
static struct rb_root iommu_root;
static DEFINE_MUTEX(msm_iommu_map_mutex);

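/* Caller must hold msm_iommu_map_mutex */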
static void msm_iommu_meta_add(struct msm_iommu_meta *meta)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (meta->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (meta->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else {
			/*
			 * Cannot happen: callers look up the buffer under
			 * msm_iommu_map_mutex before creating a new meta;
			 * bail out rather than spin on the duplicate node.
			 */
			pr_err("%s: dma_buf %p already exists\n", __func__,
			       entry->buffer);
			return;
		}
	}

	rb_link_node(&meta->node, parent, p);
	rb_insert_color(&meta->node, root);
}

static struct msm_iommu_meta *msm_iommu_meta_lookup(void *buffer)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry = NULL;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static void msm_iommu_add(struct msm_iommu_meta *meta,
			  struct msm_iommu_map *iommu)
{
	INIT_LIST_HEAD(&iommu->lnode);
	list_add(&iommu->lnode, &meta->iommu_maps);
}

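/* Caller must hold meta->lock */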
static struct msm_iommu_map *msm_iommu_lookup(struct msm_iommu_meta *meta,
					      struct device *dev)
{
	struct msm_iommu_map *entry;

	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
		if (entry->dev == dev)
			return entry;
	}

	return NULL;
}

static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)
{
	struct msm_iommu_meta *meta;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&meta->iommu_maps);
	meta->buffer = dma_buf->priv;
	kref_init(&meta->ref);
	mutex_init(&meta->lock);
	msm_iommu_meta_add(meta);

	return meta;
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta);

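/*
 * Core of msm_dma_map_sg_attrs(): look up (or create) the per-buffer meta,
 * then look up the per-device mapping. The first map of a buffer for a
 * given device performs the real dma_map_sg_attrs(); subsequent maps for
 * the same device reuse the cached dma_address/dma_length and only take a
 * reference. Unless DMA_ATTR_NO_DELAYED_UNMAP is passed, an extra reference
 * keeps the mapping alive until msm_dma_buf_freed() is called for the buffer.
 */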
static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_buf *dma_buf,
				   unsigned long attrs)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *iommu_meta = NULL;
	int ret = 0;
	bool extra_meta_ref_taken = false;
	int late_unmap = !(attrs & DMA_ATTR_NO_DELAYED_UNMAP);

	mutex_lock(&msm_iommu_map_mutex);
	iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);

	if (!iommu_meta) {
		iommu_meta = msm_iommu_meta_create(dma_buf);

		if (IS_ERR(iommu_meta)) {
			mutex_unlock(&msm_iommu_map_mutex);
			ret = PTR_ERR(iommu_meta);
			goto out;
		}
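		/*
		 * Hold an extra meta reference so the delayed-unmap state
		 * survives until msm_dma_buf_freed() drops it.
		 */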
		if (late_unmap) {
			kref_get(&iommu_meta->ref);
			extra_meta_ref_taken = true;
		}
	} else {
		kref_get(&iommu_meta->ref);
	}

	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&iommu_meta->lock);
	iommu_map = msm_iommu_lookup(iommu_meta, dev);
	if (!iommu_map) {
		iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);

		if (!iommu_map) {
			ret = -ENOMEM;
			goto out_unlock;
		}

		ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
		if (ret != nents) {
			kfree(iommu_map);
			goto out_unlock;
		}

		kref_init(&iommu_map->ref);
		if (late_unmap)
			kref_get(&iommu_map->ref);
		iommu_map->meta = iommu_meta;
		iommu_map->sgl.dma_address = sg->dma_address;
		iommu_map->sgl.dma_length = sg->dma_length;
		/* Record what the deferred dma_unmap_sg() will need */
		iommu_map->nents = nents;
		iommu_map->dir = dir;
		iommu_map->dev = dev;
		msm_iommu_add(iommu_meta, iommu_map);

	} else {
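		/* Already mapped for this device: reuse the cached addresses */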
		sg->dma_address = iommu_map->sgl.dma_address;
		sg->dma_length = iommu_map->sgl.dma_length;

		kref_get(&iommu_map->ref);
		if (is_device_dma_coherent(dev))
			/*
			 * Ensure all outstanding changes for coherent
			 * buffers are applied to the cache before any
			 * DMA occurs.
			 */
			dmb(ish);
		ret = nents;
	}
	mutex_unlock(&iommu_meta->lock);
	return ret;

out_unlock:
	mutex_unlock(&iommu_meta->lock);
out:
	if (!IS_ERR(iommu_meta)) {
		if (extra_meta_ref_taken)
			msm_iommu_meta_put(iommu_meta);
		msm_iommu_meta_put(iommu_meta);
	}
	return ret;
}

/*
 * We do not take a reference to the dma_buf here. Clients are expected to
 * hold a reference to the dma_buf until they are done mapping and
 * unmapping it.
 */
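/*
 * Illustrative call sequence (a sketch only; how the caller obtains the
 * device, the dma_buf and its scatterlist, e.g. via dma_buf_attach() and
 * dma_buf_map_attachment(), is outside the scope of this file):
 *
 *	nents = msm_dma_map_sg_attrs(dev, sgt->sgl, sgt->nents,
 *				     DMA_BIDIRECTIONAL, dmabuf, 0);
 *	if (nents <= 0)
 *		return nents;	// mapping failed
 *	...
 *	msm_dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL,
 *			 dmabuf);
 */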
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_buf *dma_buf,
			 unsigned long attrs)
{
	int ret;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: dev pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(sg)) {
		pr_err("%s: sg table pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(dma_buf)) {
		pr_err("%s: dma_buf pointer is invalid\n", __func__);
		return -EINVAL;
	}

	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, attrs);

	return ret;
}
EXPORT_SYMBOL(msm_dma_map_sg_attrs);

static void msm_iommu_meta_destroy(struct kref *kref)
{
	struct msm_iommu_meta *meta = container_of(kref, struct msm_iommu_meta,
						   ref);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA buffer %p being destroyed with outstanding iommu mappings!\n",
		     __func__, meta->buffer);
	}
	rb_erase(&meta->node, &iommu_root);
	kfree(meta);
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta)
{
	/*
	 * Need to lock here to prevent a race against map/unmap.
	 */
	mutex_lock(&msm_iommu_map_mutex);
	kref_put(&meta->ref, msm_iommu_meta_destroy);
	mutex_unlock(&msm_iommu_map_mutex);
}

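/* The real dma_unmap_sg() happens here, once the last reference is dropped */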
static void msm_iommu_map_release(struct kref *kref)
{
	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
						 ref);

	list_del(&map->lnode);
	dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);
	kfree(map);
}

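/*
 * Drops one reference to the per-device mapping. With delayed unmap (the
 * default), the hardware unmapping is deferred until msm_dma_buf_freed();
 * with DMA_ATTR_NO_DELAYED_UNMAP it happens as soon as the last map call
 * is balanced by an unmap.
 */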
void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      enum dma_data_direction dir, struct dma_buf *dma_buf)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(dma_buf->priv);
	if (!meta) {
		WARN(1, "%s: (%p) was never mapped\n", __func__, dma_buf);
		mutex_unlock(&msm_iommu_map_mutex);
		goto out;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);
	iommu_map = msm_iommu_lookup(meta, dev);

	if (!iommu_map) {
		WARN(1, "%s: (%p) was never mapped for device %p\n", __func__,
		     dma_buf, dev);
		mutex_unlock(&meta->lock);
		goto out;
	}

	/*
	 * Save the direction for use when we actually unmap. It is not used
	 * right now, but if we ever switch to the coherent mapping API we
	 * may want to call the appropriate variant when the client asks to
	 * unmap.
	 */
	iommu_map->dir = dir;

	kref_put(&iommu_map->ref, msm_iommu_map_release);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);

out:
	return;
}
EXPORT_SYMBOL(msm_dma_unmap_sg);

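/*
 * Drop the reference on every mapping that belongs to @dev, across all
 * buffers. Returns -EINVAL if a mapping could not be released here because
 * it was still referenced.
 */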
int msm_dma_unmap_all_for_dev(struct device *dev)
{
	int ret = 0;
	struct msm_iommu_meta *meta;
	struct rb_root *root;
	struct rb_node *meta_node;

	mutex_lock(&msm_iommu_map_mutex);
	root = &iommu_root;
	meta_node = rb_first(root);
	while (meta_node) {
		struct msm_iommu_map *iommu_map;
		struct msm_iommu_map *iommu_map_next;

		meta = rb_entry(meta_node, struct msm_iommu_meta, node);
		mutex_lock(&meta->lock);
		list_for_each_entry_safe(iommu_map, iommu_map_next,
					 &meta->iommu_maps, lnode)
			if (iommu_map->dev == dev)
				if (!kref_put(&iommu_map->ref,
					      msm_iommu_map_release))
					ret = -EINVAL;

		mutex_unlock(&meta->lock);
		meta_node = rb_next(meta_node);
	}
	mutex_unlock(&msm_iommu_map_mutex);

	return ret;
}

/*
 * Only to be called by ION code when a buffer is freed. Drops the remaining
 * (delayed-unmap) reference on each of the buffer's per-device mappings and
 * warns if any of them is still in use.
 */
void msm_dma_buf_freed(void *buffer)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_map *iommu_map_next;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(buffer);
	if (!meta) {
		/* Already unmapped (assuming no late unmapping) */
		mutex_unlock(&msm_iommu_map_mutex);
		return;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);

	list_for_each_entry_safe(iommu_map, iommu_map_next, &meta->iommu_maps,
				 lnode)
		kref_put(&iommu_map->ref, msm_iommu_map_release);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA buffer %p destroyed with outstanding iommu mappings\n",
		     __func__, meta->buffer);
	}

	INIT_LIST_HEAD(&meta->iommu_maps);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);
}