/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>

#include <linux/msm_dma_iommu_mapping.h>

/**
 * struct msm_iommu_map - represents a mapping of an ion buffer to an iommu
 * @lnode: list node linking this mapping into the buffer's list of
 *         iommu mappings
 * @node: rb tree node
 * @dev: the device this buffer is mapped to; used as the lookup key
 * @sgl: the scatterlist for this mapping
 * @nents: number of entries in @sgl
 * @dir: the direction used for the eventual unmap
 * @meta: backpointer to the msm_iommu_meta this mapping belongs to
 * @ref: reference count for this mapping
 *
 * Represents a mapping of one dma_buf buffer to a particular device
 * and address range. Other mappings of the same buffer may exist for
 * different devices. All mappings have the same cacheability and security.
 */
struct msm_iommu_map {
	struct list_head lnode;
	struct rb_node node;
	struct device *dev;
	struct scatterlist sgl;
	unsigned int nents;
	enum dma_data_direction dir;
	struct msm_iommu_meta *meta;
	struct kref ref;
};
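
/*
 * Illustrative sketch of how the structures relate (hypothetical devices,
 * not compiled): each dma_buf gets one msm_iommu_meta, keyed by
 * dma_buf->priv in the global rbtree, and each device that maps the buffer
 * gets one msm_iommu_map on that meta's iommu_maps list.
 *
 *	msm_iommu_meta (buffer == dmabuf->priv)
 *	  +-- msm_iommu_map { dev = venus_dev,   sgl.dma_address = IOVA A }
 *	  +-- msm_iommu_map { dev = display_dev, sgl.dma_address = IOVA B }
 *
 * A repeat msm_dma_map_sg_attrs() call for a device already on the list
 * returns the cached sgl.dma_address/dma_length instead of calling
 * dma_map_sg_attrs() again.
 */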

struct msm_iommu_meta {
	struct rb_node node;
	struct list_head iommu_maps;
	struct kref ref;
	struct mutex lock;
	void *buffer;
};

static struct rb_root iommu_root;
static DEFINE_MUTEX(msm_iommu_map_mutex);

static void msm_iommu_meta_add(struct msm_iommu_meta *meta)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (meta->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (meta->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			pr_err("%s: dma_buf %p already exists\n", __func__,
			       entry->buffer);
	}

	rb_link_node(&meta->node, parent, p);
	rb_insert_color(&meta->node, root);
}

static struct msm_iommu_meta *msm_iommu_meta_lookup(void *buffer)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry = NULL;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static void msm_iommu_add(struct msm_iommu_meta *meta,
			  struct msm_iommu_map *iommu)
{
	INIT_LIST_HEAD(&iommu->lnode);
	list_add(&iommu->lnode, &meta->iommu_maps);
}

static struct msm_iommu_map *msm_iommu_lookup(struct msm_iommu_meta *meta,
					      struct device *dev)
{
	struct msm_iommu_map *entry;

	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
		if (entry->dev == dev)
			return entry;
	}

	return NULL;
}

static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)
{
	struct msm_iommu_meta *meta;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&meta->iommu_maps);
	meta->buffer = dma_buf->priv;
	kref_init(&meta->ref);
	mutex_init(&meta->lock);
	msm_iommu_meta_add(meta);

	return meta;
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta);

static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_buf *dma_buf,
				   unsigned long attrs)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *iommu_meta = NULL;
	int ret = 0;
	bool extra_meta_ref_taken = false;
	int late_unmap = !(attrs & DMA_ATTR_NO_DELAYED_UNMAP);

	mutex_lock(&msm_iommu_map_mutex);
	iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);

	if (!iommu_meta) {
		iommu_meta = msm_iommu_meta_create(dma_buf);

		if (IS_ERR(iommu_meta)) {
			mutex_unlock(&msm_iommu_map_mutex);
			ret = PTR_ERR(iommu_meta);
			goto out;
		}
		if (late_unmap) {
			kref_get(&iommu_meta->ref);
			extra_meta_ref_taken = true;
		}
	} else {
		kref_get(&iommu_meta->ref);
	}

	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&iommu_meta->lock);
	iommu_map = msm_iommu_lookup(iommu_meta, dev);
	if (!iommu_map) {
		iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);

		if (!iommu_map) {
			ret = -ENOMEM;
			goto out_unlock;
		}

		ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
		if (ret != nents) {
			kfree(iommu_map);
			goto out_unlock;
		}

		kref_init(&iommu_map->ref);
		if (late_unmap)
			kref_get(&iommu_map->ref);
		iommu_map->meta = iommu_meta;
		iommu_map->sgl.dma_address = sg->dma_address;
		iommu_map->sgl.dma_length = sg->dma_length;
		iommu_map->dev = dev;
		/* Record nents so the deferred dma_unmap_sg() sees it. */
		iommu_map->nents = nents;
		msm_iommu_add(iommu_meta, iommu_map);
	} else {
		sg->dma_address = iommu_map->sgl.dma_address;
		sg->dma_length = iommu_map->sgl.dma_length;

		kref_get(&iommu_map->ref);
		/*
		 * Need to do cache operations here based on "dir" in the
		 * future if we go with coherent mappings.
		 */
		ret = nents;
	}
	mutex_unlock(&iommu_meta->lock);
	return ret;

out_unlock:
	mutex_unlock(&iommu_meta->lock);
out:
	if (!IS_ERR(iommu_meta)) {
		if (extra_meta_ref_taken)
			msm_iommu_meta_put(iommu_meta);
		msm_iommu_meta_put(iommu_meta);
	}
	return ret;
}

/*
 * We do not take a reference to the dma_buf here. Clients are expected to
 * hold their own reference to the dma_buf until they are done mapping and
 * unmapping it (see the usage sketch following msm_dma_map_sg_attrs() below).
 */
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_buf *dma_buf,
			 unsigned long attrs)
{
	int ret;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: dev pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(sg)) {
		pr_err("%s: sg table pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(dma_buf)) {
		pr_err("%s: dma_buf pointer is invalid\n", __func__);
		return -EINVAL;
	}

	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, attrs);

	return ret;
}
EXPORT_SYMBOL(msm_dma_map_sg_attrs);
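
/*
 * Usage sketch (illustrative only, not compiled): a typical client obtains
 * the buffer's scatterlist through the regular dma_buf API and then lets
 * this layer perform and cache the per-device mapping. The fd, dev and
 * error handling below are hypothetical.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	int ret;
 *
 *	ret = msm_dma_map_sg_attrs(dev, sgt->sgl, sgt->nents,
 *				   DMA_BIDIRECTIONAL, dmabuf, 0);
 *	...
 *	msm_dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL, dmabuf);
 *
 * The caller keeps its dma_buf reference (dma_buf_put() only after the final
 * unmap), as noted in the comment above msm_dma_map_sg_attrs().
 */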

static void msm_iommu_meta_destroy(struct kref *kref)
{
	struct msm_iommu_meta *meta = container_of(kref, struct msm_iommu_meta,
						   ref);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA Buffer %p being destroyed with outstanding iommu mappings!\n",
		     __func__, meta->buffer);
	}
	rb_erase(&meta->node, &iommu_root);
	kfree(meta);
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta)
{
	/*
	 * Need to lock here to prevent a race against map/unmap.
	 */
	mutex_lock(&msm_iommu_map_mutex);
	kref_put(&meta->ref, msm_iommu_meta_destroy);
	mutex_unlock(&msm_iommu_map_mutex);
}

static void msm_iommu_map_release(struct kref *kref)
{
	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
						 ref);

	list_del(&map->lnode);
	dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);
	kfree(map);
}

void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      enum dma_data_direction dir, struct dma_buf *dma_buf)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(dma_buf->priv);
	if (!meta) {
		WARN(1, "%s: (%p) was never mapped\n", __func__, dma_buf);
		mutex_unlock(&msm_iommu_map_mutex);
		goto out;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);
	iommu_map = msm_iommu_lookup(meta, dev);

	if (!iommu_map) {
		WARN(1, "%s: (%p) was never mapped for device %p\n", __func__,
		     dma_buf, dev);
		mutex_unlock(&meta->lock);
		goto out;
	}

	/*
	 * Save the direction for later use when we actually unmap.
	 * It is not used right now, but if we move to the coherent mapping
	 * API in the future we may want to issue the matching cache
	 * operation when the client asks to unmap.
	 */
	iommu_map->dir = dir;

	kref_put(&iommu_map->ref, msm_iommu_map_release);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);

out:
	return;
}
EXPORT_SYMBOL(msm_dma_unmap_sg);
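
/*
 * Lifecycle sketch for the default delayed-unmap behaviour (illustrative
 * only, not compiled). Because msm_dma_map_sg_attrs() takes an extra
 * reference when DMA_ATTR_NO_DELAYED_UNMAP is not set, msm_dma_unmap_sg()
 * merely drops the caller's reference and the IOMMU mapping stays cached
 * until the buffer itself goes away:
 *
 *	msm_dma_map_sg_attrs(dev, sgt->sgl, sgt->nents, dir, dmabuf, 0);
 *		-> dma_map_sg_attrs() runs, map refcount == 2
 *	msm_dma_unmap_sg(dev, sgt->sgl, sgt->nents, dir, dmabuf);
 *		-> map refcount == 1, cached dma_address still valid
 *	msm_dma_buf_freed(dmabuf->priv);	(called by ION on buffer free)
 *		-> last reference dropped, dma_unmap_sg() finally runs
 */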

int msm_dma_unmap_all_for_dev(struct device *dev)
{
	int ret = 0;
	struct msm_iommu_meta *meta;
	struct rb_root *root;
	struct rb_node *meta_node;

	mutex_lock(&msm_iommu_map_mutex);
	root = &iommu_root;
	meta_node = rb_first(root);
	while (meta_node) {
		struct msm_iommu_map *iommu_map;
		struct msm_iommu_map *iommu_map_next;

		meta = rb_entry(meta_node, struct msm_iommu_meta, node);
		mutex_lock(&meta->lock);
		list_for_each_entry_safe(iommu_map, iommu_map_next,
					 &meta->iommu_maps, lnode)
			if (iommu_map->dev == dev)
				if (!kref_put(&iommu_map->ref,
					      msm_iommu_map_release))
					ret = -EINVAL;

		mutex_unlock(&meta->lock);
		meta_node = rb_next(meta_node);
	}
	mutex_unlock(&msm_iommu_map_mutex);

	return ret;
}
368
Olav Hauganff0116e2015-05-28 17:21:45 -0700369/*
370 * Only to be called by ION code when a buffer is freed
371 */
372void msm_dma_buf_freed(void *buffer)
373{
374 struct msm_iommu_map *iommu_map;
375 struct msm_iommu_map *iommu_map_next;
376 struct msm_iommu_meta *meta;
377
378 mutex_lock(&msm_iommu_map_mutex);
379 meta = msm_iommu_meta_lookup(buffer);
380 if (!meta) {
381 /* Already unmapped (assuming no late unmapping) */
382 mutex_unlock(&msm_iommu_map_mutex);
383 return;
384 }
385 mutex_unlock(&msm_iommu_map_mutex);
386
387 mutex_lock(&meta->lock);
388
389 list_for_each_entry_safe(iommu_map, iommu_map_next, &meta->iommu_maps,
390 lnode)
391 kref_put(&iommu_map->ref, msm_iommu_map_release);
392
393 if (!list_empty(&meta->iommu_maps)) {
394 WARN(1, "%s: DMA buffer %p destroyed with outstanding iommu mappings\n",
395 __func__, meta->buffer);
396 }
397
398 INIT_LIST_HEAD(&meta->iommu_maps);
399 mutex_unlock(&meta->lock);
400
401 msm_iommu_meta_put(meta);
402}