/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>

#include <linux/msm_dma_iommu_mapping.h>

/**
 * struct msm_iommu_map - represents a mapping of an ion buffer to an iommu
 * @lnode: list node in the buffer's list of iommu mappings
 * @node: rb node, currently unused in this file
 * @dev: device this buffer is mapped to; used as the lookup key
 * @sgl: the scatterlist for this mapping
 * @nents: number of entries in @sgl
 * @dir: the direction used for the deferred unmap
 * @meta: back pointer to the msm_iommu_meta this mapping belongs to
 * @ref: reference count for this mapping
 *
 * Represents a mapping of one dma_buf buffer to a particular device and
 * address range. Other mappings of the same buffer may exist for other
 * devices. All mappings have the same cacheability and security.
 */
struct msm_iommu_map {
	struct list_head lnode;
	struct rb_node node;
	struct device *dev;
	struct scatterlist sgl;
	unsigned int nents;
	enum dma_data_direction dir;
	struct msm_iommu_meta *meta;
	struct kref ref;
};

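/**
 * struct msm_iommu_meta - tracks all iommu mappings of one dma_buf
 * @node: rb node for insertion into the global iommu_root tree
 * @iommu_maps: list of msm_iommu_map structs, one per mapped device
 * @ref: reference count for this meta
 * @lock: protects @iommu_maps against concurrent map and unmap
 * @buffer: the dma_buf's priv pointer, used as the rb-tree key
 */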
struct msm_iommu_meta {
	struct rb_node node;
	struct list_head iommu_maps;
	struct kref ref;
	struct mutex lock;
	void *buffer;
};

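/*
 * iommu_root holds one msm_iommu_meta per live dma_buf, keyed by the
 * buffer's priv pointer. msm_iommu_map_mutex protects the tree and the
 * meta reference counts; each meta's own lock then protects its list of
 * mappings, so the global mutex is never held across dma_map_sg().
 */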
static struct rb_root iommu_root;
static DEFINE_MUTEX(msm_iommu_map_mutex);

static void msm_iommu_meta_add(struct msm_iommu_meta *meta)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (meta->buffer < entry->buffer) {
			p = &(*p)->rb_left;
		} else if (meta->buffer > entry->buffer) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * Cannot happen while msm_iommu_map_mutex is held,
			 * but bail out rather than loop forever: a match
			 * never advances p.
			 */
			pr_err("%s: dma_buf %p already exists\n", __func__,
			       entry->buffer);
			return;
		}
	}

	rb_link_node(&meta->node, parent, p);
	rb_insert_color(&meta->node, root);
}

static struct msm_iommu_meta *msm_iommu_meta_lookup(void *buffer)
{
	struct rb_root *root = &iommu_root;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct msm_iommu_meta *entry = NULL;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct msm_iommu_meta, node);

		if (buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

static void msm_iommu_add(struct msm_iommu_meta *meta,
			  struct msm_iommu_map *iommu)
{
	INIT_LIST_HEAD(&iommu->lnode);
	list_add(&iommu->lnode, &meta->iommu_maps);
}

static struct msm_iommu_map *msm_iommu_lookup(struct msm_iommu_meta *meta,
					      struct device *dev)
{
	struct msm_iommu_map *entry;

	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
		if (entry->dev == dev)
			return entry;
	}

	return NULL;
}

static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)
{
	struct msm_iommu_meta *meta;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&meta->iommu_maps);
	meta->buffer = dma_buf->priv;
	kref_init(&meta->ref);
	mutex_init(&meta->lock);
	msm_iommu_meta_add(meta);

	return meta;
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta);

static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_buf *dma_buf, int flags)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *iommu_meta = NULL;
	int ret = 0;
	bool extra_meta_ref_taken = false;
	bool late_unmap = (flags & MSM_DMA_ATTR_NO_DELAYED_UNMAP) == 0;

	mutex_lock(&msm_iommu_map_mutex);
	iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);
	if (!iommu_meta) {
		iommu_meta = msm_iommu_meta_create(dma_buf);
		if (IS_ERR(iommu_meta)) {
			mutex_unlock(&msm_iommu_map_mutex);
			ret = PTR_ERR(iommu_meta);
			goto out;
		}
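		/*
		 * With delayed unmap, a newly created meta must outlive the
		 * client's msm_dma_unmap_sg() call, so take an extra
		 * reference that is only dropped in msm_dma_buf_freed().
		 */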
		if (late_unmap) {
			kref_get(&iommu_meta->ref);
			extra_meta_ref_taken = true;
		}
	} else {
		kref_get(&iommu_meta->ref);
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&iommu_meta->lock);
	iommu_map = msm_iommu_lookup(iommu_meta, dev);
	if (!iommu_map) {
		iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);
		if (!iommu_map) {
			ret = -ENOMEM;
			goto out_unlock;
		}

		ret = dma_map_sg(dev, sg, nents, dir);
		if (ret != nents) {
			kfree(iommu_map);
			goto out_unlock;
		}

		kref_init(&iommu_map->ref);
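		/*
		 * The same applies to the mapping itself: the extra
		 * reference keeps the iommu mapping cached after the client
		 * unmaps, until the buffer is freed.
		 */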
		if (late_unmap)
			kref_get(&iommu_map->ref);
		iommu_map->meta = iommu_meta;
		iommu_map->sgl.dma_address = sg->dma_address;
		iommu_map->sgl.dma_length = sg->dma_length;
		/* Save what msm_iommu_map_release() passes to dma_unmap_sg() */
		iommu_map->nents = nents;
		iommu_map->dir = dir;
		iommu_map->dev = dev;
		msm_iommu_add(iommu_meta, iommu_map);
	} else {
		sg->dma_address = iommu_map->sgl.dma_address;
		sg->dma_length = iommu_map->sgl.dma_length;

		kref_get(&iommu_map->ref);
		/*
		 * Need to do cache operations here based on "dir" in the
		 * future if we go with coherent mappings.
		 */
		ret = nents;
	}
	mutex_unlock(&iommu_meta->lock);
	return ret;

out_unlock:
	mutex_unlock(&iommu_meta->lock);
out:
	if (!IS_ERR(iommu_meta)) {
		if (extra_meta_ref_taken)
			msm_iommu_meta_put(iommu_meta);
		msm_iommu_meta_put(iommu_meta);
	}
	return ret;
}

/*
 * We are not taking a reference to the dma_buf here. It is expected that
 * clients hold a reference to the dma_buf until they are done with mapping
 * and unmapping.
 */
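/**
 * msm_dma_map_sg_attrs - map a dma_buf's scatterlist, reusing any mapping
 *			  that already exists for @dev
 * @dev: device to map the buffer for
 * @sg: scatterlist describing the buffer
 * @nents: number of entries in @sg
 * @dir: DMA direction for the mapping
 * @dma_buf: dma_buf backing @sg; its priv pointer keys the mapping cache
 * @flags: MSM_DMA_ATTR_* flags; unless MSM_DMA_ATTR_NO_DELAYED_UNMAP is
 *	   set, the mapping is kept until msm_dma_buf_freed()
 *
 * Returns @nents on success, dma_map_sg()'s return value on a failed or
 * short mapping, or a negative errno.
 */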
int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
			 enum dma_data_direction dir, struct dma_buf *dma_buf,
			 int flags)
{
	int ret;

	if (IS_ERR_OR_NULL(dev)) {
		pr_err("%s: dev pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(sg)) {
		pr_err("%s: sg table pointer is invalid\n", __func__);
		return -EINVAL;
	}

	if (IS_ERR_OR_NULL(dma_buf)) {
		pr_err("%s: dma_buf pointer is invalid\n", __func__);
		return -EINVAL;
	}

	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, flags);

	return ret;
}
EXPORT_SYMBOL(msm_dma_map_sg_attrs);
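
/*
 * A minimal usage sketch (not from this file; acquiring "dmabuf" and its
 * sg_table "sgt" from the exporter, and most error handling, are elided):
 *
 *	int mapped;
 *
 *	mapped = msm_dma_map_sg_attrs(dev, sgt->sgl, sgt->nents,
 *				      DMA_BIDIRECTIONAL, dmabuf, 0);
 *	if (mapped != sgt->nents)
 *		return -ENOMEM;
 *
 *	// ... device I/O against sg_dma_address(sgt->sgl) ...
 *
 *	msm_dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL,
 *			 dmabuf);
 */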

static void msm_iommu_meta_destroy(struct kref *kref)
{
	struct msm_iommu_meta *meta = container_of(kref, struct msm_iommu_meta,
						   ref);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA buffer %p being destroyed with outstanding iommu mappings!\n",
		     __func__, meta->buffer);
	}
	rb_erase(&meta->node, &iommu_root);
	kfree(meta);
}

static void msm_iommu_meta_put(struct msm_iommu_meta *meta)
{
	/*
	 * Need to lock here to prevent a race against map/unmap.
	 */
	mutex_lock(&msm_iommu_map_mutex);
	kref_put(&meta->ref, msm_iommu_meta_destroy);
	mutex_unlock(&msm_iommu_map_mutex);
}

static void msm_iommu_map_release(struct kref *kref)
{
	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
						 ref);

	list_del(&map->lnode);
	dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);
	kfree(map);
}

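/**
 * msm_dma_unmap_sg - drop one device's reference to a cached mapping
 * @dev: device the buffer was mapped for
 * @sgl: scatterlist that was mapped via msm_dma_map_sg_attrs()
 * @nents: number of entries in @sgl
 * @dir: DMA direction, recorded for the eventual dma_unmap_sg()
 * @dma_buf: the dma_buf whose mapping is being released
 */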
void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      enum dma_data_direction dir, struct dma_buf *dma_buf)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(dma_buf->priv);
	if (!meta) {
		WARN(1, "%s: (%p) was never mapped\n", __func__, dma_buf);
		mutex_unlock(&msm_iommu_map_mutex);
		goto out;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);
	iommu_map = msm_iommu_lookup(meta, dev);
	if (!iommu_map) {
		WARN(1, "%s: (%p) was never mapped for device %p\n", __func__,
		     dma_buf, dev);
		mutex_unlock(&meta->lock);
		goto out;
	}

	/*
	 * Save the direction for the deferred unmap. It is unused today,
	 * but if we move to the coherent mapping API we may want to call
	 * the matching API here when the client asks to unmap.
	 */
	iommu_map->dir = dir;

	kref_put(&iommu_map->ref, msm_iommu_map_release);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);

out:
	return;
}
EXPORT_SYMBOL(msm_dma_unmap_sg);
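
/*
 * Note that when a mapping was created without
 * MSM_DMA_ATTR_NO_DELAYED_UNMAP, the extra reference taken at map time
 * keeps it alive across msm_dma_unmap_sg(); the deferred dma_unmap_sg()
 * only happens in msm_dma_buf_freed() below.
 */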

/*
 * Only to be called by ION code when a buffer is freed.
 */
void msm_dma_buf_freed(void *buffer)
{
	struct msm_iommu_map *iommu_map;
	struct msm_iommu_map *iommu_map_next;
	struct msm_iommu_meta *meta;

	mutex_lock(&msm_iommu_map_mutex);
	meta = msm_iommu_meta_lookup(buffer);
	if (!meta) {
		/* Already unmapped (assuming no late unmapping) */
		mutex_unlock(&msm_iommu_map_mutex);
		return;
	}
	mutex_unlock(&msm_iommu_map_mutex);

	mutex_lock(&meta->lock);
	list_for_each_entry_safe(iommu_map, iommu_map_next, &meta->iommu_maps,
				 lnode)
		kref_put(&iommu_map->ref, msm_iommu_map_release);

	if (!list_empty(&meta->iommu_maps)) {
		WARN(1, "%s: DMA buffer %p destroyed with outstanding iommu mappings\n",
		     __func__, meta->buffer);
	}

	INIT_LIST_HEAD(&meta->iommu_maps);
	mutex_unlock(&meta->lock);

	msm_iommu_meta_put(meta);
}