/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/slab.h>

static struct kmem_cache *iommu_iova_cache;

int iommu_iova_cache_init(void)
{
	int ret = 0;

	iommu_iova_cache = kmem_cache_create("iommu_iova",
					     sizeof(struct iova),
					     0,
					     SLAB_HWCACHE_ALIGN,
					     NULL);
	if (!iommu_iova_cache) {
		pr_err("Couldn't create iova cache\n");
		ret = -ENOMEM;
	}

	return ret;
}

void iommu_iova_cache_destroy(void)
{
	kmem_cache_destroy(iommu_iova_cache);
}

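/*
 * Allocate and free one struct iova from the slab cache.  GFP_ATOMIC
 * because callers may be on atomic DMA mapping paths and cannot sleep.
 */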
struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iommu_iova_cache, iova);
}

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit;
}

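/*
 * Illustrative sketch only (values hypothetical): a caller managing a
 * domain with a 4K granule, allocations starting above the first MiB,
 * and the 32-bit boundary at 4GiB might initialize it as:
 *
 *	init_iova_domain(&iovad, SZ_4K, 0x100000UL >> 12,
 *			 DMA_BIT_MASK(32) >> 12);
 */

/*
 * Pick the starting point for a top-down search.  When allocating up to
 * the 32-bit boundary and a previous 32-bit allocation is cached, resume
 * just below the cached node (tightening *limit_pfn accordingly);
 * otherwise start from the rightmost (highest) node in the tree.
 */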
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

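/*
 * Remember the node that was just allocated against the 32-bit boundary
 * so the next 32-bit allocation can resume the search below it.
 */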
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

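/*
 * On free, move the 32-bit cache off the node being deleted: if the
 * freed range lies at or above the cached node, advance the cache to
 * the next node when that is still below the 32-bit boundary, and drop
 * the cache entirely otherwise.
 */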
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/*
 * Compute the padding required to make the start address
 * naturally aligned on its size.
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}

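/*
 * Worked example (illustrative): for size = 8 (order 3) and
 * limit_pfn = 21, iova_get_pad_size() returns (21 + 1) % 8 = 6, so the
 * allocator below places the range at pfn_lo = 21 - (8 + 6) + 1 = 8,
 * which is naturally aligned on the 8-page size.
 */

/*
 * Walk the tree from the top of the usable range (or from the cached
 * 32-bit node) towards lower addresses until a gap large enough for
 * @size pages plus any alignment padding is found, then link the new
 * node into the rbtree, reusing @prev as the insertion hint.
 */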
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new iova into the domain rbtree while holding the lock */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put the new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add the new node and rebalance the tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

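/*
 * Insert @iova into the rbtree, keyed by pfn_lo.  The caller must hold
 * the rbtree lock; ranges in the tree never overlap, so an equal
 * pfn_lo indicates a bug.
 */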
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size up
	 * to the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
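
/*
 * Illustrative use only (hypothetical caller): requesting 16 naturally
 * aligned pages below the 32-bit boundary might look like:
 *
 *	struct iova *iova = alloc_iova(&iovad, 16,
 *				       iovad->dma_32bit_pfn, true);
 *	if (!iova)
 *		return -ENOMEM;
 */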

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller, as by design the
			 * same thread which called this function also
			 * calls __free_iova(), and only one thread can
			 * possibly reference a particular iova, hence no
			 * conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

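/* Return 1 if [pfn_lo, pfn_hi] intersects the range covered by @node. */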
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

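/*
 * Allocate an iova descriptor and initialize it to cover
 * [pfn_lo, pfn_hi]; this does not link it into any tree.
 */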
static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

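/*
 * Allocate a descriptor for [pfn_lo, pfn_hi] and insert it into the
 * domain's rbtree; used by reserve_iova() for non-overlapping ranges.
 */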
static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}

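/*
 * Merge the overlapping part of [*pfn_lo, *pfn_hi] into @iova: extend
 * @iova downwards if the requested range starts below it, and advance
 * *pfn_lo past @iova so that any remainder above it can still be
 * inserted as a new node.
 */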
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first node to reserve
	 * or because we need to insert the remaining non-overlapping
	 * address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from which to copy
 * @to: - destination domain to which to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}

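/*
 * Carve [pfn_lo, pfn_hi] out of @iova: any parts of @iova outside the
 * requested range are reinserted into the tree as new nodes, while
 * @iova itself is unlinked, trimmed to exactly [pfn_lo, pfn_hi] and
 * returned.  Returns NULL and leaves the tree untouched if the split
 * allocations fail.
 */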
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}