/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(unsigned long data);

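/**
 * init_iova_domain - initialise an IOVA domain
 * @iovad: uninitialised iova_domain to set up
 * @granule: smallest allocation granularity in bytes (a power of two no
 *	larger than PAGE_SIZE)
 * @start_pfn: lowest IOVA pfn that this domain may hand out
 *
 * Sets up the rbtree with its anchor node (which marks the top of the
 * usable address space), the cached lookup nodes and the per-CPU caches.
 */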
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached_node = &iovad->anchor.node;
	iovad->cached32_node = &iovad->anchor.node;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
	iovad->flush_cb = NULL;
	iovad->fq = NULL;
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
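
/*
 * Typical call flow, as an illustrative sketch only (the granule, limit
 * and size values below are made up, not taken from a real user):
 *
 *	struct iova_domain iovad;
 *	unsigned long pfn;
 *
 *	iova_cache_get();
 *	init_iova_domain(&iovad, SZ_4K, 1);
 *	pfn = alloc_iova_fast(&iovad, 16, DMA_BIT_MASK(32) >> 12);
 *	if (pfn)
 *		free_iova_fast(&iovad, pfn, 16);
 *	put_iova_domain(&iovad);
 *	iova_cache_put();
 */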

static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!iovad->fq)
		return;

	if (timer_pending(&iovad->fq_timer))
		del_timer(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq         = NULL;
	iovad->flush_cb   = NULL;
	iovad->entry_dtor = NULL;
}

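/*
 * The flush queue defers freeing of IOVAs until the driver's flush_cb has
 * invalidated the relevant IOTLB entries, either because a per-CPU ring
 * filled up or because the flush timer fired.
 */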
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt,  0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	iovad->fq = alloc_percpu(struct iova_fq);
	if (!iovad->fq)
		return -ENOMEM;

	iovad->flush_cb   = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);
	}

	setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(init_iova_flush_queue);

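/*
 * The cached nodes remember where recent allocations were inserted so that
 * the backwards walk in __alloc_and_insert_iova_range() can usually start
 * near the first free gap instead of at the anchor; cached32_node tracks
 * allocations below the 32-bit boundary separately.
 */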
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
	if (limit_pfn <= iovad->dma_32bit_pfn)
		return iovad->cached32_node;

	return iovad->cached_node;
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{
	if (new->pfn_hi < iovad->dma_32bit_pfn)
		iovad->cached32_node = &new->node;
	else
		iovad->cached_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;

	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
	if (free->pfn_hi < iovad->dma_32bit_pfn &&
	    free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);

	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached_node = rb_next(&free->node);
}

/* Insert the iova into the domain rbtree; the caller must hold the rbtree lock. */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put the new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

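/*
 * Walk the rbtree backwards from the cached starting point, looking for the
 * first gap below limit_pfn that can hold @size pfns (optionally aligned to
 * the next power of two of @size), then insert the new iova there.  Takes
 * the rbtree lock itself.
 */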
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *curr, *prev;
	struct iova *curr_iova;
	unsigned long flags;
	unsigned long new_pfn;
	unsigned long align_mask = ~0UL;

	if (size_aligned)
		align_mask <<= fls_long(size - 1);

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	curr = __get_cached_rbnode(iovad, limit_pfn);
	curr_iova = rb_entry(curr, struct iova, node);
	do {
		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
		new_pfn = (limit_pfn - size) & align_mask;
		prev = curr;
		curr = rb_prev(curr);
		curr_iova = rb_entry(curr, struct iova, node);
	} while (curr && new_pfn <= curr_iova->pfn_hi);

	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
		return -ENOMEM;
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = new_pfn;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	if (iova->pfn_lo != IOVA_ANCHOR)
		kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

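/* Callers of the private_* helpers must hold the rbtree lock themselves. */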
static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_hi)
			node = node->rb_right;
		else
			return iova;	/* pfn falls within iova's range */
	}

	return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain.
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	private_free_iova(iovad, iova);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);


/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn)
{
	bool flushed_rcache = false;
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (flushed_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flushed_rcache = true;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

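/*
 * Each CPU owns a small ring buffer (struct iova_fq) of deferred frees;
 * head and tail wrap modulo IOVA_FQ_SIZE, and an entry may only be reclaimed
 * once the flush counters show that its IOTLB invalidation has completed.
 */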
#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
	unsigned idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		if (iovad->entry_dtor)
			iovad->entry_dtor(fq->entries[idx].data);

		free_iova_fast(iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void iova_domain_flush(struct iova_domain *iovad)
{
	atomic64_inc(&iovad->fq_flush_start_cnt);
	iovad->flush_cb(iovad);
	atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
	int cpu;

	/*
	 * This code runs when the iova_domain is being destroyed, so don't
	 * bother to free iovas, just call the entry_dtor on all remaining
	 * entries.
	 */
	if (!iovad->entry_dtor)
		return;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		int idx;

		fq_ring_for_each(idx, fq)
			iovad->entry_dtor(fq->entries[idx].data);
	}
}

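/* Timer callback: flush the IOTLB once, then reap every CPU's flush queue. */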
static void fq_flush_timeout(unsigned long data)
{
	struct iova_domain *iovad = (struct iova_domain *)data;
	int cpu;

	atomic_set(&iovad->fq_timer_on, 0);
	iova_domain_flush(iovad);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

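/**
 * queue_iova - defer freeing of an iova range until after an IOTLB flush
 * @iovad: iova domain in question
 * @pfn: start pfn of the range being freed
 * @pages: number of pages in the range
 * @data: opaque cookie passed back to the domain's entry_dtor
 */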
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data)
{
	struct iova_fq *fq = get_cpu_ptr(iovad->fq);
	unsigned long flags;
	unsigned idx;

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(iovad, fq);

	if (fq_full(fq)) {
		iova_domain_flush(iovad);
		fq_ring_free(iovad, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages    = pages;
	fq->entries[idx].data     = data;
	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);

	spin_unlock_irqrestore(&fq->lock, flags);

	if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
		mod_timer(&iovad->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));

	put_cpu_ptr(iovad->fq);
}
EXPORT_SYMBOL_GPL(queue_iova);

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
		free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	/* Don't allow nonsensical pfns */
	if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
		return NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range.
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
				iova->pfn_hi, iova->pfn_lo);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

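/*
 * Carve [pfn_lo, pfn_hi] out of an existing iova: the pieces outside that
 * range stay in the tree as new nodes, while the trimmed middle piece is
 * removed from the tree and returned to the caller.
 */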
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next, NULL);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

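/*
 * Per-CPU cache state: allocations and frees work against the 'loaded'
 * magazine, with 'prev' acting as a one-deep victim cache before magazines
 * are exchanged with the per-domain depot.
 */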
struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		BUG_ON(!iova);
		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

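/*
 * Pop the most recently pushed pfn, but only if it satisfies limit_pfn;
 * the cached pfns are not sorted, so only the top entry is checked against
 * the caller's DMA limit.
 */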
static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	BUG_ON(iova_magazine_empty(mag));

	if (mag->pfns[mag->size - 1] > limit_pfn)
		return 0;

	return mag->pfns[--mag->size];
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

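/*
 * Set up one rcache per size class: a depot of full magazines shared by all
 * CPUs plus a 'loaded'/'prev' magazine pair for every possible CPU.
 */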
static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

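/*
 * Ranges are cached by size class: order_base_2(size) selects the rcache,
 * and anything larger than the biggest class bypasses the cache entirely.
 */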
static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
}

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	struct iova_cpu_rcache *cpu_rcache;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			iova_magazine_free(cpu_rcache->loaded);
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j)
			iova_magazine_free(rcache->depot[j]);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");