/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 */
10
11#ifndef _IOVA_H_
12#define _IOVA_H_
13
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/rbtree.h>
17#include <linux/dma-mapping.h>
18
/* iova structure: one allocated IOVA range, a node in the domain's rbtree
 * covering page frames pfn_lo..pfn_hi inclusive. */
struct iova {
	struct rb_node	node;		/* linkage in iova_domain->rbroot */
	unsigned long	pfn_hi;		/* Highest allocated pfn */
	unsigned long	pfn_lo;		/* Lowest allocated pfn */
};
25
/* Opaque types; implemented by the IOVA range-cache code. */
struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32		/* magazines per bin */
31
/* One cache bin of freed IOVA ranges (per power-of-two size class;
 * see iova_domain->rcaches). */
struct iova_rcache {
	spinlock_t lock;				/* protects the global depot below */
	unsigned long depot_size;			/* number of magazines in depot[] */
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];	/* global magazine depot */
	struct iova_cpu_rcache __percpu *cpu_rcaches;	/* per-CPU front-end caches */
};
38
struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (*iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (*iova_entry_dtor)(unsigned long data);
46
/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;	/* range queued via queue_iova(): base pfn ... */
	unsigned long pages;	/* ... and number of pages */
	unsigned long data;	/* per-entry driver data, handed to iova_entry_dtor */
};
56
/* Per-CPU Flush Queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];	/* ring-buffer storage */
	unsigned head, tail;				/* ring indices into entries[] */
};
62
/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached32_node; /* Save last alloced node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;	/* pfn_32bit passed to init_iova_domain() */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct iova_fq __percpu *fq;	/* Flush Queue */
};
81
Jiang Liua156ef92014-07-11 14:19:36 +080082static inline unsigned long iova_size(struct iova *iova)
83{
84 return iova->pfn_hi - iova->pfn_lo + 1;
85}
86
Robin Murphy0fb5fe82015-01-12 17:51:16 +000087static inline unsigned long iova_shift(struct iova_domain *iovad)
88{
89 return __ffs(iovad->granule);
90}
91
92static inline unsigned long iova_mask(struct iova_domain *iovad)
93{
94 return iovad->granule - 1;
95}
96
97static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
98{
99 return iova & iova_mask(iovad);
100}
101
102static inline size_t iova_align(struct iova_domain *iovad, size_t size)
103{
104 return ALIGN(size, iovad->granule);
105}
106
107static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
108{
109 return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
110}
111
112static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
113{
114 return iova >> iova_shift(iovad);
115}
116
#if IS_ENABLED(CONFIG_IOMMU_IOVA)
/* Reference-counted kmem cache backing struct iova allocations. */
int iova_cache_get(void);
void iova_cache_put(void);

/* Raw struct iova allocation/free (no rbtree bookkeeping). */
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
/* Remove and free an allocation; free_iova() looks it up by pfn first. */
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned);
/* "fast" variants go through the per-CPU rcaches. */
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
/* Defer flush/free of a range via the per-CPU flush queue. */
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn);
/* Mark [pfn_lo, pfn_hi] as unavailable for allocation. */
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
/* CONFIG_IOMMU_IOVA disabled: no-op / failing stubs so callers still link. */
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline void queue_iova(struct iova_domain *iovad,
			      unsigned long pfn, unsigned long pages,
			      unsigned long data)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn,
				    unsigned long pfn_32bit)
{
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
#endif
Keshavamurthy, Anil Sf8de50e2007-10-21 16:41:48 -0700250
251#endif