/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 */
10
#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>

/* iova structure: one allocated range of IOVA space, a node in the domain rbtree */
struct iova {
	struct rb_node node;	/* linkage into iova_domain::rbroot */
	unsigned long pfn_hi;	/* Highest allocated pfn */
	unsigned long pfn_lo;	/* Lowest allocated pfn */
};
25
/* Opaque cache internals, defined in the IOVA allocator implementation. */
struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

/*
 * Per-range-size cache of recently freed IOVAs: per-CPU caches backed by
 * a global depot of magazines, protected by @lock.
 */
struct iova_rcache {
	spinlock_t lock;			/* protects @depot and @depot_size */
	unsigned long depot_size;		/* number of magazines in @depot */
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};
38
struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (* iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (* iova_entry_dtor)(unsigned long data);

/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;	/* first pfn of the range to free */
	unsigned long pages;	/* number of pages in the range */
	unsigned long data;	/* opaque value handed to entry_dtor */
};

/* Per-CPU Flush Queue structure (ring buffer of pending frees) */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
};
62
/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t iova_rbtree_lock;	/* Lock to protect update of rbtree */
	struct rb_root rbroot;		/* iova domain rbtree root */
	struct rb_node *cached32_node;	/* Save last alloced node */
	unsigned long granule;		/* pfn granularity for this domain */
	unsigned long start_pfn;	/* Lower limit for this domain */
	unsigned long dma_32bit_pfn;	/* pfn boundary for 32-bit DMA; set
					   from init_iova_domain()'s pfn_32bit */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb flush_cb;		/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct iova_fq __percpu *fq;	/* Flush Queue */
};
81
Jiang Liua156ef92014-07-11 14:19:36 +080082static inline unsigned long iova_size(struct iova *iova)
83{
84 return iova->pfn_hi - iova->pfn_lo + 1;
85}
86
Robin Murphy0fb5fe82015-01-12 17:51:16 +000087static inline unsigned long iova_shift(struct iova_domain *iovad)
88{
89 return __ffs(iovad->granule);
90}
91
92static inline unsigned long iova_mask(struct iova_domain *iovad)
93{
94 return iovad->granule - 1;
95}
96
97static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
98{
99 return iova & iova_mask(iovad);
100}
101
102static inline size_t iova_align(struct iova_domain *iovad, size_t size)
103{
104 return ALIGN(size, iovad->granule);
105}
106
107static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
108{
109 return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
110}
111
112static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
113{
114 return iova >> iova_shift(iovad);
115}
116
#if IS_ENABLED(CONFIG_IOMMU_IOVA)
/* Reference-counted kmem cache for struct iova allocations. */
int iova_cache_get(void);
void iova_cache_put(void);

/* Raw struct iova allocation/freeing (no rbtree insertion). */
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
/* Free the iova range containing @pfn / free an already-looked-up @iova. */
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
/* Allocate @size pfns below @limit_pfn, optionally size-aligned. */
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned);
/* Fast-path alloc/free going through the per-CPU range caches. */
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn);
/* Reserve an explicit pfn range so the allocator never hands it out. */
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
	unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
/* Domain lifetime: init, optional deferred-flush queue setup, teardown. */
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
/* CPU-hotplug hook: drain that CPU's rcaches back to the depot/tree. */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#else
/*
 * Inline stubs used when CONFIG_IOMMU_IOVA is disabled: every allocation
 * fails (NULL/0/-ENOTSUPP/-ENODEV) and every release is a no-op, so callers
 * can be compiled unconditionally.
 */
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn,
				    unsigned long pfn_32bit)
{
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
#endif

#endif