/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

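/*
 * CMD_SET_TYPE() stores the command opcode in bits 28-31 of the second
 * 32-bit command word, i.e. bits 60-63 of the 128-bit command entry.
 */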
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define LOOP_TIMEOUT	100000

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * 512GB Pages are not supported due to a hardware bug
 */
#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))
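/*
 * In the define above, ~0xFFFUL sets every bit from 12 upwards, i.e. all
 * power-of-two page sizes from 4kB on, and ~(2ULL << 38) clears bit 39,
 * the broken 512GB page size mentioned in the comment.
 */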

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* List of all available dev_data structures */
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);

LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
static struct protection_domain *pt_domain;

static const struct iommu_ops amd_iommu_ops;

static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;

static struct dma_map_ops amd_iommu_dma_ops;

/*
 * This struct contains device specific data for the IOMMU
 */
struct iommu_dev_data {
        struct list_head list;            /* For domain->dev_list */
        struct list_head dev_data_list;   /* For global dev_data_list */
        struct list_head alias_list;      /* Link alias-groups together */
        struct iommu_dev_data *alias_data;/* The alias dev_data */
        struct protection_domain *domain; /* Domain the device is bound to */
        u16 devid;                        /* PCI Device ID */
        bool iommu_v2;                    /* Device can make use of IOMMUv2 */
        bool passthrough;                 /* Default for device is pt_domain */
        struct {
                bool enabled;
                int qdep;
        } ats;                            /* ATS state */
        bool pri_tlp;                     /* PASID TLB required for
                                             PPR completions */
        u32 errata;                       /* Bitmap for errata to apply */
};

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
        u32 data[4];
};

struct kmem_cache *amd_iommu_irq_cache;

static void update_domain(struct protection_domain *domain);
static int alloc_passthrough_domain(void);
static int protection_domain_init(struct protection_domain *domain);

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
        return container_of(dom, struct protection_domain, domain);
}

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        unsigned long flags;

        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return NULL;

        INIT_LIST_HEAD(&dev_data->alias_list);

        dev_data->devid = devid;

        spin_lock_irqsave(&dev_data_list_lock, flags);
        list_add_tail(&dev_data->dev_data_list, &dev_data_list);
        spin_unlock_irqrestore(&dev_data_list_lock, flags);

        return dev_data;
}

static void free_dev_data(struct iommu_dev_data *dev_data)
{
        unsigned long flags;

        spin_lock_irqsave(&dev_data_list_lock, flags);
        list_del(&dev_data->dev_data_list);
        spin_unlock_irqrestore(&dev_data_list_lock, flags);

        kfree(dev_data);
}

static struct iommu_dev_data *search_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;
        unsigned long flags;

        spin_lock_irqsave(&dev_data_list_lock, flags);
        list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
                if (dev_data->devid == devid)
                        goto out_unlock;
        }

        dev_data = NULL;

out_unlock:
        spin_unlock_irqrestore(&dev_data_list_lock, flags);

        return dev_data;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
        struct iommu_dev_data *dev_data;

        dev_data = search_dev_data(devid);

        if (dev_data == NULL)
                dev_data = alloc_dev_data(devid);

        return dev_data;
}

static inline u16 get_device_id(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        return PCI_DEVID(pdev->bus->number, pdev->devfn);
}

static struct iommu_dev_data *get_dev_data(struct device *dev)
{
        return dev->archdata.iommu;
}

static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
        static const int caps[] = {
                PCI_EXT_CAP_ID_ATS,
                PCI_EXT_CAP_ID_PRI,
                PCI_EXT_CAP_ID_PASID,
        };
        int i, pos;

        for (i = 0; i < 3; ++i) {
                pos = pci_find_ext_capability(pdev, caps[i]);
                if (pos == 0)
                        return false;
        }

        return true;
}

static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
        struct iommu_dev_data *dev_data;

        dev_data = get_dev_data(&pdev->dev);

        return dev_data->errata & (1 << erratum) ? true : false;
}

/*
 * This function actually applies the mapping to the page table of the
 * dma_ops domain.
 */
static void alloc_unity_mapping(struct dma_ops_domain *dma_dom,
                                struct unity_map_entry *e)
{
        u64 addr;

        for (addr = e->address_start; addr < e->address_end;
             addr += PAGE_SIZE) {
                if (addr < dma_dom->aperture_size)
                        __set_bit(addr >> PAGE_SHIFT,
                                  dma_dom->aperture[0]->bitmap);
        }
}

/*
 * Inits the unity mappings required for a specific device
 */
static void init_unity_mappings_for_device(struct device *dev,
                                           struct dma_ops_domain *dma_dom)
{
        struct unity_map_entry *e;
        u16 devid;

        devid = get_device_id(dev);

        list_for_each_entry(e, &amd_iommu_unity_map, list) {
                if (!(devid >= e->devid_start && devid <= e->devid_end))
                        continue;
                alloc_unity_mapping(dma_dom, e);
        }
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
        u16 devid;

        if (!dev || !dev->dma_mask)
                return false;

        /* No PCI device */
        if (!dev_is_pci(dev))
                return false;

        devid = get_device_id(dev);

        /* Out of our scope? */
        if (devid > amd_iommu_last_bdf)
                return false;

        if (amd_iommu_rlookup_table[devid] == NULL)
                return false;

        return true;
}

static void init_iommu_group(struct device *dev)
{
        struct dma_ops_domain *dma_domain;
        struct iommu_domain *domain;
        struct iommu_group *group;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return;

        domain = iommu_group_default_domain(group);
        if (!domain)
                goto out;

        dma_domain = to_pdomain(domain)->priv;

        init_unity_mappings_for_device(dev, dma_domain);
out:
        iommu_group_put(group);
}

static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
{
        *(u16 *)data = alias;
        return 0;
}

static u16 get_alias(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        u16 devid, ivrs_alias, pci_alias;

        devid = get_device_id(dev);
        ivrs_alias = amd_iommu_alias_table[devid];
        pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);

        if (ivrs_alias == pci_alias)
                return ivrs_alias;

        /*
         * DMA alias showdown
         *
         * The IVRS is fairly reliable in telling us about aliases, but it
         * can't know about every screwy device. If we don't have an IVRS
         * reported alias, use the PCI reported alias. In that case we may
         * still need to initialize the rlookup and dev_table entries if the
         * alias is to a non-existent device.
         */
        if (ivrs_alias == devid) {
                if (!amd_iommu_rlookup_table[pci_alias]) {
                        amd_iommu_rlookup_table[pci_alias] =
                                amd_iommu_rlookup_table[devid];
                        memcpy(amd_iommu_dev_table[pci_alias].data,
                               amd_iommu_dev_table[devid].data,
                               sizeof(amd_iommu_dev_table[pci_alias].data));
                }

                return pci_alias;
        }

        pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
                "for device %s[%04x:%04x], kernel reported alias "
                "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
                PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
                PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
                PCI_FUNC(pci_alias));

        /*
         * If we don't have a PCI DMA alias and the IVRS alias is on the same
         * bus, then the IVRS table may know about a quirk that we don't.
         */
        if (pci_alias == devid &&
            PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
                pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
                pdev->dma_alias_devfn = ivrs_alias & 0xff;
                pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
                        PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
                        dev_name(dev));
        }

        return ivrs_alias;
}

static int iommu_init_device(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct iommu_dev_data *dev_data;
        u16 alias;

        if (dev->archdata.iommu)
                return 0;

        dev_data = find_dev_data(get_device_id(dev));
        if (!dev_data)
                return -ENOMEM;

        alias = get_alias(dev);

        if (alias != dev_data->devid) {
                struct iommu_dev_data *alias_data;

                alias_data = find_dev_data(alias);
                if (alias_data == NULL) {
                        pr_err("AMD-Vi: Warning: Unhandled device %s\n",
                               dev_name(dev));
                        free_dev_data(dev_data);
                        return -ENOTSUPP;
                }
                dev_data->alias_data = alias_data;

                /* Add device to the alias_list */
                list_add(&dev_data->alias_list, &alias_data->alias_list);
        }

        if (pci_iommuv2_capable(pdev)) {
                struct amd_iommu *iommu;

                iommu = amd_iommu_rlookup_table[dev_data->devid];
                dev_data->iommu_v2 = iommu->is_iommu_v2;
        }

        dev->archdata.iommu = dev_data;

        iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
                          dev);

        return 0;
}

static void iommu_ignore_device(struct device *dev)
{
        u16 devid, alias;

        devid = get_device_id(dev);
        alias = amd_iommu_alias_table[devid];

        memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
        memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));

        amd_iommu_rlookup_table[devid] = NULL;
        amd_iommu_rlookup_table[alias] = NULL;
}

static void iommu_uninit_device(struct device *dev)
{
        struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));

        if (!dev_data)
                return;

        iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
                            dev);

        iommu_group_remove_device(dev);

        /* Unlink from alias, it may change if another device is re-plugged */
        dev_data->alias_data = NULL;

        /* Remove dma-ops */
        dev->archdata.dma_ops = NULL;

        /*
         * We keep dev_data around for unplugged devices and reuse it when the
         * device is re-plugged - not doing so would introduce a ton of races.
         */
}

#ifdef CONFIG_AMD_IOMMU_STATS

/*
 * Initialization code for statistics collection
 */

DECLARE_STATS_COUNTER(compl_wait);
DECLARE_STATS_COUNTER(cnt_map_single);
DECLARE_STATS_COUNTER(cnt_unmap_single);
DECLARE_STATS_COUNTER(cnt_map_sg);
DECLARE_STATS_COUNTER(cnt_unmap_sg);
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
DECLARE_STATS_COUNTER(cnt_free_coherent);
DECLARE_STATS_COUNTER(cross_page);
DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);
DECLARE_STATS_COUNTER(complete_ppr);
DECLARE_STATS_COUNTER(invalidate_iotlb);
DECLARE_STATS_COUNTER(invalidate_iotlb_all);
DECLARE_STATS_COUNTER(pri_requests);

static struct dentry *stats_dir;
static struct dentry *de_fflush;

static void amd_iommu_stats_add(struct __iommu_counter *cnt)
{
        if (stats_dir == NULL)
                return;

        cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
                                       &cnt->value);
}

static void amd_iommu_stats_init(void)
{
        stats_dir = debugfs_create_dir("amd-iommu", NULL);
        if (stats_dir == NULL)
                return;

        de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
                                        &amd_iommu_unmap_flush);

        amd_iommu_stats_add(&compl_wait);
        amd_iommu_stats_add(&cnt_map_single);
        amd_iommu_stats_add(&cnt_unmap_single);
        amd_iommu_stats_add(&cnt_map_sg);
        amd_iommu_stats_add(&cnt_unmap_sg);
        amd_iommu_stats_add(&cnt_alloc_coherent);
        amd_iommu_stats_add(&cnt_free_coherent);
        amd_iommu_stats_add(&cross_page);
        amd_iommu_stats_add(&domain_flush_single);
        amd_iommu_stats_add(&domain_flush_all);
        amd_iommu_stats_add(&alloced_io_mem);
        amd_iommu_stats_add(&total_map_requests);
        amd_iommu_stats_add(&complete_ppr);
        amd_iommu_stats_add(&invalidate_iotlb);
        amd_iommu_stats_add(&invalidate_iotlb_all);
        amd_iommu_stats_add(&pri_requests);
}

#endif

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
        int i;

        for (i = 0; i < 4; ++i)
                pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
                        amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
        struct iommu_cmd *cmd = phys_to_virt(phys_addr);
        int i;

        for (i = 0; i < 4; ++i)
                pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
        int type, devid, domid, flags;
        volatile u32 *event = __evt;
        int count = 0;
        u64 address;

retry:
        type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
        devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
        domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
        flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
        address = (u64)(((u64)event[3]) << 32) | event[2];

        if (type == 0) {
                /* Did we hit the erratum? */
                if (++count == LOOP_TIMEOUT) {
                        pr_err("AMD-Vi: No event written to event log\n");
                        return;
                }
                udelay(1);
                goto retry;
        }

        printk(KERN_ERR "AMD-Vi: Event logged [");

        switch (type) {
        case EVENT_TYPE_ILL_DEV:
                printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                dump_dte_entry(devid);
                break;
        case EVENT_TYPE_IO_FAULT:
                printk("IO_PAGE_FAULT device=%02x:%02x.%x "
                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       domid, address, flags);
                break;
        case EVENT_TYPE_DEV_TAB_ERR:
                printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        case EVENT_TYPE_PAGE_TAB_ERR:
                printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
                       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       domid, address, flags);
                break;
        case EVENT_TYPE_ILL_CMD:
                printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
                dump_command(address);
                break;
        case EVENT_TYPE_CMD_HARD_ERR:
                printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
                       "flags=0x%04x]\n", address, flags);
                break;
        case EVENT_TYPE_IOTLB_INV_TO:
                printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
                       "address=0x%016llx]\n",
                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address);
                break;
        case EVENT_TYPE_INV_DEV_REQ:
                printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
                       "address=0x%016llx flags=0x%04x]\n",
                       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                       address, flags);
                break;
        default:
                printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
        }

        memset(__evt, 0, 4 * sizeof(u32));
}

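/*
 * Drain the event log ring buffer: print every entry between the head
 * and tail pointers and write the new head pointer back to the hardware.
 */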
static void iommu_poll_events(struct amd_iommu *iommu)
{
        u32 head, tail;

        head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

        while (head != tail) {
                iommu_print_event(iommu, iommu->evt_buf + head);
                head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
        }

        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}

static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
        struct amd_iommu_fault fault;

        INC_STATS_COUNTER(pri_requests);

        if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
                pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
                return;
        }

        fault.address   = raw[1];
        fault.pasid     = PPR_PASID(raw[0]);
        fault.device_id = PPR_DEVID(raw[0]);
        fault.tag       = PPR_TAG(raw[0]);
        fault.flags     = PPR_FLAGS(raw[0]);

        atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}

static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
        u32 head, tail;

        if (iommu->ppr_log == NULL)
                return;

        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        while (head != tail) {
                volatile u64 *raw;
                u64 entry[2];
                int i;

                raw = (u64 *)(iommu->ppr_log + head);

                /*
                 * Hardware bug: Interrupt may arrive before the entry is
                 * written to memory. If this happens we need to wait for the
                 * entry to arrive.
                 */
                for (i = 0; i < LOOP_TIMEOUT; ++i) {
                        if (PPR_REQ_TYPE(raw[0]) != 0)
                                break;
                        udelay(1);
                }

                /* Avoid memcpy function-call overhead */
                entry[0] = raw[0];
                entry[1] = raw[1];

                /*
                 * To detect the hardware bug we need to clear the entry
                 * back to zero.
                 */
                raw[0] = raw[1] = 0UL;

                /* Update head pointer of hardware ring-buffer */
                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

                /* Handle PPR entry */
                iommu_handle_ppr_entry(iommu, entry);

                /* Refresh ring-buffer information */
                head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
                tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
        }
}

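/*
 * Threaded interrupt handler: clears the pending log-interrupt bits,
 * drains the event and PPR logs and re-reads the status register to
 * work around erratum ERBT1312 (see the comment inside the loop).
 */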
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
        struct amd_iommu *iommu = (struct amd_iommu *) data;
        u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

        while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
                /* Enable EVT and PPR interrupts again */
                writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
                        iommu->mmio_base + MMIO_STATUS_OFFSET);

                if (status & MMIO_STATUS_EVT_INT_MASK) {
                        pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
                        iommu_poll_events(iommu);
                }

                if (status & MMIO_STATUS_PPR_INT_MASK) {
                        pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
                        iommu_poll_ppr_log(iommu);
                }

                /*
                 * Hardware bug: ERBT1312
                 * When re-enabling interrupt (by writing 1
                 * to clear the bit), the hardware might also try to set
                 * the interrupt bit in the event status register.
                 * In this scenario, the bit will be set, and disable
                 * subsequent interrupts.
                 *
                 * Workaround: The IOMMU driver should read back the
                 * status register and check if the interrupt bits are cleared.
                 * If not, the driver will need to go through the interrupt
                 * handler again and re-clear the bits.
                 */
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
        }
        return IRQ_HANDLED;
}

irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
        return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

static int wait_on_sem(volatile u64 *sem)
{
        int i = 0;

        while (*sem == 0 && i < LOOP_TIMEOUT) {
                udelay(1);
                i += 1;
        }

        if (i == LOOP_TIMEOUT) {
                pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
                return -EIO;
        }

        return 0;
}

static void copy_cmd_to_buffer(struct amd_iommu *iommu,
                               struct iommu_cmd *cmd,
                               u32 tail)
{
        u8 *target;

        target = iommu->cmd_buf + tail;
        tail   = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;

        /* Copy command to buffer */
        memcpy(target, cmd, sizeof(*cmd));

        /* Tell the IOMMU about it */
        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

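/*
 * Build a COMPLETION_WAIT command. With the store flag set the IOMMU
 * writes the non-zero value from data[2] to the 8-byte aligned semaphore
 * at @address once all preceding commands have completed; wait_on_sem()
 * polls for that store.
 */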
static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
{
        WARN_ON(address & 0x7ULL);

        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK;
        cmd->data[1] = upper_32_bits(__pa(address));
        cmd->data[2] = 1;
        CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = devid;
        CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}

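/*
 * Build an INVALIDATE_IOMMU_PAGES command for one domain. The size bit
 * together with the special "all pages" address flushes the whole domain
 * TLB; the PDE bit additionally drops cached page-directory entries.
 */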
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
                                  size_t size, u16 domid, int pde)
{
        u64 pages;
        bool s;

        pages = iommu_num_pages(address, size, PAGE_SIZE);
        s     = false;

        if (pages > 1) {
                /*
                 * If we have to flush more than one page, flush all
                 * TLB entries for this domain
                 */
                address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
                s = true;
        }

        address &= PAGE_MASK;

        memset(cmd, 0, sizeof(*cmd));
        cmd->data[1] |= domid;
        cmd->data[2]  = lower_32_bits(address);
        cmd->data[3]  = upper_32_bits(address);
        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
        if (s) /* size bit - we flush more than one 4kb page */
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
        if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
                                  u64 address, size_t size)
{
        u64 pages;
        bool s;

        pages = iommu_num_pages(address, size, PAGE_SIZE);
        s     = false;

        if (pages > 1) {
                /*
                 * If we have to flush more than one page, flush all
                 * TLB entries for this domain
                 */
                address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
                s = true;
        }

        address &= PAGE_MASK;

        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0]  = devid;
        cmd->data[0] |= (qdep & 0xff) << 24;
        cmd->data[1]  = devid;
        cmd->data[2]  = lower_32_bits(address);
        cmd->data[3]  = upper_32_bits(address);
        CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
        if (s)
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}

static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
                                  u64 address, bool size)
{
        memset(cmd, 0, sizeof(*cmd));

        address &= ~(0xfffULL);

        cmd->data[0]  = pasid;
        cmd->data[1]  = domid;
        cmd->data[2]  = lower_32_bits(address);
        cmd->data[3]  = upper_32_bits(address);
        cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
        cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
        if (size)
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

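/*
 * Build an INVALIDATE_IOTLB_PAGES command carrying a PASID. Note the
 * split encoding below: the upper PASID byte goes into bits 16-23 of
 * data[0], the lower byte into bits 16-23 of data[1].
 */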
static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
                                  int qdep, u64 address, bool size)
{
        memset(cmd, 0, sizeof(*cmd));

        address &= ~(0xfffULL);

        cmd->data[0]  = devid;
        cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
        cmd->data[0] |= (qdep & 0xff) << 24;
        cmd->data[1]  = devid;
        cmd->data[1] |= (pasid & 0xff) << 16;
        cmd->data[2]  = lower_32_bits(address);
        cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
        cmd->data[3]  = upper_32_bits(address);
        if (size)
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
        CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
                               int status, int tag, bool gn)
{
        memset(cmd, 0, sizeof(*cmd));

        cmd->data[0]  = devid;
        if (gn) {
                cmd->data[1]  = pasid;
                cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
        }
        cmd->data[3]  = tag & 0x1ff;
        cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;

        CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
}

static void build_inv_all(struct iommu_cmd *cmd)
{
        memset(cmd, 0, sizeof(*cmd));
        CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = devid;
        CMD_SET_TYPE(cmd, CMD_INV_IRT);
}

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
 */
static int iommu_queue_command_sync(struct amd_iommu *iommu,
                                    struct iommu_cmd *cmd,
                                    bool sync)
{
        u32 left, tail, head, next_tail;
        unsigned long flags;

        WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);

again:
        spin_lock_irqsave(&iommu->lock, flags);

        head      = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
        tail      = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
        next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
        left      = (head - next_tail) % iommu->cmd_buf_size;

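        /*
         * If the command buffer is nearly full, queue a COMPLETION_WAIT,
         * drop the lock and wait on its semaphore until the IOMMU has
         * drained the ring, then start over.
         */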
        if (left <= 2) {
                struct iommu_cmd sync_cmd;
                volatile u64 sem = 0;
                int ret;

                build_completion_wait(&sync_cmd, (u64)&sem);
                copy_cmd_to_buffer(iommu, &sync_cmd, tail);

                spin_unlock_irqrestore(&iommu->lock, flags);

                if ((ret = wait_on_sem(&sem)) != 0)
                        return ret;

                goto again;
        }

        copy_cmd_to_buffer(iommu, cmd, tail);

        /* We need to sync now to make sure all commands are processed */
        iommu->need_sync = sync;

        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
        return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
        struct iommu_cmd cmd;
        volatile u64 sem = 0;
        int ret;

        if (!iommu->need_sync)
                return 0;

        build_completion_wait(&cmd, (u64)&sem);

        ret = iommu_queue_command_sync(iommu, &cmd, false);
        if (ret)
                return ret;

        return wait_on_sem(&sem);
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
        struct iommu_cmd cmd;

        build_inv_dte(&cmd, devid);

        return iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_dte_all(struct amd_iommu *iommu)
{
        u32 devid;

        for (devid = 0; devid <= 0xffff; ++devid)
                iommu_flush_dte(iommu, devid);

        iommu_completion_wait(iommu);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
static void iommu_flush_tlb_all(struct amd_iommu *iommu)
{
        u32 dom_id;

        for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
                struct iommu_cmd cmd;
                build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
                                      dom_id, 1);
                iommu_queue_command(iommu, &cmd);
        }

        iommu_completion_wait(iommu);
}

static void iommu_flush_all(struct amd_iommu *iommu)
{
        struct iommu_cmd cmd;

        build_inv_all(&cmd);

        iommu_queue_command(iommu, &cmd);
        iommu_completion_wait(iommu);
}

static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
{
        struct iommu_cmd cmd;

        build_inv_irt(&cmd, devid);

        iommu_queue_command(iommu, &cmd);
}

static void iommu_flush_irt_all(struct amd_iommu *iommu)
{
        u32 devid;

        for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
                iommu_flush_irt(iommu, devid);

        iommu_completion_wait(iommu);
}

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
        if (iommu_feature(iommu, FEATURE_IA)) {
                iommu_flush_all(iommu);
        } else {
                iommu_flush_dte_all(iommu);
                iommu_flush_irt_all(iommu);
                iommu_flush_tlb_all(iommu);
        }
}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data,
                              u64 address, size_t size)
{
        struct amd_iommu *iommu;
        struct iommu_cmd cmd;
        int qdep;

        qdep  = dev_data->ats.qdep;
        iommu = amd_iommu_rlookup_table[dev_data->devid];

        build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

        return iommu_queue_command(iommu, &cmd);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
        struct amd_iommu *iommu;
        int ret;

        iommu = amd_iommu_rlookup_table[dev_data->devid];

        ret = iommu_flush_dte(iommu, dev_data->devid);
        if (ret)
                return ret;

        if (dev_data->ats.enabled)
                ret = device_flush_iotlb(dev_data, 0, ~0UL);

        return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __domain_flush_pages(struct protection_domain *domain,
                                 u64 address, size_t size, int pde)
{
        struct iommu_dev_data *dev_data;
        struct iommu_cmd cmd;
        int ret = 0, i;

        build_inv_iommu_pages(&cmd, address, size, domain->id, pde);

        for (i = 0; i < amd_iommus_present; ++i) {
                if (!domain->dev_iommu[i])
                        continue;

                /*
                 * Devices of this domain are behind this IOMMU
                 * We need a TLB flush
                 */
                ret |= iommu_queue_command(amd_iommus[i], &cmd);
        }

        list_for_each_entry(dev_data, &domain->dev_list, list) {

                if (!dev_data->ats.enabled)
                        continue;

                ret |= device_flush_iotlb(dev_data, address, size);
        }

        WARN_ON(ret);
}

static void domain_flush_pages(struct protection_domain *domain,
                               u64 address, size_t size)
{
        __domain_flush_pages(domain, address, size, 0);
}

/* Flush the whole IO/TLB for a given protection domain */
static void domain_flush_tlb(struct protection_domain *domain)
{
        __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
        __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

static void domain_flush_complete(struct protection_domain *domain)
{
        int i;

        for (i = 0; i < amd_iommus_present; ++i) {
                if (!domain->dev_iommu[i])
                        continue;

                /*
                 * Devices of this domain are behind this IOMMU
                 * We need to wait for completion of all commands.
                 */
                iommu_completion_wait(amd_iommus[i]);
        }
}


/*
 * This function flushes the DTEs for all devices in the domain
 */
static void domain_flush_devices(struct protection_domain *domain)
{
        struct iommu_dev_data *dev_data;

        list_for_each_entry(dev_data, &domain->dev_list, list)
                device_flush_dte(dev_data);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
                                   gfp_t gfp)
{
        u64 *pte;

        if (domain->mode == PAGE_MODE_6_LEVEL)
                /* address space already 64 bit large */
                return false;

        pte = (void *)get_zeroed_page(gfp);
        if (!pte)
                return false;

        *pte            = PM_LEVEL_PDE(domain->mode,
                                       virt_to_phys(domain->pt_root));
        domain->pt_root = pte;
        domain->mode   += 1;
        domain->updated = true;

        return true;
}

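/*
 * Walk the page table down to the level that maps @page_size, growing
 * the address space and allocating missing page-directory pages on the
 * way. Returns a pointer to the PTE for @address at the target level,
 * or NULL on allocation failure or an unexpected level-skipping entry.
 */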
static u64 *alloc_pte(struct protection_domain *domain,
                      unsigned long address,
                      unsigned long page_size,
                      u64 **pte_page,
                      gfp_t gfp)
{
        int level, end_lvl;
        u64 *pte, *page;

        BUG_ON(!is_power_of_2(page_size));

        while (address > PM_LEVEL_SIZE(domain->mode))
                increase_address_space(domain, gfp);

        level   = domain->mode - 1;
        pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
        address = PAGE_SIZE_ALIGN(address, page_size);
        end_lvl = PAGE_SIZE_LEVEL(page_size);

        while (level > end_lvl) {
                if (!IOMMU_PTE_PRESENT(*pte)) {
                        page = (u64 *)get_zeroed_page(gfp);
                        if (!page)
                                return NULL;
                        *pte = PM_LEVEL_PDE(level, virt_to_phys(page));
                }

                /* No level skipping support yet */
                if (PM_PTE_LEVEL(*pte) != level)
                        return NULL;

                level -= 1;

                pte = IOMMU_PTE_PAGE(*pte);

                if (pte_page && level == end_lvl)
                        *pte_page = pte;

                pte = &pte[PM_LEVEL_INDEX(level, address)];
        }

        return pte;
}

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct protection_domain *domain,
                      unsigned long address,
                      unsigned long *page_size)
{
        int level;
        u64 *pte;

        if (address > PM_LEVEL_SIZE(domain->mode))
                return NULL;

        level      = domain->mode - 1;
        pte        = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
        *page_size = PTE_LEVEL_PAGE_SIZE(level);

        while (level > 0) {

                /* Not Present */
                if (!IOMMU_PTE_PRESENT(*pte))
                        return NULL;

                /* Large PTE */
                if (PM_PTE_LEVEL(*pte) == 7 ||
                    PM_PTE_LEVEL(*pte) == 0)
                        break;

                /* No level skipping support yet */
                if (PM_PTE_LEVEL(*pte) != level)
                        return NULL;

                level -= 1;

                /* Walk to the next level */
                pte        = IOMMU_PTE_PAGE(*pte);
                pte        = &pte[PM_LEVEL_INDEX(level, address)];
                *page_size = PTE_LEVEL_PAGE_SIZE(level);
        }

        if (PM_PTE_LEVEL(*pte) == 0x07) {
                unsigned long pte_mask;

                /*
                 * If we have a series of large PTEs, make
                 * sure to return a pointer to the first one.
                 */
                *page_size = pte_mask = PTE_PAGE_SIZE(*pte);
                pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
                pte        = (u64 *)(((unsigned long)pte) & pte_mask);
        }

        return pte;
}

/*
 * Generic mapping functions. It maps a physical address into a DMA
 * address space. It allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_map_page(struct protection_domain *dom,
                          unsigned long bus_addr,
                          unsigned long phys_addr,
                          int prot,
                          unsigned long page_size)
{
        u64 __pte, *pte;
        int i, count;

        BUG_ON(!IS_ALIGNED(bus_addr, page_size));
        BUG_ON(!IS_ALIGNED(phys_addr, page_size));

        if (!(prot & IOMMU_PROT_MASK))
                return -EINVAL;

        count = PAGE_SIZE_PTE_COUNT(page_size);
        pte   = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);

        if (!pte)
                return -ENOMEM;

        for (i = 0; i < count; ++i)
                if (IOMMU_PTE_PRESENT(pte[i]))
                        return -EBUSY;

Joerg Roedeld4b03662015-04-01 14:58:52 +02001381 if (count > 1) {
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001382 __pte = PAGE_SIZE_PTE(phys_addr, page_size);
1383 __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
1384 } else
1385 __pte = phys_addr | IOMMU_PTE_P | IOMMU_PTE_FC;
1386
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001387 if (prot & IOMMU_PROT_IR)
1388 __pte |= IOMMU_PTE_IR;
1389 if (prot & IOMMU_PROT_IW)
1390 __pte |= IOMMU_PTE_IW;
1391
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001392 for (i = 0; i < count; ++i)
1393 pte[i] = __pte;
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001394
Joerg Roedel04bfdd82009-09-02 16:00:23 +02001395 update_domain(dom);
1396
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001397 return 0;
1398}
1399
Joerg Roedel24cd7722010-01-19 17:27:39 +01001400static unsigned long iommu_unmap_page(struct protection_domain *dom,
1401 unsigned long bus_addr,
1402 unsigned long page_size)
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001403{
Joerg Roedel71b390e2015-04-01 14:58:49 +02001404 unsigned long long unmapped;
1405 unsigned long unmap_size;
Joerg Roedel24cd7722010-01-19 17:27:39 +01001406 u64 *pte;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001407
Joerg Roedel24cd7722010-01-19 17:27:39 +01001408 BUG_ON(!is_power_of_2(page_size));
1409
1410 unmapped = 0;
1411
1412 while (unmapped < page_size) {
1413
Joerg Roedel71b390e2015-04-01 14:58:49 +02001414 pte = fetch_pte(dom, bus_addr, &unmap_size);
Joerg Roedel24cd7722010-01-19 17:27:39 +01001415
Joerg Roedel71b390e2015-04-01 14:58:49 +02001416 if (pte) {
1417 int i, count;
Joerg Roedel24cd7722010-01-19 17:27:39 +01001418
Joerg Roedel71b390e2015-04-01 14:58:49 +02001419 count = PAGE_SIZE_PTE_COUNT(unmap_size);
Joerg Roedel24cd7722010-01-19 17:27:39 +01001420 for (i = 0; i < count; i++)
1421 pte[i] = 0ULL;
1422 }
1423
1424 bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
1425 unmapped += unmap_size;
1426 }
1427
Alex Williamson60d0ca32013-06-21 14:33:19 -06001428 BUG_ON(unmapped && !is_power_of_2(unmapped));
Joerg Roedel24cd7722010-01-19 17:27:39 +01001429
1430 return unmapped;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001431}
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001432
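/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * helper that maps a physically contiguous region with 4kB pages using
 * iommu_map_page() above and rolls back with iommu_unmap_page() on
 * failure. The addresses and protection flags are assumed to be valid
 * and page aligned by the caller.
 */
static int __maybe_unused
example_map_region(struct protection_domain *domain, unsigned long bus_addr,
		   unsigned long phys_addr, size_t size, int prot)
{
	unsigned long mapped;
	int ret;

	for (mapped = 0; mapped < size; mapped += PAGE_SIZE) {
		ret = iommu_map_page(domain, bus_addr + mapped,
				     phys_addr + mapped, prot, PAGE_SIZE);
		if (ret)
			goto out_unmap;
	}

	return 0;

out_unmap:
	/* Undo the pages that were already mapped, one 4kB page at a time */
	while (mapped) {
		mapped -= PAGE_SIZE;
		iommu_unmap_page(domain, bus_addr + mapped, PAGE_SIZE);
	}

	return ret;
}
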
Joerg Roedel431b2a22008-07-11 17:14:22 +02001433/****************************************************************************
1434 *
1435 * The next functions belong to the address allocator for the dma_ops
1436 * interface functions. They work like the allocators in the other IOMMU
1437 * drivers. It's basically a bitmap which marks the allocated pages in
1438 * the aperture. Maybe it could be enhanced in the future to a more
1439 * efficient allocator.
1440 *
1441 ****************************************************************************/
Joerg Roedeld3086442008-06-26 21:27:57 +02001442
Joerg Roedel431b2a22008-07-11 17:14:22 +02001443/*
Joerg Roedel384de722009-05-15 12:30:05 +02001444 * The address allocator core functions.
Joerg Roedel431b2a22008-07-11 17:14:22 +02001445 *
1446 * called with domain->lock held
1447 */
Joerg Roedel384de722009-05-15 12:30:05 +02001448
Joerg Roedel9cabe892009-05-18 16:38:55 +02001449/*
Joerg Roedel171e7b32009-11-24 17:47:56 +01001450 * Used to reserve address ranges in the aperture (e.g. for exclusion
1451 * ranges).
1452 */
1453static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
1454 unsigned long start_page,
1455 unsigned int pages)
1456{
1457 unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;
1458
1459 if (start_page + pages > last_page)
1460 pages = last_page - start_page;
1461
1462 for (i = start_page; i < start_page + pages; ++i) {
1463 int index = i / APERTURE_RANGE_PAGES;
1464 int page = i % APERTURE_RANGE_PAGES;
1465 __set_bit(page, dom->aperture[index]->bitmap);
1466 }
1467}
1468
1469/*
Joerg Roedel9cabe892009-05-18 16:38:55 +02001470 * This function is used to add a new aperture range to an existing
1471 * aperture in case of dma_ops domain allocation or address allocation
1472 * failure.
1473 */
Joerg Roedel576175c2009-11-23 19:08:46 +01001474static int alloc_new_range(struct dma_ops_domain *dma_dom,
Joerg Roedel9cabe892009-05-18 16:38:55 +02001475 bool populate, gfp_t gfp)
1476{
1477 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
Joerg Roedel576175c2009-11-23 19:08:46 +01001478 struct amd_iommu *iommu;
Joerg Roedel5d7c94c2015-04-01 14:58:50 +02001479 unsigned long i, old_size, pte_pgsize;
Joerg Roedel9cabe892009-05-18 16:38:55 +02001480
Joerg Roedelf5e97052009-05-22 12:31:53 +02001481#ifdef CONFIG_IOMMU_STRESS
1482 populate = false;
1483#endif
1484
Joerg Roedel9cabe892009-05-18 16:38:55 +02001485 if (index >= APERTURE_MAX_RANGES)
1486 return -ENOMEM;
1487
1488 dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
1489 if (!dma_dom->aperture[index])
1490 return -ENOMEM;
1491
1492 dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
1493 if (!dma_dom->aperture[index]->bitmap)
1494 goto out_free;
1495
1496 dma_dom->aperture[index]->offset = dma_dom->aperture_size;
1497
1498 if (populate) {
1499 unsigned long address = dma_dom->aperture_size;
1500 int i, num_ptes = APERTURE_RANGE_PAGES / 512;
1501 u64 *pte, *pte_page;
1502
1503 for (i = 0; i < num_ptes; ++i) {
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001504 pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
Joerg Roedel9cabe892009-05-18 16:38:55 +02001505 &pte_page, gfp);
1506 if (!pte)
1507 goto out_free;
1508
1509 dma_dom->aperture[index]->pte_pages[i] = pte_page;
1510
1511 address += APERTURE_RANGE_SIZE / 64;
1512 }
1513 }
1514
Joerg Roedel17f5b562011-07-06 17:14:44 +02001515 old_size = dma_dom->aperture_size;
Joerg Roedel9cabe892009-05-18 16:38:55 +02001516 dma_dom->aperture_size += APERTURE_RANGE_SIZE;
1517
Joerg Roedel17f5b562011-07-06 17:14:44 +02001518 /* Reserve address range used for MSI messages */
1519 if (old_size < MSI_ADDR_BASE_LO &&
1520 dma_dom->aperture_size > MSI_ADDR_BASE_LO) {
1521 unsigned long spage;
1522 int pages;
1523
1524 pages = iommu_num_pages(MSI_ADDR_BASE_LO, 0x10000, PAGE_SIZE);
1525 spage = MSI_ADDR_BASE_LO >> PAGE_SHIFT;
1526
1527 dma_ops_reserve_addresses(dma_dom, spage, pages);
1528 }
1529
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001530 /* Initialize the exclusion range if necessary */
Joerg Roedel576175c2009-11-23 19:08:46 +01001531 for_each_iommu(iommu) {
1532 if (iommu->exclusion_start &&
1533 iommu->exclusion_start >= dma_dom->aperture[index]->offset
1534 && iommu->exclusion_start < dma_dom->aperture_size) {
1535 unsigned long startpage;
1536 int pages = iommu_num_pages(iommu->exclusion_start,
1537 iommu->exclusion_length,
1538 PAGE_SIZE);
1539 startpage = iommu->exclusion_start >> PAGE_SHIFT;
1540 dma_ops_reserve_addresses(dma_dom, startpage, pages);
1541 }
Joerg Roedel00cd1222009-05-19 09:52:40 +02001542 }
1543
1544 /*
1545 * Check for areas already mapped as present in the new aperture
1546 * range and mark those pages as reserved in the allocator. Such
1547 * mappings may already exist as a result of requested unity
1548 * mappings for devices.
1549 */
1550 for (i = dma_dom->aperture[index]->offset;
1551 i < dma_dom->aperture_size;
Joerg Roedel5d7c94c2015-04-01 14:58:50 +02001552 i += pte_pgsize) {
Joerg Roedel3039ca12015-04-01 14:58:48 +02001553 u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
Joerg Roedel00cd1222009-05-19 09:52:40 +02001554 if (!pte || !IOMMU_PTE_PRESENT(*pte))
1555 continue;
1556
Joerg Roedel5d7c94c2015-04-01 14:58:50 +02001557 dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
1558 pte_pgsize >> 12);
Joerg Roedel00cd1222009-05-19 09:52:40 +02001559 }
1560
Joerg Roedel04bfdd82009-09-02 16:00:23 +02001561 update_domain(&dma_dom->domain);
1562
Joerg Roedel9cabe892009-05-18 16:38:55 +02001563 return 0;
1564
1565out_free:
Joerg Roedel04bfdd82009-09-02 16:00:23 +02001566 update_domain(&dma_dom->domain);
1567
Joerg Roedel9cabe892009-05-18 16:38:55 +02001568 free_page((unsigned long)dma_dom->aperture[index]->bitmap);
1569
1570 kfree(dma_dom->aperture[index]);
1571 dma_dom->aperture[index] = NULL;
1572
1573 return -ENOMEM;
1574}
1575
Joerg Roedel384de722009-05-15 12:30:05 +02001576static unsigned long dma_ops_area_alloc(struct device *dev,
1577 struct dma_ops_domain *dom,
1578 unsigned int pages,
1579 unsigned long align_mask,
1580 u64 dma_mask,
1581 unsigned long start)
1582{
Joerg Roedel803b8cb2009-05-18 15:32:48 +02001583 unsigned long next_bit = dom->next_address % APERTURE_RANGE_SIZE;
Joerg Roedel384de722009-05-15 12:30:05 +02001584 int max_index = dom->aperture_size >> APERTURE_RANGE_SHIFT;
1585 int i = start >> APERTURE_RANGE_SHIFT;
Joerg Roedele6aabee2015-05-27 09:26:09 +02001586 unsigned long boundary_size, mask;
Joerg Roedel384de722009-05-15 12:30:05 +02001587 unsigned long address = -1;
1588 unsigned long limit;
1589
Joerg Roedel803b8cb2009-05-18 15:32:48 +02001590 next_bit >>= PAGE_SHIFT;
1591
Joerg Roedele6aabee2015-05-27 09:26:09 +02001592 mask = dma_get_seg_boundary(dev);
1593
1594 boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
1595 1UL << (BITS_PER_LONG - PAGE_SHIFT);
Joerg Roedel384de722009-05-15 12:30:05 +02001596
1597 for (;i < max_index; ++i) {
1598 unsigned long offset = dom->aperture[i]->offset >> PAGE_SHIFT;
1599
1600 if (dom->aperture[i]->offset >= dma_mask)
1601 break;
1602
1603 limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
1604 dma_mask >> PAGE_SHIFT);
1605
1606 address = iommu_area_alloc(dom->aperture[i]->bitmap,
1607 limit, next_bit, pages, 0,
1608 boundary_size, align_mask);
1609 if (address != -1) {
1610 address = dom->aperture[i]->offset +
1611 (address << PAGE_SHIFT);
Joerg Roedel803b8cb2009-05-18 15:32:48 +02001612 dom->next_address = address + (pages << PAGE_SHIFT);
Joerg Roedel384de722009-05-15 12:30:05 +02001613 break;
1614 }
1615
1616 next_bit = 0;
1617 }
1618
1619 return address;
1620}
1621
Joerg Roedeld3086442008-06-26 21:27:57 +02001622static unsigned long dma_ops_alloc_addresses(struct device *dev,
1623 struct dma_ops_domain *dom,
Joerg Roedel6d4f3432008-09-04 19:18:02 +02001624 unsigned int pages,
Joerg Roedel832a90c2008-09-18 15:54:23 +02001625 unsigned long align_mask,
1626 u64 dma_mask)
Joerg Roedeld3086442008-06-26 21:27:57 +02001627{
Joerg Roedeld3086442008-06-26 21:27:57 +02001628 unsigned long address;
Joerg Roedeld3086442008-06-26 21:27:57 +02001629
Joerg Roedelfe16f082009-05-22 12:27:53 +02001630#ifdef CONFIG_IOMMU_STRESS
1631 dom->next_address = 0;
1632 dom->need_flush = true;
1633#endif
Joerg Roedeld3086442008-06-26 21:27:57 +02001634
Joerg Roedel384de722009-05-15 12:30:05 +02001635 address = dma_ops_area_alloc(dev, dom, pages, align_mask,
Joerg Roedel803b8cb2009-05-18 15:32:48 +02001636 dma_mask, dom->next_address);
Joerg Roedeld3086442008-06-26 21:27:57 +02001637
Joerg Roedel1c655772008-09-04 18:40:05 +02001638 if (address == -1) {
Joerg Roedel803b8cb2009-05-18 15:32:48 +02001639 dom->next_address = 0;
Joerg Roedel384de722009-05-15 12:30:05 +02001640 address = dma_ops_area_alloc(dev, dom, pages, align_mask,
1641 dma_mask, 0);
Joerg Roedel1c655772008-09-04 18:40:05 +02001642 dom->need_flush = true;
1643 }
Joerg Roedeld3086442008-06-26 21:27:57 +02001644
Joerg Roedel384de722009-05-15 12:30:05 +02001645 if (unlikely(address == -1))
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09001646 address = DMA_ERROR_CODE;
Joerg Roedeld3086442008-06-26 21:27:57 +02001647
1648 WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
1649
1650 return address;
1651}
1652
Joerg Roedel431b2a22008-07-11 17:14:22 +02001653/*
1654 * The address free function.
1655 *
1656 * called with domain->lock held
1657 */
Joerg Roedeld3086442008-06-26 21:27:57 +02001658static void dma_ops_free_addresses(struct dma_ops_domain *dom,
1659 unsigned long address,
1660 unsigned int pages)
1661{
Joerg Roedel384de722009-05-15 12:30:05 +02001662 unsigned i = address >> APERTURE_RANGE_SHIFT;
1663 struct aperture_range *range = dom->aperture[i];
Joerg Roedel80be3082008-11-06 14:59:05 +01001664
Joerg Roedel384de722009-05-15 12:30:05 +02001665 BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
1666
Joerg Roedel47bccd62009-05-22 12:40:54 +02001667#ifdef CONFIG_IOMMU_STRESS
1668 if (i < 4)
1669 return;
1670#endif
1671
Joerg Roedel803b8cb2009-05-18 15:32:48 +02001672 if (address >= dom->next_address)
Joerg Roedel80be3082008-11-06 14:59:05 +01001673 dom->need_flush = true;
Joerg Roedel384de722009-05-15 12:30:05 +02001674
1675 address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
Joerg Roedel803b8cb2009-05-18 15:32:48 +02001676
Akinobu Mitaa66022c2009-12-15 16:48:28 -08001677 bitmap_clear(range->bitmap, address, pages);
Joerg Roedel384de722009-05-15 12:30:05 +02001678
Joerg Roedeld3086442008-06-26 21:27:57 +02001679}
1680
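/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * helper showing the expected alloc/free pairing of the two functions
 * above. The caller is assumed to hold domain->lock, as documented.
 */
static void __maybe_unused
example_alloc_and_free(struct device *dev, struct dma_ops_domain *dma_dom)
{
	unsigned long address;

	/* Ask for 16 pages, no extra alignment, full 64-bit DMA mask */
	address = dma_ops_alloc_addresses(dev, dma_dom, 16, 0,
					  DMA_BIT_MASK(64));
	if (address == DMA_ERROR_CODE)
		return;

	/* ... the PTEs for this range would be programmed here ... */

	dma_ops_free_addresses(dma_dom, address, 16);
}
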
Joerg Roedel431b2a22008-07-11 17:14:22 +02001681/****************************************************************************
1682 *
1683 * The next functions belong to the domain allocation. A domain is
1684 * allocated for every IOMMU as the default domain. If device isolation
1685 * is enabled, every device gets its own domain. The most important thing
1686 * about domains is the page table mapping the DMA address space they
1687 * contain.
1688 *
1689 ****************************************************************************/
1690
Joerg Roedelaeb26f52009-11-20 16:44:01 +01001691/*
1692 * This function adds a protection domain to the global protection domain list
1693 */
1694static void add_domain_to_list(struct protection_domain *domain)
1695{
1696 unsigned long flags;
1697
1698 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1699 list_add(&domain->list, &amd_iommu_pd_list);
1700 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1701}
1702
1703/*
1704 * This function removes a protection domain from the global
1705 * protection domain list
1706 */
1707static void del_domain_from_list(struct protection_domain *domain)
1708{
1709 unsigned long flags;
1710
1711 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1712 list_del(&domain->list);
1713 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1714}
1715
Joerg Roedelec487d12008-06-26 21:27:58 +02001716static u16 domain_id_alloc(void)
1717{
1718 unsigned long flags;
1719 int id;
1720
1721 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1722 id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1723 BUG_ON(id == 0);
1724 if (id > 0 && id < MAX_DOMAIN_ID)
1725 __set_bit(id, amd_iommu_pd_alloc_bitmap);
1726 else
1727 id = 0;
1728 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1729
1730 return id;
1731}
1732
Joerg Roedela2acfb72008-12-02 18:28:53 +01001733static void domain_id_free(int id)
1734{
1735 unsigned long flags;
1736
1737 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1738 if (id > 0 && id < MAX_DOMAIN_ID)
1739 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1740 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1741}
Joerg Roedela2acfb72008-12-02 18:28:53 +01001742
Joerg Roedel5c34c402013-06-20 20:22:58 +02001743#define DEFINE_FREE_PT_FN(LVL, FN) \
1744static void free_pt_##LVL (unsigned long __pt) \
1745{ \
1746 unsigned long p; \
1747 u64 *pt; \
1748 int i; \
1749 \
1750 pt = (u64 *)__pt; \
1751 \
1752 for (i = 0; i < 512; ++i) { \
Joerg Roedel0b3fff52015-06-18 10:48:34 +02001753 /* PTE present? */ \
Joerg Roedel5c34c402013-06-20 20:22:58 +02001754 if (!IOMMU_PTE_PRESENT(pt[i])) \
1755 continue; \
1756 \
Joerg Roedel0b3fff52015-06-18 10:48:34 +02001757 /* Large PTE? */ \
1758 if (PM_PTE_LEVEL(pt[i]) == 0 || \
1759 PM_PTE_LEVEL(pt[i]) == 7) \
1760 continue; \
1761 \
Joerg Roedel5c34c402013-06-20 20:22:58 +02001762 p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
1763 FN(p); \
1764 } \
1765 free_page((unsigned long)pt); \
1766}
1767
1768DEFINE_FREE_PT_FN(l2, free_page)
1769DEFINE_FREE_PT_FN(l3, free_pt_l2)
1770DEFINE_FREE_PT_FN(l4, free_pt_l3)
1771DEFINE_FREE_PT_FN(l5, free_pt_l4)
1772DEFINE_FREE_PT_FN(l6, free_pt_l5)
1773
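/*
 * Illustrative sketch, not part of the original driver: the macro above
 * expands to one freeing helper per page-table level. free_pt_l2(), for
 * example, is roughly equivalent to this open-coded version, which frees
 * every next-level table referenced by a present, non-large PTE before
 * freeing the table page itself.
 */
static void __maybe_unused example_free_pt_l2(unsigned long __pt)
{
	u64 *pt = (u64 *)__pt;
	int i;

	for (i = 0; i < 512; ++i) {
		/* Skip entries that are not present or that map a large page */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;
		if (PM_PTE_LEVEL(pt[i]) == 0 || PM_PTE_LEVEL(pt[i]) == 7)
			continue;

		free_page((unsigned long)IOMMU_PTE_PAGE(pt[i]));
	}

	free_page((unsigned long)pt);
}
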
Joerg Roedel86db2e52008-12-02 18:20:21 +01001774static void free_pagetable(struct protection_domain *domain)
Joerg Roedelec487d12008-06-26 21:27:58 +02001775{
Joerg Roedel5c34c402013-06-20 20:22:58 +02001776 unsigned long root = (unsigned long)domain->pt_root;
Joerg Roedelec487d12008-06-26 21:27:58 +02001777
Joerg Roedel5c34c402013-06-20 20:22:58 +02001778 switch (domain->mode) {
1779 case PAGE_MODE_NONE:
1780 break;
1781 case PAGE_MODE_1_LEVEL:
1782 free_page(root);
1783 break;
1784 case PAGE_MODE_2_LEVEL:
1785 free_pt_l2(root);
1786 break;
1787 case PAGE_MODE_3_LEVEL:
1788 free_pt_l3(root);
1789 break;
1790 case PAGE_MODE_4_LEVEL:
1791 free_pt_l4(root);
1792 break;
1793 case PAGE_MODE_5_LEVEL:
1794 free_pt_l5(root);
1795 break;
1796 case PAGE_MODE_6_LEVEL:
1797 free_pt_l6(root);
1798 break;
1799 default:
1800 BUG();
Joerg Roedelec487d12008-06-26 21:27:58 +02001801 }
Joerg Roedelec487d12008-06-26 21:27:58 +02001802}
1803
Joerg Roedelb16137b2011-11-21 16:50:23 +01001804static void free_gcr3_tbl_level1(u64 *tbl)
1805{
1806 u64 *ptr;
1807 int i;
1808
1809 for (i = 0; i < 512; ++i) {
1810 if (!(tbl[i] & GCR3_VALID))
1811 continue;
1812
1813 ptr = __va(tbl[i] & PAGE_MASK);
1814
1815 free_page((unsigned long)ptr);
1816 }
1817}
1818
1819static void free_gcr3_tbl_level2(u64 *tbl)
1820{
1821 u64 *ptr;
1822 int i;
1823
1824 for (i = 0; i < 512; ++i) {
1825 if (!(tbl[i] & GCR3_VALID))
1826 continue;
1827
1828 ptr = __va(tbl[i] & PAGE_MASK);
1829
1830 free_gcr3_tbl_level1(ptr);
1831 }
1832}
1833
Joerg Roedel52815b72011-11-17 17:24:28 +01001834static void free_gcr3_table(struct protection_domain *domain)
1835{
Joerg Roedelb16137b2011-11-21 16:50:23 +01001836 if (domain->glx == 2)
1837 free_gcr3_tbl_level2(domain->gcr3_tbl);
1838 else if (domain->glx == 1)
1839 free_gcr3_tbl_level1(domain->gcr3_tbl);
1840 else if (domain->glx != 0)
1841 BUG();
1842
Joerg Roedel52815b72011-11-17 17:24:28 +01001843 free_page((unsigned long)domain->gcr3_tbl);
1844}
1845
Joerg Roedel431b2a22008-07-11 17:14:22 +02001846/*
1847 * Free a domain, only used if something went wrong in the
1848 * allocation path and we need to free an already allocated page table
1849 */
Joerg Roedelec487d12008-06-26 21:27:58 +02001850static void dma_ops_domain_free(struct dma_ops_domain *dom)
1851{
Joerg Roedel384de722009-05-15 12:30:05 +02001852 int i;
1853
Joerg Roedelec487d12008-06-26 21:27:58 +02001854 if (!dom)
1855 return;
1856
Joerg Roedelaeb26f52009-11-20 16:44:01 +01001857 del_domain_from_list(&dom->domain);
1858
Joerg Roedel86db2e52008-12-02 18:20:21 +01001859 free_pagetable(&dom->domain);
Joerg Roedelec487d12008-06-26 21:27:58 +02001860
Joerg Roedel384de722009-05-15 12:30:05 +02001861 for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
1862 if (!dom->aperture[i])
1863 continue;
1864 free_page((unsigned long)dom->aperture[i]->bitmap);
1865 kfree(dom->aperture[i]);
1866 }
Joerg Roedelec487d12008-06-26 21:27:58 +02001867
1868 kfree(dom);
1869}
1870
Joerg Roedel431b2a22008-07-11 17:14:22 +02001871/*
1872 * Allocates a new protection domain usable for the dma_ops functions.
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001873 * It also initializes the page table and the address allocator data
Joerg Roedel431b2a22008-07-11 17:14:22 +02001874 * structures required for the dma_ops interface
1875 */
Joerg Roedel87a64d52009-11-24 17:26:43 +01001876static struct dma_ops_domain *dma_ops_domain_alloc(void)
Joerg Roedelec487d12008-06-26 21:27:58 +02001877{
1878 struct dma_ops_domain *dma_dom;
Joerg Roedelec487d12008-06-26 21:27:58 +02001879
1880 dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
1881 if (!dma_dom)
1882 return NULL;
1883
Joerg Roedel7a5a5662015-06-30 08:56:11 +02001884 if (protection_domain_init(&dma_dom->domain))
Joerg Roedelec487d12008-06-26 21:27:58 +02001885 goto free_dma_dom;
Joerg Roedel7a5a5662015-06-30 08:56:11 +02001886
Joerg Roedel8f7a0172009-09-02 16:55:24 +02001887 dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
Joerg Roedelec487d12008-06-26 21:27:58 +02001888 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
Joerg Roedel9fdb19d2008-12-02 17:46:25 +01001889 dma_dom->domain.flags = PD_DMA_OPS_MASK;
Joerg Roedelec487d12008-06-26 21:27:58 +02001890 dma_dom->domain.priv = dma_dom;
1891 if (!dma_dom->domain.pt_root)
1892 goto free_dma_dom;
Joerg Roedelec487d12008-06-26 21:27:58 +02001893
Joerg Roedel1c655772008-09-04 18:40:05 +02001894 dma_dom->need_flush = false;
1895
Joerg Roedelaeb26f52009-11-20 16:44:01 +01001896 add_domain_to_list(&dma_dom->domain);
1897
Joerg Roedel576175c2009-11-23 19:08:46 +01001898 if (alloc_new_range(dma_dom, true, GFP_KERNEL))
Joerg Roedelec487d12008-06-26 21:27:58 +02001899 goto free_dma_dom;
Joerg Roedelec487d12008-06-26 21:27:58 +02001900
Joerg Roedel431b2a22008-07-11 17:14:22 +02001901 /*
Joerg Roedelec487d12008-06-26 21:27:58 +02001902 * mark the first page as allocated so we never return 0 as
1903 * a valid dma-address. That way we can use 0 as the error value.
Joerg Roedel431b2a22008-07-11 17:14:22 +02001904 */
Joerg Roedel384de722009-05-15 12:30:05 +02001905 dma_dom->aperture[0]->bitmap[0] = 1;
Joerg Roedel803b8cb2009-05-18 15:32:48 +02001906 dma_dom->next_address = 0;
Joerg Roedelec487d12008-06-26 21:27:58 +02001907
Joerg Roedelec487d12008-06-26 21:27:58 +02001908
1909 return dma_dom;
1910
1911free_dma_dom:
1912 dma_ops_domain_free(dma_dom);
1913
1914 return NULL;
1915}
1916
Joerg Roedel431b2a22008-07-11 17:14:22 +02001917/*
Joerg Roedel5b28df62008-12-02 17:49:42 +01001918 * little helper function to check whether a given protection domain is a
1919 * dma_ops domain
1920 */
1921static bool dma_ops_domain(struct protection_domain *domain)
1922{
1923 return domain->flags & PD_DMA_OPS_MASK;
1924}
1925
Joerg Roedelfd7b5532011-04-05 15:31:08 +02001926static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001927{
Joerg Roedel132bd682011-11-17 14:18:46 +01001928 u64 pte_root = 0;
Joerg Roedelee6c2862011-11-09 12:06:03 +01001929 u64 flags = 0;
Joerg Roedel863c74e2008-12-02 17:56:36 +01001930
Joerg Roedel132bd682011-11-17 14:18:46 +01001931 if (domain->mode != PAGE_MODE_NONE)
1932 pte_root = virt_to_phys(domain->pt_root);
1933
Joerg Roedel38ddf412008-09-11 10:38:32 +02001934 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1935 << DEV_ENTRY_MODE_SHIFT;
1936 pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001937
Joerg Roedelee6c2862011-11-09 12:06:03 +01001938 flags = amd_iommu_dev_table[devid].data[1];
1939
Joerg Roedelfd7b5532011-04-05 15:31:08 +02001940 if (ats)
1941 flags |= DTE_FLAG_IOTLB;
1942
Joerg Roedel52815b72011-11-17 17:24:28 +01001943 if (domain->flags & PD_IOMMUV2_MASK) {
1944 u64 gcr3 = __pa(domain->gcr3_tbl);
1945 u64 glx = domain->glx;
1946 u64 tmp;
1947
1948 pte_root |= DTE_FLAG_GV;
1949 pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1950
1951 /* First mask out possible old values for GCR3 table */
1952 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1953 flags &= ~tmp;
1954
1955 tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1956 flags &= ~tmp;
1957
1958 /* Encode GCR3 table into DTE */
1959 tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1960 pte_root |= tmp;
1961
1962 tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1963 flags |= tmp;
1964
1965 tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1966 flags |= tmp;
1967 }
1968
Joerg Roedelee6c2862011-11-09 12:06:03 +01001969 flags &= ~(0xffffUL);
1970 flags |= domain->id;
1971
1972 amd_iommu_dev_table[devid].data[1] = flags;
1973 amd_iommu_dev_table[devid].data[0] = pte_root;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001974}
1975
Joerg Roedel15898bb2009-11-24 15:39:42 +01001976static void clear_dte_entry(u16 devid)
Joerg Roedel355bf552008-12-08 12:02:41 +01001977{
Joerg Roedel355bf552008-12-08 12:02:41 +01001978 /* remove entry from the device table seen by the hardware */
1979 amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
1980 amd_iommu_dev_table[devid].data[1] = 0;
Joerg Roedel355bf552008-12-08 12:02:41 +01001981
Joerg Roedelc5cca142009-10-09 18:31:20 +02001982 amd_iommu_apply_erratum_63(devid);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001983}
1984
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001985static void do_attach(struct iommu_dev_data *dev_data,
1986 struct protection_domain *domain)
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001987{
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001988 struct amd_iommu *iommu;
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001989 bool ats;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001990
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001991 iommu = amd_iommu_rlookup_table[dev_data->devid];
1992 ats = dev_data->ats.enabled;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001993
1994 /* Update data structures */
1995 dev_data->domain = domain;
1996 list_add(&dev_data->list, &domain->dev_list);
Joerg Roedelf62dda62011-06-09 12:55:35 +02001997 set_dte_entry(dev_data->devid, domain, ats);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001998
1999 /* Do reference counting */
2000 domain->dev_iommu[iommu->index] += 1;
2001 domain->dev_cnt += 1;
2002
2003 /* Flush the DTE entry */
Joerg Roedel6c542042011-06-09 17:07:31 +02002004 device_flush_dte(dev_data);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01002005}
2006
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002007static void do_detach(struct iommu_dev_data *dev_data)
Joerg Roedel7f760dd2009-11-26 14:49:59 +01002008{
Joerg Roedel7f760dd2009-11-26 14:49:59 +01002009 struct amd_iommu *iommu;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01002010
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002011 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedelc5cca142009-10-09 18:31:20 +02002012
Joerg Roedelc4596112009-11-20 14:57:32 +01002013 /* decrease reference counters */
Joerg Roedel7f760dd2009-11-26 14:49:59 +01002014 dev_data->domain->dev_iommu[iommu->index] -= 1;
2015 dev_data->domain->dev_cnt -= 1;
Joerg Roedel355bf552008-12-08 12:02:41 +01002016
Joerg Roedel7f760dd2009-11-26 14:49:59 +01002017 /* Update data structures */
2018 dev_data->domain = NULL;
2019 list_del(&dev_data->list);
Joerg Roedelf62dda62011-06-09 12:55:35 +02002020 clear_dte_entry(dev_data->devid);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01002021
2022 /* Flush the DTE entry */
Joerg Roedel6c542042011-06-09 17:07:31 +02002023 device_flush_dte(dev_data);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002024}
2025
2026/*
2027 * If a device is not yet associated with a domain, this function
2028 * attaches it to the domain and makes it visible to the hardware.
2029 */
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002030static int __attach_device(struct iommu_dev_data *dev_data,
Joerg Roedel15898bb2009-11-24 15:39:42 +01002031 struct protection_domain *domain)
2032{
Joerg Roedel397111a2014-08-05 17:31:51 +02002033 struct iommu_dev_data *head, *entry;
Julia Lawall84fe6c12010-05-27 12:31:51 +02002034 int ret;
Joerg Roedel657cbb62009-11-23 15:26:46 +01002035
Joerg Roedel15898bb2009-11-24 15:39:42 +01002036 /* lock domain */
2037 spin_lock(&domain->lock);
2038
Joerg Roedel397111a2014-08-05 17:31:51 +02002039 head = dev_data;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002040
Joerg Roedel397111a2014-08-05 17:31:51 +02002041 if (head->alias_data != NULL)
2042 head = head->alias_data;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002043
Joerg Roedel397111a2014-08-05 17:31:51 +02002044 /* Now we have the root of the alias group, if any */
Joerg Roedel2b02b092011-06-09 17:48:39 +02002045
Joerg Roedel397111a2014-08-05 17:31:51 +02002046 ret = -EBUSY;
2047 if (head->domain != NULL)
2048 goto out_unlock;
Joerg Roedel24100052009-11-25 15:59:57 +01002049
Joerg Roedel397111a2014-08-05 17:31:51 +02002050 /* Attach alias group root */
2051 do_attach(head, domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002052
Joerg Roedel397111a2014-08-05 17:31:51 +02002053 /* Attach other devices in the alias group */
2054 list_for_each_entry(entry, &head->alias_list, alias_list)
2055 do_attach(entry, domain);
Joerg Roedel24100052009-11-25 15:59:57 +01002056
Julia Lawall84fe6c12010-05-27 12:31:51 +02002057 ret = 0;
2058
2059out_unlock:
2060
Joerg Roedel355bf552008-12-08 12:02:41 +01002061 /* ready */
2062 spin_unlock(&domain->lock);
Joerg Roedel21129f72009-09-01 11:59:42 +02002063
Julia Lawall84fe6c12010-05-27 12:31:51 +02002064 return ret;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002065}
2066
Joerg Roedel52815b72011-11-17 17:24:28 +01002067
2068static void pdev_iommuv2_disable(struct pci_dev *pdev)
2069{
2070 pci_disable_ats(pdev);
2071 pci_disable_pri(pdev);
2072 pci_disable_pasid(pdev);
2073}
2074
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002075/* FIXME: Change generic reset-function to do the same */
2076static int pri_reset_while_enabled(struct pci_dev *pdev)
2077{
2078 u16 control;
2079 int pos;
2080
Joerg Roedel46277b72011-12-07 14:34:02 +01002081 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002082 if (!pos)
2083 return -EINVAL;
2084
Joerg Roedel46277b72011-12-07 14:34:02 +01002085 pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
2086 control |= PCI_PRI_CTRL_RESET;
2087 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002088
2089 return 0;
2090}
2091
Joerg Roedel52815b72011-11-17 17:24:28 +01002092static int pdev_iommuv2_enable(struct pci_dev *pdev)
2093{
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002094 bool reset_enable;
2095 int reqs, ret;
2096
2097 /* FIXME: Hardcode number of outstanding requests for now */
2098 reqs = 32;
2099 if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
2100 reqs = 1;
2101 reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
Joerg Roedel52815b72011-11-17 17:24:28 +01002102
2103 /* Only allow access to user-accessible pages */
2104 ret = pci_enable_pasid(pdev, 0);
2105 if (ret)
2106 goto out_err;
2107
2108 /* First reset the PRI state of the device */
2109 ret = pci_reset_pri(pdev);
2110 if (ret)
2111 goto out_err;
2112
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002113 /* Enable PRI */
2114 ret = pci_enable_pri(pdev, reqs);
Joerg Roedel52815b72011-11-17 17:24:28 +01002115 if (ret)
2116 goto out_err;
2117
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002118 if (reset_enable) {
2119 ret = pri_reset_while_enabled(pdev);
2120 if (ret)
2121 goto out_err;
2122 }
2123
Joerg Roedel52815b72011-11-17 17:24:28 +01002124 ret = pci_enable_ats(pdev, PAGE_SHIFT);
2125 if (ret)
2126 goto out_err;
2127
2128 return 0;
2129
2130out_err:
2131 pci_disable_pri(pdev);
2132 pci_disable_pasid(pdev);
2133
2134 return ret;
2135}
2136
Joerg Roedelc99afa22011-11-21 18:19:25 +01002137/* FIXME: Move this to PCI code */
Joerg Roedela3b93122012-04-12 12:49:26 +02002138#define PCI_PRI_TLP_OFF (1 << 15)
Joerg Roedelc99afa22011-11-21 18:19:25 +01002139
Joerg Roedel98f1ad22012-07-06 13:28:37 +02002140static bool pci_pri_tlp_required(struct pci_dev *pdev)
Joerg Roedelc99afa22011-11-21 18:19:25 +01002141{
Joerg Roedela3b93122012-04-12 12:49:26 +02002142 u16 status;
Joerg Roedelc99afa22011-11-21 18:19:25 +01002143 int pos;
2144
Joerg Roedel46277b72011-12-07 14:34:02 +01002145 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
Joerg Roedelc99afa22011-11-21 18:19:25 +01002146 if (!pos)
2147 return false;
2148
Joerg Roedela3b93122012-04-12 12:49:26 +02002149 pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
Joerg Roedelc99afa22011-11-21 18:19:25 +01002150
Joerg Roedela3b93122012-04-12 12:49:26 +02002151 return (status & PCI_PRI_TLP_OFF) ? true : false;
Joerg Roedelc99afa22011-11-21 18:19:25 +01002152}
2153
Joerg Roedel15898bb2009-11-24 15:39:42 +01002154/*
Frank Arnolddf805ab2012-08-27 19:21:04 +02002155 * If a device is not yet associated with a domain, this function
Joerg Roedel15898bb2009-11-24 15:39:42 +01002156 * attaches it to the domain and makes it visible to the hardware.
2157 */
2158static int attach_device(struct device *dev,
2159 struct protection_domain *domain)
2160{
Joerg Roedelfd7b5532011-04-05 15:31:08 +02002161 struct pci_dev *pdev = to_pci_dev(dev);
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002162 struct iommu_dev_data *dev_data;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002163 unsigned long flags;
2164 int ret;
2165
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002166 dev_data = get_dev_data(dev);
2167
Joerg Roedel52815b72011-11-17 17:24:28 +01002168 if (domain->flags & PD_IOMMUV2_MASK) {
2169 if (!dev_data->iommu_v2 || !dev_data->passthrough)
2170 return -EINVAL;
2171
2172 if (pdev_iommuv2_enable(pdev) != 0)
2173 return -EINVAL;
2174
2175 dev_data->ats.enabled = true;
2176 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
Joerg Roedelc99afa22011-11-21 18:19:25 +01002177 dev_data->pri_tlp = pci_pri_tlp_required(pdev);
Joerg Roedel52815b72011-11-17 17:24:28 +01002178 } else if (amd_iommu_iotlb_sup &&
2179 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002180 dev_data->ats.enabled = true;
2181 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
2182 }
Joerg Roedelfd7b5532011-04-05 15:31:08 +02002183
Joerg Roedel15898bb2009-11-24 15:39:42 +01002184 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002185 ret = __attach_device(dev_data, domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002186 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2187
2188 /*
2189 * We might boot into a crash-kernel here. The crashed kernel
2190 * left the caches in the IOMMU dirty. So we have to flush
2191 * here to evict all dirty stuff.
2192 */
Joerg Roedel17b124b2011-04-06 18:01:35 +02002193 domain_flush_tlb_pde(domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002194
2195 return ret;
2196}
2197
2198/*
2199 * Removes a device from a protection domain (unlocked)
2200 */
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002201static void __detach_device(struct iommu_dev_data *dev_data)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002202{
Joerg Roedel397111a2014-08-05 17:31:51 +02002203 struct iommu_dev_data *head, *entry;
Joerg Roedel2ca76272010-01-22 16:45:31 +01002204 struct protection_domain *domain;
Joerg Roedel7c392cb2009-11-26 11:13:32 +01002205 unsigned long flags;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002206
Joerg Roedel7f760dd2009-11-26 14:49:59 +01002207 BUG_ON(!dev_data->domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002208
Joerg Roedel2ca76272010-01-22 16:45:31 +01002209 domain = dev_data->domain;
2210
2211 spin_lock_irqsave(&domain->lock, flags);
Joerg Roedel24100052009-11-25 15:59:57 +01002212
Joerg Roedel397111a2014-08-05 17:31:51 +02002213 head = dev_data;
2214 if (head->alias_data != NULL)
2215 head = head->alias_data;
Joerg Roedel71f77582011-06-09 19:03:15 +02002216
Joerg Roedel397111a2014-08-05 17:31:51 +02002217 list_for_each_entry(entry, &head->alias_list, alias_list)
2218 do_detach(entry);
Joerg Roedel24100052009-11-25 15:59:57 +01002219
Joerg Roedel397111a2014-08-05 17:31:51 +02002220 do_detach(head);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01002221
Joerg Roedel2ca76272010-01-22 16:45:31 +01002222 spin_unlock_irqrestore(&domain->lock, flags);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002223
Joerg Roedel21129f72009-09-01 11:59:42 +02002224 /*
2225 * If we run in passthrough mode the device must be assigned to the
Joerg Roedeld3ad9372010-01-22 17:55:27 +01002226 * passthrough domain if it is detached from any other domain.
2227 * Make sure we can deassign from the pt_domain itself.
Joerg Roedel21129f72009-09-01 11:59:42 +02002228 */
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002229 if (dev_data->passthrough &&
Joerg Roedeld3ad9372010-01-22 17:55:27 +01002230 (dev_data->domain == NULL && domain != pt_domain))
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002231 __attach_device(dev_data, pt_domain);
Joerg Roedel355bf552008-12-08 12:02:41 +01002232}
2233
2234/*
2235 * Removes a device from a protection domain (with devtable_lock held)
2236 */
Joerg Roedel15898bb2009-11-24 15:39:42 +01002237static void detach_device(struct device *dev)
Joerg Roedel355bf552008-12-08 12:02:41 +01002238{
Joerg Roedel52815b72011-11-17 17:24:28 +01002239 struct protection_domain *domain;
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002240 struct iommu_dev_data *dev_data;
Joerg Roedel355bf552008-12-08 12:02:41 +01002241 unsigned long flags;
2242
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002243 dev_data = get_dev_data(dev);
Joerg Roedel52815b72011-11-17 17:24:28 +01002244 domain = dev_data->domain;
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002245
Joerg Roedel355bf552008-12-08 12:02:41 +01002246 /* lock device table */
2247 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002248 __detach_device(dev_data);
Joerg Roedel355bf552008-12-08 12:02:41 +01002249 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
Joerg Roedelfd7b5532011-04-05 15:31:08 +02002250
Joerg Roedel52815b72011-11-17 17:24:28 +01002251 if (domain->flags & PD_IOMMUV2_MASK)
2252 pdev_iommuv2_disable(to_pci_dev(dev));
2253 else if (dev_data->ats.enabled)
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002254 pci_disable_ats(to_pci_dev(dev));
Joerg Roedel52815b72011-11-17 17:24:28 +01002255
2256 dev_data->ats.enabled = false;
Joerg Roedel355bf552008-12-08 12:02:41 +01002257}
Joerg Roedele275a2a2008-12-10 18:27:25 +01002258
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002259static int amd_iommu_add_device(struct device *dev)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002260{
Joerg Roedel71f77582011-06-09 19:03:15 +02002261 struct iommu_dev_data *dev_data;
Joerg Roedel07ee8692015-05-28 18:41:42 +02002262 struct iommu_domain *domain;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002263 struct amd_iommu *iommu;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002264 u16 devid;
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002265 int ret;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002266
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002267 if (!check_device(dev) || get_dev_data(dev))
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002268 return 0;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002269
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002270 devid = get_device_id(dev);
2271 iommu = amd_iommu_rlookup_table[devid];
Joerg Roedele275a2a2008-12-10 18:27:25 +01002272
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002273 ret = iommu_init_device(dev);
Joerg Roedel4d58b8a2015-06-11 09:21:39 +02002274 if (ret) {
2275 if (ret != -ENOTSUPP)
2276 pr_err("Failed to initialize device %s - trying to proceed anyway\n",
2277 dev_name(dev));
Joerg Roedel657cbb62009-11-23 15:26:46 +01002278
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002279 iommu_ignore_device(dev);
Joerg Roedel343e9ca2015-05-28 18:41:43 +02002280 dev->archdata.dma_ops = &nommu_dma_ops;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002281 goto out;
2282 }
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002283 init_iommu_group(dev);
Joerg Roedele275a2a2008-12-10 18:27:25 +01002284
Joerg Roedel07ee8692015-05-28 18:41:42 +02002285 dev_data = get_dev_data(dev);
Joerg Roedel4d58b8a2015-06-11 09:21:39 +02002286
2287 BUG_ON(!dev_data);
2288
2289 if (dev_data->iommu_v2)
Joerg Roedel07ee8692015-05-28 18:41:42 +02002290 iommu_request_dm_for_dev(dev);
2291
2292 /* Domains are initialized for this device - have a look what we ended up with */
2293 domain = iommu_get_domain_for_dev(dev);
2294 if (domain->type == IOMMU_DOMAIN_IDENTITY) {
2295 dev_data->passthrough = true;
2296 dev->archdata.dma_ops = &nommu_dma_ops;
2297 } else {
2298 dev->archdata.dma_ops = &amd_iommu_dma_ops;
2299 }
Joerg Roedele275a2a2008-12-10 18:27:25 +01002300
2301out:
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002302 iommu_completion_wait(iommu);
2303
Joerg Roedele275a2a2008-12-10 18:27:25 +01002304 return 0;
2305}
2306
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002307static void amd_iommu_remove_device(struct device *dev)
Joerg Roedel8638c492009-12-10 11:12:25 +01002308{
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002309 struct amd_iommu *iommu;
2310 u16 devid;
2311
2312 if (!check_device(dev))
2313 return;
2314
2315 devid = get_device_id(dev);
2316 iommu = amd_iommu_rlookup_table[devid];
2317
2318 iommu_uninit_device(dev);
2319 iommu_completion_wait(iommu);
Joerg Roedel8638c492009-12-10 11:12:25 +01002320}
2321
Joerg Roedel431b2a22008-07-11 17:14:22 +02002322/*****************************************************************************
2323 *
2324 * The next functions belong to the dma_ops mapping/unmapping code.
2325 *
2326 *****************************************************************************/
2327
2328/*
2329 * In the dma_ops path we only have the struct device. This function
2330 * finds the corresponding IOMMU, the protection domain and the
2331 * requestor id for a given device.
2332 * If the device is not yet associated with a domain this is also done
2333 * in this function.
2334 */
Joerg Roedel94f6d192009-11-24 16:40:02 +01002335static struct protection_domain *get_domain(struct device *dev)
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002336{
Joerg Roedel94f6d192009-11-24 16:40:02 +01002337 struct protection_domain *domain;
Joerg Roedel063071d2015-05-28 18:41:38 +02002338 struct iommu_domain *io_domain;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002339
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002340 if (!check_device(dev))
Joerg Roedel94f6d192009-11-24 16:40:02 +01002341 return ERR_PTR(-EINVAL);
Joerg Roedeldbcc1122008-09-04 15:04:26 +02002342
Joerg Roedel063071d2015-05-28 18:41:38 +02002343 io_domain = iommu_get_domain_for_dev(dev);
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002344 if (!io_domain)
2345 return NULL;
Joerg Roedel063071d2015-05-28 18:41:38 +02002346
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002347 domain = to_pdomain(io_domain);
2348 if (!dma_ops_domain(domain))
Joerg Roedel94f6d192009-11-24 16:40:02 +01002349 return ERR_PTR(-EBUSY);
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002350
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002351 return domain;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002352}
2353
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002354static void update_device_table(struct protection_domain *domain)
2355{
Joerg Roedel492667d2009-11-27 13:25:47 +01002356 struct iommu_dev_data *dev_data;
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002357
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002358 list_for_each_entry(dev_data, &domain->dev_list, list)
2359 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002360}
2361
2362static void update_domain(struct protection_domain *domain)
2363{
2364 if (!domain->updated)
2365 return;
2366
2367 update_device_table(domain);
Joerg Roedel17b124b2011-04-06 18:01:35 +02002368
2369 domain_flush_devices(domain);
2370 domain_flush_tlb_pde(domain);
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002371
2372 domain->updated = false;
2373}
2374
Joerg Roedel431b2a22008-07-11 17:14:22 +02002375/*
Joerg Roedel8bda3092009-05-12 12:02:46 +02002376 * This function fetches the PTE for a given address in the aperture
2377 */
2378static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
2379 unsigned long address)
2380{
Joerg Roedel384de722009-05-15 12:30:05 +02002381 struct aperture_range *aperture;
Joerg Roedel8bda3092009-05-12 12:02:46 +02002382 u64 *pte, *pte_page;
2383
Joerg Roedel384de722009-05-15 12:30:05 +02002384 aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2385 if (!aperture)
2386 return NULL;
2387
2388 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
Joerg Roedel8bda3092009-05-12 12:02:46 +02002389 if (!pte) {
Joerg Roedelcbb9d722010-01-15 14:41:15 +01002390 pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page,
Joerg Roedelabdc5eb2009-09-03 11:33:51 +02002391 GFP_ATOMIC);
Joerg Roedel384de722009-05-15 12:30:05 +02002392 aperture->pte_pages[APERTURE_PAGE_INDEX(address)] = pte_page;
2393 } else
Joerg Roedel8c8c1432009-09-02 17:30:00 +02002394 pte += PM_LEVEL_INDEX(0, address);
Joerg Roedel8bda3092009-05-12 12:02:46 +02002395
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002396 update_domain(&dom->domain);
Joerg Roedel8bda3092009-05-12 12:02:46 +02002397
2398 return pte;
2399}
2400
2401/*
Joerg Roedel431b2a22008-07-11 17:14:22 +02002402 * This is the generic map function. It maps one 4kB page at paddr to
2403 * the given address in the DMA address space for the domain.
2404 */
Joerg Roedel680525e2009-11-23 18:44:42 +01002405static dma_addr_t dma_ops_domain_map(struct dma_ops_domain *dom,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002406 unsigned long address,
2407 phys_addr_t paddr,
2408 int direction)
2409{
2410 u64 *pte, __pte;
2411
2412 WARN_ON(address > dom->aperture_size);
2413
2414 paddr &= PAGE_MASK;
2415
Joerg Roedel8bda3092009-05-12 12:02:46 +02002416 pte = dma_ops_get_pte(dom, address);
Joerg Roedel53812c12009-05-12 12:17:38 +02002417 if (!pte)
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002418 return DMA_ERROR_CODE;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002419
2420 __pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
2421
2422 if (direction == DMA_TO_DEVICE)
2423 __pte |= IOMMU_PTE_IR;
2424 else if (direction == DMA_FROM_DEVICE)
2425 __pte |= IOMMU_PTE_IW;
2426 else if (direction == DMA_BIDIRECTIONAL)
2427 __pte |= IOMMU_PTE_IR | IOMMU_PTE_IW;
2428
2429 WARN_ON(*pte);
2430
2431 *pte = __pte;
2432
2433 return (dma_addr_t)address;
2434}
2435
Joerg Roedel431b2a22008-07-11 17:14:22 +02002436/*
2437 * The generic unmapping function for one page in the DMA address space.
2438 */
Joerg Roedel680525e2009-11-23 18:44:42 +01002439static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002440 unsigned long address)
2441{
Joerg Roedel384de722009-05-15 12:30:05 +02002442 struct aperture_range *aperture;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002443 u64 *pte;
2444
2445 if (address >= dom->aperture_size)
2446 return;
2447
Joerg Roedel384de722009-05-15 12:30:05 +02002448 aperture = dom->aperture[APERTURE_RANGE_INDEX(address)];
2449 if (!aperture)
2450 return;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002451
Joerg Roedel384de722009-05-15 12:30:05 +02002452 pte = aperture->pte_pages[APERTURE_PAGE_INDEX(address)];
2453 if (!pte)
2454 return;
2455
Joerg Roedel8c8c1432009-09-02 17:30:00 +02002456 pte += PM_LEVEL_INDEX(0, address);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002457
2458 WARN_ON(!*pte);
2459
2460 *pte = 0ULL;
2461}
2462
Joerg Roedel431b2a22008-07-11 17:14:22 +02002463/*
2464 * This function contains common code for mapping a physically
Joerg Roedel24f81162008-12-08 14:25:39 +01002465 * contiguous memory region into the DMA address space. It is used by all
2466 * mapping functions provided with this IOMMU driver.
Joerg Roedel431b2a22008-07-11 17:14:22 +02002467 * Must be called with the domain lock held.
2468 */
Joerg Roedelcb76c322008-06-26 21:28:00 +02002469static dma_addr_t __map_single(struct device *dev,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002470 struct dma_ops_domain *dma_dom,
2471 phys_addr_t paddr,
2472 size_t size,
Joerg Roedel6d4f3432008-09-04 19:18:02 +02002473 int dir,
Joerg Roedel832a90c2008-09-18 15:54:23 +02002474 bool align,
2475 u64 dma_mask)
Joerg Roedelcb76c322008-06-26 21:28:00 +02002476{
2477 dma_addr_t offset = paddr & ~PAGE_MASK;
Joerg Roedel53812c12009-05-12 12:17:38 +02002478 dma_addr_t address, start, ret;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002479 unsigned int pages;
Joerg Roedel6d4f3432008-09-04 19:18:02 +02002480 unsigned long align_mask = 0;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002481 int i;
2482
Joerg Roedele3c449f2008-10-15 22:02:11 -07002483 pages = iommu_num_pages(paddr, size, PAGE_SIZE);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002484 paddr &= PAGE_MASK;
2485
Joerg Roedel8ecaf8f2008-12-12 16:13:04 +01002486 INC_STATS_COUNTER(total_map_requests);
2487
Joerg Roedelc1858972008-12-12 15:42:39 +01002488 if (pages > 1)
2489 INC_STATS_COUNTER(cross_page);
2490
Joerg Roedel6d4f3432008-09-04 19:18:02 +02002491 if (align)
2492 align_mask = (1UL << get_order(size)) - 1;
2493
Joerg Roedel11b83882009-05-19 10:23:15 +02002494retry:
Joerg Roedel832a90c2008-09-18 15:54:23 +02002495 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
2496 dma_mask);
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002497 if (unlikely(address == DMA_ERROR_CODE)) {
Joerg Roedel11b83882009-05-19 10:23:15 +02002498 /*
2499 * setting next_address here will let the address
2500 * allocator only scan the new allocated range in the
2501 * first run. This is a small optimization.
2502 */
2503 dma_dom->next_address = dma_dom->aperture_size;
2504
Joerg Roedel576175c2009-11-23 19:08:46 +01002505 if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
Joerg Roedel11b83882009-05-19 10:23:15 +02002506 goto out;
2507
2508 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002509 * aperture was successfully enlarged by 128 MB, try
Joerg Roedel11b83882009-05-19 10:23:15 +02002510 * allocation again
2511 */
2512 goto retry;
2513 }
Joerg Roedelcb76c322008-06-26 21:28:00 +02002514
2515 start = address;
2516 for (i = 0; i < pages; ++i) {
Joerg Roedel680525e2009-11-23 18:44:42 +01002517 ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002518 if (ret == DMA_ERROR_CODE)
Joerg Roedel53812c12009-05-12 12:17:38 +02002519 goto out_unmap;
2520
Joerg Roedelcb76c322008-06-26 21:28:00 +02002521 paddr += PAGE_SIZE;
2522 start += PAGE_SIZE;
2523 }
2524 address += offset;
2525
Joerg Roedel5774f7c2008-12-12 15:57:30 +01002526 ADD_STATS_COUNTER(alloced_io_mem, size);
2527
FUJITA Tomonoriafa9fdc2008-09-20 01:23:30 +09002528 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
Joerg Roedel17b124b2011-04-06 18:01:35 +02002529 domain_flush_tlb(&dma_dom->domain);
Joerg Roedel1c655772008-09-04 18:40:05 +02002530 dma_dom->need_flush = false;
Joerg Roedel318afd42009-11-23 18:32:38 +01002531 } else if (unlikely(amd_iommu_np_cache))
Joerg Roedel17b124b2011-04-06 18:01:35 +02002532 domain_flush_pages(&dma_dom->domain, address, size);
Joerg Roedel270cab242008-09-04 15:49:46 +02002533
Joerg Roedelcb76c322008-06-26 21:28:00 +02002534out:
2535 return address;
Joerg Roedel53812c12009-05-12 12:17:38 +02002536
2537out_unmap:
2538
2539 for (--i; i >= 0; --i) {
2540 start -= PAGE_SIZE;
Joerg Roedel680525e2009-11-23 18:44:42 +01002541 dma_ops_domain_unmap(dma_dom, start);
Joerg Roedel53812c12009-05-12 12:17:38 +02002542 }
2543
2544 dma_ops_free_addresses(dma_dom, address, pages);
2545
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002546 return DMA_ERROR_CODE;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002547}
2548
Joerg Roedel431b2a22008-07-11 17:14:22 +02002549/*
2550 * Does the reverse of the __map_single function. Must be called with
2551 * the domain lock held too
2552 */
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002553static void __unmap_single(struct dma_ops_domain *dma_dom,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002554 dma_addr_t dma_addr,
2555 size_t size,
2556 int dir)
2557{
Joerg Roedel04e04632010-09-23 16:12:48 +02002558 dma_addr_t flush_addr;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002559 dma_addr_t i, start;
2560 unsigned int pages;
2561
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002562 if ((dma_addr == DMA_ERROR_CODE) ||
Joerg Roedelb8d99052008-12-08 14:40:26 +01002563 (dma_addr + size > dma_dom->aperture_size))
Joerg Roedelcb76c322008-06-26 21:28:00 +02002564 return;
2565
Joerg Roedel04e04632010-09-23 16:12:48 +02002566 flush_addr = dma_addr;
Joerg Roedele3c449f2008-10-15 22:02:11 -07002567 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002568 dma_addr &= PAGE_MASK;
2569 start = dma_addr;
2570
2571 for (i = 0; i < pages; ++i) {
Joerg Roedel680525e2009-11-23 18:44:42 +01002572 dma_ops_domain_unmap(dma_dom, start);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002573 start += PAGE_SIZE;
2574 }
2575
Joerg Roedel5774f7c2008-12-12 15:57:30 +01002576 SUB_STATS_COUNTER(alloced_io_mem, size);
2577
Joerg Roedelcb76c322008-06-26 21:28:00 +02002578 dma_ops_free_addresses(dma_dom, dma_addr, pages);
Joerg Roedel270cab242008-09-04 15:49:46 +02002579
Joerg Roedel80be3082008-11-06 14:59:05 +01002580 if (amd_iommu_unmap_flush || dma_dom->need_flush) {
Joerg Roedel17b124b2011-04-06 18:01:35 +02002581 domain_flush_pages(&dma_dom->domain, flush_addr, size);
Joerg Roedel80be3082008-11-06 14:59:05 +01002582 dma_dom->need_flush = false;
2583 }
Joerg Roedelcb76c322008-06-26 21:28:00 +02002584}
2585
Joerg Roedel431b2a22008-07-11 17:14:22 +02002586/*
2587 * The exported map_single function for dma_ops.
2588 */
FUJITA Tomonori51491362009-01-05 23:47:25 +09002589static dma_addr_t map_page(struct device *dev, struct page *page,
2590 unsigned long offset, size_t size,
2591 enum dma_data_direction dir,
2592 struct dma_attrs *attrs)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002593{
2594 unsigned long flags;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002595 struct protection_domain *domain;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002596 dma_addr_t addr;
Joerg Roedel832a90c2008-09-18 15:54:23 +02002597 u64 dma_mask;
FUJITA Tomonori51491362009-01-05 23:47:25 +09002598 phys_addr_t paddr = page_to_phys(page) + offset;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002599
Joerg Roedel0f2a86f2008-12-12 15:05:16 +01002600 INC_STATS_COUNTER(cnt_map_single);
2601
Joerg Roedel94f6d192009-11-24 16:40:02 +01002602 domain = get_domain(dev);
2603 if (PTR_ERR(domain) == -EINVAL)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002604 return (dma_addr_t)paddr;
Joerg Roedel94f6d192009-11-24 16:40:02 +01002605 else if (IS_ERR(domain))
2606 return DMA_ERROR_CODE;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002607
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002608 dma_mask = *dev->dma_mask;
2609
Joerg Roedel4da70b92008-06-26 21:28:01 +02002610 spin_lock_irqsave(&domain->lock, flags);
Joerg Roedel94f6d192009-11-24 16:40:02 +01002611
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002612 addr = __map_single(dev, domain->priv, paddr, size, dir, false,
Joerg Roedel832a90c2008-09-18 15:54:23 +02002613 dma_mask);
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002614 if (addr == DMA_ERROR_CODE)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002615 goto out;
2616
Joerg Roedel17b124b2011-04-06 18:01:35 +02002617 domain_flush_complete(domain);
Joerg Roedel4da70b92008-06-26 21:28:01 +02002618
2619out:
2620 spin_unlock_irqrestore(&domain->lock, flags);
2621
2622 return addr;
2623}
2624
Joerg Roedel431b2a22008-07-11 17:14:22 +02002625/*
2626 * The exported unmap_single function for dma_ops.
2627 */
FUJITA Tomonori51491362009-01-05 23:47:25 +09002628static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
2629 enum dma_data_direction dir, struct dma_attrs *attrs)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002630{
2631 unsigned long flags;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002632 struct protection_domain *domain;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002633
Joerg Roedel146a6912008-12-12 15:07:12 +01002634 INC_STATS_COUNTER(cnt_unmap_single);
2635
Joerg Roedel94f6d192009-11-24 16:40:02 +01002636 domain = get_domain(dev);
2637 if (IS_ERR(domain))
Joerg Roedel5b28df62008-12-02 17:49:42 +01002638 return;
2639
Joerg Roedel4da70b92008-06-26 21:28:01 +02002640 spin_lock_irqsave(&domain->lock, flags);
2641
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002642 __unmap_single(domain->priv, dma_addr, size, dir);
Joerg Roedel4da70b92008-06-26 21:28:01 +02002643
Joerg Roedel17b124b2011-04-06 18:01:35 +02002644 domain_flush_complete(domain);
Joerg Roedel4da70b92008-06-26 21:28:01 +02002645
2646 spin_unlock_irqrestore(&domain->lock, flags);
2647}
2648
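/*
 * Illustrative sketch, not part of the original driver: device drivers do
 * not call map_page()/unmap_page() directly. They go through the generic
 * DMA API, which dispatches to the dma_ops installed by this driver,
 * roughly like this hypothetical example.
 */
static void __maybe_unused example_dma_api_usage(struct device *dev,
						 struct page *page)
{
	dma_addr_t addr;

	addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return;

	/* ... hand 'addr' to the device and wait for it to finish ... */

	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_TO_DEVICE);
}
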
Joerg Roedel431b2a22008-07-11 17:14:22 +02002649/*
Joerg Roedel431b2a22008-07-11 17:14:22 +02002650 * The exported map_sg function for dma_ops (handles scatter-gather
2651 * lists).
2652 */
Joerg Roedel65b050a2008-06-26 21:28:02 +02002653static int map_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002654 int nelems, enum dma_data_direction dir,
2655 struct dma_attrs *attrs)
Joerg Roedel65b050a2008-06-26 21:28:02 +02002656{
2657 unsigned long flags;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002658 struct protection_domain *domain;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002659 int i;
2660 struct scatterlist *s;
2661 phys_addr_t paddr;
2662 int mapped_elems = 0;
Joerg Roedel832a90c2008-09-18 15:54:23 +02002663 u64 dma_mask;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002664
Joerg Roedeld03f067a2008-12-12 15:09:48 +01002665 INC_STATS_COUNTER(cnt_map_sg);
2666
Joerg Roedel94f6d192009-11-24 16:40:02 +01002667 domain = get_domain(dev);
Joerg Roedela0e191b2013-04-09 15:04:36 +02002668 if (IS_ERR(domain))
Joerg Roedel94f6d192009-11-24 16:40:02 +01002669 return 0;
Joerg Roedeldbcc1122008-09-04 15:04:26 +02002670
Joerg Roedel832a90c2008-09-18 15:54:23 +02002671 dma_mask = *dev->dma_mask;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002672
Joerg Roedel65b050a2008-06-26 21:28:02 +02002673 spin_lock_irqsave(&domain->lock, flags);
2674
2675 for_each_sg(sglist, s, nelems, i) {
2676 paddr = sg_phys(s);
2677
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002678 s->dma_address = __map_single(dev, domain->priv,
Joerg Roedel832a90c2008-09-18 15:54:23 +02002679 paddr, s->length, dir, false,
2680 dma_mask);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002681
2682 if (s->dma_address) {
2683 s->dma_length = s->length;
2684 mapped_elems++;
2685 } else
2686 goto unmap;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002687 }
2688
Joerg Roedel17b124b2011-04-06 18:01:35 +02002689 domain_flush_complete(domain);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002690
2691out:
2692 spin_unlock_irqrestore(&domain->lock, flags);
2693
2694 return mapped_elems;
2695unmap:
2696 for_each_sg(sglist, s, mapped_elems, i) {
2697 if (s->dma_address)
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002698 __unmap_single(domain->priv, s->dma_address,
Joerg Roedel65b050a2008-06-26 21:28:02 +02002699 s->dma_length, dir);
2700 s->dma_address = s->dma_length = 0;
2701 }
2702
2703 mapped_elems = 0;
2704
2705 goto out;
2706}
2707
Joerg Roedel431b2a22008-07-11 17:14:22 +02002708/*
2709 * The exported unmap_sg function for dma_ops (handles scatter-gather
2710 * lists).
2711 */
Joerg Roedel65b050a2008-06-26 21:28:02 +02002712static void unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002713 int nelems, enum dma_data_direction dir,
2714 struct dma_attrs *attrs)
Joerg Roedel65b050a2008-06-26 21:28:02 +02002715{
2716 unsigned long flags;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002717 struct protection_domain *domain;
2718 struct scatterlist *s;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002719 int i;
2720
Joerg Roedel55877a62008-12-12 15:12:14 +01002721 INC_STATS_COUNTER(cnt_unmap_sg);
2722
Joerg Roedel94f6d192009-11-24 16:40:02 +01002723 domain = get_domain(dev);
2724 if (IS_ERR(domain))
Joerg Roedel5b28df62008-12-02 17:49:42 +01002725 return;
2726
Joerg Roedel65b050a2008-06-26 21:28:02 +02002727 spin_lock_irqsave(&domain->lock, flags);
2728
2729 for_each_sg(sglist, s, nelems, i) {
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002730 __unmap_single(domain->priv, s->dma_address,
Joerg Roedel65b050a2008-06-26 21:28:02 +02002731 s->dma_length, dir);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002732 s->dma_address = s->dma_length = 0;
2733 }
2734
Joerg Roedel17b124b2011-04-06 18:01:35 +02002735 domain_flush_complete(domain);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002736
2737 spin_unlock_irqrestore(&domain->lock, flags);
2738}
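/*
 * Illustrative sketch of the scatter-gather path: dma_map_sg() lands in
 * map_sg() above, which fills in sg->dma_address/sg->dma_length per
 * element, and dma_unmap_sg() lands in unmap_sg().  The page array and
 * transfer direction are assumptions made for the example.
 */
static int example_map_sg(struct device *dev, struct page **pages, int npages)
{
        struct sg_table table;
        struct scatterlist *sg;
        int i, nents;

        if (sg_alloc_table(&table, npages, GFP_KERNEL))
                return -ENOMEM;

        for_each_sg(table.sgl, sg, npages, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        nents = dma_map_sg(dev, table.sgl, npages, DMA_FROM_DEVICE);
        if (!nents) {
                sg_free_table(&table);
                return -EIO;
        }

        for_each_sg(table.sgl, sg, nents, i) {
                /* program sg_dma_address(sg) / sg_dma_len(sg) into the device */
        }

        dma_unmap_sg(dev, table.sgl, npages, DMA_FROM_DEVICE);
        sg_free_table(&table);

        return 0;
}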
2739
Joerg Roedel431b2a22008-07-11 17:14:22 +02002740/*
2741 * The exported alloc_coherent function for dma_ops.
2742 */
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002743static void *alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02002744 dma_addr_t *dma_addr, gfp_t flag,
2745 struct dma_attrs *attrs)
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002746{
Joerg Roedel832a90c2008-09-18 15:54:23 +02002747 u64 dma_mask = dev->coherent_dma_mask;
Joerg Roedel3b839a52015-04-01 14:58:47 +02002748 struct protection_domain *domain;
2749 unsigned long flags;
2750 struct page *page;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002751
Joerg Roedelc8f0fb32008-12-12 15:14:21 +01002752 INC_STATS_COUNTER(cnt_alloc_coherent);
2753
Joerg Roedel94f6d192009-11-24 16:40:02 +01002754 domain = get_domain(dev);
2755 if (PTR_ERR(domain) == -EINVAL) {
Joerg Roedel3b839a52015-04-01 14:58:47 +02002756 page = alloc_pages(flag, get_order(size));
2757 *dma_addr = page_to_phys(page);
2758 return page_address(page);
Joerg Roedel94f6d192009-11-24 16:40:02 +01002759 } else if (IS_ERR(domain))
2760 return NULL;
Joerg Roedeldbcc1122008-09-04 15:04:26 +02002761
Joerg Roedel3b839a52015-04-01 14:58:47 +02002762 size = PAGE_ALIGN(size);
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002763 dma_mask = dev->coherent_dma_mask;
2764 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
Joerg Roedel2d0ec7a2015-06-01 17:30:57 +02002765 flag |= __GFP_ZERO;
FUJITA Tomonori13d9fea2008-09-10 20:19:40 +09002766
Joerg Roedel3b839a52015-04-01 14:58:47 +02002767 page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
2768 if (!page) {
2769 if (!(flag & __GFP_WAIT))
2770 return NULL;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002771
Joerg Roedel3b839a52015-04-01 14:58:47 +02002772 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
2773 get_order(size));
2774 if (!page)
2775 return NULL;
2776 }
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002777
Joerg Roedel832a90c2008-09-18 15:54:23 +02002778 if (!dma_mask)
2779 dma_mask = *dev->dma_mask;
2780
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002781 spin_lock_irqsave(&domain->lock, flags);
2782
Joerg Roedel3b839a52015-04-01 14:58:47 +02002783 *dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
Joerg Roedel832a90c2008-09-18 15:54:23 +02002784 size, DMA_BIDIRECTIONAL, true, dma_mask);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002785
FUJITA Tomonori8fd524b2009-11-15 21:19:53 +09002786 if (*dma_addr == DMA_ERROR_CODE) {
Jiri Slaby367d04c2009-05-28 09:54:48 +02002787 spin_unlock_irqrestore(&domain->lock, flags);
Joerg Roedel5b28df62008-12-02 17:49:42 +01002788 goto out_free;
Jiri Slaby367d04c2009-05-28 09:54:48 +02002789 }
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002790
Joerg Roedel17b124b2011-04-06 18:01:35 +02002791 domain_flush_complete(domain);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002792
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002793 spin_unlock_irqrestore(&domain->lock, flags);
2794
Joerg Roedel3b839a52015-04-01 14:58:47 +02002795 return page_address(page);
Joerg Roedel5b28df62008-12-02 17:49:42 +01002796
2797out_free:
2798
Joerg Roedel3b839a52015-04-01 14:58:47 +02002799 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2800 __free_pages(page, get_order(size));
Joerg Roedel5b28df62008-12-02 17:49:42 +01002801
2802 return NULL;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002803}
2804
Joerg Roedel431b2a22008-07-11 17:14:22 +02002805/*
2806 * The exported free_coherent function for dma_ops.
Joerg Roedel431b2a22008-07-11 17:14:22 +02002807 */
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002808static void free_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02002809 void *virt_addr, dma_addr_t dma_addr,
2810 struct dma_attrs *attrs)
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002811{
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002812 struct protection_domain *domain;
Joerg Roedel3b839a52015-04-01 14:58:47 +02002813 unsigned long flags;
2814 struct page *page;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002815
Joerg Roedel5d31ee72008-12-12 15:16:38 +01002816 INC_STATS_COUNTER(cnt_free_coherent);
2817
Joerg Roedel3b839a52015-04-01 14:58:47 +02002818 page = virt_to_page(virt_addr);
2819 size = PAGE_ALIGN(size);
2820
Joerg Roedel94f6d192009-11-24 16:40:02 +01002821 domain = get_domain(dev);
2822 if (IS_ERR(domain))
Joerg Roedel5b28df62008-12-02 17:49:42 +01002823 goto free_mem;
2824
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002825 spin_lock_irqsave(&domain->lock, flags);
2826
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002827 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002828
Joerg Roedel17b124b2011-04-06 18:01:35 +02002829 domain_flush_complete(domain);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002830
2831 spin_unlock_irqrestore(&domain->lock, flags);
2832
2833free_mem:
Joerg Roedel3b839a52015-04-01 14:58:47 +02002834 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
2835 __free_pages(page, get_order(size));
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002836}
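/*
 * Illustrative sketch of the coherent path: dma_alloc_coherent() reaches
 * alloc_coherent() above and hands back a CPU virtual address plus a
 * device address that is already mapped in the IOMMU; dma_free_coherent()
 * tears the mapping down through free_coherent().  Size and usage are
 * assumptions for the example.
 */
static int example_coherent_buffer(struct device *dev)
{
        dma_addr_t dma;
        void *ring;

        ring = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* ... the CPU uses 'ring', the device uses 'dma' ... */

        dma_free_coherent(dev, PAGE_SIZE, ring, dma);

        return 0;
}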
2837
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002838/*
Joerg Roedelb39ba6a2008-09-09 18:40:46 +02002839 * This function is called by the DMA layer to find out if we can handle a
2840 * particular device. It is part of the dma_ops.
2841 */
2842static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2843{
Joerg Roedel420aef82009-11-23 16:14:57 +01002844 return check_device(dev);
Joerg Roedelb39ba6a2008-09-09 18:40:46 +02002845}
2846
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002847static struct dma_map_ops amd_iommu_dma_ops = {
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02002848 .alloc = alloc_coherent,
2849 .free = free_coherent,
FUJITA Tomonori51491362009-01-05 23:47:25 +09002850 .map_page = map_page,
2851 .unmap_page = unmap_page,
Joerg Roedel6631ee92008-06-26 21:28:05 +02002852 .map_sg = map_sg,
2853 .unmap_sg = unmap_sg,
Joerg Roedelb39ba6a2008-09-09 18:40:46 +02002854 .dma_supported = amd_iommu_dma_supported,
Joerg Roedel6631ee92008-06-26 21:28:05 +02002855};
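/*
 * The ops table above is never called directly by drivers; the generic
 * DMA API wrappers look up the per-device dma_map_ops and dispatch
 * through it.  A simplified sketch of that dispatch, modelled loosely on
 * the generic dma-mapping helpers (not code from this driver):
 */
static dma_addr_t example_dispatch_map_single(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        /* for devices behind the AMD IOMMU this ends up in map_page() */
        return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
                             size, dir, NULL);
}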
2856
Joerg Roedel3a18404c2015-05-28 18:41:45 +02002857int __init amd_iommu_init_api(void)
Joerg Roedel27c21272011-05-30 15:56:24 +02002858{
Joerg Roedel3a18404c2015-05-28 18:41:45 +02002859 return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
Joerg Roedelf5325092010-01-22 17:44:35 +01002860}
2861
Joerg Roedel6631ee92008-06-26 21:28:05 +02002862int __init amd_iommu_init_dma_ops(void)
2863{
Joerg Roedel6631ee92008-06-26 21:28:05 +02002864 iommu_detected = 1;
FUJITA Tomonori75f1cdf2009-11-10 19:46:20 +09002865 swiotlb = 0;
Joerg Roedel6631ee92008-06-26 21:28:05 +02002866
Joerg Roedel7f265082008-12-12 13:50:21 +01002867 amd_iommu_stats_init();
2868
Joerg Roedel62410ee2012-06-12 16:42:43 +02002869 if (amd_iommu_unmap_flush)
2870 pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
2871 else
2872 pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
2873
Joerg Roedel6631ee92008-06-26 21:28:05 +02002874 return 0;
Joerg Roedel6631ee92008-06-26 21:28:05 +02002875}
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002876
2877/*****************************************************************************
2878 *
2879 * The following functions belong to the exported interface of AMD IOMMU
2880 *
2881 * This interface allows access to lower level functions of the IOMMU
2882 * like protection domain handling and assignment of devices to domains
2883 * which is not possible with the dma_ops interface.
2884 *
2885 *****************************************************************************/
2886
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002887static void cleanup_domain(struct protection_domain *domain)
2888{
Joerg Roedel9b29d3c2014-08-05 17:50:15 +02002889 struct iommu_dev_data *entry;
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002890 unsigned long flags;
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002891
2892 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2893
Joerg Roedel9b29d3c2014-08-05 17:50:15 +02002894 while (!list_empty(&domain->dev_list)) {
2895 entry = list_first_entry(&domain->dev_list,
2896 struct iommu_dev_data, list);
2897 __detach_device(entry);
Joerg Roedel492667d2009-11-27 13:25:47 +01002898 }
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002899
2900 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2901}
2902
Joerg Roedel26508152009-08-26 16:52:40 +02002903static void protection_domain_free(struct protection_domain *domain)
2904{
2905 if (!domain)
2906 return;
2907
Joerg Roedelaeb26f52009-11-20 16:44:01 +01002908 del_domain_from_list(domain);
2909
Joerg Roedel26508152009-08-26 16:52:40 +02002910 if (domain->id)
2911 domain_id_free(domain->id);
2912
2913 kfree(domain);
2914}
2915
Joerg Roedel7a5a5662015-06-30 08:56:11 +02002916static int protection_domain_init(struct protection_domain *domain)
2917{
2918 spin_lock_init(&domain->lock);
2919 mutex_init(&domain->api_lock);
2920 domain->id = domain_id_alloc();
2921 if (!domain->id)
2922 return -ENOMEM;
2923 INIT_LIST_HEAD(&domain->dev_list);
2924
2925 return 0;
2926}
2927
Joerg Roedel26508152009-08-26 16:52:40 +02002928static struct protection_domain *protection_domain_alloc(void)
Joerg Roedelc156e342008-12-02 18:13:27 +01002929{
2930 struct protection_domain *domain;
2931
2932 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2933 if (!domain)
Joerg Roedel26508152009-08-26 16:52:40 +02002934 return NULL;
Joerg Roedelc156e342008-12-02 18:13:27 +01002935
Joerg Roedel7a5a5662015-06-30 08:56:11 +02002936 if (protection_domain_init(domain))
Joerg Roedel26508152009-08-26 16:52:40 +02002937 goto out_err;
2938
Joerg Roedelaeb26f52009-11-20 16:44:01 +01002939 add_domain_to_list(domain);
2940
Joerg Roedel26508152009-08-26 16:52:40 +02002941 return domain;
2942
2943out_err:
2944 kfree(domain);
2945
2946 return NULL;
2947}
2948
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002949static int alloc_passthrough_domain(void)
Joerg Roedel5abcdba2011-12-01 15:49:45 +01002950{
2951 if (pt_domain != NULL)
2952 return 0;
2953
2954 /* allocate passthrough domain */
2955 pt_domain = protection_domain_alloc();
2956 if (!pt_domain)
2957 return -ENOMEM;
2958
2959 pt_domain->mode = PAGE_MODE_NONE;
2960
2961 return 0;
2962}
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002963
2964static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2965{
2966 struct protection_domain *pdomain;
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002967 struct dma_ops_domain *dma_domain;
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002968
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002969 switch (type) {
2970 case IOMMU_DOMAIN_UNMANAGED:
2971 pdomain = protection_domain_alloc();
2972 if (!pdomain)
2973 return NULL;
2974
2975 pdomain->mode = PAGE_MODE_3_LEVEL;
2976 pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2977 if (!pdomain->pt_root) {
2978 protection_domain_free(pdomain);
2979 return NULL;
2980 }
2981
2982 pdomain->domain.geometry.aperture_start = 0;
2983 pdomain->domain.geometry.aperture_end = ~0ULL;
2984 pdomain->domain.geometry.force_aperture = true;
2985
2986 break;
2987 case IOMMU_DOMAIN_DMA:
2988 dma_domain = dma_ops_domain_alloc();
2989 if (!dma_domain) {
2990 pr_err("AMD-Vi: Failed to allocate DMA domain\n");
2991 return NULL;
2992 }
2993 pdomain = &dma_domain->domain;
2994 break;
Joerg Roedel07f643a2015-05-28 18:41:41 +02002995 case IOMMU_DOMAIN_IDENTITY:
2996 pdomain = protection_domain_alloc();
2997 if (!pdomain)
2998 return NULL;
2999
3000 pdomain->mode = PAGE_MODE_NONE;
3001 break;
Joerg Roedel0bb6e242015-05-28 18:41:40 +02003002 default:
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003003 return NULL;
Joerg Roedel0bb6e242015-05-28 18:41:40 +02003004 }
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003005
3006 return &pdomain->domain;
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003007}
3008
3009static void amd_iommu_domain_free(struct iommu_domain *dom)
Joerg Roedel26508152009-08-26 16:52:40 +02003010{
3011 struct protection_domain *domain;
3012
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003013 if (!dom)
Joerg Roedel98383fc2008-12-02 18:34:12 +01003014 return;
3015
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003016 domain = to_pdomain(dom);
3017
Joerg Roedel98383fc2008-12-02 18:34:12 +01003018 if (domain->dev_cnt > 0)
3019 cleanup_domain(domain);
3020
3021 BUG_ON(domain->dev_cnt != 0);
3022
Joerg Roedel132bd682011-11-17 14:18:46 +01003023 if (domain->mode != PAGE_MODE_NONE)
3024 free_pagetable(domain);
Joerg Roedel98383fc2008-12-02 18:34:12 +01003025
Joerg Roedel52815b72011-11-17 17:24:28 +01003026 if (domain->flags & PD_IOMMUV2_MASK)
3027 free_gcr3_table(domain);
3028
Joerg Roedel8b408fe2010-03-08 14:20:07 +01003029 protection_domain_free(domain);
Joerg Roedel98383fc2008-12-02 18:34:12 +01003030}
3031
Joerg Roedel684f2882008-12-08 12:07:44 +01003032static void amd_iommu_detach_device(struct iommu_domain *dom,
3033 struct device *dev)
3034{
Joerg Roedel657cbb62009-11-23 15:26:46 +01003035 struct iommu_dev_data *dev_data = dev->archdata.iommu;
Joerg Roedel684f2882008-12-08 12:07:44 +01003036 struct amd_iommu *iommu;
Joerg Roedel684f2882008-12-08 12:07:44 +01003037 u16 devid;
3038
Joerg Roedel98fc5a62009-11-24 17:19:23 +01003039 if (!check_device(dev))
Joerg Roedel684f2882008-12-08 12:07:44 +01003040 return;
3041
Joerg Roedel98fc5a62009-11-24 17:19:23 +01003042 devid = get_device_id(dev);
Joerg Roedel684f2882008-12-08 12:07:44 +01003043
Joerg Roedel657cbb62009-11-23 15:26:46 +01003044 if (dev_data->domain != NULL)
Joerg Roedel15898bb2009-11-24 15:39:42 +01003045 detach_device(dev);
Joerg Roedel684f2882008-12-08 12:07:44 +01003046
3047 iommu = amd_iommu_rlookup_table[devid];
3048 if (!iommu)
3049 return;
3050
Joerg Roedel684f2882008-12-08 12:07:44 +01003051 iommu_completion_wait(iommu);
3052}
3053
Joerg Roedel01106062008-12-02 19:34:11 +01003054static int amd_iommu_attach_device(struct iommu_domain *dom,
3055 struct device *dev)
3056{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003057 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel657cbb62009-11-23 15:26:46 +01003058 struct iommu_dev_data *dev_data;
Joerg Roedel01106062008-12-02 19:34:11 +01003059 struct amd_iommu *iommu;
Joerg Roedel15898bb2009-11-24 15:39:42 +01003060 int ret;
Joerg Roedel01106062008-12-02 19:34:11 +01003061
Joerg Roedel98fc5a62009-11-24 17:19:23 +01003062 if (!check_device(dev))
Joerg Roedel01106062008-12-02 19:34:11 +01003063 return -EINVAL;
3064
Joerg Roedel657cbb62009-11-23 15:26:46 +01003065 dev_data = dev->archdata.iommu;
3066
Joerg Roedelf62dda62011-06-09 12:55:35 +02003067 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedel01106062008-12-02 19:34:11 +01003068 if (!iommu)
3069 return -EINVAL;
3070
Joerg Roedel657cbb62009-11-23 15:26:46 +01003071 if (dev_data->domain)
Joerg Roedel15898bb2009-11-24 15:39:42 +01003072 detach_device(dev);
Joerg Roedel01106062008-12-02 19:34:11 +01003073
Joerg Roedel15898bb2009-11-24 15:39:42 +01003074 ret = attach_device(dev, domain);
Joerg Roedel01106062008-12-02 19:34:11 +01003075
3076 iommu_completion_wait(iommu);
3077
Joerg Roedel15898bb2009-11-24 15:39:42 +01003078 return ret;
Joerg Roedel01106062008-12-02 19:34:11 +01003079}
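/*
 * Sketch of how amd_iommu_domain_alloc(), amd_iommu_attach_device() and
 * amd_iommu_detach_device() are reached through the generic IOMMU API,
 * e.g. by a device-assignment backend.  The caller and its error policy
 * are assumptions for illustration.
 */
static struct iommu_domain *example_assign_device(struct device *dev)
{
        struct iommu_domain *dom;

        dom = iommu_domain_alloc(&pci_bus_type);   /* -> amd_iommu_domain_alloc() */
        if (!dom)
                return NULL;

        if (iommu_attach_device(dom, dev)) {       /* -> amd_iommu_attach_device() */
                iommu_domain_free(dom);
                return NULL;
        }

        /* later: iommu_detach_device(dom, dev); iommu_domain_free(dom); */
        return dom;
}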
3080
Joerg Roedel468e2362010-01-21 16:37:36 +01003081static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02003082 phys_addr_t paddr, size_t page_size, int iommu_prot)
Joerg Roedelc6229ca2008-12-02 19:48:43 +01003083{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003084 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedelc6229ca2008-12-02 19:48:43 +01003085 int prot = 0;
3086 int ret;
3087
Joerg Roedel132bd682011-11-17 14:18:46 +01003088 if (domain->mode == PAGE_MODE_NONE)
3089 return -EINVAL;
3090
Joerg Roedelc6229ca2008-12-02 19:48:43 +01003091 if (iommu_prot & IOMMU_READ)
3092 prot |= IOMMU_PROT_IR;
3093 if (iommu_prot & IOMMU_WRITE)
3094 prot |= IOMMU_PROT_IW;
3095
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003096 mutex_lock(&domain->api_lock);
Joerg Roedel795e74f2010-05-11 17:40:57 +02003097 ret = iommu_map_page(domain, iova, paddr, prot, page_size);
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003098 mutex_unlock(&domain->api_lock);
3099
Joerg Roedel795e74f2010-05-11 17:40:57 +02003100 return ret;
Joerg Roedelc6229ca2008-12-02 19:48:43 +01003101}
3102
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02003103static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3104 size_t page_size)
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003105{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003106 struct protection_domain *domain = to_pdomain(dom);
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02003107 size_t unmap_size;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003108
Joerg Roedel132bd682011-11-17 14:18:46 +01003109 if (domain->mode == PAGE_MODE_NONE)
3110 return 0; /* size_t return type: report nothing unmapped */
3111
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003112 mutex_lock(&domain->api_lock);
Joerg Roedel468e2362010-01-21 16:37:36 +01003113 unmap_size = iommu_unmap_page(domain, iova, page_size);
Joerg Roedel795e74f2010-05-11 17:40:57 +02003114 mutex_unlock(&domain->api_lock);
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003115
Joerg Roedel17b124b2011-04-06 18:01:35 +02003116 domain_flush_tlb_pde(domain);
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003117
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02003118 return unmap_size;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003119}
3120
Joerg Roedel645c4c82008-12-02 20:05:50 +01003121static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
Varun Sethibb5547ac2013-03-29 01:23:58 +05303122 dma_addr_t iova)
Joerg Roedel645c4c82008-12-02 20:05:50 +01003123{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003124 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel3039ca12015-04-01 14:58:48 +02003125 unsigned long offset_mask, pte_pgsize;
Joerg Roedelf03152b2010-01-21 16:15:24 +01003126 u64 *pte, __pte;
Joerg Roedel645c4c82008-12-02 20:05:50 +01003127
Joerg Roedel132bd682011-11-17 14:18:46 +01003128 if (domain->mode == PAGE_MODE_NONE)
3129 return iova;
3130
Joerg Roedel3039ca12015-04-01 14:58:48 +02003131 pte = fetch_pte(domain, iova, &pte_pgsize);
Joerg Roedel645c4c82008-12-02 20:05:50 +01003132
Joerg Roedela6d41a42009-09-02 17:08:55 +02003133 if (!pte || !IOMMU_PTE_PRESENT(*pte))
Joerg Roedel645c4c82008-12-02 20:05:50 +01003134 return 0;
3135
Joerg Roedelb24b1b62015-04-01 14:58:51 +02003136 offset_mask = pte_pgsize - 1;
3137 __pte = *pte & PM_ADDR_MASK;
Joerg Roedelf03152b2010-01-21 16:15:24 +01003138
Joerg Roedelb24b1b62015-04-01 14:58:51 +02003139 return (__pte & ~offset_mask) | (iova & offset_mask);
Joerg Roedel645c4c82008-12-02 20:05:50 +01003140}
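/*
 * Sketch of the map/unmap path through the core IOMMU API against a
 * domain created as above; iommu_map() lands in amd_iommu_map(),
 * iommu_iova_to_phys() in amd_iommu_iova_to_phys() and iommu_unmap() in
 * amd_iommu_unmap().  The addresses are illustrative and assumed to be
 * page aligned.
 */
static int example_iommu_map_one(struct iommu_domain *dom, unsigned long iova,
                                 phys_addr_t paddr)
{
        int ret;

        ret = iommu_map(dom, iova, paddr, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;

        WARN_ON(iommu_iova_to_phys(dom, iova) != paddr);

        iommu_unmap(dom, iova, PAGE_SIZE);

        return 0;
}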
3141
Joerg Roedelab636482014-09-05 10:48:21 +02003142static bool amd_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003143{
Joerg Roedel80a506b2010-07-27 17:14:24 +02003144 switch (cap) {
3145 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedelab636482014-09-05 10:48:21 +02003146 return true;
Joerg Roedelbdddadc2012-07-02 18:38:13 +02003147 case IOMMU_CAP_INTR_REMAP:
Joerg Roedelab636482014-09-05 10:48:21 +02003148 return (irq_remapping_enabled == 1);
Will Deaconcfdeec22014-10-27 11:24:48 +00003149 case IOMMU_CAP_NOEXEC:
3150 return false;
Joerg Roedel80a506b2010-07-27 17:14:24 +02003151 }
3152
Joerg Roedelab636482014-09-05 10:48:21 +02003153 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003154}
3155
Joerg Roedel35cf2482015-05-28 18:41:37 +02003156static void amd_iommu_get_dm_regions(struct device *dev,
3157 struct list_head *head)
3158{
3159 struct unity_map_entry *entry;
3160 u16 devid;
3161
3162 devid = get_device_id(dev);
3163
3164 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
3165 struct iommu_dm_region *region;
3166
3167 if (devid < entry->devid_start || devid > entry->devid_end)
3168 continue;
3169
3170 region = kzalloc(sizeof(*region), GFP_KERNEL);
3171 if (!region) {
3172 pr_err("Out of memory allocating dm-regions for %s\n",
3173 dev_name(dev));
3174 return;
3175 }
3176
3177 region->start = entry->address_start;
3178 region->length = entry->address_end - entry->address_start;
3179 if (entry->prot & IOMMU_PROT_IR)
3180 region->prot |= IOMMU_READ;
3181 if (entry->prot & IOMMU_PROT_IW)
3182 region->prot |= IOMMU_WRITE;
3183
3184 list_add_tail(&region->list, head);
3185 }
3186}
3187
3188static void amd_iommu_put_dm_regions(struct device *dev,
3189 struct list_head *head)
3190{
3191 struct iommu_dm_region *entry, *next;
3192
3193 list_for_each_entry_safe(entry, next, head, list)
3194 kfree(entry);
3195}
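/*
 * Rough sketch of the contract behind the two dm-region callbacks above:
 * the IOMMU core asks the driver for a device's unity-mapped ranges and
 * maps them 1:1 into the device's domain before handing the domain out.
 * This mimics that flow for illustration (assuming page-aligned regions);
 * it is not the core's actual code.
 */
static void example_apply_dm_regions(const struct iommu_ops *ops,
                                     struct device *dev,
                                     struct iommu_domain *dom)
{
        struct iommu_dm_region *region;
        LIST_HEAD(regions);

        ops->get_dm_regions(dev, &regions);

        list_for_each_entry(region, &regions, list)
                iommu_map(dom, region->start, region->start,
                          region->length, region->prot);

        ops->put_dm_regions(dev, &regions);
}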
3196
Thierry Redingb22f6432014-06-27 09:03:12 +02003197static const struct iommu_ops amd_iommu_ops = {
Joerg Roedelab636482014-09-05 10:48:21 +02003198 .capable = amd_iommu_capable,
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003199 .domain_alloc = amd_iommu_domain_alloc,
3200 .domain_free = amd_iommu_domain_free,
Joerg Roedel26961ef2008-12-03 17:00:17 +01003201 .attach_dev = amd_iommu_attach_device,
3202 .detach_dev = amd_iommu_detach_device,
Joerg Roedel468e2362010-01-21 16:37:36 +01003203 .map = amd_iommu_map,
3204 .unmap = amd_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07003205 .map_sg = default_iommu_map_sg,
Joerg Roedel26961ef2008-12-03 17:00:17 +01003206 .iova_to_phys = amd_iommu_iova_to_phys,
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02003207 .add_device = amd_iommu_add_device,
3208 .remove_device = amd_iommu_remove_device,
Joerg Roedel35cf2482015-05-28 18:41:37 +02003209 .get_dm_regions = amd_iommu_get_dm_regions,
3210 .put_dm_regions = amd_iommu_put_dm_regions,
Ohad Ben-Cohenaa3de9c2011-11-10 11:32:29 +02003211 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
Joerg Roedel26961ef2008-12-03 17:00:17 +01003212};
3213
Joerg Roedel0feae532009-08-26 15:26:30 +02003214/*****************************************************************************
3215 *
3216 * The next functions do a basic initialization of the IOMMU for
3217 * passthrough mode
3218 *
3219 * In passthrough mode the IOMMU is initialized and enabled but not used for
3220 * DMA-API translation.
3221 *
3222 *****************************************************************************/
3223
3224int __init amd_iommu_init_passthrough(void)
3225{
Joerg Roedel5abcdba2011-12-01 15:49:45 +01003226 struct iommu_dev_data *dev_data;
Joerg Roedel0feae532009-08-26 15:26:30 +02003227 struct pci_dev *dev = NULL;
Joerg Roedel5abcdba2011-12-01 15:49:45 +01003228 int ret;
Joerg Roedel0feae532009-08-26 15:26:30 +02003229
Joerg Roedel5abcdba2011-12-01 15:49:45 +01003230 ret = alloc_passthrough_domain();
3231 if (ret)
3232 return ret;
Joerg Roedel0feae532009-08-26 15:26:30 +02003233
Kulikov Vasiliy6c54aab2010-07-03 12:03:51 -04003234 for_each_pci_dev(dev) {
Joerg Roedel98fc5a62009-11-24 17:19:23 +01003235 if (!check_device(&dev->dev))
Joerg Roedel0feae532009-08-26 15:26:30 +02003236 continue;
3237
Joerg Roedel5abcdba2011-12-01 15:49:45 +01003238 dev_data = get_dev_data(&dev->dev);
3239 dev_data->passthrough = true;
3240
Joerg Roedel15898bb2009-11-24 15:39:42 +01003241 attach_device(&dev->dev, pt_domain);
Joerg Roedel0feae532009-08-26 15:26:30 +02003242 }
3243
Joerg Roedel2655d7a2011-12-22 12:35:38 +01003244 amd_iommu_stats_init();
3245
Joerg Roedel0feae532009-08-26 15:26:30 +02003246 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
3247
3248 return 0;
3249}
Joerg Roedel72e1dcc2011-11-10 19:13:51 +01003250
3251/* IOMMUv2 specific functions */
3252int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3253{
3254 return atomic_notifier_chain_register(&ppr_notifier, nb);
3255}
3256EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
3257
3258int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
3259{
3260 return atomic_notifier_chain_unregister(&ppr_notifier, nb);
3261}
3262EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
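/*
 * Sketch of a PPR consumer, modelled on what the amd_iommu_v2 layer does:
 * it registers a notifier and gets one callback per peripheral page
 * request.  The payload is struct amd_iommu_fault (amd_iommu_types.h);
 * the handling shown here is an assumption for illustration.
 */
static int example_ppr_cb(struct notifier_block *nb, unsigned long e,
                          void *data)
{
        struct amd_iommu_fault *fault = data;

        pr_debug("PPR: device %04x pasid %x address %llx\n",
                 fault->device_id, fault->pasid, fault->address);

        /* resolve the fault, then answer via amd_iommu_complete_ppr() */

        return NOTIFY_DONE;
}

static struct notifier_block example_ppr_nb = {
        .notifier_call = example_ppr_cb,
};

/* registered with: amd_iommu_register_ppr_notifier(&example_ppr_nb); */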
Joerg Roedel132bd682011-11-17 14:18:46 +01003263
3264void amd_iommu_domain_direct_map(struct iommu_domain *dom)
3265{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003266 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel132bd682011-11-17 14:18:46 +01003267 unsigned long flags;
3268
3269 spin_lock_irqsave(&domain->lock, flags);
3270
3271 /* Update data structure */
3272 domain->mode = PAGE_MODE_NONE;
3273 domain->updated = true;
3274
3275 /* Make changes visible to IOMMUs */
3276 update_domain(domain);
3277
3278 /* Page-table is not visible to IOMMU anymore, so free it */
3279 free_pagetable(domain);
3280
3281 spin_unlock_irqrestore(&domain->lock, flags);
3282}
3283EXPORT_SYMBOL(amd_iommu_domain_direct_map);
Joerg Roedel52815b72011-11-17 17:24:28 +01003284
3285int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
3286{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003287 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel52815b72011-11-17 17:24:28 +01003288 unsigned long flags;
3289 int levels, ret;
3290
3291 if (pasids <= 0 || pasids > (PASID_MASK + 1))
3292 return -EINVAL;
3293
3294 /* Number of GCR3 table levels required */
3295 for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
3296 levels += 1;
3297
3298 if (levels > amd_iommu_max_glx_val)
3299 return -EINVAL;
3300
3301 spin_lock_irqsave(&domain->lock, flags);
3302
3303 /*
3304 * Spare us the sanity checks of whether the devices already in the
3305 * domain support IOMMUv2. Just require that the domain has no
3306 * devices attached when it is switched into IOMMUv2 mode.
3307 */
3308 ret = -EBUSY;
3309 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
3310 goto out;
3311
3312 ret = -ENOMEM;
3313 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
3314 if (domain->gcr3_tbl == NULL)
3315 goto out;
3316
3317 domain->glx = levels;
3318 domain->flags |= PD_IOMMUV2_MASK;
3319 domain->updated = true;
3320
3321 update_domain(domain);
3322
3323 ret = 0;
3324
3325out:
3326 spin_unlock_irqrestore(&domain->lock, flags);
3327
3328 return ret;
3329}
3330EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
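/*
 * Sketch of the order in which the exported IOMMUv2 helpers are used,
 * mirroring how the amd_iommu_v2 layer sets up a domain; the PASID count
 * and the cr3 value are illustrative assumptions.
 */
static int example_enable_v2(struct iommu_domain *dom, int pasid,
                             unsigned long cr3)
{
        int ret;

        /* 1. drop the host page-table; translation will come from GCR3/PASID */
        amd_iommu_domain_direct_map(dom);

        /* 2. size the GCR3 table for the number of PASIDs needed */
        ret = amd_iommu_domain_enable_v2(dom, 1 << 16);
        if (ret)
                return ret;

        /* 3. plug a process page-table root into one PASID slot */
        return amd_iommu_domain_set_gcr3(dom, pasid, cr3);
}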
Joerg Roedel22e266c2011-11-21 15:59:08 +01003331
3332static int __flush_pasid(struct protection_domain *domain, int pasid,
3333 u64 address, bool size)
3334{
3335 struct iommu_dev_data *dev_data;
3336 struct iommu_cmd cmd;
3337 int i, ret;
3338
3339 if (!(domain->flags & PD_IOMMUV2_MASK))
3340 return -EINVAL;
3341
3342 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
3343
3344 /*
3345 * The IOMMU TLB needs to be flushed before the device TLB so that
3346 * a device TLB refill cannot fetch stale entries from the IOMMU TLB.
3347 */
3348 for (i = 0; i < amd_iommus_present; ++i) {
3349 if (domain->dev_iommu[i] == 0)
3350 continue;
3351
3352 ret = iommu_queue_command(amd_iommus[i], &cmd);
3353 if (ret != 0)
3354 goto out;
3355 }
3356
3357 /* Wait until IOMMU TLB flushes are complete */
3358 domain_flush_complete(domain);
3359
3360 /* Now flush device TLBs */
3361 list_for_each_entry(dev_data, &domain->dev_list, list) {
3362 struct amd_iommu *iommu;
3363 int qdep;
3364
3365 BUG_ON(!dev_data->ats.enabled);
3366
3367 qdep = dev_data->ats.qdep;
3368 iommu = amd_iommu_rlookup_table[dev_data->devid];
3369
3370 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3371 qdep, address, size);
3372
3373 ret = iommu_queue_command(iommu, &cmd);
3374 if (ret != 0)
3375 goto out;
3376 }
3377
3378 /* Wait until all device TLBs are flushed */
3379 domain_flush_complete(domain);
3380
3381 ret = 0;
3382
3383out:
3384
3385 return ret;
3386}
3387
3388static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
3389 u64 address)
3390{
Joerg Roedel399be2f2011-12-01 16:53:47 +01003391 INC_STATS_COUNTER(invalidate_iotlb);
3392
Joerg Roedel22e266c2011-11-21 15:59:08 +01003393 return __flush_pasid(domain, pasid, address, false);
3394}
3395
3396int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
3397 u64 address)
3398{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003399 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel22e266c2011-11-21 15:59:08 +01003400 unsigned long flags;
3401 int ret;
3402
3403 spin_lock_irqsave(&domain->lock, flags);
3404 ret = __amd_iommu_flush_page(domain, pasid, address);
3405 spin_unlock_irqrestore(&domain->lock, flags);
3406
3407 return ret;
3408}
3409EXPORT_SYMBOL(amd_iommu_flush_page);
3410
3411static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
3412{
Joerg Roedel399be2f2011-12-01 16:53:47 +01003413 INC_STATS_COUNTER(invalidate_iotlb_all);
3414
Joerg Roedel22e266c2011-11-21 15:59:08 +01003415 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
3416 true);
3417}
3418
3419int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
3420{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003421 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel22e266c2011-11-21 15:59:08 +01003422 unsigned long flags;
3423 int ret;
3424
3425 spin_lock_irqsave(&domain->lock, flags);
3426 ret = __amd_iommu_flush_tlb(domain, pasid);
3427 spin_unlock_irqrestore(&domain->lock, flags);
3428
3429 return ret;
3430}
3431EXPORT_SYMBOL(amd_iommu_flush_tlb);
3432
Joerg Roedelb16137b2011-11-21 16:50:23 +01003433static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
3434{
3435 int index;
3436 u64 *pte;
3437
3438 while (true) {
3439
3440 index = (pasid >> (9 * level)) & 0x1ff;
3441 pte = &root[index];
3442
3443 if (level == 0)
3444 break;
3445
3446 if (!(*pte & GCR3_VALID)) {
3447 if (!alloc)
3448 return NULL;
3449
3450 root = (void *)get_zeroed_page(GFP_ATOMIC);
3451 if (root == NULL)
3452 return NULL;
3453
3454 *pte = __pa(root) | GCR3_VALID;
3455 }
3456
3457 root = __va(*pte & PAGE_MASK);
3458
3459 level -= 1;
3460 }
3461
3462 return pte;
3463}
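/*
 * Worked example of the walk above (values chosen for illustration):
 * with a two-level GCR3 table (glx == 1) and pasid 0x12345, the first
 * iteration picks index (0x12345 >> 9) & 0x1ff = 0x91 in the root table
 * and the second picks index 0x12345 & 0x1ff = 0x145 in the next-level
 * table, so that is where the PASID's GCR3 entry lives.
 */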
3464
3465static int __set_gcr3(struct protection_domain *domain, int pasid,
3466 unsigned long cr3)
3467{
3468 u64 *pte;
3469
3470 if (domain->mode != PAGE_MODE_NONE)
3471 return -EINVAL;
3472
3473 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
3474 if (pte == NULL)
3475 return -ENOMEM;
3476
3477 *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
3478
3479 return __amd_iommu_flush_tlb(domain, pasid);
3480}
3481
3482static int __clear_gcr3(struct protection_domain *domain, int pasid)
3483{
3484 u64 *pte;
3485
3486 if (domain->mode != PAGE_MODE_NONE)
3487 return -EINVAL;
3488
3489 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
3490 if (pte == NULL)
3491 return 0;
3492
3493 *pte = 0;
3494
3495 return __amd_iommu_flush_tlb(domain, pasid);
3496}
3497
3498int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
3499 unsigned long cr3)
3500{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003501 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedelb16137b2011-11-21 16:50:23 +01003502 unsigned long flags;
3503 int ret;
3504
3505 spin_lock_irqsave(&domain->lock, flags);
3506 ret = __set_gcr3(domain, pasid, cr3);
3507 spin_unlock_irqrestore(&domain->lock, flags);
3508
3509 return ret;
3510}
3511EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
3512
3513int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
3514{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003515 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedelb16137b2011-11-21 16:50:23 +01003516 unsigned long flags;
3517 int ret;
3518
3519 spin_lock_irqsave(&domain->lock, flags);
3520 ret = __clear_gcr3(domain, pasid);
3521 spin_unlock_irqrestore(&domain->lock, flags);
3522
3523 return ret;
3524}
3525EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
Joerg Roedelc99afa22011-11-21 18:19:25 +01003526
3527int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
3528 int status, int tag)
3529{
3530 struct iommu_dev_data *dev_data;
3531 struct amd_iommu *iommu;
3532 struct iommu_cmd cmd;
3533
Joerg Roedel399be2f2011-12-01 16:53:47 +01003534 INC_STATS_COUNTER(complete_ppr);
3535
Joerg Roedelc99afa22011-11-21 18:19:25 +01003536 dev_data = get_dev_data(&pdev->dev);
3537 iommu = amd_iommu_rlookup_table[dev_data->devid];
3538
3539 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
3540 tag, dev_data->pri_tlp);
3541
3542 return iommu_queue_command(iommu, &cmd);
3543}
3544EXPORT_SYMBOL(amd_iommu_complete_ppr);
Joerg Roedelf3572db2011-11-23 12:36:25 +01003545
3546struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
3547{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003548 struct protection_domain *pdomain;
Joerg Roedelf3572db2011-11-23 12:36:25 +01003549
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003550 pdomain = get_domain(&pdev->dev);
3551 if (IS_ERR(pdomain))
Joerg Roedelf3572db2011-11-23 12:36:25 +01003552 return NULL;
3553
3554 /* Only return IOMMUv2 domains */
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003555 if (!(pdomain->flags & PD_IOMMUV2_MASK))
Joerg Roedelf3572db2011-11-23 12:36:25 +01003556 return NULL;
3557
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003558 return &pdomain->domain;
Joerg Roedelf3572db2011-11-23 12:36:25 +01003559}
3560EXPORT_SYMBOL(amd_iommu_get_v2_domain);
Joerg Roedel6a113dd2011-12-01 12:04:58 +01003561
3562void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
3563{
3564 struct iommu_dev_data *dev_data;
3565
3566 if (!amd_iommu_v2_supported())
3567 return;
3568
3569 dev_data = get_dev_data(&pdev->dev);
3570 dev_data->errata |= (1 << erratum);
3571}
3572EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
Joerg Roedel52efdb82011-12-07 12:01:36 +01003573
3574int amd_iommu_device_info(struct pci_dev *pdev,
3575 struct amd_iommu_device_info *info)
3576{
3577 int max_pasids;
3578 int pos;
3579
3580 if (pdev == NULL || info == NULL)
3581 return -EINVAL;
3582
3583 if (!amd_iommu_v2_supported())
3584 return -EINVAL;
3585
3586 memset(info, 0, sizeof(*info));
3587
3588 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3589 if (pos)
3590 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3591
3592 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3593 if (pos)
3594 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
3595
3596 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
3597 if (pos) {
3598 int features;
3599
3600 max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
3601 max_pasids = min(max_pasids, (1 << 20));
3602
3603 info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
3604 info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
3605
3606 features = pci_pasid_features(pdev);
3607 if (features & PCI_PASID_CAP_EXEC)
3608 info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
3609 if (features & PCI_PASID_CAP_PRIV)
3610 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
3611 }
3612
3613 return 0;
3614}
3615EXPORT_SYMBOL(amd_iommu_device_info);
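/*
 * Sketch of how a caller probes a device with the helper above before
 * trying to use PASIDs; the policy (requiring ATS, PRI and PASID
 * support) is an assumption for illustration.
 */
static bool example_device_can_use_pasids(struct pci_dev *pdev)
{
        struct amd_iommu_device_info info;

        if (amd_iommu_device_info(pdev, &info))
                return false;

        return (info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) &&
               (info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) &&
               (info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) &&
               info.max_pasids > 0;
}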
Joerg Roedel2b324502012-06-21 16:29:10 +02003616
3617#ifdef CONFIG_IRQ_REMAP
3618
3619/*****************************************************************************
3620 *
3621 * Interrupt Remapping Implementation
3622 *
3623 *****************************************************************************/
3624
3625union irte {
3626 u32 val;
3627 struct {
3628 u32 valid : 1,
3629 no_fault : 1,
3630 int_type : 3,
3631 rq_eoi : 1,
3632 dm : 1,
3633 rsvd_1 : 1,
3634 destination : 8,
3635 vector : 8,
3636 rsvd_2 : 8;
3637 } fields;
3638};
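/*
 * Illustrative sketch of how one of these 32-bit IRTEs is put together
 * for a fixed, physically addressed interrupt; the vector and APIC id
 * are assumptions supplied by the caller.
 */
static inline union irte example_irte(u8 vector, u8 dest_apicid)
{
        union irte irte = { .val = 0 };

        irte.fields.valid       = 1;
        irte.fields.int_type    = 0;            /* fixed delivery */
        irte.fields.dm          = 0;            /* physical destination mode */
        irte.fields.destination = dest_apicid;
        irte.fields.vector      = vector;

        return irte;
}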
3639
3640#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
3641#define DTE_IRQ_REMAP_INTCTL (2ULL << 60)
3642#define DTE_IRQ_TABLE_LEN (8ULL << 1)
3643#define DTE_IRQ_REMAP_ENABLE 1ULL
3644
3645static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3646{
3647 u64 dte;
3648
3649 dte = amd_iommu_dev_table[devid].data[2];
3650 dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
3651 dte |= virt_to_phys(table->table);
3652 dte |= DTE_IRQ_REMAP_INTCTL;
3653 dte |= DTE_IRQ_TABLE_LEN;
3654 dte |= DTE_IRQ_REMAP_ENABLE;
3655
3656 amd_iommu_dev_table[devid].data[2] = dte;
3657}
3658
3659#define IRTE_ALLOCATED (~1U)
3660
3661static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3662{
3663 struct irq_remap_table *table = NULL;
3664 struct amd_iommu *iommu;
3665 unsigned long flags;
3666 u16 alias;
3667
3668 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3669
3670 iommu = amd_iommu_rlookup_table[devid];
3671 if (!iommu)
3672 goto out_unlock;
3673
3674 table = irq_lookup_table[devid];
3675 if (table)
3676 goto out;
3677
3678 alias = amd_iommu_alias_table[devid];
3679 table = irq_lookup_table[alias];
3680 if (table) {
3681 irq_lookup_table[devid] = table;
3682 set_dte_irq_entry(devid, table);
3683 iommu_flush_dte(iommu, devid);
3684 goto out;
3685 }
3686
3687 /* Nothing there yet, allocate a new IRQ remapping table */
3688 table = kzalloc(sizeof(*table), GFP_ATOMIC);
3689 if (!table)
3690 goto out;
3691
Joerg Roedel197887f2013-04-09 21:14:08 +02003692 /* Initialize table spin-lock */
3693 spin_lock_init(&table->lock);
3694
Joerg Roedel2b324502012-06-21 16:29:10 +02003695 if (ioapic)
3696 /* Keep the first 32 indexes free for IOAPIC interrupts */
3697 table->min_index = 32;
3698
3699 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
3700 if (!table->table) {
3701 kfree(table);
Dan Carpenter821f0f62012-10-02 11:34:40 +03003702 table = NULL;
Joerg Roedel2b324502012-06-21 16:29:10 +02003703 goto out;
3704 }
3705
3706 memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32));
3707
3708 if (ioapic) {
3709 int i;
3710
3711 for (i = 0; i < 32; ++i)
3712 table->table[i] = IRTE_ALLOCATED;
3713 }
3714
3715 irq_lookup_table[devid] = table;
3716 set_dte_irq_entry(devid, table);
3717 iommu_flush_dte(iommu, devid);
3718 if (devid != alias) {
3719 irq_lookup_table[alias] = table;
Alex Williamsone028a9e2014-04-22 10:08:40 -06003720 set_dte_irq_entry(alias, table);
Joerg Roedel2b324502012-06-21 16:29:10 +02003721 iommu_flush_dte(iommu, alias);
3722 }
3723
3724out:
3725 iommu_completion_wait(iommu);
3726
3727out_unlock:
3728 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3729
3730 return table;
3731}
3732
3733static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
3734{
3735 struct irq_remap_table *table;
3736 unsigned long flags;
3737 int index, c;
3738
3739 table = get_irq_table(devid, false);
3740 if (!table)
3741 return -ENODEV;
3742
3743 spin_lock_irqsave(&table->lock, flags);
3744
3745 /* Scan table for free entries */
3746 for (c = 0, index = table->min_index;
3747 index < MAX_IRQS_PER_TABLE;
3748 ++index) {
3749 if (table->table[index] == 0)
3750 c += 1;
3751 else
3752 c = 0;
3753
3754 if (c == count) {
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003755 struct irq_2_irte *irte_info;
Joerg Roedel2b324502012-06-21 16:29:10 +02003756
3757 for (; c != 0; --c)
3758 table->table[index - c + 1] = IRTE_ALLOCATED;
3759
3760 index -= count - 1;
3761
Joerg Roedel9b1b0e42012-09-26 12:44:45 +02003762 cfg->remapped = 1;
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003763 irte_info = &cfg->irq_2_irte;
3764 irte_info->devid = devid;
3765 irte_info->index = index;
Joerg Roedel2b324502012-06-21 16:29:10 +02003766
3767 goto out;
3768 }
3769 }
3770
3771 index = -ENOSPC;
3772
3773out:
3774 spin_unlock_irqrestore(&table->lock, flags);
3775
3776 return index;
3777}
3778
3779static int get_irte(u16 devid, int index, union irte *irte)
3780{
3781 struct irq_remap_table *table;
3782 unsigned long flags;
3783
3784 table = get_irq_table(devid, false);
3785 if (!table)
3786 return -ENOMEM;
3787
3788 spin_lock_irqsave(&table->lock, flags);
3789 irte->val = table->table[index];
3790 spin_unlock_irqrestore(&table->lock, flags);
3791
3792 return 0;
3793}
3794
3795static int modify_irte(u16 devid, int index, union irte irte)
3796{
3797 struct irq_remap_table *table;
3798 struct amd_iommu *iommu;
3799 unsigned long flags;
3800
3801 iommu = amd_iommu_rlookup_table[devid];
3802 if (iommu == NULL)
3803 return -EINVAL;
3804
3805 table = get_irq_table(devid, false);
3806 if (!table)
3807 return -ENOMEM;
3808
3809 spin_lock_irqsave(&table->lock, flags);
3810 table->table[index] = irte.val;
3811 spin_unlock_irqrestore(&table->lock, flags);
3812
3813 iommu_flush_irt(iommu, devid);
3814 iommu_completion_wait(iommu);
3815
3816 return 0;
3817}
3818
3819static void free_irte(u16 devid, int index)
3820{
3821 struct irq_remap_table *table;
3822 struct amd_iommu *iommu;
3823 unsigned long flags;
3824
3825 iommu = amd_iommu_rlookup_table[devid];
3826 if (iommu == NULL)
3827 return;
3828
3829 table = get_irq_table(devid, false);
3830 if (!table)
3831 return;
3832
3833 spin_lock_irqsave(&table->lock, flags);
3834 table->table[index] = 0;
3835 spin_unlock_irqrestore(&table->lock, flags);
3836
3837 iommu_flush_irt(iommu, devid);
3838 iommu_completion_wait(iommu);
3839}
3840
Joerg Roedel5527de72012-06-26 11:17:32 +02003841static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
3842 unsigned int destination, int vector,
3843 struct io_apic_irq_attr *attr)
3844{
3845 struct irq_remap_table *table;
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003846 struct irq_2_irte *irte_info;
Joerg Roedel5527de72012-06-26 11:17:32 +02003847 struct irq_cfg *cfg;
3848 union irte irte;
3849 int ioapic_id;
3850 int index;
3851 int devid;
3852 int ret;
3853
Jiang Liu719b5302014-10-27 16:12:10 +08003854 cfg = irq_cfg(irq);
Joerg Roedel5527de72012-06-26 11:17:32 +02003855 if (!cfg)
3856 return -EINVAL;
3857
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003858 irte_info = &cfg->irq_2_irte;
Joerg Roedel5527de72012-06-26 11:17:32 +02003859 ioapic_id = mpc_ioapic_id(attr->ioapic);
3860 devid = get_ioapic_devid(ioapic_id);
3861
3862 if (devid < 0)
3863 return devid;
3864
3865 table = get_irq_table(devid, true);
3866 if (table == NULL)
3867 return -ENOMEM;
3868
3869 index = attr->ioapic_pin;
3870
3871 /* Setup IRQ remapping info */
Joerg Roedel9b1b0e42012-09-26 12:44:45 +02003872 cfg->remapped = 1;
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003873 irte_info->devid = devid;
3874 irte_info->index = index;
Joerg Roedel5527de72012-06-26 11:17:32 +02003875
3876 /* Setup IRTE for IOMMU */
3877 irte.val = 0;
3878 irte.fields.vector = vector;
3879 irte.fields.int_type = apic->irq_delivery_mode;
3880 irte.fields.destination = destination;
3881 irte.fields.dm = apic->irq_dest_mode;
3882 irte.fields.valid = 1;
3883
3884 ret = modify_irte(devid, index, irte);
3885 if (ret)
3886 return ret;
3887
3888 /* Setup IOAPIC entry */
3889 memset(entry, 0, sizeof(*entry));
3890
3891 entry->vector = index;
3892 entry->mask = 0;
3893 entry->trigger = attr->trigger;
3894 entry->polarity = attr->polarity;
3895
3896 /*
3897 * Mask level-triggered IRQs.
Joerg Roedel5527de72012-06-26 11:17:32 +02003898 */
3899 if (attr->trigger)
3900 entry->mask = 1;
3901
3902 return 0;
3903}
3904
3905static int set_affinity(struct irq_data *data, const struct cpumask *mask,
3906 bool force)
3907{
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003908 struct irq_2_irte *irte_info;
Joerg Roedel5527de72012-06-26 11:17:32 +02003909 unsigned int dest, irq;
3910 struct irq_cfg *cfg;
3911 union irte irte;
3912 int err;
3913
3914 if (!config_enabled(CONFIG_SMP))
3915 return -1;
3916
Jiang Liu719b5302014-10-27 16:12:10 +08003917 cfg = irqd_cfg(data);
Joerg Roedel5527de72012-06-26 11:17:32 +02003918 irq = data->irq;
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003919 irte_info = &cfg->irq_2_irte;
Joerg Roedel5527de72012-06-26 11:17:32 +02003920
3921 if (!cpumask_intersects(mask, cpu_online_mask))
3922 return -EINVAL;
3923
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003924 if (get_irte(irte_info->devid, irte_info->index, &irte))
Joerg Roedel5527de72012-06-26 11:17:32 +02003925 return -EBUSY;
3926
3927 if (assign_irq_vector(irq, cfg, mask))
3928 return -EBUSY;
3929
3930 err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
3931 if (err) {
3932 if (assign_irq_vector(irq, cfg, data->affinity))
3933 pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
3934 return err;
3935 }
3936
3937 irte.fields.vector = cfg->vector;
3938 irte.fields.destination = dest;
3939
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003940 modify_irte(irte_info->devid, irte_info->index, irte);
Joerg Roedel5527de72012-06-26 11:17:32 +02003941
3942 if (cfg->move_in_progress)
3943 send_cleanup_vector(cfg);
3944
3945 cpumask_copy(data->affinity, mask);
3946
3947 return 0;
3948}
3949
3950static int free_irq(int irq)
3951{
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003952 struct irq_2_irte *irte_info;
Joerg Roedel5527de72012-06-26 11:17:32 +02003953 struct irq_cfg *cfg;
3954
Jiang Liu719b5302014-10-27 16:12:10 +08003955 cfg = irq_cfg(irq);
Joerg Roedel5527de72012-06-26 11:17:32 +02003956 if (!cfg)
3957 return -EINVAL;
3958
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003959 irte_info = &cfg->irq_2_irte;
Joerg Roedel5527de72012-06-26 11:17:32 +02003960
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003961 free_irte(irte_info->devid, irte_info->index);
Joerg Roedel5527de72012-06-26 11:17:32 +02003962
3963 return 0;
3964}
3965
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003966static void compose_msi_msg(struct pci_dev *pdev,
3967 unsigned int irq, unsigned int dest,
3968 struct msi_msg *msg, u8 hpet_id)
3969{
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003970 struct irq_2_irte *irte_info;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003971 struct irq_cfg *cfg;
3972 union irte irte;
3973
Jiang Liu719b5302014-10-27 16:12:10 +08003974 cfg = irq_cfg(irq);
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003975 if (!cfg)
3976 return;
3977
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003978 irte_info = &cfg->irq_2_irte;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003979
3980 irte.val = 0;
3981 irte.fields.vector = cfg->vector;
3982 irte.fields.int_type = apic->irq_delivery_mode;
3983 irte.fields.destination = dest;
3984 irte.fields.dm = apic->irq_dest_mode;
3985 irte.fields.valid = 1;
3986
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003987 modify_irte(irte_info->devid, irte_info->index, irte);
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003988
3989 msg->address_hi = MSI_ADDR_BASE_HI;
3990 msg->address_lo = MSI_ADDR_BASE_LO;
Joerg Roedel0dfedd62013-04-09 15:39:16 +02003991 msg->data = irte_info->index;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003992}
3993
3994static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
3995{
3996 struct irq_cfg *cfg;
3997 int index;
3998 u16 devid;
3999
4000 if (!pdev)
4001 return -EINVAL;
4002
Jiang Liu719b5302014-10-27 16:12:10 +08004003 cfg = irq_cfg(irq);
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02004004 if (!cfg)
4005 return -EINVAL;
4006
4007 devid = get_device_id(&pdev->dev);
4008 index = alloc_irq_index(cfg, devid, nvec);
4009
4010 return index < 0 ? MAX_IRQS_PER_TABLE : index;
4011}
4012
4013static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
4014 int index, int offset)
4015{
Joerg Roedel0dfedd62013-04-09 15:39:16 +02004016 struct irq_2_irte *irte_info;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02004017 struct irq_cfg *cfg;
4018 u16 devid;
4019
4020 if (!pdev)
4021 return -EINVAL;
4022
Jiang Liu719b5302014-10-27 16:12:10 +08004023 cfg = irq_cfg(irq);
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02004024 if (!cfg)
4025 return -EINVAL;
4026
4027 if (index >= MAX_IRQS_PER_TABLE)
4028 return 0;
4029
4030 devid = get_device_id(&pdev->dev);
Joerg Roedel0dfedd62013-04-09 15:39:16 +02004031 irte_info = &cfg->irq_2_irte;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02004032
Joerg Roedel9b1b0e42012-09-26 12:44:45 +02004033 cfg->remapped = 1;
Joerg Roedel0dfedd62013-04-09 15:39:16 +02004034 irte_info->devid = devid;
4035 irte_info->index = index + offset;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02004036
4037 return 0;
4038}
4039
Yijing Wang5fc24d82014-09-17 17:32:19 +08004040static int alloc_hpet_msi(unsigned int irq, unsigned int id)
Joerg Roedeld9761952012-06-26 16:00:08 +02004041{
Joerg Roedel0dfedd62013-04-09 15:39:16 +02004042 struct irq_2_irte *irte_info;
Joerg Roedeld9761952012-06-26 16:00:08 +02004043 struct irq_cfg *cfg;
4044 int index, devid;
4045
Jiang Liu719b5302014-10-27 16:12:10 +08004046 cfg = irq_cfg(irq);
Joerg Roedeld9761952012-06-26 16:00:08 +02004047 if (!cfg)
4048 return -EINVAL;
4049
Joerg Roedel0dfedd62013-04-09 15:39:16 +02004050 irte_info = &cfg->irq_2_irte;
Joerg Roedeld9761952012-06-26 16:00:08 +02004051 devid = get_hpet_devid(id);
4052 if (devid < 0)
4053 return devid;
4054
4055 index = alloc_irq_index(cfg, devid, 1);
4056 if (index < 0)
4057 return index;
4058
Joerg Roedel9b1b0e42012-09-26 12:44:45 +02004059 cfg->remapped = 1;
Joerg Roedel0dfedd62013-04-09 15:39:16 +02004060 irte_info->devid = devid;
4061 irte_info->index = index;
Joerg Roedeld9761952012-06-26 16:00:08 +02004062
4063 return 0;
4064}
4065
Joerg Roedel6b474b82012-06-26 16:46:04 +02004066struct irq_remap_ops amd_iommu_irq_ops = {
Joerg Roedel6b474b82012-06-26 16:46:04 +02004067 .prepare = amd_iommu_prepare,
4068 .enable = amd_iommu_enable,
4069 .disable = amd_iommu_disable,
4070 .reenable = amd_iommu_reenable,
4071 .enable_faulting = amd_iommu_enable_faulting,
4072 .setup_ioapic_entry = setup_ioapic_entry,
4073 .set_affinity = set_affinity,
4074 .free_irq = free_irq,
4075 .compose_msi_msg = compose_msi_msg,
4076 .msi_alloc_irq = msi_alloc_irq,
4077 .msi_setup_irq = msi_setup_irq,
Yijing Wang5fc24d82014-09-17 17:32:19 +08004078 .alloc_hpet_msi = alloc_hpet_msi,
Joerg Roedel6b474b82012-06-26 16:46:04 +02004079};
Joerg Roedel2b324502012-06-21 16:29:10 +02004080#endif