/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
#include <linux/iommu-helper.h>
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dma-contiguous.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/iova.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/dma.h>

#include "amd_iommu_proto.h"
#include "amd_iommu_types.h"
#include "irq_remapping.h"

#define AMD_IOMMU_MAPPING_ERROR	0

#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

#define LOOP_TIMEOUT	100000

/* IO virtual address start page frame number */
#define IOVA_START_PFN		(1)
#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

/* Reserved IOVA ranges */
#define MSI_RANGE_START		(0xfee00000)
#define MSI_RANGE_END		(0xfeefffff)
#define HT_RANGE_START		(0xfd00000000ULL)
#define HT_RANGE_END		(0xffffffffffULL)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * 512GB Pages are not supported due to a hardware bug
 */
#define AMD_IOMMU_PGSIZES	((~0xFFFUL) & ~(2ULL << 38))

static DEFINE_RWLOCK(amd_iommu_devtable_lock);

/* List of all available dev_data structures */
static LIST_HEAD(dev_data_list);
static DEFINE_SPINLOCK(dev_data_list_lock);

LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);

/*
 * Domain for untranslated devices - only allocated
 * if iommu=pt passed on kernel cmd line.
 */
const struct iommu_ops amd_iommu_ops;

static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;

static const struct dma_map_ops amd_iommu_dma_ops;

/*
 * general struct to manage commands sent to an IOMMU
 */
struct iommu_cmd {
	u32 data[4];
};

struct kmem_cache *amd_iommu_irq_cache;

static void update_domain(struct protection_domain *domain);
static int protection_domain_init(struct protection_domain *domain);
static void detach_device(struct device *dev);
static void iova_domain_flush_tlb(struct iova_domain *iovad);

/*
 * Data container for a dma_ops specific protection domain
 */
struct dma_ops_domain {
	/* generic protection domain information */
	struct protection_domain domain;

	/* IOVA RB-Tree */
	struct iova_domain iovad;
};

static struct iova_domain reserved_iova_ranges;
static struct lock_class_key reserved_rbtree_key;

/****************************************************************************
 *
 * Helper functions
 *
 ****************************************************************************/

static inline int match_hid_uid(struct device *dev,
				struct acpihid_map_entry *entry)
{
	const char *hid, *uid;

	hid = acpi_device_hid(ACPI_COMPANION(dev));
	uid = acpi_device_uid(ACPI_COMPANION(dev));

	if (!hid || !(*hid))
		return -ENODEV;

	if (!uid || !(*uid))
		return strcmp(hid, entry->hid);

	if (!(*entry->uid))
		return strcmp(hid, entry->hid);

	return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
}

static inline u16 get_pci_device_id(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return PCI_DEVID(pdev->bus->number, pdev->devfn);
}

static inline int get_acpihid_device_id(struct device *dev,
					struct acpihid_map_entry **entry)
{
	struct acpihid_map_entry *p;

	list_for_each_entry(p, &acpihid_map, list) {
		if (!match_hid_uid(dev, p)) {
			if (entry)
				*entry = p;
			return p->devid;
		}
	}
	return -EINVAL;
}

static inline int get_device_id(struct device *dev)
{
	int devid;

	if (dev_is_pci(dev))
		devid = get_pci_device_id(dev);
	else
		devid = get_acpihid_device_id(dev, NULL);

	return devid;
}

static struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
	return container_of(dom, struct protection_domain, domain);
}

static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
{
	BUG_ON(domain->flags != PD_DMA_OPS_MASK);
	return container_of(domain, struct dma_ops_domain, domain);
}

static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	dev_data->devid = devid;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	ratelimit_default_init(&dev_data->rs);

	return dev_data;
}

static struct iommu_dev_data *search_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	unsigned long flags;

	spin_lock_irqsave(&dev_data_list_lock, flags);
	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
		if (dev_data->devid == devid)
			goto out_unlock;
	}

	dev_data = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_data_list_lock, flags);

	return dev_data;
}

static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	*(u16 *)data = alias;
	return 0;
}

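/*
 * Resolve the DMA alias for a PCI device: compare the alias reported by
 * the IVRS ACPI table with the alias derived from PCI topology and
 * reconcile the two, copying rlookup/device-table entries or adding a
 * PCI DMA alias quirk where necessary.
 */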
static u16 get_alias(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 devid, ivrs_alias, pci_alias;

	/* The callers make sure that get_device_id() does not fail here */
	devid = get_device_id(dev);
	ivrs_alias = amd_iommu_alias_table[devid];
	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);

	if (ivrs_alias == pci_alias)
		return ivrs_alias;

	/*
	 * DMA alias showdown
	 *
	 * The IVRS is fairly reliable in telling us about aliases, but it
	 * can't know about every screwy device.  If we don't have an IVRS
	 * reported alias, use the PCI reported alias.  In that case we may
	 * still need to initialize the rlookup and dev_table entries if the
	 * alias is to a non-existent device.
	 */
	if (ivrs_alias == devid) {
		if (!amd_iommu_rlookup_table[pci_alias]) {
			amd_iommu_rlookup_table[pci_alias] =
				amd_iommu_rlookup_table[devid];
			memcpy(amd_iommu_dev_table[pci_alias].data,
			       amd_iommu_dev_table[devid].data,
			       sizeof(amd_iommu_dev_table[pci_alias].data));
		}

		return pci_alias;
	}

	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
		"for device %s[%04x:%04x], kernel reported alias "
		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
		PCI_FUNC(pci_alias));

	/*
	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
	 * bus, then the IVRS table may know about a quirk that we don't.
	 */
	if (pci_alias == devid &&
	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
		pci_add_dma_alias(pdev, ivrs_alias & 0xff);
		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
			dev_name(dev));
	}

	return ivrs_alias;
}

static struct iommu_dev_data *find_dev_data(u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

	dev_data = search_dev_data(devid);

	if (dev_data == NULL) {
		dev_data = alloc_dev_data(devid);

		if (translation_pre_enabled(iommu))
			dev_data->defer_attach = true;
	}

	return dev_data;
}

struct iommu_dev_data *get_dev_data(struct device *dev)
{
	return dev->archdata.iommu;
}
EXPORT_SYMBOL(get_dev_data);

/*
 * Find or create an IOMMU group for an acpihid device.
 */
static struct iommu_group *acpihid_device_group(struct device *dev)
{
	struct acpihid_map_entry *p, *entry = NULL;
	int devid;

	devid = get_acpihid_device_id(dev, &entry);
	if (devid < 0)
		return ERR_PTR(devid);

	list_for_each_entry(p, &acpihid_map, list) {
		if ((devid == p->devid) && p->group)
			entry->group = p->group;
	}

	if (!entry->group)
		entry->group = generic_device_group(dev);
	else
		iommu_group_ref_get(entry->group);

	return entry->group;
}

static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
	static const int caps[] = {
		PCI_EXT_CAP_ID_ATS,
		PCI_EXT_CAP_ID_PRI,
		PCI_EXT_CAP_ID_PASID,
	};
	int i, pos;

	for (i = 0; i < 3; ++i) {
		pos = pci_find_ext_capability(pdev, caps[i]);
		if (pos == 0)
			return false;
	}

	return true;
}

static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
{
	struct iommu_dev_data *dev_data;

	dev_data = get_dev_data(&pdev->dev);

	return dev_data->errata & (1 << erratum) ? true : false;
}

/*
 * This function checks if the driver got a valid device from the caller to
 * avoid dereferencing invalid pointers.
 */
static bool check_device(struct device *dev)
{
	int devid;

	if (!dev || !dev->dma_mask)
		return false;

	devid = get_device_id(dev);
	if (devid < 0)
		return false;

	/* Out of our scope? */
	if (devid > amd_iommu_last_bdf)
		return false;

	if (amd_iommu_rlookup_table[devid] == NULL)
		return false;

	return true;
}

static void init_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return;

	iommu_group_put(group);
}

static int iommu_init_device(struct device *dev)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	int devid;

	if (dev->archdata.iommu)
		return 0;

	devid = get_device_id(dev);
	if (devid < 0)
		return devid;

	iommu = amd_iommu_rlookup_table[devid];

	dev_data = find_dev_data(devid);
	if (!dev_data)
		return -ENOMEM;

	dev_data->alias = get_alias(dev);

	if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
		struct amd_iommu *iommu;

		iommu = amd_iommu_rlookup_table[dev_data->devid];
		dev_data->iommu_v2 = iommu->is_iommu_v2;
	}

	dev->archdata.iommu = dev_data;

	iommu_device_link(&iommu->iommu, dev);

	return 0;
}

static void iommu_ignore_device(struct device *dev)
{
	u16 alias;
	int devid;

	devid = get_device_id(dev);
	if (devid < 0)
		return;

	alias = get_alias(dev);

	memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
	memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));

	amd_iommu_rlookup_table[devid] = NULL;
	amd_iommu_rlookup_table[alias] = NULL;
}

static void iommu_uninit_device(struct device *dev)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu *iommu;
	int devid;

	devid = get_device_id(dev);
	if (devid < 0)
		return;

	iommu = amd_iommu_rlookup_table[devid];

	dev_data = search_dev_data(devid);
	if (!dev_data)
		return;

	if (dev_data->domain)
		detach_device(dev);

	iommu_device_unlink(&iommu->iommu, dev);

	iommu_group_remove_device(dev);

	/* Remove dma-ops */
	dev->dma_ops = NULL;

	/*
	 * We keep dev_data around for unplugged devices and reuse it when the
	 * device is re-plugged - not doing so would introduce a ton of races.
	 */
}

/****************************************************************************
 *
 * Interrupt handling functions
 *
 ****************************************************************************/

static void dump_dte_entry(u16 devid)
{
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
			amd_iommu_dev_table[devid].data[i]);
}

static void dump_command(unsigned long phys_addr)
{
	struct iommu_cmd *cmd = iommu_phys_to_virt(phys_addr);
	int i;

	for (i = 0; i < 4; ++i)
		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
}

static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
					u64 address, int flags)
{
	struct iommu_dev_data *dev_data = NULL;
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
					   devid & 0xff);
	if (pdev)
		dev_data = get_dev_data(&pdev->dev);

	if (dev_data && __ratelimit(&dev_data->rs)) {
		dev_err(&pdev->dev, "AMD-Vi: Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%016llx flags=0x%04x]\n",
			domain_id, address, flags);
	} else if (printk_ratelimit()) {
		pr_err("AMD-Vi: Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n",
			PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
			domain_id, address, flags);
	}

	if (pdev)
		pci_dev_put(pdev);
}

static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
	int type, devid, domid, flags;
	volatile u32 *event = __evt;
	int count = 0;
	u64 address;

retry:
	type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	domid   = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	address = (u64)(((u64)event[3]) << 32) | event[2];

	if (type == 0) {
		/* Did we hit the erratum? */
		if (++count == LOOP_TIMEOUT) {
			pr_err("AMD-Vi: No event written to event log\n");
			return;
		}
		udelay(1);
		goto retry;
	}

	if (type == EVENT_TYPE_IO_FAULT) {
		amd_iommu_report_page_fault(devid, domid, address, flags);
		return;
	} else {
		printk(KERN_ERR "AMD-Vi: Event logged [");
	}

	switch (type) {
	case EVENT_TYPE_ILL_DEV:
		printk("ILLEGAL_DEV_TABLE_ENTRY device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		dump_dte_entry(devid);
		break;
	case EVENT_TYPE_DEV_TAB_ERR:
		printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	case EVENT_TYPE_PAGE_TAB_ERR:
		printk("PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x "
		       "domain=0x%04x address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       domid, address, flags);
		break;
	case EVENT_TYPE_ILL_CMD:
		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
		dump_command(address);
		break;
	case EVENT_TYPE_CMD_HARD_ERR:
		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
		       "flags=0x%04x]\n", address, flags);
		break;
	case EVENT_TYPE_IOTLB_INV_TO:
		printk("IOTLB_INV_TIMEOUT device=%02x:%02x.%x "
		       "address=0x%016llx]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address);
		break;
	case EVENT_TYPE_INV_DEV_REQ:
		printk("INVALID_DEVICE_REQUEST device=%02x:%02x.%x "
		       "address=0x%016llx flags=0x%04x]\n",
		       PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
		       address, flags);
		break;
	default:
		printk(KERN_ERR "UNKNOWN type=0x%02x event[0]=0x%08x "
		       "event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
		       type, event[0], event[1], event[2], event[3]);
	}

	memset(__evt, 0, 4 * sizeof(u32));
}

static void iommu_poll_events(struct amd_iommu *iommu)
{
	u32 head, tail;

	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
	}

	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
}

static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
	struct amd_iommu_fault fault;

	if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
		pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
		return;
	}

	fault.address   = raw[1];
	fault.pasid     = PPR_PASID(raw[0]);
	fault.device_id = PPR_DEVID(raw[0]);
	fault.tag       = PPR_TAG(raw[0]);
	fault.flags     = PPR_FLAGS(raw[0]);

	atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}

static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
	u32 head, tail;

	if (iommu->ppr_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 entry[2];
		int i;

		raw = (u64 *)(iommu->ppr_log + head);

		/*
		 * Hardware bug: Interrupt may arrive before the entry is
		 * written to memory. If this happens we need to wait for the
		 * entry to arrive.
		 */
		for (i = 0; i < LOOP_TIMEOUT; ++i) {
			if (PPR_REQ_TYPE(raw[0]) != 0)
				break;
			udelay(1);
		}

		/* Avoid memcpy function-call overhead */
		entry[0] = raw[0];
		entry[1] = raw[1];

		/*
		 * To detect the hardware bug we need to clear the entry
		 * back to zero.
		 */
		raw[0] = raw[1] = 0UL;

		/* Update head pointer of hardware ring-buffer */
		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

		/* Handle PPR entry */
		iommu_handle_ppr_entry(iommu, entry);

		/* Refresh ring-buffer information */
		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
	}
}

#ifdef CONFIG_IRQ_REMAP
static int (*iommu_ga_log_notifier)(u32);

int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
	iommu_ga_log_notifier = notifier;

	return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);

static void iommu_poll_ga_log(struct amd_iommu *iommu)
{
	u32 head, tail, cnt = 0;

	if (iommu->ga_log == NULL)
		return;

	head = readl(iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_GA_TAIL_OFFSET);

	while (head != tail) {
		volatile u64 *raw;
		u64 log_entry;

		raw = (u64 *)(iommu->ga_log + head);
		cnt++;

		/* Avoid memcpy function-call overhead */
		log_entry = *raw;

		/* Update head pointer of hardware ring-buffer */
		head = (head + GA_ENTRY_SIZE) % GA_LOG_SIZE;
		writel(head, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);

		/* Handle GA entry */
		switch (GA_REQ_TYPE(log_entry)) {
		case GA_GUEST_NR:
			if (!iommu_ga_log_notifier)
				break;

			pr_debug("AMD-Vi: %s: devid=%#x, ga_tag=%#x\n",
				 __func__, GA_DEVID(log_entry),
				 GA_TAG(log_entry));

			if (iommu_ga_log_notifier(GA_TAG(log_entry)) != 0)
				pr_err("AMD-Vi: GA log notifier failed.\n");
			break;
		default:
			break;
		}
	}
}
#endif /* CONFIG_IRQ_REMAP */

#define AMD_IOMMU_INT_MASK	\
	(MMIO_STATUS_EVT_INT_MASK | \
	 MMIO_STATUS_PPR_INT_MASK | \
	 MMIO_STATUS_GALOG_INT_MASK)

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
	struct amd_iommu *iommu = (struct amd_iommu *) data;
	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);

	while (status & AMD_IOMMU_INT_MASK) {
		/* Enable EVT and PPR and GA interrupts again */
		writel(AMD_IOMMU_INT_MASK,
			iommu->mmio_base + MMIO_STATUS_OFFSET);

		if (status & MMIO_STATUS_EVT_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
			iommu_poll_events(iommu);
		}

		if (status & MMIO_STATUS_PPR_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
			iommu_poll_ppr_log(iommu);
		}

#ifdef CONFIG_IRQ_REMAP
		if (status & MMIO_STATUS_GALOG_INT_MASK) {
			pr_devel("AMD-Vi: Processing IOMMU GA Log\n");
			iommu_poll_ga_log(iommu);
		}
#endif

		/*
		 * Hardware bug: ERBT1312
		 * When re-enabling interrupt (by writing 1
		 * to clear the bit), the hardware might also try to set
		 * the interrupt bit in the event status register.
		 * In this scenario, the bit will be set, and disable
		 * subsequent interrupts.
		 *
		 * Workaround: The IOMMU driver should read back the
		 * status register and check if the interrupt bits are cleared.
		 * If not, driver will need to go through the interrupt handler
		 * again and re-clear the bits
		 */
		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
	}
	return IRQ_HANDLED;
}

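/*
 * Hard-irq half of the IOMMU interrupt: all log processing happens in the
 * threaded handler above, so just wake the thread.
 */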
irqreturn_t amd_iommu_int_handler(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

/****************************************************************************
 *
 * IOMMU command queuing functions
 *
 ****************************************************************************/

static int wait_on_sem(volatile u64 *sem)
{
	int i = 0;

	while (*sem == 0 && i < LOOP_TIMEOUT) {
		udelay(1);
		i += 1;
	}

	if (i == LOOP_TIMEOUT) {
		pr_alert("AMD-Vi: Completion-Wait loop timed out\n");
		return -EIO;
	}

	return 0;
}

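/*
 * The command buffer is a ring of fixed-size iommu_cmd slots. The driver
 * advances the tail, the IOMMU consumes entries and advances the head;
 * writing the new tail to MMIO_CMD_TAIL_OFFSET tells the hardware that a
 * new command is available.
 */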
static void copy_cmd_to_buffer(struct amd_iommu *iommu,
			       struct iommu_cmd *cmd)
{
	u8 *target;

	target = iommu->cmd_buf + iommu->cmd_buf_tail;

	iommu->cmd_buf_tail += sizeof(*cmd);
	iommu->cmd_buf_tail %= CMD_BUFFER_SIZE;

	/* Copy command to buffer */
	memcpy(target, cmd, sizeof(*cmd));

	/* Tell the IOMMU about it */
	writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
}

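/*
 * Build a COMPLETION_WAIT command. With the store bit set, the IOMMU
 * writes the value in data[2]/data[3] (here: 1) to the given semaphore
 * address once all earlier commands have completed; wait_on_sem() polls
 * that location.
 */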
static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
{
	u64 paddr = iommu_virt_to_phys((void *)address);

	WARN_ON(address & 0x7ULL);

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
	cmd->data[1] = upper_32_bits(paddr);
	cmd->data[2] = 1;
	CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}

static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
}

static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
				  size_t size, u16 domid, int pde)
{
	u64 pages;
	bool s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = false;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = true;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[1] |= domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
	if (s) /* size bit - we flush more than one 4kb page */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
}

static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
				  u64 address, size_t size)
{
	u64 pages;
	bool s;

	pages = iommu_num_pages(address, size, PAGE_SIZE);
	s     = false;

	if (pages > 1) {
		/*
		 * If we have to flush more than one page, flush all
		 * TLB entries for this domain
		 */
		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
		s = true;
	}

	address &= PAGE_MASK;

	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0]  = devid;
	cmd->data[0] |= (qdep & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
	if (s)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}

static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
				  u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = pasid;
	cmd->data[1]  = domid;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[3]  = upper_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}

static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
				  int qdep, u64 address, bool size)
{
	memset(cmd, 0, sizeof(*cmd));

	address &= ~(0xfffULL);

	cmd->data[0]  = devid;
	cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
	cmd->data[0] |= (qdep  & 0xff) << 24;
	cmd->data[1]  = devid;
	cmd->data[1] |= (pasid & 0xff) << 16;
	cmd->data[2]  = lower_32_bits(address);
	cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
	cmd->data[3]  = upper_32_bits(address);
	if (size)
		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
}

static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
			       int status, int tag, bool gn)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->data[0]  = devid;
	if (gn) {
		cmd->data[1]  = pasid;
		cmd->data[2]  = CMD_INV_IOMMU_PAGES_GN_MASK;
	}
	cmd->data[3]  = tag & 0x1ff;
	cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;

	CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
}

static void build_inv_all(struct iommu_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	CMD_SET_TYPE(cmd, CMD_INV_ALL);
}

static void build_inv_irt(struct iommu_cmd *cmd, u16 devid)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data[0] = devid;
	CMD_SET_TYPE(cmd, CMD_INV_IRT);
}

/*
 * Writes the command to the IOMMU's command buffer and informs the
 * hardware about the new command.
 */
static int __iommu_queue_command_sync(struct amd_iommu *iommu,
				      struct iommu_cmd *cmd,
				      bool sync)
{
	unsigned int count = 0;
	u32 left, next_tail;

	next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
again:
	left      = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE;

	if (left <= 0x20) {
		/* Skip udelay() the first time around */
		if (count++) {
			if (count == LOOP_TIMEOUT) {
				pr_err("AMD-Vi: Command buffer timeout\n");
				return -EIO;
			}

			udelay(1);
		}

		/* Update head and recheck remaining space */
		iommu->cmd_buf_head = readl(iommu->mmio_base +
					    MMIO_CMD_HEAD_OFFSET);

		goto again;
	}

	copy_cmd_to_buffer(iommu, cmd);

	/* Do we need to make sure all commands are processed? */
	iommu->need_sync = sync;

	return 0;
}

static int iommu_queue_command_sync(struct amd_iommu *iommu,
				    struct iommu_cmd *cmd,
				    bool sync)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&iommu->lock, flags);
	ret = __iommu_queue_command_sync(iommu, cmd, sync);
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
{
	return iommu_queue_command_sync(iommu, cmd, true);
}

/*
 * This function queues a completion wait command into the command
 * buffer of an IOMMU
 */
static int iommu_completion_wait(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;
	unsigned long flags;
	int ret;

	if (!iommu->need_sync)
		return 0;


	build_completion_wait(&cmd, (u64)&iommu->cmd_sem);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu->cmd_sem = 0;

	ret = __iommu_queue_command_sync(iommu, &cmd, false);
	if (ret)
		goto out_unlock;

	ret = wait_on_sem(&iommu->cmd_sem);

out_unlock:
	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}

static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_dte(&cmd, devid);

	return iommu_queue_command(iommu, &cmd);
}

static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= 0xffff; ++devid)
		iommu_flush_dte(iommu, devid);

	iommu_completion_wait(iommu);
}

/*
 * This function uses heavy locking and may disable irqs for some time. But
 * this is no issue because it is only called during resume.
 */
static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
{
	u32 dom_id;

	for (dom_id = 0; dom_id <= 0xffff; ++dom_id) {
		struct iommu_cmd cmd;
		build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
				      dom_id, 1);
		iommu_queue_command(iommu, &cmd);
	}

	iommu_completion_wait(iommu);
}

static void amd_iommu_flush_all(struct amd_iommu *iommu)
{
	struct iommu_cmd cmd;

	build_inv_all(&cmd);

	iommu_queue_command(iommu, &cmd);
	iommu_completion_wait(iommu);
}

static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd cmd;

	build_inv_irt(&cmd, devid);

	iommu_queue_command(iommu, &cmd);
}

static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
{
	u32 devid;

	for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++)
		iommu_flush_irt(iommu, devid);

	iommu_completion_wait(iommu);
}

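/*
 * Flush everything the IOMMU caches: use a single INVALIDATE_ALL command
 * if the hardware supports it (FEATURE_IA), otherwise flush device table
 * entries, interrupt remapping tables and the TLB separately.
 */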
void iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (iommu_feature(iommu, FEATURE_IA)) {
		amd_iommu_flush_all(iommu);
	} else {
		amd_iommu_flush_dte_all(iommu);
		amd_iommu_flush_irt_all(iommu);
		amd_iommu_flush_tlb_all(iommu);
	}
}

/*
 * Command send function for flushing on-device TLB
 */
static int device_flush_iotlb(struct iommu_dev_data *dev_data,
			      u64 address, size_t size)
{
	struct amd_iommu *iommu;
	struct iommu_cmd cmd;
	int qdep;

	qdep     = dev_data->ats.qdep;
	iommu    = amd_iommu_rlookup_table[dev_data->devid];

	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

	return iommu_queue_command(iommu, &cmd);
}

/*
 * Command send function for invalidating a device table entry
 */
static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu;
	u16 alias;
	int ret;

	iommu = amd_iommu_rlookup_table[dev_data->devid];
	alias = dev_data->alias;

	ret = iommu_flush_dte(iommu, dev_data->devid);
	if (!ret && alias != dev_data->devid)
		ret = iommu_flush_dte(iommu, alias);
	if (ret)
		return ret;

	if (dev_data->ats.enabled)
		ret = device_flush_iotlb(dev_data, 0, ~0UL);

	return ret;
}

/*
 * TLB invalidation function which is called from the mapping functions.
 * It invalidates a single PTE if the range to flush is within a single
 * page. Otherwise it flushes the whole TLB of the IOMMU.
 */
static void __domain_flush_pages(struct protection_domain *domain,
				 u64 address, size_t size, int pde)
{
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
	int ret = 0, i;

	build_inv_iommu_pages(&cmd, address, size, domain->id, pde);

	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
		if (!domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
		ret |= iommu_queue_command(amd_iommus[i], &cmd);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {

		if (!dev_data->ats.enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size);
	}

	WARN_ON(ret);
}

static void domain_flush_pages(struct protection_domain *domain,
			       u64 address, size_t size)
{
	__domain_flush_pages(domain, address, size, 0);
}

/* Flush the whole IO/TLB for a given protection domain */
static void domain_flush_tlb(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void domain_flush_tlb_pde(struct protection_domain *domain)
{
	__domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

static void domain_flush_complete(struct protection_domain *domain)
{
	int i;

	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
		if (domain && !domain->dev_iommu[i])
			continue;

		/*
		 * Devices of this domain are behind this IOMMU
		 * We need to wait for completion of all commands.
		 */
		iommu_completion_wait(amd_iommus[i]);
	}
}


/*
 * This function flushes the DTEs for all devices in domain
 */
static void domain_flush_devices(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct protection_domain *domain,
				   gfp_t gfp)
{
	u64 *pte;

	if (domain->mode == PAGE_MODE_6_LEVEL)
		/* address space already 64 bit large */
		return false;

	pte = (void *)get_zeroed_page(gfp);
	if (!pte)
		return false;

	*pte             = PM_LEVEL_PDE(domain->mode,
					iommu_virt_to_phys(domain->pt_root));
	domain->pt_root  = pte;
	domain->mode    += 1;
	domain->updated  = true;

	return true;
}

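/*
 * Walk the page table down to the level that maps @page_size, allocating
 * missing intermediate page-table pages on the way. Returns a pointer to
 * the PTE for @address, or NULL if an allocation fails or an unexpected
 * page-table layout is encountered.
 */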
1339static u64 *alloc_pte(struct protection_domain *domain,
1340 unsigned long address,
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001341 unsigned long page_size,
Joerg Roedel308973d2009-11-24 17:43:32 +01001342 u64 **pte_page,
1343 gfp_t gfp)
1344{
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001345 int level, end_lvl;
Joerg Roedel308973d2009-11-24 17:43:32 +01001346 u64 *pte, *page;
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001347
1348 BUG_ON(!is_power_of_2(page_size));
Joerg Roedel308973d2009-11-24 17:43:32 +01001349
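	/*
	 * Grow the page table until it is deep enough to cover the
	 * requested address.
	 */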
1350 while (address > PM_LEVEL_SIZE(domain->mode))
1351 increase_address_space(domain, gfp);
1352
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001353 level = domain->mode - 1;
1354 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1355 address = PAGE_SIZE_ALIGN(address, page_size);
1356 end_lvl = PAGE_SIZE_LEVEL(page_size);
Joerg Roedel308973d2009-11-24 17:43:32 +01001357
1358 while (level > end_lvl) {
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001359 u64 __pte, __npte;
1360
1361 __pte = *pte;
1362
1363 if (!IOMMU_PTE_PRESENT(__pte)) {
Joerg Roedel308973d2009-11-24 17:43:32 +01001364 page = (u64 *)get_zeroed_page(gfp);
1365 if (!page)
1366 return NULL;
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001367
Tom Lendacky2543a782017-07-17 16:10:24 -05001368 __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001369
Baoquan He134414f2016-09-15 16:50:50 +08001370 /* pte could have been changed somewhere. */
1371 if (cmpxchg64(pte, __pte, __npte) != __pte) {
Joerg Roedel7bfa5bd2015-12-21 19:07:50 +01001372 free_page((unsigned long)page);
1373 continue;
1374 }
Joerg Roedel308973d2009-11-24 17:43:32 +01001375 }
1376
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001377 /* No level skipping support yet */
1378 if (PM_PTE_LEVEL(*pte) != level)
1379 return NULL;
1380
Joerg Roedel308973d2009-11-24 17:43:32 +01001381 level -= 1;
1382
1383 pte = IOMMU_PTE_PAGE(*pte);
1384
1385 if (pte_page && level == end_lvl)
1386 *pte_page = pte;
1387
1388 pte = &pte[PM_LEVEL_INDEX(level, address)];
1389 }
1390
1391 return pte;
1392}
1393
1394/*
1395 * This function checks if there is a PTE for a given dma address. If
1396 * there is one, it returns the pointer to it.
1397 */
Joerg Roedel3039ca12015-04-01 14:58:48 +02001398static u64 *fetch_pte(struct protection_domain *domain,
1399 unsigned long address,
1400 unsigned long *page_size)
Joerg Roedel308973d2009-11-24 17:43:32 +01001401{
1402 int level;
1403 u64 *pte;
1404
Joerg Roedel24cd7722010-01-19 17:27:39 +01001405 if (address > PM_LEVEL_SIZE(domain->mode))
1406 return NULL;
Joerg Roedel308973d2009-11-24 17:43:32 +01001407
Joerg Roedel3039ca12015-04-01 14:58:48 +02001408 level = domain->mode - 1;
1409 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
1410 *page_size = PTE_LEVEL_PAGE_SIZE(level);
Joerg Roedel24cd7722010-01-19 17:27:39 +01001411
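	/*
	 * Walk down from the root, stopping at large PTEs or at the last
	 * level; *page_size tracks the size mapped at the current level.
	 */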
1412 while (level > 0) {
1413
1414 /* Not Present */
Joerg Roedel308973d2009-11-24 17:43:32 +01001415 if (!IOMMU_PTE_PRESENT(*pte))
1416 return NULL;
1417
Joerg Roedel24cd7722010-01-19 17:27:39 +01001418 /* Large PTE */
Joerg Roedel3039ca12015-04-01 14:58:48 +02001419 if (PM_PTE_LEVEL(*pte) == 7 ||
1420 PM_PTE_LEVEL(*pte) == 0)
1421 break;
Joerg Roedel24cd7722010-01-19 17:27:39 +01001422
1423 /* No level skipping support yet */
1424 if (PM_PTE_LEVEL(*pte) != level)
1425 return NULL;
1426
Joerg Roedel308973d2009-11-24 17:43:32 +01001427 level -= 1;
1428
Joerg Roedel24cd7722010-01-19 17:27:39 +01001429 /* Walk to the next level */
Joerg Roedel3039ca12015-04-01 14:58:48 +02001430 pte = IOMMU_PTE_PAGE(*pte);
1431 pte = &pte[PM_LEVEL_INDEX(level, address)];
1432 *page_size = PTE_LEVEL_PAGE_SIZE(level);
1433 }
1434
1435 if (PM_PTE_LEVEL(*pte) == 0x07) {
1436 unsigned long pte_mask;
1437
1438 /*
1439 * If we have a series of large PTEs, make
1440 * sure to return a pointer to the first one.
1441 */
1442 *page_size = pte_mask = PTE_PAGE_SIZE(*pte);
1443 pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
1444 pte = (u64 *)(((unsigned long)pte) & pte_mask);
Joerg Roedel308973d2009-11-24 17:43:32 +01001445 }
1446
1447 return pte;
1448}
1449
1450/*
Joerg Roedel431b2a22008-07-11 17:14:22 +02001451 * Generic mapping function. It maps a physical address into a DMA
1452 * address space. It allocates the page table pages if necessary.
1453 * In the future it can be extended to a generic mapping function
1454 * supporting all features of AMD IOMMU page tables like level skipping
1455 * and full 64 bit address spaces.
1456 */
Joerg Roedel38e817f2008-12-02 17:27:52 +01001457static int iommu_map_page(struct protection_domain *dom,
1458 unsigned long bus_addr,
1459 unsigned long phys_addr,
Joerg Roedelb911b892016-07-05 14:29:11 +02001460 unsigned long page_size,
Joerg Roedelabdc5eb2009-09-03 11:33:51 +02001461 int prot,
Joerg Roedelb911b892016-07-05 14:29:11 +02001462 gfp_t gfp)
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001463{
Joerg Roedel8bda3092009-05-12 12:02:46 +02001464 u64 __pte, *pte;
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001465 int i, count;
Joerg Roedelabdc5eb2009-09-03 11:33:51 +02001466
Joerg Roedeld4b03662015-04-01 14:58:52 +02001467 BUG_ON(!IS_ALIGNED(bus_addr, page_size));
1468 BUG_ON(!IS_ALIGNED(phys_addr, page_size));
1469
Joerg Roedelbad1cac2009-09-02 16:52:23 +02001470 if (!(prot & IOMMU_PROT_MASK))
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001471 return -EINVAL;
1472
Joerg Roedeld4b03662015-04-01 14:58:52 +02001473 count = PAGE_SIZE_PTE_COUNT(page_size);
Joerg Roedelb911b892016-07-05 14:29:11 +02001474 pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp);
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001475
Maurizio Lombardi63eaa752014-09-11 12:28:03 +02001476 if (!pte)
1477 return -ENOMEM;
1478
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001479 for (i = 0; i < count; ++i)
1480 if (IOMMU_PTE_PRESENT(pte[i]))
1481 return -EBUSY;
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001482
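	/*
	 * Mappings larger than 4K are encoded as a series of identical
	 * PTEs marked with level 7; a single page gets a plain PTE.
	 */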
Joerg Roedeld4b03662015-04-01 14:58:52 +02001483 if (count > 1) {
Tom Lendacky2543a782017-07-17 16:10:24 -05001484 __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
Baoquan He07a80a62017-08-09 16:33:36 +08001485 __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001486 } else
Linus Torvalds4dfc2782017-09-09 15:03:24 -07001487 __pte = __sme_set(phys_addr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001488
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001489 if (prot & IOMMU_PROT_IR)
1490 __pte |= IOMMU_PTE_IR;
1491 if (prot & IOMMU_PROT_IW)
1492 __pte |= IOMMU_PTE_IW;
1493
Joerg Roedelcbb9d722010-01-15 14:41:15 +01001494 for (i = 0; i < count; ++i)
1495 pte[i] = __pte;
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001496
Joerg Roedel04bfdd82009-09-02 16:00:23 +02001497 update_domain(dom);
1498
Joerg Roedelbd0e5212008-06-26 21:27:56 +02001499 return 0;
1500}
1501
Joerg Roedel24cd7722010-01-19 17:27:39 +01001502static unsigned long iommu_unmap_page(struct protection_domain *dom,
1503 unsigned long bus_addr,
1504 unsigned long page_size)
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001505{
Joerg Roedel71b390e2015-04-01 14:58:49 +02001506 unsigned long long unmapped;
1507 unsigned long unmap_size;
Joerg Roedel24cd7722010-01-19 17:27:39 +01001508 u64 *pte;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001509
Joerg Roedel24cd7722010-01-19 17:27:39 +01001510 BUG_ON(!is_power_of_2(page_size));
1511
1512 unmapped = 0;
1513
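	/*
	 * Clear the range one PTE at a time; fetch_pte reports the size
	 * actually covered by each entry, so large pages are cleared in
	 * one step.
	 */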
1514 while (unmapped < page_size) {
1515
Joerg Roedel71b390e2015-04-01 14:58:49 +02001516 pte = fetch_pte(dom, bus_addr, &unmap_size);
Joerg Roedel24cd7722010-01-19 17:27:39 +01001517
Joerg Roedel71b390e2015-04-01 14:58:49 +02001518 if (pte) {
1519 int i, count;
Joerg Roedel24cd7722010-01-19 17:27:39 +01001520
Joerg Roedel71b390e2015-04-01 14:58:49 +02001521 count = PAGE_SIZE_PTE_COUNT(unmap_size);
Joerg Roedel24cd7722010-01-19 17:27:39 +01001522 for (i = 0; i < count; i++)
1523 pte[i] = 0ULL;
1524 }
1525
1526 bus_addr = (bus_addr & ~(unmap_size - 1)) + unmap_size;
1527 unmapped += unmap_size;
1528 }
1529
Alex Williamson60d0ca32013-06-21 14:33:19 -06001530 BUG_ON(unmapped && !is_power_of_2(unmapped));
Joerg Roedel24cd7722010-01-19 17:27:39 +01001531
1532 return unmapped;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001533}
Joerg Roedeleb74ff62008-12-02 19:59:10 +01001534
Joerg Roedel431b2a22008-07-11 17:14:22 +02001535/****************************************************************************
1536 *
1537 * The next functions belong to the address allocator for the dma_ops
Joerg Roedel2d4c5152016-07-05 16:21:32 +02001538 * interface functions.
Joerg Roedel431b2a22008-07-11 17:14:22 +02001539 *
1540 ****************************************************************************/
Joerg Roedeld3086442008-06-26 21:27:57 +02001541
Joerg Roedel9cabe892009-05-18 16:38:55 +02001542
Joerg Roedel256e4622016-07-05 14:23:01 +02001543static unsigned long dma_ops_alloc_iova(struct device *dev,
1544 struct dma_ops_domain *dma_dom,
1545 unsigned int pages, u64 dma_mask)
Joerg Roedela0f51442015-12-21 16:20:09 +01001546{
Joerg Roedel256e4622016-07-05 14:23:01 +02001547 unsigned long pfn = 0;
Joerg Roedela0f51442015-12-21 16:20:09 +01001548
Joerg Roedel256e4622016-07-05 14:23:01 +02001549 pages = __roundup_pow_of_two(pages);
Joerg Roedela0f51442015-12-21 16:20:09 +01001550
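	/*
	 * For devices that can address more than 32 bits, first try an
	 * IOVA below 4GB; only retry with the full DMA mask (and allow
	 * the rcache to be flushed) if that range is exhausted.
	 */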
Joerg Roedel256e4622016-07-05 14:23:01 +02001551 if (dma_mask > DMA_BIT_MASK(32))
1552 pfn = alloc_iova_fast(&dma_dom->iovad, pages,
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02001553 IOVA_PFN(DMA_BIT_MASK(32)), false);
Joerg Roedel7b5e25b2015-12-22 13:38:12 +01001554
Joerg Roedel256e4622016-07-05 14:23:01 +02001555 if (!pfn)
Tomasz Nowicki538d5b32017-09-20 10:52:02 +02001556 pfn = alloc_iova_fast(&dma_dom->iovad, pages,
1557 IOVA_PFN(dma_mask), true);
Joerg Roedel60e6a7c2015-12-21 16:53:17 +01001558
Joerg Roedel256e4622016-07-05 14:23:01 +02001559 return (pfn << PAGE_SHIFT);
Joerg Roedela0f51442015-12-21 16:20:09 +01001560}
1561
Joerg Roedel256e4622016-07-05 14:23:01 +02001562static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
1563 unsigned long address,
1564 unsigned int pages)
Joerg Roedel384de722009-05-15 12:30:05 +02001565{
Joerg Roedel256e4622016-07-05 14:23:01 +02001566 pages = __roundup_pow_of_two(pages);
1567 address >>= PAGE_SHIFT;
Joerg Roedel5f6bed52015-12-22 13:34:22 +01001568
Joerg Roedel256e4622016-07-05 14:23:01 +02001569 free_iova_fast(&dma_dom->iovad, address, pages);
Joerg Roedeld3086442008-06-26 21:27:57 +02001570}
1571
Joerg Roedel431b2a22008-07-11 17:14:22 +02001572/****************************************************************************
1573 *
1574 * The next functions belong to the domain allocation. A domain is
1575 * allocated for every IOMMU as the default domain. If device isolation
1576 * is enabled, every device gets its own domain. The most important thing
1577 * about domains is the page table mapping the DMA address space they
1578 * contain.
1579 *
1580 ****************************************************************************/
1581
Joerg Roedelaeb26f52009-11-20 16:44:01 +01001582/*
1583 * This function adds a protection domain to the global protection domain list
1584 */
1585static void add_domain_to_list(struct protection_domain *domain)
1586{
1587 unsigned long flags;
1588
1589 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1590 list_add(&domain->list, &amd_iommu_pd_list);
1591 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1592}
1593
1594/*
1595 * This function removes a protection domain from the global
1596 * protection domain list
1597 */
1598static void del_domain_from_list(struct protection_domain *domain)
1599{
1600 unsigned long flags;
1601
1602 spin_lock_irqsave(&amd_iommu_pd_lock, flags);
1603 list_del(&domain->list);
1604 spin_unlock_irqrestore(&amd_iommu_pd_lock, flags);
1605}
1606
Joerg Roedelec487d12008-06-26 21:27:58 +02001607static u16 domain_id_alloc(void)
1608{
1609 unsigned long flags;
1610 int id;
1611
1612 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1613 id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
1614 BUG_ON(id == 0);
1615 if (id > 0 && id < MAX_DOMAIN_ID)
1616 __set_bit(id, amd_iommu_pd_alloc_bitmap);
1617 else
1618 id = 0;
1619 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1620
1621 return id;
1622}
1623
Joerg Roedela2acfb72008-12-02 18:28:53 +01001624static void domain_id_free(int id)
1625{
1626 unsigned long flags;
1627
1628 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
1629 if (id > 0 && id < MAX_DOMAIN_ID)
1630 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
1631 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
1632}
Joerg Roedela2acfb72008-12-02 18:28:53 +01001633
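/*
 * The macro below generates one freeing helper per page-table level.
 * Each helper walks its 512 entries, skips non-present and large
 * entries, and hands every remaining sub-table to the next lower
 * level's helper before freeing its own page.
 */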
Joerg Roedel5c34c402013-06-20 20:22:58 +02001634#define DEFINE_FREE_PT_FN(LVL, FN) \
1635static void free_pt_##LVL (unsigned long __pt) \
1636{ \
1637 unsigned long p; \
1638 u64 *pt; \
1639 int i; \
1640 \
1641 pt = (u64 *)__pt; \
1642 \
1643 for (i = 0; i < 512; ++i) { \
Joerg Roedel0b3fff52015-06-18 10:48:34 +02001644 /* PTE present? */ \
Joerg Roedel5c34c402013-06-20 20:22:58 +02001645 if (!IOMMU_PTE_PRESENT(pt[i])) \
1646 continue; \
1647 \
Joerg Roedel0b3fff52015-06-18 10:48:34 +02001648 /* Large PTE? */ \
1649 if (PM_PTE_LEVEL(pt[i]) == 0 || \
1650 PM_PTE_LEVEL(pt[i]) == 7) \
1651 continue; \
1652 \
Joerg Roedel5c34c402013-06-20 20:22:58 +02001653 p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
1654 FN(p); \
1655 } \
1656 free_page((unsigned long)pt); \
1657}
1658
1659DEFINE_FREE_PT_FN(l2, free_page)
1660DEFINE_FREE_PT_FN(l3, free_pt_l2)
1661DEFINE_FREE_PT_FN(l4, free_pt_l3)
1662DEFINE_FREE_PT_FN(l5, free_pt_l4)
1663DEFINE_FREE_PT_FN(l6, free_pt_l5)
1664
Joerg Roedel86db2e52008-12-02 18:20:21 +01001665static void free_pagetable(struct protection_domain *domain)
Joerg Roedelec487d12008-06-26 21:27:58 +02001666{
Joerg Roedel5c34c402013-06-20 20:22:58 +02001667 unsigned long root = (unsigned long)domain->pt_root;
Joerg Roedelec487d12008-06-26 21:27:58 +02001668
Joerg Roedel5c34c402013-06-20 20:22:58 +02001669 switch (domain->mode) {
1670 case PAGE_MODE_NONE:
1671 break;
1672 case PAGE_MODE_1_LEVEL:
1673 free_page(root);
1674 break;
1675 case PAGE_MODE_2_LEVEL:
1676 free_pt_l2(root);
1677 break;
1678 case PAGE_MODE_3_LEVEL:
1679 free_pt_l3(root);
1680 break;
1681 case PAGE_MODE_4_LEVEL:
1682 free_pt_l4(root);
1683 break;
1684 case PAGE_MODE_5_LEVEL:
1685 free_pt_l5(root);
1686 break;
1687 case PAGE_MODE_6_LEVEL:
1688 free_pt_l6(root);
1689 break;
1690 default:
1691 BUG();
Joerg Roedelec487d12008-06-26 21:27:58 +02001692 }
Joerg Roedelec487d12008-06-26 21:27:58 +02001693}
1694
Joerg Roedelb16137b2011-11-21 16:50:23 +01001695static void free_gcr3_tbl_level1(u64 *tbl)
1696{
1697 u64 *ptr;
1698 int i;
1699
1700 for (i = 0; i < 512; ++i) {
1701 if (!(tbl[i] & GCR3_VALID))
1702 continue;
1703
Tom Lendacky2543a782017-07-17 16:10:24 -05001704 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
Joerg Roedelb16137b2011-11-21 16:50:23 +01001705
1706 free_page((unsigned long)ptr);
1707 }
1708}
1709
1710static void free_gcr3_tbl_level2(u64 *tbl)
1711{
1712 u64 *ptr;
1713 int i;
1714
1715 for (i = 0; i < 512; ++i) {
1716 if (!(tbl[i] & GCR3_VALID))
1717 continue;
1718
Tom Lendacky2543a782017-07-17 16:10:24 -05001719 ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
Joerg Roedelb16137b2011-11-21 16:50:23 +01001720
1721 free_gcr3_tbl_level1(ptr);
1722 }
1723}
1724
Joerg Roedel52815b72011-11-17 17:24:28 +01001725static void free_gcr3_table(struct protection_domain *domain)
1726{
Joerg Roedelb16137b2011-11-21 16:50:23 +01001727 if (domain->glx == 2)
1728 free_gcr3_tbl_level2(domain->gcr3_tbl);
1729 else if (domain->glx == 1)
1730 free_gcr3_tbl_level1(domain->gcr3_tbl);
Joerg Roedel23d3a982015-08-13 11:15:13 +02001731 else
1732 BUG_ON(domain->glx != 0);
Joerg Roedelb16137b2011-11-21 16:50:23 +01001733
Joerg Roedel52815b72011-11-17 17:24:28 +01001734 free_page((unsigned long)domain->gcr3_tbl);
1735}
1736
Joerg Roedelfca6af62017-06-02 18:13:37 +02001737static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
1738{
Joerg Roedelfca6af62017-06-02 18:13:37 +02001739 domain_flush_tlb(&dom->domain);
1740 domain_flush_complete(&dom->domain);
Joerg Roedelfca6af62017-06-02 18:13:37 +02001741}
1742
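/*
 * Callback for the deferred IOVA flush queue: the IOVA code invokes it
 * before queued ranges are handed out again, so the IOTLB is clean by
 * the time an address is reused.
 */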
Joerg Roedel9003d612017-08-10 17:19:13 +02001743static void iova_domain_flush_tlb(struct iova_domain *iovad)
Joerg Roedelfd621902017-06-02 15:37:26 +02001744{
Joerg Roedel9003d612017-08-10 17:19:13 +02001745 struct dma_ops_domain *dom;
Joerg Roedele241f8e2017-06-02 15:44:57 +02001746
Joerg Roedel9003d612017-08-10 17:19:13 +02001747 dom = container_of(iovad, struct dma_ops_domain, iovad);
Joerg Roedelfca6af62017-06-02 18:13:37 +02001748
1749 dma_ops_domain_flush_tlb(dom);
Joerg Roedelfca6af62017-06-02 18:13:37 +02001750}
1751
Joerg Roedel431b2a22008-07-11 17:14:22 +02001752/*
1753 * Free a domain, only used if something went wrong in the
1754 * allocation path and we need to free an already allocated page table
1755 */
Joerg Roedelec487d12008-06-26 21:27:58 +02001756static void dma_ops_domain_free(struct dma_ops_domain *dom)
1757{
1758 if (!dom)
1759 return;
1760
Joerg Roedelaeb26f52009-11-20 16:44:01 +01001761 del_domain_from_list(&dom->domain);
1762
Joerg Roedel2d4c5152016-07-05 16:21:32 +02001763 put_iova_domain(&dom->iovad);
1764
Joerg Roedel86db2e52008-12-02 18:20:21 +01001765 free_pagetable(&dom->domain);
Joerg Roedelec487d12008-06-26 21:27:58 +02001766
Baoquan Hec3db9012016-09-15 16:50:52 +08001767 if (dom->domain.id)
1768 domain_id_free(dom->domain.id);
1769
Joerg Roedelec487d12008-06-26 21:27:58 +02001770 kfree(dom);
1771}
1772
Joerg Roedel431b2a22008-07-11 17:14:22 +02001773/*
1774 * Allocates a new protection domain usable for the dma_ops functions.
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001775 * It also initializes the page table and the address allocator data
Joerg Roedel431b2a22008-07-11 17:14:22 +02001776 * structures required for the dma_ops interface
1777 */
Joerg Roedel87a64d52009-11-24 17:26:43 +01001778static struct dma_ops_domain *dma_ops_domain_alloc(void)
Joerg Roedelec487d12008-06-26 21:27:58 +02001779{
1780 struct dma_ops_domain *dma_dom;
Joerg Roedelec487d12008-06-26 21:27:58 +02001781
1782 dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
1783 if (!dma_dom)
1784 return NULL;
1785
Joerg Roedel7a5a5662015-06-30 08:56:11 +02001786 if (protection_domain_init(&dma_dom->domain))
Joerg Roedelec487d12008-06-26 21:27:58 +02001787 goto free_dma_dom;
Joerg Roedel7a5a5662015-06-30 08:56:11 +02001788
Joerg Roedelffec2192016-07-26 15:31:23 +02001789 dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
Joerg Roedelec487d12008-06-26 21:27:58 +02001790 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
Joerg Roedel9fdb19d2008-12-02 17:46:25 +01001791 dma_dom->domain.flags = PD_DMA_OPS_MASK;
Joerg Roedelec487d12008-06-26 21:27:58 +02001792 if (!dma_dom->domain.pt_root)
1793 goto free_dma_dom;
Joerg Roedelec487d12008-06-26 21:27:58 +02001794
Zhen Leiaa3ac942017-09-21 16:52:45 +01001795 init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
Joerg Roedel307d5852016-07-05 11:54:04 +02001796
Joerg Roedel9003d612017-08-10 17:19:13 +02001797 if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
Joerg Roedeld4241a22017-06-02 14:55:56 +02001798 goto free_dma_dom;
1799
Joerg Roedel9003d612017-08-10 17:19:13 +02001800 /* Initialize reserved ranges */
1801 copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
Joerg Roedelfca6af62017-06-02 18:13:37 +02001802
Joerg Roedel2d4c5152016-07-05 16:21:32 +02001803 add_domain_to_list(&dma_dom->domain);
1804
Joerg Roedelec487d12008-06-26 21:27:58 +02001805 return dma_dom;
1806
1807free_dma_dom:
1808 dma_ops_domain_free(dma_dom);
1809
1810 return NULL;
1811}
1812
Joerg Roedel431b2a22008-07-11 17:14:22 +02001813/*
Joerg Roedel5b28df62008-12-02 17:49:42 +01001814 * little helper function to check whether a given protection domain is a
1815 * dma_ops domain
1816 */
1817static bool dma_ops_domain(struct protection_domain *domain)
1818{
1819 return domain->flags & PD_DMA_OPS_MASK;
1820}
1821
Gary R Hookff18c4e2017-12-20 09:47:08 -07001822static void set_dte_entry(u16 devid, struct protection_domain *domain,
1823 bool ats, bool ppr)
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001824{
Joerg Roedel132bd682011-11-17 14:18:46 +01001825 u64 pte_root = 0;
Joerg Roedelee6c2862011-11-09 12:06:03 +01001826 u64 flags = 0;
Joerg Roedel863c74e2008-12-02 17:56:36 +01001827
Joerg Roedel132bd682011-11-17 14:18:46 +01001828 if (domain->mode != PAGE_MODE_NONE)
Tom Lendacky2543a782017-07-17 16:10:24 -05001829 pte_root = iommu_virt_to_phys(domain->pt_root);
Joerg Roedel132bd682011-11-17 14:18:46 +01001830
Joerg Roedel38ddf412008-09-11 10:38:32 +02001831 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
1832 << DEV_ENTRY_MODE_SHIFT;
Baoquan He07a80a62017-08-09 16:33:36 +08001833 pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V | DTE_FLAG_TV;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001834
Joerg Roedelee6c2862011-11-09 12:06:03 +01001835 flags = amd_iommu_dev_table[devid].data[1];
1836
Joerg Roedelfd7b5532011-04-05 15:31:08 +02001837 if (ats)
1838 flags |= DTE_FLAG_IOTLB;
1839
Gary R Hookff18c4e2017-12-20 09:47:08 -07001840 if (ppr) {
1841 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1842
1843 if (iommu_feature(iommu, FEATURE_EPHSUP))
1844 pte_root |= 1ULL << DEV_ENTRY_PPR;
1845 }
1846
Joerg Roedel52815b72011-11-17 17:24:28 +01001847 if (domain->flags & PD_IOMMUV2_MASK) {
Tom Lendacky2543a782017-07-17 16:10:24 -05001848 u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
Joerg Roedel52815b72011-11-17 17:24:28 +01001849 u64 glx = domain->glx;
1850 u64 tmp;
1851
1852 pte_root |= DTE_FLAG_GV;
1853 pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
1854
1855 /* First mask out possible old values for GCR3 table */
1856 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
1857 flags &= ~tmp;
1858
1859 tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
1860 flags &= ~tmp;
1861
1862 /* Encode GCR3 table into DTE */
1863 tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
1864 pte_root |= tmp;
1865
1866 tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
1867 flags |= tmp;
1868
1869 tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
1870 flags |= tmp;
1871 }
1872
Baoquan He45a01c42017-08-09 16:33:37 +08001873 flags &= ~DEV_DOMID_MASK;
Joerg Roedelee6c2862011-11-09 12:06:03 +01001874 flags |= domain->id;
1875
1876 amd_iommu_dev_table[devid].data[1] = flags;
1877 amd_iommu_dev_table[devid].data[0] = pte_root;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02001878}
1879
Joerg Roedel15898bb2009-11-24 15:39:42 +01001880static void clear_dte_entry(u16 devid)
Joerg Roedel355bf552008-12-08 12:02:41 +01001881{
Joerg Roedel355bf552008-12-08 12:02:41 +01001882 /* remove entry from the device table seen by the hardware */
Baoquan He07a80a62017-08-09 16:33:36 +08001883 amd_iommu_dev_table[devid].data[0] = DTE_FLAG_V | DTE_FLAG_TV;
Joerg Roedelcbf3ccd2015-10-20 14:59:36 +02001884 amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
Joerg Roedel355bf552008-12-08 12:02:41 +01001885
Joerg Roedelc5cca142009-10-09 18:31:20 +02001886 amd_iommu_apply_erratum_63(devid);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001887}
1888
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001889static void do_attach(struct iommu_dev_data *dev_data,
1890 struct protection_domain *domain)
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001891{
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001892 struct amd_iommu *iommu;
Joerg Roedele25bfb52015-10-20 17:33:38 +02001893 u16 alias;
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001894 bool ats;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001895
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001896 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedele3156042016-04-08 15:12:24 +02001897 alias = dev_data->alias;
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001898 ats = dev_data->ats.enabled;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001899
1900 /* Update data structures */
1901 dev_data->domain = domain;
1902 list_add(&dev_data->list, &domain->dev_list);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001903
1904 /* Do reference counting */
1905 domain->dev_iommu[iommu->index] += 1;
1906 domain->dev_cnt += 1;
1907
Joerg Roedele25bfb52015-10-20 17:33:38 +02001908 /* Update device table */
Gary R Hookff18c4e2017-12-20 09:47:08 -07001909 set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
Joerg Roedele25bfb52015-10-20 17:33:38 +02001910 if (alias != dev_data->devid)
Gary R Hookff18c4e2017-12-20 09:47:08 -07001911 set_dte_entry(alias, domain, ats, dev_data->iommu_v2);
Joerg Roedele25bfb52015-10-20 17:33:38 +02001912
Joerg Roedel6c542042011-06-09 17:07:31 +02001913 device_flush_dte(dev_data);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001914}
1915
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001916static void do_detach(struct iommu_dev_data *dev_data)
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001917{
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001918 struct amd_iommu *iommu;
Joerg Roedele25bfb52015-10-20 17:33:38 +02001919 u16 alias;
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001920
Joerg Roedel5adad992015-10-09 16:23:33 +02001921 /*
1922 * First check if the device is still attached. It might already
1923 * be detached from its domain because the generic
1924 * iommu_detach_group code detached it and we try again here in
1925 * our alias handling.
1926 */
1927 if (!dev_data->domain)
1928 return;
1929
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001930 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedele3156042016-04-08 15:12:24 +02001931 alias = dev_data->alias;
Joerg Roedelc5cca142009-10-09 18:31:20 +02001932
Joerg Roedelc4596112009-11-20 14:57:32 +01001933 /* decrease reference counters */
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001934 dev_data->domain->dev_iommu[iommu->index] -= 1;
1935 dev_data->domain->dev_cnt -= 1;
Joerg Roedel355bf552008-12-08 12:02:41 +01001936
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001937 /* Update data structures */
1938 dev_data->domain = NULL;
1939 list_del(&dev_data->list);
Joerg Roedelf62dda62011-06-09 12:55:35 +02001940 clear_dte_entry(dev_data->devid);
Joerg Roedele25bfb52015-10-20 17:33:38 +02001941 if (alias != dev_data->devid)
1942 clear_dte_entry(alias);
Joerg Roedel7f760dd2009-11-26 14:49:59 +01001943
1944 /* Flush the DTE entry */
Joerg Roedel6c542042011-06-09 17:07:31 +02001945 device_flush_dte(dev_data);
Joerg Roedel15898bb2009-11-24 15:39:42 +01001946}
1947
1948/*
1949 * If a device is not yet associated with a domain, this function
1950 * attaches it and makes it visible to the hardware
1951 */
Joerg Roedelec9e79e2011-06-09 17:25:50 +02001952static int __attach_device(struct iommu_dev_data *dev_data,
Joerg Roedel15898bb2009-11-24 15:39:42 +01001953 struct protection_domain *domain)
1954{
Julia Lawall84fe6c12010-05-27 12:31:51 +02001955 int ret;
Joerg Roedel657cbb62009-11-23 15:26:46 +01001956
Joerg Roedel272e4f92015-10-20 17:33:37 +02001957 /*
1958 * Must be called with IRQs disabled. Warn here to detect early
1959 * when they are not.
1960 */
1961 WARN_ON(!irqs_disabled());
1962
Joerg Roedel15898bb2009-11-24 15:39:42 +01001963 /* lock domain */
1964 spin_lock(&domain->lock);
1965
Joerg Roedel397111a2014-08-05 17:31:51 +02001966 ret = -EBUSY;
Joerg Roedel150952f2015-10-20 17:33:35 +02001967 if (dev_data->domain != NULL)
Joerg Roedel397111a2014-08-05 17:31:51 +02001968 goto out_unlock;
Joerg Roedel24100052009-11-25 15:59:57 +01001969
Joerg Roedel397111a2014-08-05 17:31:51 +02001970 /* Attach alias group root */
Joerg Roedel150952f2015-10-20 17:33:35 +02001971 do_attach(dev_data, domain);
Joerg Roedel24100052009-11-25 15:59:57 +01001972
Julia Lawall84fe6c12010-05-27 12:31:51 +02001973 ret = 0;
1974
1975out_unlock:
1976
Joerg Roedel355bf552008-12-08 12:02:41 +01001977 /* ready */
1978 spin_unlock(&domain->lock);
Joerg Roedel21129f72009-09-01 11:59:42 +02001979
Julia Lawall84fe6c12010-05-27 12:31:51 +02001980 return ret;
Joerg Roedel15898bb2009-11-24 15:39:42 +01001981}
1982
Joerg Roedel52815b72011-11-17 17:24:28 +01001983
1984static void pdev_iommuv2_disable(struct pci_dev *pdev)
1985{
1986 pci_disable_ats(pdev);
1987 pci_disable_pri(pdev);
1988 pci_disable_pasid(pdev);
1989}
1990
Joerg Roedel6a113dd2011-12-01 12:04:58 +01001991/* FIXME: Change generic reset-function to do the same */
1992static int pri_reset_while_enabled(struct pci_dev *pdev)
1993{
1994 u16 control;
1995 int pos;
1996
Joerg Roedel46277b72011-12-07 14:34:02 +01001997 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
Joerg Roedel6a113dd2011-12-01 12:04:58 +01001998 if (!pos)
1999 return -EINVAL;
2000
Joerg Roedel46277b72011-12-07 14:34:02 +01002001 pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
2002 control |= PCI_PRI_CTRL_RESET;
2003 pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002004
2005 return 0;
2006}
2007
Joerg Roedel52815b72011-11-17 17:24:28 +01002008static int pdev_iommuv2_enable(struct pci_dev *pdev)
2009{
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002010 bool reset_enable;
2011 int reqs, ret;
2012
2013 /* FIXME: Hardcode number of outstanding requests for now */
2014 reqs = 32;
2015 if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
2016 reqs = 1;
2017 reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
Joerg Roedel52815b72011-11-17 17:24:28 +01002018
2019 /* Only allow access to user-accessible pages */
2020 ret = pci_enable_pasid(pdev, 0);
2021 if (ret)
2022 goto out_err;
2023
2024 /* First reset the PRI state of the device */
2025 ret = pci_reset_pri(pdev);
2026 if (ret)
2027 goto out_err;
2028
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002029 /* Enable PRI */
2030 ret = pci_enable_pri(pdev, reqs);
Joerg Roedel52815b72011-11-17 17:24:28 +01002031 if (ret)
2032 goto out_err;
2033
Joerg Roedel6a113dd2011-12-01 12:04:58 +01002034 if (reset_enable) {
2035 ret = pri_reset_while_enabled(pdev);
2036 if (ret)
2037 goto out_err;
2038 }
2039
Joerg Roedel52815b72011-11-17 17:24:28 +01002040 ret = pci_enable_ats(pdev, PAGE_SHIFT);
2041 if (ret)
2042 goto out_err;
2043
2044 return 0;
2045
2046out_err:
2047 pci_disable_pri(pdev);
2048 pci_disable_pasid(pdev);
2049
2050 return ret;
2051}
2052
Joerg Roedelc99afa22011-11-21 18:19:25 +01002053/* FIXME: Move this to PCI code */
Joerg Roedela3b93122012-04-12 12:49:26 +02002054#define PCI_PRI_TLP_OFF (1 << 15)
Joerg Roedelc99afa22011-11-21 18:19:25 +01002055
Joerg Roedel98f1ad22012-07-06 13:28:37 +02002056static bool pci_pri_tlp_required(struct pci_dev *pdev)
Joerg Roedelc99afa22011-11-21 18:19:25 +01002057{
Joerg Roedela3b93122012-04-12 12:49:26 +02002058 u16 status;
Joerg Roedelc99afa22011-11-21 18:19:25 +01002059 int pos;
2060
Joerg Roedel46277b72011-12-07 14:34:02 +01002061 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
Joerg Roedelc99afa22011-11-21 18:19:25 +01002062 if (!pos)
2063 return false;
2064
Joerg Roedela3b93122012-04-12 12:49:26 +02002065 pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
Joerg Roedelc99afa22011-11-21 18:19:25 +01002066
Joerg Roedela3b93122012-04-12 12:49:26 +02002067 return (status & PCI_PRI_TLP_OFF) ? true : false;
Joerg Roedelc99afa22011-11-21 18:19:25 +01002068}
2069
Joerg Roedel15898bb2009-11-24 15:39:42 +01002070/*
Frank Arnolddf805ab2012-08-27 19:21:04 +02002071 * If a device is not yet associated with a domain, this function
Joerg Roedel15898bb2009-11-24 15:39:42 +01002072 * attaches it and makes it visible to the hardware
2073 */
2074static int attach_device(struct device *dev,
2075 struct protection_domain *domain)
2076{
Wan Zongshun2bf9a0a2016-04-01 09:06:03 -04002077 struct pci_dev *pdev;
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002078 struct iommu_dev_data *dev_data;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002079 unsigned long flags;
2080 int ret;
2081
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002082 dev_data = get_dev_data(dev);
2083
Wan Zongshun2bf9a0a2016-04-01 09:06:03 -04002084 if (!dev_is_pci(dev))
2085 goto skip_ats_check;
2086
2087 pdev = to_pci_dev(dev);
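	/*
	 * IOMMUv2 domains require PASID, PRI and ATS on the device;
	 * regular domains only enable ATS when both the IOMMU and the
	 * device support it.
	 */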
Joerg Roedel52815b72011-11-17 17:24:28 +01002088 if (domain->flags & PD_IOMMUV2_MASK) {
Joerg Roedel02ca2022015-07-28 16:58:49 +02002089 if (!dev_data->passthrough)
Joerg Roedel52815b72011-11-17 17:24:28 +01002090 return -EINVAL;
2091
Joerg Roedel02ca2022015-07-28 16:58:49 +02002092 if (dev_data->iommu_v2) {
2093 if (pdev_iommuv2_enable(pdev) != 0)
2094 return -EINVAL;
Joerg Roedel52815b72011-11-17 17:24:28 +01002095
Joerg Roedel02ca2022015-07-28 16:58:49 +02002096 dev_data->ats.enabled = true;
2097 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
2098 dev_data->pri_tlp = pci_pri_tlp_required(pdev);
2099 }
Joerg Roedel52815b72011-11-17 17:24:28 +01002100 } else if (amd_iommu_iotlb_sup &&
2101 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002102 dev_data->ats.enabled = true;
2103 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
2104 }
Joerg Roedelfd7b5532011-04-05 15:31:08 +02002105
Wan Zongshun2bf9a0a2016-04-01 09:06:03 -04002106skip_ats_check:
Joerg Roedel15898bb2009-11-24 15:39:42 +01002107 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002108 ret = __attach_device(dev_data, domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002109 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2110
2111 /*
2112 * We might boot into a crash-kernel here. The crashed kernel
2113 * left the caches in the IOMMU dirty. So we have to flush
2114 * here to evict all dirty stuff.
2115 */
Joerg Roedel17b124b2011-04-06 18:01:35 +02002116 domain_flush_tlb_pde(domain);
Joerg Roedel15898bb2009-11-24 15:39:42 +01002117
2118 return ret;
2119}
2120
2121/*
2122 * Removes a device from a protection domain (unlocked)
2123 */
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002124static void __detach_device(struct iommu_dev_data *dev_data)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002125{
Joerg Roedel2ca76272010-01-22 16:45:31 +01002126 struct protection_domain *domain;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002127
Joerg Roedel272e4f92015-10-20 17:33:37 +02002128 /*
2129 * Must be called with IRQs disabled. Warn here to detect early
2130 * when they are not.
2131 */
2132 WARN_ON(!irqs_disabled());
2133
Joerg Roedelf34c73f2015-10-20 17:33:34 +02002134 if (WARN_ON(!dev_data->domain))
2135 return;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002136
Joerg Roedel2ca76272010-01-22 16:45:31 +01002137 domain = dev_data->domain;
2138
Joerg Roedelf1dd0a82015-10-20 17:33:36 +02002139 spin_lock(&domain->lock);
Joerg Roedel24100052009-11-25 15:59:57 +01002140
Joerg Roedel150952f2015-10-20 17:33:35 +02002141 do_detach(dev_data);
Joerg Roedel71f77582011-06-09 19:03:15 +02002142
Joerg Roedelf1dd0a82015-10-20 17:33:36 +02002143 spin_unlock(&domain->lock);
Joerg Roedel355bf552008-12-08 12:02:41 +01002144}
2145
2146/*
2147 * Removes a device from a protection domain (with devtable_lock held)
2148 */
Joerg Roedel15898bb2009-11-24 15:39:42 +01002149static void detach_device(struct device *dev)
Joerg Roedel355bf552008-12-08 12:02:41 +01002150{
Joerg Roedel52815b72011-11-17 17:24:28 +01002151 struct protection_domain *domain;
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002152 struct iommu_dev_data *dev_data;
Joerg Roedel355bf552008-12-08 12:02:41 +01002153 unsigned long flags;
2154
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002155 dev_data = get_dev_data(dev);
Joerg Roedel52815b72011-11-17 17:24:28 +01002156 domain = dev_data->domain;
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002157
Joerg Roedel355bf552008-12-08 12:02:41 +01002158 /* lock device table */
2159 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
Joerg Roedelec9e79e2011-06-09 17:25:50 +02002160 __detach_device(dev_data);
Joerg Roedel355bf552008-12-08 12:02:41 +01002161 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
Joerg Roedelfd7b5532011-04-05 15:31:08 +02002162
Wan Zongshun2bf9a0a2016-04-01 09:06:03 -04002163 if (!dev_is_pci(dev))
2164 return;
2165
Joerg Roedel02ca2022015-07-28 16:58:49 +02002166 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
Joerg Roedel52815b72011-11-17 17:24:28 +01002167 pdev_iommuv2_disable(to_pci_dev(dev));
2168 else if (dev_data->ats.enabled)
Joerg Roedelea61cdd2011-06-09 12:56:30 +02002169 pci_disable_ats(to_pci_dev(dev));
Joerg Roedel52815b72011-11-17 17:24:28 +01002170
2171 dev_data->ats.enabled = false;
Joerg Roedel355bf552008-12-08 12:02:41 +01002172}
Joerg Roedele275a2a2008-12-10 18:27:25 +01002173
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002174static int amd_iommu_add_device(struct device *dev)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002175{
Joerg Roedel71f77582011-06-09 19:03:15 +02002176 struct iommu_dev_data *dev_data;
Joerg Roedel07ee8692015-05-28 18:41:42 +02002177 struct iommu_domain *domain;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002178 struct amd_iommu *iommu;
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002179 int ret, devid;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002180
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002181 if (!check_device(dev) || get_dev_data(dev))
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002182 return 0;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002183
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002184 devid = get_device_id(dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02002185 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002186 return devid;
2187
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002188 iommu = amd_iommu_rlookup_table[devid];
Joerg Roedele275a2a2008-12-10 18:27:25 +01002189
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002190 ret = iommu_init_device(dev);
Joerg Roedel4d58b8a2015-06-11 09:21:39 +02002191 if (ret) {
2192 if (ret != -ENOTSUPP)
2193 pr_err("Failed to initialize device %s - trying to proceed anyway\n",
2194 dev_name(dev));
Joerg Roedel657cbb62009-11-23 15:26:46 +01002195
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002196 iommu_ignore_device(dev);
Christoph Hellwigfec777c2018-03-19 11:38:15 +01002197 dev->dma_ops = &dma_direct_ops;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002198 goto out;
2199 }
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002200 init_iommu_group(dev);
Joerg Roedele275a2a2008-12-10 18:27:25 +01002201
Joerg Roedel07ee8692015-05-28 18:41:42 +02002202 dev_data = get_dev_data(dev);
Joerg Roedel4d58b8a2015-06-11 09:21:39 +02002203
2204 BUG_ON(!dev_data);
2205
Joerg Roedel1e6a7b02015-07-28 16:58:48 +02002206 if (iommu_pass_through || dev_data->iommu_v2)
Joerg Roedel07ee8692015-05-28 18:41:42 +02002207 iommu_request_dm_for_dev(dev);
2208
2209 /* Domains are initialized for this device - have a look at what we ended up with */
2210 domain = iommu_get_domain_for_dev(dev);
Joerg Roedel32302322015-07-28 16:58:50 +02002211 if (domain->type == IOMMU_DOMAIN_IDENTITY)
Joerg Roedel07ee8692015-05-28 18:41:42 +02002212 dev_data->passthrough = true;
Joerg Roedel32302322015-07-28 16:58:50 +02002213 else
Bart Van Assche56579332017-01-20 13:04:02 -08002214 dev->dma_ops = &amd_iommu_dma_ops;
Joerg Roedele275a2a2008-12-10 18:27:25 +01002215
2216out:
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002217 iommu_completion_wait(iommu);
2218
Joerg Roedele275a2a2008-12-10 18:27:25 +01002219 return 0;
2220}
2221
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002222static void amd_iommu_remove_device(struct device *dev)
Joerg Roedel8638c492009-12-10 11:12:25 +01002223{
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002224 struct amd_iommu *iommu;
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002225 int devid;
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002226
2227 if (!check_device(dev))
2228 return;
2229
2230 devid = get_device_id(dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02002231 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002232 return;
2233
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02002234 iommu = amd_iommu_rlookup_table[devid];
2235
2236 iommu_uninit_device(dev);
2237 iommu_completion_wait(iommu);
Joerg Roedel8638c492009-12-10 11:12:25 +01002238}
2239
Wan Zongshunb097d112016-04-01 09:06:04 -04002240static struct iommu_group *amd_iommu_device_group(struct device *dev)
2241{
2242 if (dev_is_pci(dev))
2243 return pci_device_group(dev);
2244
2245 return acpihid_device_group(dev);
2246}
2247
Joerg Roedel431b2a22008-07-11 17:14:22 +02002248/*****************************************************************************
2249 *
2250 * The next functions belong to the dma_ops mapping/unmapping code.
2251 *
2252 *****************************************************************************/
2253
2254/*
2255 * In the dma_ops path we only have the struct device. This function
2256 * finds the corresponding IOMMU, the protection domain and the
2257 * requestor id for a given device.
2258 * If the device is not yet associated with a domain this is also done
2259 * in this function.
2260 */
Joerg Roedel94f6d192009-11-24 16:40:02 +01002261static struct protection_domain *get_domain(struct device *dev)
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002262{
Joerg Roedel94f6d192009-11-24 16:40:02 +01002263 struct protection_domain *domain;
Baoquan Hedf3f7a62017-08-09 16:33:41 +08002264 struct iommu_domain *io_domain;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002265
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002266 if (!check_device(dev))
Joerg Roedel94f6d192009-11-24 16:40:02 +01002267 return ERR_PTR(-EINVAL);
Joerg Roedeldbcc1122008-09-04 15:04:26 +02002268
Joerg Roedeld26592a2016-07-07 15:31:13 +02002269 domain = get_dev_data(dev)->domain;
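	/*
	 * If the attachment was deferred (e.g. when the IOMMU was found
	 * pre-enabled by a previous kernel), attach the device to its
	 * domain here on first use.
	 */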
Baoquan Hedf3f7a62017-08-09 16:33:41 +08002270 if (domain == NULL && get_dev_data(dev)->defer_attach) {
2271 get_dev_data(dev)->defer_attach = false;
2272 io_domain = iommu_get_domain_for_dev(dev);
2273 domain = to_pdomain(io_domain);
2274 attach_device(dev, domain);
2275 }
Baoquan Heec62b1a2017-08-24 21:13:57 +08002276 if (domain == NULL)
2277 return ERR_PTR(-EBUSY);
2278
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002279 if (!dma_ops_domain(domain))
Joerg Roedel94f6d192009-11-24 16:40:02 +01002280 return ERR_PTR(-EBUSY);
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002281
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002282 return domain;
Joerg Roedelb20ac0d2008-06-26 21:27:59 +02002283}
2284
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002285static void update_device_table(struct protection_domain *domain)
2286{
Joerg Roedel492667d2009-11-27 13:25:47 +01002287 struct iommu_dev_data *dev_data;
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002288
Joerg Roedel3254de62016-07-26 15:18:54 +02002289 list_for_each_entry(dev_data, &domain->dev_list, list) {
Gary R Hookff18c4e2017-12-20 09:47:08 -07002290 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
2291 dev_data->iommu_v2);
Joerg Roedel3254de62016-07-26 15:18:54 +02002292
2293 if (dev_data->devid == dev_data->alias)
2294 continue;
2295
2296 /* There is an alias, update device table entry for it */
Gary R Hookff18c4e2017-12-20 09:47:08 -07002297 set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled,
2298 dev_data->iommu_v2);
Joerg Roedel3254de62016-07-26 15:18:54 +02002299 }
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002300}
2301
2302static void update_domain(struct protection_domain *domain)
2303{
2304 if (!domain->updated)
2305 return;
2306
2307 update_device_table(domain);
Joerg Roedel17b124b2011-04-06 18:01:35 +02002308
2309 domain_flush_devices(domain);
2310 domain_flush_tlb_pde(domain);
Joerg Roedel04bfdd82009-09-02 16:00:23 +02002311
2312 domain->updated = false;
2313}
2314
Joerg Roedelf37f7f32016-07-08 11:47:22 +02002315static int dir2prot(enum dma_data_direction direction)
2316{
2317 if (direction == DMA_TO_DEVICE)
2318 return IOMMU_PROT_IR;
2319 else if (direction == DMA_FROM_DEVICE)
2320 return IOMMU_PROT_IW;
2321 else if (direction == DMA_BIDIRECTIONAL)
2322 return IOMMU_PROT_IW | IOMMU_PROT_IR;
2323 else
2324 return 0;
2325}
Baoquan Hedaae2d22017-08-09 16:33:43 +08002326
Joerg Roedel431b2a22008-07-11 17:14:22 +02002327/*
Joerg Roedel431b2a22008-07-11 17:14:22 +02002328 * This function contains common code for mapping a physically
Joerg Roedel24f81162008-12-08 14:25:39 +01002329 * contiguous memory region into DMA address space. It is used by all
2330 * mapping functions provided with this IOMMU driver.
Joerg Roedel431b2a22008-07-11 17:14:22 +02002331 * Must be called with the domain lock held.
2332 */
Joerg Roedelcb76c322008-06-26 21:28:00 +02002333static dma_addr_t __map_single(struct device *dev,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002334 struct dma_ops_domain *dma_dom,
2335 phys_addr_t paddr,
2336 size_t size,
Joerg Roedelf37f7f32016-07-08 11:47:22 +02002337 enum dma_data_direction direction,
Joerg Roedel832a90c2008-09-18 15:54:23 +02002338 u64 dma_mask)
Joerg Roedelcb76c322008-06-26 21:28:00 +02002339{
2340 dma_addr_t offset = paddr & ~PAGE_MASK;
Joerg Roedel53812c12009-05-12 12:17:38 +02002341 dma_addr_t address, start, ret;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002342 unsigned int pages;
Joerg Roedel518d9b42016-07-05 14:39:47 +02002343 int prot = 0;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002344 int i;
2345
Joerg Roedele3c449f2008-10-15 22:02:11 -07002346 pages = iommu_num_pages(paddr, size, PAGE_SIZE);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002347 paddr &= PAGE_MASK;
2348
Joerg Roedel256e4622016-07-05 14:23:01 +02002349 address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
Christoph Hellwiga8695722017-05-21 13:26:45 +02002350 if (address == AMD_IOMMU_MAPPING_ERROR)
Joerg Roedel266a3bd2015-12-21 18:54:24 +01002351 goto out;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002352
Joerg Roedelf37f7f32016-07-08 11:47:22 +02002353 prot = dir2prot(direction);
Joerg Roedel518d9b42016-07-05 14:39:47 +02002354
Joerg Roedelcb76c322008-06-26 21:28:00 +02002355 start = address;
2356 for (i = 0; i < pages; ++i) {
Joerg Roedel518d9b42016-07-05 14:39:47 +02002357 ret = iommu_map_page(&dma_dom->domain, start, paddr,
2358 PAGE_SIZE, prot, GFP_ATOMIC);
2359 if (ret)
Joerg Roedel53812c12009-05-12 12:17:38 +02002360 goto out_unmap;
2361
Joerg Roedelcb76c322008-06-26 21:28:00 +02002362 paddr += PAGE_SIZE;
2363 start += PAGE_SIZE;
2364 }
2365 address += offset;
2366
Joerg Roedelab7032b2015-12-21 18:47:11 +01002367 if (unlikely(amd_iommu_np_cache)) {
Joerg Roedel17b124b2011-04-06 18:01:35 +02002368 domain_flush_pages(&dma_dom->domain, address, size);
Joerg Roedelab7032b2015-12-21 18:47:11 +01002369 domain_flush_complete(&dma_dom->domain);
2370 }
Joerg Roedel270cab242008-09-04 15:49:46 +02002371
Joerg Roedelcb76c322008-06-26 21:28:00 +02002372out:
2373 return address;
Joerg Roedel53812c12009-05-12 12:17:38 +02002374
2375out_unmap:
2376
2377 for (--i; i >= 0; --i) {
2378 start -= PAGE_SIZE;
Joerg Roedel518d9b42016-07-05 14:39:47 +02002379 iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
Joerg Roedel53812c12009-05-12 12:17:38 +02002380 }
2381
Joerg Roedel256e4622016-07-05 14:23:01 +02002382 domain_flush_tlb(&dma_dom->domain);
2383 domain_flush_complete(&dma_dom->domain);
2384
2385 dma_ops_free_iova(dma_dom, address, pages);
Joerg Roedel53812c12009-05-12 12:17:38 +02002386
Christoph Hellwiga8695722017-05-21 13:26:45 +02002387 return AMD_IOMMU_MAPPING_ERROR;
Joerg Roedelcb76c322008-06-26 21:28:00 +02002388}
2389
Joerg Roedel431b2a22008-07-11 17:14:22 +02002390/*
2391 * Does the reverse of the __map_single function. Must be called with
2392 * the domain lock held too
2393 */
Joerg Roedelcd8c82e2009-11-23 19:33:56 +01002394static void __unmap_single(struct dma_ops_domain *dma_dom,
Joerg Roedelcb76c322008-06-26 21:28:00 +02002395 dma_addr_t dma_addr,
2396 size_t size,
2397 int dir)
2398{
2399 dma_addr_t i, start;
2400 unsigned int pages;
2401
Joerg Roedele3c449f2008-10-15 22:02:11 -07002402 pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002403 dma_addr &= PAGE_MASK;
2404 start = dma_addr;
2405
2406 for (i = 0; i < pages; ++i) {
Joerg Roedel518d9b42016-07-05 14:39:47 +02002407 iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
Joerg Roedelcb76c322008-06-26 21:28:00 +02002408 start += PAGE_SIZE;
2409 }
2410
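	/*
	 * With unmap-flushing enabled the IOTLB is flushed synchronously
	 * and the IOVA range is released right away; otherwise the range
	 * is queued and freed lazily after a deferred flush.
	 */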
Joerg Roedelb1516a12016-07-06 13:07:22 +02002411 if (amd_iommu_unmap_flush) {
2412 dma_ops_free_iova(dma_dom, dma_addr, pages);
2413 domain_flush_tlb(&dma_dom->domain);
2414 domain_flush_complete(&dma_dom->domain);
2415 } else {
Joerg Roedel9003d612017-08-10 17:19:13 +02002416 pages = __roundup_pow_of_two(pages);
2417 queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
Joerg Roedelb1516a12016-07-06 13:07:22 +02002418 }
Joerg Roedelcb76c322008-06-26 21:28:00 +02002419}
2420
Joerg Roedel431b2a22008-07-11 17:14:22 +02002421/*
2422 * The exported map_single function for dma_ops.
2423 */
FUJITA Tomonori51491362009-01-05 23:47:25 +09002424static dma_addr_t map_page(struct device *dev, struct page *page,
2425 unsigned long offset, size_t size,
2426 enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07002427 unsigned long attrs)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002428{
FUJITA Tomonori51491362009-01-05 23:47:25 +09002429 phys_addr_t paddr = page_to_phys(page) + offset;
Joerg Roedel92d420e2015-12-21 19:31:33 +01002430 struct protection_domain *domain;
Joerg Roedelb3311b02016-07-08 13:31:31 +02002431 struct dma_ops_domain *dma_dom;
Joerg Roedel92d420e2015-12-21 19:31:33 +01002432 u64 dma_mask;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002433
Joerg Roedel94f6d192009-11-24 16:40:02 +01002434 domain = get_domain(dev);
2435 if (PTR_ERR(domain) == -EINVAL)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002436 return (dma_addr_t)paddr;
Joerg Roedel94f6d192009-11-24 16:40:02 +01002437 else if (IS_ERR(domain))
Christoph Hellwiga8695722017-05-21 13:26:45 +02002438 return AMD_IOMMU_MAPPING_ERROR;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002439
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002440 dma_mask = *dev->dma_mask;
Joerg Roedelb3311b02016-07-08 13:31:31 +02002441 dma_dom = to_dma_ops_domain(domain);
Joerg Roedelf99c0f12009-11-23 16:52:56 +01002442
Joerg Roedelb3311b02016-07-08 13:31:31 +02002443 return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
Joerg Roedel4da70b92008-06-26 21:28:01 +02002444}
2445
Joerg Roedel431b2a22008-07-11 17:14:22 +02002446/*
2447 * The exported unmap_single function for dma_ops.
2448 */
FUJITA Tomonori51491362009-01-05 23:47:25 +09002449static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07002450 enum dma_data_direction dir, unsigned long attrs)
Joerg Roedel4da70b92008-06-26 21:28:01 +02002451{
Joerg Roedel4da70b92008-06-26 21:28:01 +02002452 struct protection_domain *domain;
Joerg Roedelb3311b02016-07-08 13:31:31 +02002453 struct dma_ops_domain *dma_dom;
Joerg Roedel4da70b92008-06-26 21:28:01 +02002454
Joerg Roedel94f6d192009-11-24 16:40:02 +01002455 domain = get_domain(dev);
2456 if (IS_ERR(domain))
Joerg Roedel5b28df62008-12-02 17:49:42 +01002457 return;
2458
Joerg Roedelb3311b02016-07-08 13:31:31 +02002459 dma_dom = to_dma_ops_domain(domain);
2460
2461 __unmap_single(dma_dom, dma_addr, size, dir);
Joerg Roedel4da70b92008-06-26 21:28:01 +02002462}
2463
Joerg Roedel80187fd2016-07-06 17:20:54 +02002464static int sg_num_pages(struct device *dev,
2465 struct scatterlist *sglist,
2466 int nelems)
2467{
2468 unsigned long mask, boundary_size;
2469 struct scatterlist *s;
2470 int i, npages = 0;
2471
2472 mask = dma_get_seg_boundary(dev);
2473 boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
2474 1UL << (BITS_PER_LONG - PAGE_SHIFT);
2475
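	/*
	 * Sum up the pages needed for all segments, padding at DMA
	 * segment boundaries; s->dma_address temporarily holds each
	 * segment's byte offset into the mapping.
	 */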
2476 for_each_sg(sglist, s, nelems, i) {
2477 int p, n;
2478
2479 s->dma_address = npages << PAGE_SHIFT;
2480 p = npages % boundary_size;
2481 n = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
2482 if (p + n > boundary_size)
2483 npages += boundary_size - p;
2484 npages += n;
2485 }
2486
2487 return npages;
2488}
2489
Joerg Roedel431b2a22008-07-11 17:14:22 +02002490/*
Joerg Roedel431b2a22008-07-11 17:14:22 +02002491 * The exported map_sg function for dma_ops (handles scatter-gather
2492 * lists).
2493 */
Joerg Roedel65b050a2008-06-26 21:28:02 +02002494static int map_sg(struct device *dev, struct scatterlist *sglist,
Joerg Roedel80187fd2016-07-06 17:20:54 +02002495 int nelems, enum dma_data_direction direction,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07002496 unsigned long attrs)
Joerg Roedel65b050a2008-06-26 21:28:02 +02002497{
Joerg Roedel80187fd2016-07-06 17:20:54 +02002498 int mapped_pages = 0, npages = 0, prot = 0, i;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002499 struct protection_domain *domain;
Joerg Roedel80187fd2016-07-06 17:20:54 +02002500 struct dma_ops_domain *dma_dom;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002501 struct scatterlist *s;
Joerg Roedel80187fd2016-07-06 17:20:54 +02002502 unsigned long address;
Joerg Roedel832a90c2008-09-18 15:54:23 +02002503 u64 dma_mask;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002504
Joerg Roedel94f6d192009-11-24 16:40:02 +01002505 domain = get_domain(dev);
Joerg Roedela0e191b2013-04-09 15:04:36 +02002506 if (IS_ERR(domain))
Joerg Roedel94f6d192009-11-24 16:40:02 +01002507 return 0;
Joerg Roedeldbcc1122008-09-04 15:04:26 +02002508
Joerg Roedelb3311b02016-07-08 13:31:31 +02002509 dma_dom = to_dma_ops_domain(domain);
Joerg Roedel832a90c2008-09-18 15:54:23 +02002510 dma_mask = *dev->dma_mask;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002511
Joerg Roedel80187fd2016-07-06 17:20:54 +02002512 npages = sg_num_pages(dev, sglist, nelems);
2513
2514 address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
Christoph Hellwiga8695722017-05-21 13:26:45 +02002515 if (address == AMD_IOMMU_MAPPING_ERROR)
Joerg Roedel80187fd2016-07-06 17:20:54 +02002516 goto out_err;
2517
2518 prot = dir2prot(direction);
2519
2520 /* Map all sg entries */
Joerg Roedel65b050a2008-06-26 21:28:02 +02002521 for_each_sg(sglist, s, nelems, i) {
Joerg Roedel80187fd2016-07-06 17:20:54 +02002522 int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002523
Joerg Roedel80187fd2016-07-06 17:20:54 +02002524 for (j = 0; j < pages; ++j) {
2525 unsigned long bus_addr, phys_addr;
2526 int ret;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002527
Joerg Roedel80187fd2016-07-06 17:20:54 +02002528 bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
2529 phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
2530 ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
2531 if (ret)
2532 goto out_unmap;
2533
2534 mapped_pages += 1;
2535 }
Joerg Roedel65b050a2008-06-26 21:28:02 +02002536 }
2537
Joerg Roedel80187fd2016-07-06 17:20:54 +02002538 /* Everything is mapped - write the right values into s->dma_address */
2539 for_each_sg(sglist, s, nelems, i) {
2540 s->dma_address += address + s->offset;
2541 s->dma_length = s->length;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002542 }
2543
Joerg Roedel80187fd2016-07-06 17:20:54 +02002544 return nelems;
2545
2546out_unmap:
2547 pr_err("%s: IOMMU mapping error in map_sg (io-pages: %d)\n",
2548 dev_name(dev), npages);
2549
2550 for_each_sg(sglist, s, nelems, i) {
2551 int j, pages = iommu_num_pages(sg_phys(s), s->length, PAGE_SIZE);
2552
2553 for (j = 0; j < pages; ++j) {
2554 unsigned long bus_addr;
2555
2556 bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
2557 iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
2558
2559 if (--mapped_pages == 0)
2560 goto out_free_iova;
2561 }
2562 }
2563
2564out_free_iova:
2565 free_iova_fast(&dma_dom->iovad, address, npages);
2566
2567out_err:
Joerg Roedel92d420e2015-12-21 19:31:33 +01002568 return 0;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002569}
2570
Joerg Roedel431b2a22008-07-11 17:14:22 +02002571/*
2572 * The exported unmap_sg function for dma_ops (handles scatter-gather
2573 * lists).
2574 */
Joerg Roedel65b050a2008-06-26 21:28:02 +02002575static void unmap_sg(struct device *dev, struct scatterlist *sglist,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +09002576 int nelems, enum dma_data_direction dir,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07002577 unsigned long attrs)
Joerg Roedel65b050a2008-06-26 21:28:02 +02002578{
Joerg Roedel65b050a2008-06-26 21:28:02 +02002579 struct protection_domain *domain;
Joerg Roedelb3311b02016-07-08 13:31:31 +02002580 struct dma_ops_domain *dma_dom;
Joerg Roedel80187fd2016-07-06 17:20:54 +02002581 unsigned long startaddr;
2582 int npages = 2;
Joerg Roedel65b050a2008-06-26 21:28:02 +02002583
Joerg Roedel94f6d192009-11-24 16:40:02 +01002584 domain = get_domain(dev);
2585 if (IS_ERR(domain))
Joerg Roedel5b28df62008-12-02 17:49:42 +01002586 return;
2587
Joerg Roedel80187fd2016-07-06 17:20:54 +02002588 startaddr = sg_dma_address(sglist) & PAGE_MASK;
Joerg Roedelb3311b02016-07-08 13:31:31 +02002589 dma_dom = to_dma_ops_domain(domain);
Joerg Roedel80187fd2016-07-06 17:20:54 +02002590 npages = sg_num_pages(dev, sglist, nelems);
2591
Joerg Roedelb3311b02016-07-08 13:31:31 +02002592 __unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
Joerg Roedel65b050a2008-06-26 21:28:02 +02002593}
2594
Joerg Roedel431b2a22008-07-11 17:14:22 +02002595/*
2596 * The exported alloc_coherent function for dma_ops.
2597 */
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002598static void *alloc_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02002599 dma_addr_t *dma_addr, gfp_t flag,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07002600 unsigned long attrs)
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002601{
Joerg Roedel832a90c2008-09-18 15:54:23 +02002602 u64 dma_mask = dev->coherent_dma_mask;
Christoph Hellwigb4686202018-03-19 11:38:19 +01002603 struct protection_domain *domain = get_domain(dev);
2604 bool is_direct = false;
2605 void *virt_addr;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002606
Christoph Hellwigb4686202018-03-19 11:38:19 +01002607 if (IS_ERR(domain)) {
2608 if (PTR_ERR(domain) != -EINVAL)
Joerg Roedel3b839a52015-04-01 14:58:47 +02002609 return NULL;
Christoph Hellwigb4686202018-03-19 11:38:19 +01002610 is_direct = true;
Joerg Roedel3b839a52015-04-01 14:58:47 +02002611 }
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002612
Christoph Hellwigb4686202018-03-19 11:38:19 +01002613 virt_addr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
2614 if (!virt_addr || is_direct)
2615 return virt_addr;
2616
Joerg Roedel832a90c2008-09-18 15:54:23 +02002617 if (!dma_mask)
2618 dma_mask = *dev->dma_mask;
2619
Christoph Hellwigb4686202018-03-19 11:38:19 +01002620 *dma_addr = __map_single(dev, to_dma_ops_domain(domain),
2621 virt_to_phys(virt_addr), PAGE_ALIGN(size),
2622 DMA_BIDIRECTIONAL, dma_mask);
Christoph Hellwiga8695722017-05-21 13:26:45 +02002623 if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
Joerg Roedel5b28df62008-12-02 17:49:42 +01002624 goto out_free;
Christoph Hellwigb4686202018-03-19 11:38:19 +01002625 return virt_addr;
Joerg Roedel5b28df62008-12-02 17:49:42 +01002626
2627out_free:
Christoph Hellwigb4686202018-03-19 11:38:19 +01002628 dma_direct_free(dev, size, virt_addr, *dma_addr, attrs);
Joerg Roedel5b28df62008-12-02 17:49:42 +01002629 return NULL;
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002630}
2631
Joerg Roedel431b2a22008-07-11 17:14:22 +02002632/*
2633 * The exported free_coherent function for dma_ops.
Joerg Roedel431b2a22008-07-11 17:14:22 +02002634 */
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002635static void free_coherent(struct device *dev, size_t size,
Andrzej Pietrasiewiczbaa676f2012-03-27 14:28:18 +02002636 void *virt_addr, dma_addr_t dma_addr,
Krzysztof Kozlowski00085f12016-08-03 13:46:00 -07002637 unsigned long attrs)
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002638{
Christoph Hellwigb4686202018-03-19 11:38:19 +01002639 struct protection_domain *domain = get_domain(dev);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002640
Joerg Roedel3b839a52015-04-01 14:58:47 +02002641 size = PAGE_ALIGN(size);
2642
Christoph Hellwigb4686202018-03-19 11:38:19 +01002643 if (!IS_ERR(domain)) {
2644 struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
Joerg Roedel5b28df62008-12-02 17:49:42 +01002645
Christoph Hellwigb4686202018-03-19 11:38:19 +01002646 __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
2647 }
Joerg Roedelb3311b02016-07-08 13:31:31 +02002648
Christoph Hellwigb4686202018-03-19 11:38:19 +01002649 dma_direct_free(dev, size, virt_addr, dma_addr, attrs);
Joerg Roedel5d8b53c2008-06-26 21:28:03 +02002650}
2651
Joerg Roedelc432f3d2008-06-26 21:28:04 +02002652/*
Joerg Roedelb39ba6a2008-09-09 18:40:46 +02002653 * This function is called by the DMA layer to find out if we can handle a
2654 * particular device. It is part of the dma_ops.
2655 */
2656static int amd_iommu_dma_supported(struct device *dev, u64 mask)
2657{
Christoph Hellwigfec777c2018-03-19 11:38:15 +01002658 if (!dma_direct_supported(dev, mask))
Christoph Hellwig5860acc2017-05-22 11:38:27 +02002659 return 0;
Joerg Roedel420aef82009-11-23 16:14:57 +01002660 return check_device(dev);
Joerg Roedelb39ba6a2008-09-09 18:40:46 +02002661}
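
/*
 * Illustrative sketch, not part of the driver: amd_iommu_dma_supported() is
 * reached when a device driver negotiates its DMA mask through the generic
 * DMA API (dma_set_mask() dispatches to .dma_supported). "pdev" is a
 * placeholder device:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
 *		return -EIO;
 */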
2662
Christoph Hellwiga8695722017-05-21 13:26:45 +02002663static int amd_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
2664{
2665 return dma_addr == AMD_IOMMU_MAPPING_ERROR;
2666}
2667
Bart Van Assche52997092017-01-20 13:04:01 -08002668static const struct dma_map_ops amd_iommu_dma_ops = {
Joerg Roedela639a8e2015-12-22 16:06:49 +01002669 .alloc = alloc_coherent,
2670 .free = free_coherent,
2671 .map_page = map_page,
2672 .unmap_page = unmap_page,
2673 .map_sg = map_sg,
2674 .unmap_sg = unmap_sg,
2675 .dma_supported = amd_iommu_dma_supported,
Christoph Hellwiga8695722017-05-21 13:26:45 +02002676 .mapping_error = amd_iommu_mapping_error,
Joerg Roedel6631ee92008-06-26 21:28:05 +02002677};
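
/*
 * Illustrative sketch, not part of the driver: once amd_iommu_dma_ops is
 * installed for a device, ordinary DMA-API calls in a driver are dispatched
 * through this table ("buf", "size" and "pdev" are placeholders):
 *
 *	dma_addr_t dma = dma_map_single(&pdev->dev, buf, size, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(&pdev->dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(&pdev->dev, dma, size, DMA_TO_DEVICE);
 *
 * dma_map_single() ends up in map_page() above, and dma_mapping_error() in
 * amd_iommu_mapping_error(), which compares against AMD_IOMMU_MAPPING_ERROR.
 */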
2678
Joerg Roedel81cd07b2016-07-07 18:01:10 +02002679static int init_reserved_iova_ranges(void)
2680{
2681 struct pci_dev *pdev = NULL;
2682 struct iova *val;
2683
Zhen Leiaa3ac942017-09-21 16:52:45 +01002684 init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);
Joerg Roedel81cd07b2016-07-07 18:01:10 +02002685
2686 lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
2687 &reserved_rbtree_key);
2688
2689 /* MSI memory range */
2690 val = reserve_iova(&reserved_iova_ranges,
2691 IOVA_PFN(MSI_RANGE_START), IOVA_PFN(MSI_RANGE_END));
2692 if (!val) {
2693 pr_err("Reserving MSI range failed\n");
2694 return -ENOMEM;
2695 }
2696
2697 /* HT memory range */
2698 val = reserve_iova(&reserved_iova_ranges,
2699 IOVA_PFN(HT_RANGE_START), IOVA_PFN(HT_RANGE_END));
2700 if (!val) {
2701 pr_err("Reserving HT range failed\n");
2702 return -ENOMEM;
2703 }
2704
2705 /*
2706 * Memory used for PCI resources
 2707	 * FIXME: Check whether we can reserve the PCI-hole completely
2708 */
2709 for_each_pci_dev(pdev) {
2710 int i;
2711
2712 for (i = 0; i < PCI_NUM_RESOURCES; ++i) {
2713 struct resource *r = &pdev->resource[i];
2714
2715 if (!(r->flags & IORESOURCE_MEM))
2716 continue;
2717
2718 val = reserve_iova(&reserved_iova_ranges,
2719 IOVA_PFN(r->start),
2720 IOVA_PFN(r->end));
2721 if (!val) {
2722 pr_err("Reserve pci-resource range failed\n");
2723 return -ENOMEM;
2724 }
2725 }
2726 }
2727
2728 return 0;
2729}
2730
Joerg Roedel3a18404c2015-05-28 18:41:45 +02002731int __init amd_iommu_init_api(void)
Joerg Roedel27c21272011-05-30 15:56:24 +02002732{
Joerg Roedel460c26d2017-06-02 14:28:01 +02002733 int ret, err = 0;
Joerg Roedel307d5852016-07-05 11:54:04 +02002734
2735 ret = iova_cache_get();
2736 if (ret)
2737 return ret;
Wan Zongshun9a4d3bf52016-04-01 09:06:05 -04002738
Joerg Roedel81cd07b2016-07-07 18:01:10 +02002739 ret = init_reserved_iova_ranges();
2740 if (ret)
2741 return ret;
2742
Wan Zongshun9a4d3bf52016-04-01 09:06:05 -04002743 err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
2744 if (err)
2745 return err;
2746#ifdef CONFIG_ARM_AMBA
2747 err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
2748 if (err)
2749 return err;
2750#endif
Wan Zongshun0076cd32016-05-10 09:21:01 -04002751 err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops);
2752 if (err)
2753 return err;
Joerg Roedel460c26d2017-06-02 14:28:01 +02002754
Wan Zongshun9a4d3bf52016-04-01 09:06:05 -04002755 return 0;
Joerg Roedelf5325092010-01-22 17:44:35 +01002756}
2757
Joerg Roedel6631ee92008-06-26 21:28:05 +02002758int __init amd_iommu_init_dma_ops(void)
2759{
Tom Lendackyaba2d9a2017-10-06 16:35:40 -05002760 swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0;
Joerg Roedel6631ee92008-06-26 21:28:05 +02002761 iommu_detected = 1;
Joerg Roedel6631ee92008-06-26 21:28:05 +02002762
Joerg Roedel52717822015-07-28 16:58:51 +02002763 /*
2764 * In case we don't initialize SWIOTLB (actually the common case
Tom Lendackyaba2d9a2017-10-06 16:35:40 -05002765 * when AMD IOMMU is enabled and SME is not active), make sure there
2766 * are global dma_ops set as a fall-back for devices not handled by
2767 * this driver (for example non-PCI devices). When SME is active,
2768 * make sure that swiotlb variable remains set so the global dma_ops
2769 * continue to be SWIOTLB.
Joerg Roedel52717822015-07-28 16:58:51 +02002770 */
2771 if (!swiotlb)
Christoph Hellwigfec777c2018-03-19 11:38:15 +01002772 dma_ops = &dma_direct_ops;
Joerg Roedel52717822015-07-28 16:58:51 +02002773
Joerg Roedel62410ee2012-06-12 16:42:43 +02002774 if (amd_iommu_unmap_flush)
2775 pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
2776 else
2777 pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
2778
Joerg Roedel6631ee92008-06-26 21:28:05 +02002779 return 0;
Joerg Roedelc5b5da92016-07-06 11:55:37 +02002780
Joerg Roedel6631ee92008-06-26 21:28:05 +02002781}
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002782
2783/*****************************************************************************
2784 *
2785 * The following functions belong to the exported interface of AMD IOMMU
2786 *
2787 * This interface allows access to lower level functions of the IOMMU
 2788 * like protection domain handling and assignment of devices to domains
2789 * which is not possible with the dma_ops interface.
2790 *
2791 *****************************************************************************/
2792
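/*
 * Illustrative sketch, not part of the driver: a typical consumer of this
 * interface (VFIO, for example) goes through the generic IOMMU API, which
 * dispatches to the amd_iommu_ops callbacks defined below. "dev", "iova" and
 * "paddr" are placeholders:
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *	if (!dom)
 *		return -ENOMEM;
 *	if (iommu_attach_device(dom, dev))
 *		goto out_free;
 *	iommu_map(dom, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	phys = iommu_iova_to_phys(dom, iova);
 *
 * iommu_attach_device() ends up in amd_iommu_attach_device(), iommu_map()
 * in amd_iommu_map() and iommu_iova_to_phys() in amd_iommu_iova_to_phys().
 */
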
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002793static void cleanup_domain(struct protection_domain *domain)
2794{
Joerg Roedel9b29d3c2014-08-05 17:50:15 +02002795 struct iommu_dev_data *entry;
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002796 unsigned long flags;
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002797
2798 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
2799
Joerg Roedel9b29d3c2014-08-05 17:50:15 +02002800 while (!list_empty(&domain->dev_list)) {
2801 entry = list_first_entry(&domain->dev_list,
2802 struct iommu_dev_data, list);
2803 __detach_device(entry);
Joerg Roedel492667d2009-11-27 13:25:47 +01002804 }
Joerg Roedel6d98cd82008-12-08 12:05:55 +01002805
2806 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2807}
2808
Joerg Roedel26508152009-08-26 16:52:40 +02002809static void protection_domain_free(struct protection_domain *domain)
2810{
2811 if (!domain)
2812 return;
2813
Joerg Roedelaeb26f52009-11-20 16:44:01 +01002814 del_domain_from_list(domain);
2815
Joerg Roedel26508152009-08-26 16:52:40 +02002816 if (domain->id)
2817 domain_id_free(domain->id);
2818
2819 kfree(domain);
2820}
2821
Joerg Roedel7a5a5662015-06-30 08:56:11 +02002822static int protection_domain_init(struct protection_domain *domain)
2823{
2824 spin_lock_init(&domain->lock);
2825 mutex_init(&domain->api_lock);
2826 domain->id = domain_id_alloc();
2827 if (!domain->id)
2828 return -ENOMEM;
2829 INIT_LIST_HEAD(&domain->dev_list);
2830
2831 return 0;
2832}
2833
Joerg Roedel26508152009-08-26 16:52:40 +02002834static struct protection_domain *protection_domain_alloc(void)
Joerg Roedelc156e342008-12-02 18:13:27 +01002835{
2836 struct protection_domain *domain;
2837
2838 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
2839 if (!domain)
Joerg Roedel26508152009-08-26 16:52:40 +02002840 return NULL;
Joerg Roedelc156e342008-12-02 18:13:27 +01002841
Joerg Roedel7a5a5662015-06-30 08:56:11 +02002842 if (protection_domain_init(domain))
Joerg Roedel26508152009-08-26 16:52:40 +02002843 goto out_err;
2844
Joerg Roedelaeb26f52009-11-20 16:44:01 +01002845 add_domain_to_list(domain);
2846
Joerg Roedel26508152009-08-26 16:52:40 +02002847 return domain;
2848
2849out_err:
2850 kfree(domain);
2851
2852 return NULL;
2853}
2854
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002855static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2856{
2857 struct protection_domain *pdomain;
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002858 struct dma_ops_domain *dma_domain;
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002859
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002860 switch (type) {
2861 case IOMMU_DOMAIN_UNMANAGED:
2862 pdomain = protection_domain_alloc();
2863 if (!pdomain)
2864 return NULL;
2865
2866 pdomain->mode = PAGE_MODE_3_LEVEL;
2867 pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
2868 if (!pdomain->pt_root) {
2869 protection_domain_free(pdomain);
2870 return NULL;
2871 }
2872
2873 pdomain->domain.geometry.aperture_start = 0;
2874 pdomain->domain.geometry.aperture_end = ~0ULL;
2875 pdomain->domain.geometry.force_aperture = true;
2876
2877 break;
2878 case IOMMU_DOMAIN_DMA:
2879 dma_domain = dma_ops_domain_alloc();
2880 if (!dma_domain) {
 2881			pr_err("AMD-Vi: Failed to allocate DMA domain\n");
2882 return NULL;
2883 }
2884 pdomain = &dma_domain->domain;
2885 break;
Joerg Roedel07f643a2015-05-28 18:41:41 +02002886 case IOMMU_DOMAIN_IDENTITY:
2887 pdomain = protection_domain_alloc();
2888 if (!pdomain)
2889 return NULL;
2890
2891 pdomain->mode = PAGE_MODE_NONE;
2892 break;
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002893 default:
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002894 return NULL;
Joerg Roedel0bb6e242015-05-28 18:41:40 +02002895 }
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002896
2897 return &pdomain->domain;
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002898}
2899
2900static void amd_iommu_domain_free(struct iommu_domain *dom)
Joerg Roedel26508152009-08-26 16:52:40 +02002901{
2902 struct protection_domain *domain;
Joerg Roedelcda70052016-07-07 15:57:04 +02002903 struct dma_ops_domain *dma_dom;
Joerg Roedel98383fc2008-12-02 18:34:12 +01002904
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002905 domain = to_pdomain(dom);
2906
Joerg Roedel98383fc2008-12-02 18:34:12 +01002907 if (domain->dev_cnt > 0)
2908 cleanup_domain(domain);
2909
2910 BUG_ON(domain->dev_cnt != 0);
2911
Joerg Roedelcda70052016-07-07 15:57:04 +02002912 if (!dom)
2913 return;
Joerg Roedel98383fc2008-12-02 18:34:12 +01002914
Joerg Roedelcda70052016-07-07 15:57:04 +02002915 switch (dom->type) {
2916 case IOMMU_DOMAIN_DMA:
Joerg Roedel281e8cc2016-07-07 16:12:02 +02002917 /* Now release the domain */
Joerg Roedelb3311b02016-07-08 13:31:31 +02002918 dma_dom = to_dma_ops_domain(domain);
Joerg Roedelcda70052016-07-07 15:57:04 +02002919 dma_ops_domain_free(dma_dom);
2920 break;
2921 default:
2922 if (domain->mode != PAGE_MODE_NONE)
2923 free_pagetable(domain);
Joerg Roedel52815b72011-11-17 17:24:28 +01002924
Joerg Roedelcda70052016-07-07 15:57:04 +02002925 if (domain->flags & PD_IOMMUV2_MASK)
2926 free_gcr3_table(domain);
2927
2928 protection_domain_free(domain);
2929 break;
2930 }
Joerg Roedel98383fc2008-12-02 18:34:12 +01002931}
2932
Joerg Roedel684f2882008-12-08 12:07:44 +01002933static void amd_iommu_detach_device(struct iommu_domain *dom,
2934 struct device *dev)
2935{
Joerg Roedel657cbb62009-11-23 15:26:46 +01002936 struct iommu_dev_data *dev_data = dev->archdata.iommu;
Joerg Roedel684f2882008-12-08 12:07:44 +01002937 struct amd_iommu *iommu;
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002938 int devid;
Joerg Roedel684f2882008-12-08 12:07:44 +01002939
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002940 if (!check_device(dev))
Joerg Roedel684f2882008-12-08 12:07:44 +01002941 return;
2942
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002943 devid = get_device_id(dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02002944 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04002945 return;
Joerg Roedel684f2882008-12-08 12:07:44 +01002946
Joerg Roedel657cbb62009-11-23 15:26:46 +01002947 if (dev_data->domain != NULL)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002948 detach_device(dev);
Joerg Roedel684f2882008-12-08 12:07:44 +01002949
2950 iommu = amd_iommu_rlookup_table[devid];
2951 if (!iommu)
2952 return;
2953
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002954#ifdef CONFIG_IRQ_REMAP
2955 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
2956 (dom->type == IOMMU_DOMAIN_UNMANAGED))
2957 dev_data->use_vapic = 0;
2958#endif
2959
Joerg Roedel684f2882008-12-08 12:07:44 +01002960 iommu_completion_wait(iommu);
2961}
2962
Joerg Roedel01106062008-12-02 19:34:11 +01002963static int amd_iommu_attach_device(struct iommu_domain *dom,
2964 struct device *dev)
2965{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01002966 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel657cbb62009-11-23 15:26:46 +01002967 struct iommu_dev_data *dev_data;
Joerg Roedel01106062008-12-02 19:34:11 +01002968 struct amd_iommu *iommu;
Joerg Roedel15898bb2009-11-24 15:39:42 +01002969 int ret;
Joerg Roedel01106062008-12-02 19:34:11 +01002970
Joerg Roedel98fc5a62009-11-24 17:19:23 +01002971 if (!check_device(dev))
Joerg Roedel01106062008-12-02 19:34:11 +01002972 return -EINVAL;
2973
Joerg Roedel657cbb62009-11-23 15:26:46 +01002974 dev_data = dev->archdata.iommu;
2975
Joerg Roedelf62dda62011-06-09 12:55:35 +02002976 iommu = amd_iommu_rlookup_table[dev_data->devid];
Joerg Roedel01106062008-12-02 19:34:11 +01002977 if (!iommu)
2978 return -EINVAL;
2979
Joerg Roedel657cbb62009-11-23 15:26:46 +01002980 if (dev_data->domain)
Joerg Roedel15898bb2009-11-24 15:39:42 +01002981 detach_device(dev);
Joerg Roedel01106062008-12-02 19:34:11 +01002982
Joerg Roedel15898bb2009-11-24 15:39:42 +01002983 ret = attach_device(dev, domain);
Joerg Roedel01106062008-12-02 19:34:11 +01002984
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05002985#ifdef CONFIG_IRQ_REMAP
2986 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
2987 if (dom->type == IOMMU_DOMAIN_UNMANAGED)
2988 dev_data->use_vapic = 1;
2989 else
2990 dev_data->use_vapic = 0;
2991 }
2992#endif
2993
Joerg Roedel01106062008-12-02 19:34:11 +01002994 iommu_completion_wait(iommu);
2995
Joerg Roedel15898bb2009-11-24 15:39:42 +01002996 return ret;
Joerg Roedel01106062008-12-02 19:34:11 +01002997}
2998
Joerg Roedel468e2362010-01-21 16:37:36 +01002999static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02003000 phys_addr_t paddr, size_t page_size, int iommu_prot)
Joerg Roedelc6229ca2008-12-02 19:48:43 +01003001{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003002 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedelc6229ca2008-12-02 19:48:43 +01003003 int prot = 0;
3004 int ret;
3005
Joerg Roedel132bd682011-11-17 14:18:46 +01003006 if (domain->mode == PAGE_MODE_NONE)
3007 return -EINVAL;
3008
Joerg Roedelc6229ca2008-12-02 19:48:43 +01003009 if (iommu_prot & IOMMU_READ)
3010 prot |= IOMMU_PROT_IR;
3011 if (iommu_prot & IOMMU_WRITE)
3012 prot |= IOMMU_PROT_IW;
3013
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003014 mutex_lock(&domain->api_lock);
Joerg Roedelb911b892016-07-05 14:29:11 +02003015 ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003016 mutex_unlock(&domain->api_lock);
3017
Joerg Roedel795e74f72010-05-11 17:40:57 +02003018 return ret;
Joerg Roedelc6229ca2008-12-02 19:48:43 +01003019}
3020
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02003021static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
3022 size_t page_size)
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003023{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003024 struct protection_domain *domain = to_pdomain(dom);
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02003025 size_t unmap_size;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003026
Joerg Roedel132bd682011-11-17 14:18:46 +01003027 if (domain->mode == PAGE_MODE_NONE)
 3028		return 0;
3029
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003030 mutex_lock(&domain->api_lock);
Joerg Roedel468e2362010-01-21 16:37:36 +01003031 unmap_size = iommu_unmap_page(domain, iova, page_size);
Joerg Roedel795e74f72010-05-11 17:40:57 +02003032 mutex_unlock(&domain->api_lock);
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003033
Joerg Roedel17b124b2011-04-06 18:01:35 +02003034 domain_flush_tlb_pde(domain);
Joerg Roedelce763532017-10-13 14:32:37 +02003035 domain_flush_complete(domain);
Joerg Roedel5d214fe2010-02-08 14:44:49 +01003036
Ohad Ben-Cohen50090652011-11-10 11:32:25 +02003037 return unmap_size;
Joerg Roedeleb74ff62008-12-02 19:59:10 +01003038}
3039
Joerg Roedel645c4c82008-12-02 20:05:50 +01003040static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
Varun Sethibb5547a2013-03-29 01:23:58 +05303041 dma_addr_t iova)
Joerg Roedel645c4c82008-12-02 20:05:50 +01003042{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003043 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel3039ca12015-04-01 14:58:48 +02003044 unsigned long offset_mask, pte_pgsize;
Joerg Roedelf03152b2010-01-21 16:15:24 +01003045 u64 *pte, __pte;
Joerg Roedel645c4c82008-12-02 20:05:50 +01003046
Joerg Roedel132bd682011-11-17 14:18:46 +01003047 if (domain->mode == PAGE_MODE_NONE)
3048 return iova;
3049
Joerg Roedel3039ca12015-04-01 14:58:48 +02003050 pte = fetch_pte(domain, iova, &pte_pgsize);
Joerg Roedel645c4c82008-12-02 20:05:50 +01003051
Joerg Roedela6d41a42009-09-02 17:08:55 +02003052 if (!pte || !IOMMU_PTE_PRESENT(*pte))
Joerg Roedel645c4c82008-12-02 20:05:50 +01003053 return 0;
3054
Joerg Roedelb24b1b62015-04-01 14:58:51 +02003055 offset_mask = pte_pgsize - 1;
3056 __pte = *pte & PM_ADDR_MASK;
Joerg Roedelf03152b2010-01-21 16:15:24 +01003057
Joerg Roedelb24b1b62015-04-01 14:58:51 +02003058 return (__pte & ~offset_mask) | (iova & offset_mask);
Joerg Roedel645c4c82008-12-02 20:05:50 +01003059}
3060
Joerg Roedelab636482014-09-05 10:48:21 +02003061static bool amd_iommu_capable(enum iommu_cap cap)
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003062{
Joerg Roedel80a506b2010-07-27 17:14:24 +02003063 switch (cap) {
3064 case IOMMU_CAP_CACHE_COHERENCY:
Joerg Roedelab636482014-09-05 10:48:21 +02003065 return true;
Joerg Roedelbdddadc2012-07-02 18:38:13 +02003066 case IOMMU_CAP_INTR_REMAP:
Joerg Roedelab636482014-09-05 10:48:21 +02003067 return (irq_remapping_enabled == 1);
Will Deaconcfdeec22014-10-27 11:24:48 +00003068 case IOMMU_CAP_NOEXEC:
3069 return false;
Joerg Roedel80a506b2010-07-27 17:14:24 +02003070 }
3071
Joerg Roedelab636482014-09-05 10:48:21 +02003072 return false;
Sheng Yangdbb9fd82009-03-18 15:33:06 +08003073}
3074
Eric Augere5b52342017-01-19 20:57:47 +00003075static void amd_iommu_get_resv_regions(struct device *dev,
3076 struct list_head *head)
Joerg Roedel35cf2482015-05-28 18:41:37 +02003077{
Eric Auger4397f322017-01-19 20:57:54 +00003078 struct iommu_resv_region *region;
Joerg Roedel35cf2482015-05-28 18:41:37 +02003079 struct unity_map_entry *entry;
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04003080 int devid;
Joerg Roedel35cf2482015-05-28 18:41:37 +02003081
3082 devid = get_device_id(dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02003083 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04003084 return;
Joerg Roedel35cf2482015-05-28 18:41:37 +02003085
3086 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
Eric Auger4397f322017-01-19 20:57:54 +00003087 size_t length;
3088 int prot = 0;
Joerg Roedel35cf2482015-05-28 18:41:37 +02003089
3090 if (devid < entry->devid_start || devid > entry->devid_end)
3091 continue;
3092
Eric Auger4397f322017-01-19 20:57:54 +00003093 length = entry->address_end - entry->address_start;
3094 if (entry->prot & IOMMU_PROT_IR)
3095 prot |= IOMMU_READ;
3096 if (entry->prot & IOMMU_PROT_IW)
3097 prot |= IOMMU_WRITE;
3098
3099 region = iommu_alloc_resv_region(entry->address_start,
3100 length, prot,
3101 IOMMU_RESV_DIRECT);
Joerg Roedel35cf2482015-05-28 18:41:37 +02003102 if (!region) {
3103 pr_err("Out of memory allocating dm-regions for %s\n",
3104 dev_name(dev));
3105 return;
3106 }
Joerg Roedel35cf2482015-05-28 18:41:37 +02003107 list_add_tail(&region->list, head);
3108 }
Eric Auger4397f322017-01-19 20:57:54 +00003109
3110 region = iommu_alloc_resv_region(MSI_RANGE_START,
3111 MSI_RANGE_END - MSI_RANGE_START + 1,
Robin Murphy9d3a4de2017-03-16 17:00:16 +00003112 0, IOMMU_RESV_MSI);
Eric Auger4397f322017-01-19 20:57:54 +00003113 if (!region)
3114 return;
3115 list_add_tail(&region->list, head);
3116
3117 region = iommu_alloc_resv_region(HT_RANGE_START,
3118 HT_RANGE_END - HT_RANGE_START + 1,
3119 0, IOMMU_RESV_RESERVED);
3120 if (!region)
3121 return;
3122 list_add_tail(&region->list, head);
Joerg Roedel35cf2482015-05-28 18:41:37 +02003123}
3124
Eric Augere5b52342017-01-19 20:57:47 +00003125static void amd_iommu_put_resv_regions(struct device *dev,
Joerg Roedel35cf2482015-05-28 18:41:37 +02003126 struct list_head *head)
3127{
Eric Augere5b52342017-01-19 20:57:47 +00003128 struct iommu_resv_region *entry, *next;
Joerg Roedel35cf2482015-05-28 18:41:37 +02003129
3130 list_for_each_entry_safe(entry, next, head, list)
3131 kfree(entry);
3132}
3133
Eric Augere5b52342017-01-19 20:57:47 +00003134static void amd_iommu_apply_resv_region(struct device *dev,
Joerg Roedel8d54d6c2016-07-05 13:32:20 +02003135 struct iommu_domain *domain,
Eric Augere5b52342017-01-19 20:57:47 +00003136 struct iommu_resv_region *region)
Joerg Roedel8d54d6c2016-07-05 13:32:20 +02003137{
Joerg Roedelb3311b02016-07-08 13:31:31 +02003138 struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
Joerg Roedel8d54d6c2016-07-05 13:32:20 +02003139 unsigned long start, end;
3140
3141 start = IOVA_PFN(region->start);
Gary R Hookb92b4fb2017-11-03 10:50:34 -06003142 end = IOVA_PFN(region->start + region->length - 1);
Joerg Roedel8d54d6c2016-07-05 13:32:20 +02003143
3144 WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
3145}
3146
Baoquan Hedf3f7a62017-08-09 16:33:41 +08003147static bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
3148 struct device *dev)
3149{
3150 struct iommu_dev_data *dev_data = dev->archdata.iommu;
3151 return dev_data->defer_attach;
3152}
3153
Joerg Roedelb0119e82017-02-01 13:23:08 +01003154const struct iommu_ops amd_iommu_ops = {
Joerg Roedelab636482014-09-05 10:48:21 +02003155 .capable = amd_iommu_capable,
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003156 .domain_alloc = amd_iommu_domain_alloc,
3157 .domain_free = amd_iommu_domain_free,
Joerg Roedel26961ef2008-12-03 17:00:17 +01003158 .attach_dev = amd_iommu_attach_device,
3159 .detach_dev = amd_iommu_detach_device,
Joerg Roedel468e2362010-01-21 16:37:36 +01003160 .map = amd_iommu_map,
3161 .unmap = amd_iommu_unmap,
Olav Haugan315786e2014-10-25 09:55:16 -07003162 .map_sg = default_iommu_map_sg,
Joerg Roedel26961ef2008-12-03 17:00:17 +01003163 .iova_to_phys = amd_iommu_iova_to_phys,
Joerg Roedelaafd8ba2015-05-28 18:41:39 +02003164 .add_device = amd_iommu_add_device,
3165 .remove_device = amd_iommu_remove_device,
Wan Zongshunb097d112016-04-01 09:06:04 -04003166 .device_group = amd_iommu_device_group,
Eric Augere5b52342017-01-19 20:57:47 +00003167 .get_resv_regions = amd_iommu_get_resv_regions,
3168 .put_resv_regions = amd_iommu_put_resv_regions,
3169 .apply_resv_region = amd_iommu_apply_resv_region,
Baoquan Hedf3f7a62017-08-09 16:33:41 +08003170 .is_attach_deferred = amd_iommu_is_attach_deferred,
Ohad Ben-Cohenaa3de9c2011-11-10 11:32:29 +02003171 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
Joerg Roedel26961ef2008-12-03 17:00:17 +01003172};
3173
Joerg Roedel0feae532009-08-26 15:26:30 +02003174/*****************************************************************************
3175 *
3176 * The next functions do a basic initialization of IOMMU for pass through
3177 * mode
3178 *
3179 * In passthrough mode the IOMMU is initialized and enabled but not used for
3180 * DMA-API translation.
3181 *
3182 *****************************************************************************/
3183
Joerg Roedel72e1dcc2011-11-10 19:13:51 +01003184/* IOMMUv2 specific functions */
3185int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3186{
3187 return atomic_notifier_chain_register(&ppr_notifier, nb);
3188}
3189EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
3190
3191int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
3192{
3193 return atomic_notifier_chain_unregister(&ppr_notifier, nb);
3194}
3195EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
Joerg Roedel132bd682011-11-17 14:18:46 +01003196
3197void amd_iommu_domain_direct_map(struct iommu_domain *dom)
3198{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003199 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel132bd682011-11-17 14:18:46 +01003200 unsigned long flags;
3201
3202 spin_lock_irqsave(&domain->lock, flags);
3203
3204 /* Update data structure */
3205 domain->mode = PAGE_MODE_NONE;
3206 domain->updated = true;
3207
3208 /* Make changes visible to IOMMUs */
3209 update_domain(domain);
3210
3211 /* Page-table is not visible to IOMMU anymore, so free it */
3212 free_pagetable(domain);
3213
3214 spin_unlock_irqrestore(&domain->lock, flags);
3215}
3216EXPORT_SYMBOL(amd_iommu_domain_direct_map);
Joerg Roedel52815b72011-11-17 17:24:28 +01003217
3218int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
3219{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003220 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel52815b72011-11-17 17:24:28 +01003221 unsigned long flags;
3222 int levels, ret;
3223
3224 if (pasids <= 0 || pasids > (PASID_MASK + 1))
3225 return -EINVAL;
3226
3227 /* Number of GCR3 table levels required */
3228 for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
3229 levels += 1;
3230
3231 if (levels > amd_iommu_max_glx_val)
3232 return -EINVAL;
3233
3234 spin_lock_irqsave(&domain->lock, flags);
3235
3236 /*
 3237	 * Spare us the sanity check of whether all devices already in the
 3238	 * domain support IOMMUv2 by requiring that the domain has no
 3239	 * devices attached when it is switched into IOMMUv2 mode.
3240 */
3241 ret = -EBUSY;
3242 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
3243 goto out;
3244
3245 ret = -ENOMEM;
3246 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
3247 if (domain->gcr3_tbl == NULL)
3248 goto out;
3249
3250 domain->glx = levels;
3251 domain->flags |= PD_IOMMUV2_MASK;
3252 domain->updated = true;
3253
3254 update_domain(domain);
3255
3256 ret = 0;
3257
3258out:
3259 spin_unlock_irqrestore(&domain->lock, flags);
3260
3261 return ret;
3262}
3263EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
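
/*
 * Illustrative sketch, not part of this file: the AMD IOMMUv2 driver switches
 * a domain to PASID/GCR3 mode roughly as follows (error handling omitted;
 * "dom", "pasid_limit", "pasid", "mm" and "address" are placeholders):
 *
 *	amd_iommu_domain_direct_map(dom);
 *	amd_iommu_domain_enable_v2(dom, pasid_limit);
 *	...
 *	amd_iommu_domain_set_gcr3(dom, pasid, __pa(mm->pgd));
 *	...
 *	amd_iommu_flush_page(dom, pasid, address);
 *	...
 *	amd_iommu_domain_clear_gcr3(dom, pasid);
 */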
Joerg Roedel22e266c2011-11-21 15:59:08 +01003264
3265static int __flush_pasid(struct protection_domain *domain, int pasid,
3266 u64 address, bool size)
3267{
3268 struct iommu_dev_data *dev_data;
3269 struct iommu_cmd cmd;
3270 int i, ret;
3271
3272 if (!(domain->flags & PD_IOMMUV2_MASK))
3273 return -EINVAL;
3274
3275 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
3276
3277 /*
3278 * IOMMU TLB needs to be flushed before Device TLB to
3279 * prevent device TLB refill from IOMMU TLB
3280 */
Suravee Suthikulpanit6b9376e2017-02-24 02:48:17 -06003281 for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
Joerg Roedel22e266c2011-11-21 15:59:08 +01003282 if (domain->dev_iommu[i] == 0)
3283 continue;
3284
3285 ret = iommu_queue_command(amd_iommus[i], &cmd);
3286 if (ret != 0)
3287 goto out;
3288 }
3289
3290 /* Wait until IOMMU TLB flushes are complete */
3291 domain_flush_complete(domain);
3292
3293 /* Now flush device TLBs */
3294 list_for_each_entry(dev_data, &domain->dev_list, list) {
3295 struct amd_iommu *iommu;
3296 int qdep;
3297
Joerg Roedel1c1cc452015-07-30 11:24:45 +02003298 /*
 3299		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
3300 * domain.
3301 */
3302 if (!dev_data->ats.enabled)
3303 continue;
Joerg Roedel22e266c2011-11-21 15:59:08 +01003304
3305 qdep = dev_data->ats.qdep;
3306 iommu = amd_iommu_rlookup_table[dev_data->devid];
3307
3308 build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
3309 qdep, address, size);
3310
3311 ret = iommu_queue_command(iommu, &cmd);
3312 if (ret != 0)
3313 goto out;
3314 }
3315
3316 /* Wait until all device TLBs are flushed */
3317 domain_flush_complete(domain);
3318
3319 ret = 0;
3320
3321out:
3322
3323 return ret;
3324}
3325
3326static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
3327 u64 address)
3328{
3329 return __flush_pasid(domain, pasid, address, false);
3330}
3331
3332int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
3333 u64 address)
3334{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003335 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel22e266c2011-11-21 15:59:08 +01003336 unsigned long flags;
3337 int ret;
3338
3339 spin_lock_irqsave(&domain->lock, flags);
3340 ret = __amd_iommu_flush_page(domain, pasid, address);
3341 spin_unlock_irqrestore(&domain->lock, flags);
3342
3343 return ret;
3344}
3345EXPORT_SYMBOL(amd_iommu_flush_page);
3346
3347static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
3348{
3349 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
3350 true);
3351}
3352
3353int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
3354{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003355 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedel22e266c2011-11-21 15:59:08 +01003356 unsigned long flags;
3357 int ret;
3358
3359 spin_lock_irqsave(&domain->lock, flags);
3360 ret = __amd_iommu_flush_tlb(domain, pasid);
3361 spin_unlock_irqrestore(&domain->lock, flags);
3362
3363 return ret;
3364}
3365EXPORT_SYMBOL(amd_iommu_flush_tlb);
3366
Joerg Roedelb16137b2011-11-21 16:50:23 +01003367static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
3368{
3369 int index;
3370 u64 *pte;
3371
3372 while (true) {
3373
3374 index = (pasid >> (9 * level)) & 0x1ff;
3375 pte = &root[index];
3376
3377 if (level == 0)
3378 break;
3379
3380 if (!(*pte & GCR3_VALID)) {
3381 if (!alloc)
3382 return NULL;
3383
3384 root = (void *)get_zeroed_page(GFP_ATOMIC);
3385 if (root == NULL)
3386 return NULL;
3387
Tom Lendacky2543a782017-07-17 16:10:24 -05003388 *pte = iommu_virt_to_phys(root) | GCR3_VALID;
Joerg Roedelb16137b2011-11-21 16:50:23 +01003389 }
3390
Tom Lendacky2543a782017-07-17 16:10:24 -05003391 root = iommu_phys_to_virt(*pte & PAGE_MASK);
Joerg Roedelb16137b2011-11-21 16:50:23 +01003392
3393 level -= 1;
3394 }
3395
3396 return pte;
3397}
3398
3399static int __set_gcr3(struct protection_domain *domain, int pasid,
3400 unsigned long cr3)
3401{
3402 u64 *pte;
3403
3404 if (domain->mode != PAGE_MODE_NONE)
3405 return -EINVAL;
3406
3407 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
3408 if (pte == NULL)
3409 return -ENOMEM;
3410
3411 *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
3412
3413 return __amd_iommu_flush_tlb(domain, pasid);
3414}
3415
3416static int __clear_gcr3(struct protection_domain *domain, int pasid)
3417{
3418 u64 *pte;
3419
3420 if (domain->mode != PAGE_MODE_NONE)
3421 return -EINVAL;
3422
3423 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
3424 if (pte == NULL)
3425 return 0;
3426
3427 *pte = 0;
3428
3429 return __amd_iommu_flush_tlb(domain, pasid);
3430}
3431
3432int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
3433 unsigned long cr3)
3434{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003435 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedelb16137b2011-11-21 16:50:23 +01003436 unsigned long flags;
3437 int ret;
3438
3439 spin_lock_irqsave(&domain->lock, flags);
3440 ret = __set_gcr3(domain, pasid, cr3);
3441 spin_unlock_irqrestore(&domain->lock, flags);
3442
3443 return ret;
3444}
3445EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
3446
3447int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
3448{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003449 struct protection_domain *domain = to_pdomain(dom);
Joerg Roedelb16137b2011-11-21 16:50:23 +01003450 unsigned long flags;
3451 int ret;
3452
3453 spin_lock_irqsave(&domain->lock, flags);
3454 ret = __clear_gcr3(domain, pasid);
3455 spin_unlock_irqrestore(&domain->lock, flags);
3456
3457 return ret;
3458}
3459EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
Joerg Roedelc99afa22011-11-21 18:19:25 +01003460
3461int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
3462 int status, int tag)
3463{
3464 struct iommu_dev_data *dev_data;
3465 struct amd_iommu *iommu;
3466 struct iommu_cmd cmd;
3467
3468 dev_data = get_dev_data(&pdev->dev);
3469 iommu = amd_iommu_rlookup_table[dev_data->devid];
3470
3471 build_complete_ppr(&cmd, dev_data->devid, pasid, status,
3472 tag, dev_data->pri_tlp);
3473
3474 return iommu_queue_command(iommu, &cmd);
3475}
3476EXPORT_SYMBOL(amd_iommu_complete_ppr);
Joerg Roedelf3572db2011-11-23 12:36:25 +01003477
3478struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
3479{
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003480 struct protection_domain *pdomain;
Joerg Roedelf3572db2011-11-23 12:36:25 +01003481
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003482 pdomain = get_domain(&pdev->dev);
3483 if (IS_ERR(pdomain))
Joerg Roedelf3572db2011-11-23 12:36:25 +01003484 return NULL;
3485
3486 /* Only return IOMMUv2 domains */
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003487 if (!(pdomain->flags & PD_IOMMUV2_MASK))
Joerg Roedelf3572db2011-11-23 12:36:25 +01003488 return NULL;
3489
Joerg Roedel3f4b87b2015-03-26 13:43:07 +01003490 return &pdomain->domain;
Joerg Roedelf3572db2011-11-23 12:36:25 +01003491}
3492EXPORT_SYMBOL(amd_iommu_get_v2_domain);
Joerg Roedel6a113dd2011-12-01 12:04:58 +01003493
3494void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
3495{
3496 struct iommu_dev_data *dev_data;
3497
3498 if (!amd_iommu_v2_supported())
3499 return;
3500
3501 dev_data = get_dev_data(&pdev->dev);
3502 dev_data->errata |= (1 << erratum);
3503}
3504EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
Joerg Roedel52efdb82011-12-07 12:01:36 +01003505
3506int amd_iommu_device_info(struct pci_dev *pdev,
3507 struct amd_iommu_device_info *info)
3508{
3509 int max_pasids;
3510 int pos;
3511
3512 if (pdev == NULL || info == NULL)
3513 return -EINVAL;
3514
3515 if (!amd_iommu_v2_supported())
3516 return -EINVAL;
3517
3518 memset(info, 0, sizeof(*info));
3519
3520 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
3521 if (pos)
3522 info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
3523
3524 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
3525 if (pos)
3526 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
3527
3528 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
3529 if (pos) {
3530 int features;
3531
3532 max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
3533 max_pasids = min(max_pasids, (1 << 20));
3534
3535 info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
3536 info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
3537
3538 features = pci_pasid_features(pdev);
3539 if (features & PCI_PASID_CAP_EXEC)
3540 info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
3541 if (features & PCI_PASID_CAP_PRIV)
3542 info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
3543 }
3544
3545 return 0;
3546}
3547EXPORT_SYMBOL(amd_iommu_device_info);
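
/*
 * Illustrative sketch, not part of the driver: a device driver that wants to
 * use PRI/PASID can query the capabilities first ("pdev" is a placeholder):
 *
 *	struct amd_iommu_device_info info;
 *
 *	if (amd_iommu_device_info(pdev, &info))
 *		return -ENODEV;
 *	if (!(info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) ||
 *	    !(info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP))
 *		return -ENODEV;
 *
 * info.max_pasids then bounds the PASID values usable on this device.
 */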
Joerg Roedel2b324502012-06-21 16:29:10 +02003548
3549#ifdef CONFIG_IRQ_REMAP
3550
3551/*****************************************************************************
3552 *
3553 * Interrupt Remapping Implementation
3554 *
3555 *****************************************************************************/
3556
Jiang Liu7c71d302015-04-13 14:11:33 +08003557static struct irq_chip amd_ir_chip;
3558
Joerg Roedel2b324502012-06-21 16:29:10 +02003559static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
3560{
3561 u64 dte;
3562
3563 dte = amd_iommu_dev_table[devid].data[2];
3564 dte &= ~DTE_IRQ_PHYS_ADDR_MASK;
Tom Lendacky2543a782017-07-17 16:10:24 -05003565 dte |= iommu_virt_to_phys(table->table);
Joerg Roedel2b324502012-06-21 16:29:10 +02003566 dte |= DTE_IRQ_REMAP_INTCTL;
3567 dte |= DTE_IRQ_TABLE_LEN;
3568 dte |= DTE_IRQ_REMAP_ENABLE;
3569
3570 amd_iommu_dev_table[devid].data[2] = dte;
3571}
3572
Joerg Roedel2b324502012-06-21 16:29:10 +02003573static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
3574{
3575 struct irq_remap_table *table = NULL;
3576 struct amd_iommu *iommu;
3577 unsigned long flags;
3578 u16 alias;
3579
3580 write_lock_irqsave(&amd_iommu_devtable_lock, flags);
3581
3582 iommu = amd_iommu_rlookup_table[devid];
3583 if (!iommu)
3584 goto out_unlock;
3585
3586 table = irq_lookup_table[devid];
3587 if (table)
Baoquan He09284b92016-09-20 09:05:34 +08003588 goto out_unlock;
Joerg Roedel2b324502012-06-21 16:29:10 +02003589
3590 alias = amd_iommu_alias_table[devid];
3591 table = irq_lookup_table[alias];
3592 if (table) {
3593 irq_lookup_table[devid] = table;
3594 set_dte_irq_entry(devid, table);
3595 iommu_flush_dte(iommu, devid);
3596 goto out;
3597 }
3598
3599 /* Nothing there yet, allocate new irq remapping table */
3600 table = kzalloc(sizeof(*table), GFP_ATOMIC);
3601 if (!table)
Baoquan He09284b92016-09-20 09:05:34 +08003602 goto out_unlock;
Joerg Roedel2b324502012-06-21 16:29:10 +02003603
Joerg Roedel197887f2013-04-09 21:14:08 +02003604 /* Initialize table spin-lock */
3605 spin_lock_init(&table->lock);
3606
Joerg Roedel2b324502012-06-21 16:29:10 +02003607 if (ioapic)
3608 /* Keep the first 32 indexes free for IOAPIC interrupts */
3609 table->min_index = 32;
3610
3611 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC);
3612 if (!table->table) {
3613 kfree(table);
Dan Carpenter821f0f62012-10-02 11:34:40 +03003614 table = NULL;
Baoquan He09284b92016-09-20 09:05:34 +08003615 goto out_unlock;
Joerg Roedel2b324502012-06-21 16:29:10 +02003616 }
3617
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003618 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
3619 memset(table->table, 0,
3620 MAX_IRQS_PER_TABLE * sizeof(u32));
3621 else
3622 memset(table->table, 0,
3623 (MAX_IRQS_PER_TABLE * (sizeof(u64) * 2)));
Joerg Roedel2b324502012-06-21 16:29:10 +02003624
3625 if (ioapic) {
3626 int i;
3627
3628 for (i = 0; i < 32; ++i)
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003629 iommu->irte_ops->set_allocated(table, i);
Joerg Roedel2b324502012-06-21 16:29:10 +02003630 }
3631
3632 irq_lookup_table[devid] = table;
3633 set_dte_irq_entry(devid, table);
3634 iommu_flush_dte(iommu, devid);
3635 if (devid != alias) {
3636 irq_lookup_table[alias] = table;
Alex Williamsone028a9e2014-04-22 10:08:40 -06003637 set_dte_irq_entry(alias, table);
Joerg Roedel2b324502012-06-21 16:29:10 +02003638 iommu_flush_dte(iommu, alias);
3639 }
3640
3641out:
3642 iommu_completion_wait(iommu);
3643
3644out_unlock:
3645 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
3646
3647 return table;
3648}
3649
Joerg Roedel37946d92017-10-06 12:16:39 +02003650static int alloc_irq_index(u16 devid, int count, bool align)
Joerg Roedel2b324502012-06-21 16:29:10 +02003651{
3652 struct irq_remap_table *table;
Joerg Roedel37946d92017-10-06 12:16:39 +02003653 int index, c, alignment = 1;
Joerg Roedel2b324502012-06-21 16:29:10 +02003654 unsigned long flags;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003655 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3656
3657 if (!iommu)
3658 return -ENODEV;
Joerg Roedel2b324502012-06-21 16:29:10 +02003659
3660 table = get_irq_table(devid, false);
3661 if (!table)
3662 return -ENODEV;
3663
Joerg Roedel37946d92017-10-06 12:16:39 +02003664 if (align)
3665 alignment = roundup_pow_of_two(count);
3666
Joerg Roedel2b324502012-06-21 16:29:10 +02003667 spin_lock_irqsave(&table->lock, flags);
3668
3669 /* Scan table for free entries */
Joerg Roedel37946d92017-10-06 12:16:39 +02003670 for (index = ALIGN(table->min_index, alignment), c = 0;
Alex Williamson07d1c912017-11-03 10:50:31 -06003671 index < MAX_IRQS_PER_TABLE;) {
Joerg Roedel37946d92017-10-06 12:16:39 +02003672 if (!iommu->irte_ops->is_allocated(table, index)) {
Joerg Roedel2b324502012-06-21 16:29:10 +02003673 c += 1;
Joerg Roedel37946d92017-10-06 12:16:39 +02003674 } else {
3675 c = 0;
Alex Williamson07d1c912017-11-03 10:50:31 -06003676 index = ALIGN(index + 1, alignment);
Joerg Roedel37946d92017-10-06 12:16:39 +02003677 continue;
3678 }
Joerg Roedel2b324502012-06-21 16:29:10 +02003679
3680 if (c == count) {
Joerg Roedel2b324502012-06-21 16:29:10 +02003681 for (; c != 0; --c)
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003682 iommu->irte_ops->set_allocated(table, index - c + 1);
Joerg Roedel2b324502012-06-21 16:29:10 +02003683
3684 index -= count - 1;
Joerg Roedel2b324502012-06-21 16:29:10 +02003685 goto out;
3686 }
Alex Williamson07d1c912017-11-03 10:50:31 -06003687
3688 index++;
Joerg Roedel2b324502012-06-21 16:29:10 +02003689 }
3690
3691 index = -ENOSPC;
3692
3693out:
3694 spin_unlock_irqrestore(&table->lock, flags);
3695
3696 return index;
3697}
3698
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003699static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
3700 struct amd_ir_data *data)
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003701{
3702 struct irq_remap_table *table;
3703 struct amd_iommu *iommu;
3704 unsigned long flags;
3705 struct irte_ga *entry;
3706
3707 iommu = amd_iommu_rlookup_table[devid];
3708 if (iommu == NULL)
3709 return -EINVAL;
3710
3711 table = get_irq_table(devid, false);
3712 if (!table)
3713 return -ENOMEM;
3714
3715 spin_lock_irqsave(&table->lock, flags);
3716
3717 entry = (struct irte_ga *)table->table;
3718 entry = &entry[index];
3719 entry->lo.fields_remap.valid = 0;
3720 entry->hi.val = irte->hi.val;
3721 entry->lo.val = irte->lo.val;
3722 entry->lo.fields_remap.valid = 1;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003723 if (data)
3724 data->ref = entry;
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003725
3726 spin_unlock_irqrestore(&table->lock, flags);
3727
3728 iommu_flush_irt(iommu, devid);
3729 iommu_completion_wait(iommu);
3730
3731 return 0;
3732}
3733
3734static int modify_irte(u16 devid, int index, union irte *irte)
Joerg Roedel2b324502012-06-21 16:29:10 +02003735{
3736 struct irq_remap_table *table;
3737 struct amd_iommu *iommu;
3738 unsigned long flags;
3739
3740 iommu = amd_iommu_rlookup_table[devid];
3741 if (iommu == NULL)
3742 return -EINVAL;
3743
3744 table = get_irq_table(devid, false);
3745 if (!table)
3746 return -ENOMEM;
3747
3748 spin_lock_irqsave(&table->lock, flags);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003749 table->table[index] = irte->val;
Joerg Roedel2b324502012-06-21 16:29:10 +02003750 spin_unlock_irqrestore(&table->lock, flags);
3751
3752 iommu_flush_irt(iommu, devid);
3753 iommu_completion_wait(iommu);
3754
3755 return 0;
3756}
3757
3758static void free_irte(u16 devid, int index)
3759{
3760 struct irq_remap_table *table;
3761 struct amd_iommu *iommu;
3762 unsigned long flags;
3763
3764 iommu = amd_iommu_rlookup_table[devid];
3765 if (iommu == NULL)
3766 return;
3767
3768 table = get_irq_table(devid, false);
3769 if (!table)
3770 return;
3771
3772 spin_lock_irqsave(&table->lock, flags);
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003773 iommu->irte_ops->clear_allocated(table, index);
Joerg Roedel2b324502012-06-21 16:29:10 +02003774 spin_unlock_irqrestore(&table->lock, flags);
3775
3776 iommu_flush_irt(iommu, devid);
3777 iommu_completion_wait(iommu);
3778}
3779
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003780static void irte_prepare(void *entry,
3781 u32 delivery_mode, u32 dest_mode,
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003782 u8 vector, u32 dest_apicid, int devid)
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003783{
3784 union irte *irte = (union irte *) entry;
3785
3786 irte->val = 0;
3787 irte->fields.vector = vector;
3788 irte->fields.int_type = delivery_mode;
3789 irte->fields.destination = dest_apicid;
3790 irte->fields.dm = dest_mode;
3791 irte->fields.valid = 1;
3792}
3793
3794static void irte_ga_prepare(void *entry,
3795 u32 delivery_mode, u32 dest_mode,
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003796 u8 vector, u32 dest_apicid, int devid)
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003797{
3798 struct irte_ga *irte = (struct irte_ga *) entry;
3799
3800 irte->lo.val = 0;
3801 irte->hi.val = 0;
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003802 irte->lo.fields_remap.int_type = delivery_mode;
3803 irte->lo.fields_remap.dm = dest_mode;
3804 irte->hi.fields.vector = vector;
3805 irte->lo.fields_remap.destination = dest_apicid;
3806 irte->lo.fields_remap.valid = 1;
3807}
3808
3809static void irte_activate(void *entry, u16 devid, u16 index)
3810{
3811 union irte *irte = (union irte *) entry;
3812
3813 irte->fields.valid = 1;
3814 modify_irte(devid, index, irte);
3815}
3816
3817static void irte_ga_activate(void *entry, u16 devid, u16 index)
3818{
3819 struct irte_ga *irte = (struct irte_ga *) entry;
3820
3821 irte->lo.fields_remap.valid = 1;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003822 modify_irte_ga(devid, index, irte, NULL);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003823}
3824
3825static void irte_deactivate(void *entry, u16 devid, u16 index)
3826{
3827 union irte *irte = (union irte *) entry;
3828
3829 irte->fields.valid = 0;
3830 modify_irte(devid, index, irte);
3831}
3832
3833static void irte_ga_deactivate(void *entry, u16 devid, u16 index)
3834{
3835 struct irte_ga *irte = (struct irte_ga *) entry;
3836
3837 irte->lo.fields_remap.valid = 0;
Suravee Suthikulpanitb9fc6b52016-08-23 13:52:39 -05003838 modify_irte_ga(devid, index, irte, NULL);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003839}
3840
3841static void irte_set_affinity(void *entry, u16 devid, u16 index,
3842 u8 vector, u32 dest_apicid)
3843{
3844 union irte *irte = (union irte *) entry;
3845
3846 irte->fields.vector = vector;
3847 irte->fields.destination = dest_apicid;
3848 modify_irte(devid, index, irte);
3849}
3850
3851static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
3852 u8 vector, u32 dest_apicid)
3853{
3854 struct irte_ga *irte = (struct irte_ga *) entry;
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003855 struct iommu_dev_data *dev_data = search_dev_data(devid);
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003856
Suravee Suthikulpanit84a21db2017-06-26 04:28:04 -05003857 if (!dev_data || !dev_data->use_vapic ||
3858 !irte->lo.fields_remap.guest_mode) {
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003859 irte->hi.fields.vector = vector;
3860 irte->lo.fields_remap.destination = dest_apicid;
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05003861 modify_irte_ga(devid, index, irte, NULL);
3862 }
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003863}
3864
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003865#define IRTE_ALLOCATED (~1U)
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05003866static void irte_set_allocated(struct irq_remap_table *table, int index)
3867{
3868 table->table[index] = IRTE_ALLOCATED;
3869}
3870
3871static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3872{
3873 struct irte_ga *ptr = (struct irte_ga *)table->table;
3874 struct irte_ga *irte = &ptr[index];
3875
3876 memset(&irte->lo.val, 0, sizeof(u64));
3877 memset(&irte->hi.val, 0, sizeof(u64));
3878 irte->hi.fields.vector = 0xff;
3879}
3880
3881static bool irte_is_allocated(struct irq_remap_table *table, int index)
3882{
3883 union irte *ptr = (union irte *)table->table;
3884 union irte *irte = &ptr[index];
3885
3886 return irte->val != 0;
3887}
3888
3889static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3890{
3891 struct irte_ga *ptr = (struct irte_ga *)table->table;
3892 struct irte_ga *irte = &ptr[index];
3893
3894 return irte->hi.fields.vector != 0;
3895}
3896
3897static void irte_clear_allocated(struct irq_remap_table *table, int index)
3898{
3899 table->table[index] = 0;
3900}
3901
3902static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3903{
3904 struct irte_ga *ptr = (struct irte_ga *)table->table;
3905 struct irte_ga *irte = &ptr[index];
3906
3907 memset(&irte->lo.val, 0, sizeof(u64));
3908 memset(&irte->hi.val, 0, sizeof(u64));
3909}
3910
Jiang Liu7c71d302015-04-13 14:11:33 +08003911static int get_devid(struct irq_alloc_info *info)
Joerg Roedel5527de72012-06-26 11:17:32 +02003912{
Jiang Liu7c71d302015-04-13 14:11:33 +08003913 int devid = -1;
Joerg Roedel5527de72012-06-26 11:17:32 +02003914
Jiang Liu7c71d302015-04-13 14:11:33 +08003915 switch (info->type) {
3916 case X86_IRQ_ALLOC_TYPE_IOAPIC:
3917 devid = get_ioapic_devid(info->ioapic_id);
3918 break;
3919 case X86_IRQ_ALLOC_TYPE_HPET:
3920 devid = get_hpet_devid(info->hpet_id);
3921 break;
3922 case X86_IRQ_ALLOC_TYPE_MSI:
3923 case X86_IRQ_ALLOC_TYPE_MSIX:
3924 devid = get_device_id(&info->msi_dev->dev);
3925 break;
3926 default:
3927 BUG_ON(1);
3928 break;
Joerg Roedel5527de72012-06-26 11:17:32 +02003929 }
3930
Jiang Liu7c71d302015-04-13 14:11:33 +08003931 return devid;
Joerg Roedel5527de72012-06-26 11:17:32 +02003932}
3933
Jiang Liu7c71d302015-04-13 14:11:33 +08003934static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
Joerg Roedel5527de72012-06-26 11:17:32 +02003935{
Jiang Liu7c71d302015-04-13 14:11:33 +08003936 struct amd_iommu *iommu;
3937 int devid;
Joerg Roedel5527de72012-06-26 11:17:32 +02003938
Jiang Liu7c71d302015-04-13 14:11:33 +08003939 if (!info)
3940 return NULL;
Joerg Roedel5527de72012-06-26 11:17:32 +02003941
Jiang Liu7c71d302015-04-13 14:11:33 +08003942 devid = get_devid(info);
3943 if (devid >= 0) {
3944 iommu = amd_iommu_rlookup_table[devid];
3945 if (iommu)
3946 return iommu->ir_domain;
3947 }
Joerg Roedel5527de72012-06-26 11:17:32 +02003948
Jiang Liu7c71d302015-04-13 14:11:33 +08003949 return NULL;
Joerg Roedel5527de72012-06-26 11:17:32 +02003950}
3951
Jiang Liu7c71d302015-04-13 14:11:33 +08003952static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003953{
Jiang Liu7c71d302015-04-13 14:11:33 +08003954 struct amd_iommu *iommu;
3955 int devid;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003956
Jiang Liu7c71d302015-04-13 14:11:33 +08003957 if (!info)
3958 return NULL;
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003959
Jiang Liu7c71d302015-04-13 14:11:33 +08003960 switch (info->type) {
3961 case X86_IRQ_ALLOC_TYPE_MSI:
3962 case X86_IRQ_ALLOC_TYPE_MSIX:
3963 devid = get_device_id(&info->msi_dev->dev);
Joerg Roedel9ee35e42016-04-21 18:21:31 +02003964 if (devid < 0)
Wan Zongshun7aba6cb2016-04-01 09:06:02 -04003965 return NULL;
3966
Dan Carpenter1fb260b2016-01-07 12:36:06 +03003967 iommu = amd_iommu_rlookup_table[devid];
3968 if (iommu)
3969 return iommu->msi_domain;
Jiang Liu7c71d302015-04-13 14:11:33 +08003970 break;
3971 default:
3972 break;
3973 }
Joerg Roedel0b4d48c2012-06-26 14:54:17 +02003974
Jiang Liu7c71d302015-04-13 14:11:33 +08003975 return NULL;
Joerg Roedeld9761952012-06-26 16:00:08 +02003976}
3977
Joerg Roedel6b474b82012-06-26 16:46:04 +02003978struct irq_remap_ops amd_iommu_irq_ops = {
Joerg Roedel6b474b82012-06-26 16:46:04 +02003979 .prepare = amd_iommu_prepare,
3980 .enable = amd_iommu_enable,
3981 .disable = amd_iommu_disable,
3982 .reenable = amd_iommu_reenable,
3983 .enable_faulting = amd_iommu_enable_faulting,
Jiang Liu7c71d302015-04-13 14:11:33 +08003984 .get_ir_irq_domain = get_ir_irq_domain,
3985 .get_irq_domain = get_irq_domain,
Joerg Roedel6b474b82012-06-26 16:46:04 +02003986};
Jiang Liu7c71d302015-04-13 14:11:33 +08003987
3988static void irq_remapping_prepare_irte(struct amd_ir_data *data,
3989 struct irq_cfg *irq_cfg,
3990 struct irq_alloc_info *info,
3991 int devid, int index, int sub_handle)
3992{
3993 struct irq_2_irte *irte_info = &data->irq_2_irte;
3994 struct msi_msg *msg = &data->msi_entry;
Jiang Liu7c71d302015-04-13 14:11:33 +08003995 struct IO_APIC_route_entry *entry;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05003996 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
3997
3998 if (!iommu)
3999 return;
Jiang Liu7c71d302015-04-13 14:11:33 +08004000
Jiang Liu7c71d302015-04-13 14:11:33 +08004001 data->irq_2_irte.devid = devid;
4002 data->irq_2_irte.index = index + sub_handle;
Suravee Suthikulpanit77bdab42016-08-23 13:52:35 -05004003 iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
4004 apic->irq_dest_mode, irq_cfg->vector,
Suravee Suthikulpanitd98de492016-08-23 13:52:40 -05004005 irq_cfg->dest_apicid, devid);
Jiang Liu7c71d302015-04-13 14:11:33 +08004006
4007 switch (info->type) {
4008 case X86_IRQ_ALLOC_TYPE_IOAPIC:
4009 /* Setup IOAPIC entry */
4010 entry = info->ioapic_entry;
4011 info->ioapic_entry = NULL;
4012 memset(entry, 0, sizeof(*entry));
4013 entry->vector = index;
4014 entry->mask = 0;
4015 entry->trigger = info->ioapic_trigger;
4016 entry->polarity = info->ioapic_polarity;
4017 /* Mask level triggered irqs. */
4018 if (info->ioapic_trigger)
4019 entry->mask = 1;
4020 break;
4021
4022 case X86_IRQ_ALLOC_TYPE_HPET:
4023 case X86_IRQ_ALLOC_TYPE_MSI:
4024 case X86_IRQ_ALLOC_TYPE_MSIX:
4025 msg->address_hi = MSI_ADDR_BASE_HI;
4026 msg->address_lo = MSI_ADDR_BASE_LO;
4027 msg->data = irte_info->index;
4028 break;
4029
4030 default:
4031 BUG_ON(1);
4032 break;
4033 }
4034}
4035
Suravee Suthikulpanit880ac602016-08-23 13:52:34 -05004036struct amd_irte_ops irte_32_ops = {
4037 .prepare = irte_prepare,
4038 .activate = irte_activate,
4039 .deactivate = irte_deactivate,
4040 .set_affinity = irte_set_affinity,
4041 .set_allocated = irte_set_allocated,
4042 .is_allocated = irte_is_allocated,
4043 .clear_allocated = irte_clear_allocated,
4044};
4045
4046struct amd_irte_ops irte_128_ops = {
4047 .prepare = irte_ga_prepare,
4048 .activate = irte_ga_activate,
4049 .deactivate = irte_ga_deactivate,
4050 .set_affinity = irte_ga_set_affinity,
4051 .set_allocated = irte_ga_set_allocated,
4052 .is_allocated = irte_ga_is_allocated,
4053 .clear_allocated = irte_ga_clear_allocated,
4054};
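
/*
 * Illustrative note (assumption: the selection happens in the IOMMU init
 * code, not in this file): which of the two tables an IOMMU uses is chosen
 * once, based on the guest-interrupt mode, roughly:
 *
 *	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
 *		iommu->irte_ops = &irte_128_ops;
 *	else
 *		iommu->irte_ops = &irte_32_ops;
 */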
4055
static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct irq_data *irq_data;
	struct amd_ir_data *data = NULL;
	struct irq_cfg *cfg;
	int i, ret, devid;
	int index = -1;

	if (!info)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
		return -EINVAL;

	/*
	 * With IRQ remapping enabled, we don't need contiguous CPU vectors
	 * to support multiple MSI interrupts.
	 */
	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	devid = get_devid(info);
	if (devid < 0)
		return -EINVAL;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
		if (get_irq_table(devid, true))
			index = info->ioapic_pin;
		else
			ret = -ENOMEM;
	} else {
		bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);

		index = alloc_irq_index(devid, nr_irqs, align);
	}
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		ret = index;
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		cfg = irq_data ? irqd_cfg(irq_data) : NULL;
		if (!irq_data || !cfg) {
			ret = -EINVAL;
			goto out_free_data;
		}

		ret = -ENOMEM;
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			goto out_free_data;

		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			data->entry = kzalloc(sizeof(union irte), GFP_KERNEL);
		else
			data->entry = kzalloc(sizeof(struct irte_ga),
					      GFP_KERNEL);
		if (!data->entry) {
			kfree(data);
			goto out_free_data;
		}

		irq_data->hwirq = (devid << 16) + i;
		irq_data->chip_data = data;
		irq_data->chip = &amd_ir_chip;
		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}

	return 0;

out_free_data:
	for (i--; i >= 0; i--) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data)
			kfree(irq_data->chip_data);
	}
	for (i = 0; i < nr_irqs; i++)
		free_irte(devid, index + i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

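/*
 * irq_domain free callback: release the IRTEs and the per-interrupt data
 * allocated by irq_remapping_alloc().
 */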
static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
			       unsigned int nr_irqs)
{
	struct irq_2_irte *irte_info;
	struct irq_data *irq_data;
	struct amd_ir_data *data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irte_info = &data->irq_2_irte;
			free_irte(irte_info->devid, irte_info->index);
			kfree(data->entry);
			kfree(data);
		}
	}
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
			       struct amd_ir_data *ir_data,
			       struct irq_2_irte *irte_info,
			       struct irq_cfg *cfg);

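/*
 * Activate/deactivate callbacks: write the prepared entry into the device's
 * interrupt remapping table when an interrupt is started up, and clear it
 * again when it is shut down.
 */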
static int irq_remapping_activate(struct irq_domain *domain,
				  struct irq_data *irq_data, bool reserve)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
	struct irq_cfg *cfg = irqd_cfg(irq_data);

	if (!iommu)
		return 0;

	iommu->irte_ops->activate(data->entry, irte_info->devid,
				  irte_info->index);
	amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
	return 0;
}

static void irq_remapping_deactivate(struct irq_domain *domain,
				     struct irq_data *irq_data)
{
	struct amd_ir_data *data = irq_data->chip_data;
	struct irq_2_irte *irte_info = &data->irq_2_irte;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];

	if (iommu)
		iommu->irte_ops->deactivate(data->entry, irte_info->devid,
					    irte_info->index);
}

static const struct irq_domain_ops amd_ir_domain_ops = {
	.alloc = irq_remapping_alloc,
	.free = irq_remapping_free,
	.activate = irq_remapping_activate,
	.deactivate = irq_remapping_deactivate,
};

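/*
 * Posted-interrupt support for KVM/SVM: switch the 128-bit IRTE between
 * guest mode (delivery through the guest virtual APIC) and legacy remapped
 * mode, based on the amd_iommu_pi_data handed in by the hypervisor.
 */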
static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
{
	struct amd_iommu *iommu;
	struct amd_iommu_pi_data *pi_data = vcpu_info;
	struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
	struct amd_ir_data *ir_data = data->chip_data;
	struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);

	/* Note:
	 * This device has never been set up for guest mode.
	 * We should not modify the IRTE.
	 */
	if (!dev_data || !dev_data->use_vapic)
		return 0;

	pi_data->ir_data = ir_data;

	/* Note:
	 * SVM tries to set up for VAPIC mode, but we are in
	 * legacy mode. So, we force legacy mode instead.
	 */
	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
		pr_debug("AMD-Vi: %s: Fall back to using intr legacy remap\n",
			 __func__);
		pi_data->is_guest_mode = false;
	}

	iommu = amd_iommu_rlookup_table[irte_info->devid];
	if (iommu == NULL)
		return -EINVAL;

	pi_data->prev_ga_tag = ir_data->cached_ga_tag;
	if (pi_data->is_guest_mode) {
		/* Setting */
		irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
		irte->hi.fields.vector = vcpu_pi_info->vector;
		irte->lo.fields_vapic.ga_log_intr = 1;
		irte->lo.fields_vapic.guest_mode = 1;
		irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;

		ir_data->cached_ga_tag = pi_data->ga_tag;
	} else {
		/* Un-Setting */
		struct irq_cfg *cfg = irqd_cfg(data);

		irte->hi.val = 0;
		irte->lo.val = 0;
		irte->hi.fields.vector = cfg->vector;
		irte->lo.fields_remap.guest_mode = 0;
		irte->lo.fields_remap.destination = cfg->dest_apicid;
		irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
		irte->lo.fields_remap.dm = apic->irq_dest_mode;

		/*
		 * This communicates the ga_tag back to the caller
		 * so that it can do all the necessary clean up.
		 */
		ir_data->cached_ga_tag = 0;
	}

	return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
}

static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
			       struct amd_ir_data *ir_data,
			       struct irq_2_irte *irte_info,
			       struct irq_cfg *cfg)
{
	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
				      irte_info->index, cfg->vector,
				      cfg->dest_apicid);
}

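/*
 * Affinity changes go to the parent (vector) domain first; the IRTE is then
 * updated with the new vector/destination and a cleanup IPI is sent so the
 * old CPU can release its vector.
 */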
static int amd_ir_set_affinity(struct irq_data *data,
			       const struct cpumask *mask, bool force)
{
	struct amd_ir_data *ir_data = data->chip_data;
	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
	struct irq_cfg *cfg = irqd_cfg(data);
	struct irq_data *parent = data->parent_data;
	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
	int ret;

	if (!iommu)
		return -ENODEV;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
	struct amd_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

static struct irq_chip amd_ir_chip = {
	.name			= "AMD-IR",
	.irq_ack		= ir_ack_apic_edge,
	.irq_set_affinity	= amd_ir_set_affinity,
	.irq_set_vcpu_affinity	= amd_ir_set_vcpu_affinity,
	.irq_compose_msi_msg	= ir_compose_msi_msg,
};

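/*
 * Create the remapping irq_domain for one IOMMU, plus the MSI domain
 * stacked on top of it; called during IOMMU initialization.
 */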
int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
	if (!fn)
		return -ENOMEM;
	iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
	irq_domain_free_fwnode(fn);
	if (!iommu->ir_domain)
		return -ENOMEM;

	iommu->ir_domain->parent = arch_get_ir_parent_domain();
	iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
							     "AMD-IR-MSI",
							     iommu->index);
	return 0;
}

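/*
 * Exported for the KVM AVIC code: when a vCPU is scheduled in or out, update
 * the destination and is_run bits of a guest-mode IRTE so posted interrupts
 * follow the vCPU, then flush the interrupt table cache.
 */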
int amd_iommu_update_ga(int cpu, bool is_run, void *data)
{
	unsigned long flags;
	struct amd_iommu *iommu;
	struct irq_remap_table *irt;
	struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
	int devid = ir_data->irq_2_irte.devid;
	struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
	struct irte_ga *ref = (struct irte_ga *) ir_data->ref;

	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
	    !ref || !entry || !entry->lo.fields_vapic.guest_mode)
		return 0;

	iommu = amd_iommu_rlookup_table[devid];
	if (!iommu)
		return -ENODEV;

	irt = get_irq_table(devid, false);
	if (!irt)
		return -ENODEV;

	spin_lock_irqsave(&irt->lock, flags);

	if (ref->lo.fields_vapic.guest_mode) {
		if (cpu >= 0)
			ref->lo.fields_vapic.destination = cpu;
		ref->lo.fields_vapic.is_run = is_run;
		barrier();
	}

	spin_unlock_irqrestore(&irt->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
	return 0;
}
EXPORT_SYMBOL(amd_iommu_update_ga);
#endif