/*
 * Copyright (C) 2016, Semihalf
 * Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of the I/O mappings
 * reported to the OS by firmware via the I/O Remapping Table (IORT).
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX		0x2
#endif

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create an iort_fwnode and use it to register
 *		       iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline
struct fwnode_handle *iort_get_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}
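
/*
 * Usage sketch (illustration only, not part of the original file): the
 * three helpers above form a small registry pairing an IORT table node
 * with the fwnode_handle allocated for its platform device. A set-up
 * path would typically do:
 *
 *	struct fwnode_handle *fwnode = acpi_alloc_fwnode_static();
 *
 *	if (fwnode && !iort_set_fwnode(iort_node, fwnode)) {
 *		// ... probe the SMMU platform device ...
 *		// and on failure/tear-down:
 *		iort_delete_fwnode(iort_node);
 *	}
 *
 * iort_init_platform_devices() at the bottom of this file is the real
 * consumer of this pattern.
 */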

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register a domain token and its related
 * ITS ID on the list from which they can be retrieved later on.
 * @trans_id: ITS ID.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if the list element allocation fails
 */
int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token if found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}
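
/*
 * Usage sketch (illustration only): an ITS driver is expected to register
 * its domain token against the ITS ID it enumerated, and the lookup side
 * resolves the token when mapping a device's MSIs. A hypothetical caller:
 *
 *	if (iort_register_domain_token(its_id, dom_handle))
 *		return -ENOMEM;
 *
 *	// later, e.g. from iort_get_device_domain():
 *	struct fwnode_handle *handle = iort_find_domain_token(its_id);
 *
 *	// on ITS tear-down:
 *	iort_deregister_domain_token(its_id);
 */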

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}

static acpi_status
iort_match_type_callback(struct acpi_iort_node *node, void *context)
{
	return AE_OK;
}

bool iort_node_match(u8 type)
{
	struct acpi_iort_node *node;

	node = iort_scan_node(type, iort_match_type_callback, NULL);

	return node != NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * to root complexes: each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out)
{
	/* A single mapping ignores the input ID */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in >= map->input_base + map->id_count))
		return -ENXIO;

	*rid_out = map->output_base + (rid_in - map->input_base);
	return 0;
}
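
/*
 * Worked example (illustration only): given a range mapping with
 * input_base = 0x1000, id_count = 0x100, output_base = 0x0 and the
 * SINGLE_MAPPING flag clear, an input ID of 0x1042 translates to
 *
 *	*rid_out = output_base + (rid_in - input_base)
 *	         = 0x0 + (0x1042 - 0x1000) = 0x42
 *
 * while any input ID outside [0x1000, 0x10ff] makes iort_id_map()
 * return -ENXIO so the caller can try the node's next mapping entry.
 */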

static
struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			if (!iort_id_map(map, node->type, id, &id))
				break;
		}

		if (i == node->mapping_count)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}

static
struct acpi_iort_node *iort_node_map_platform_id(struct acpi_iort_node *node,
						 u32 *id_out, u8 type_mask,
						 int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * Optional step 2: if the parent is not of the target type we
	 * want, keep mapping the ID, to handle use cases such as
	 * NC (named component) -> SMMU -> ITS. If the type already
	 * matches, return the initial dev id and its parent pointer
	 * directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}
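
/*
 * Example walk (illustration only): for a named component whose mapping
 * at @index carries the SINGLE_MAPPING flag and points at an SMMU,
 *
 *	parent = iort_node_map_platform_id(nc_node, &id, IORT_MSI_TYPE, 0);
 *
 * step 1 resolves NC -> SMMU via iort_node_get_id(); since an SMMU does
 * not match IORT_MSI_TYPE, step 2 continues SMMU -> ITS group via
 * iort_node_map_id(), returning the ITS group node and storing the fully
 * translated device ID in id.
 */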

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev))
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);

	/* Find a PCI root bus */
	pbus = to_pci_dev(dev)->bus;
	while (!pci_is_root_bus(pbus))
		pbus = pbus->parent;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map an MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return req_id;

	iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}
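
/*
 * Usage sketch (illustration only): an MSI layer caller could translate
 * a PCI device's requester ID into the device ID programmed into the ITS:
 *
 *	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
 *	u32 dev_id = iort_msi_map_rid(&pdev->dev, rid);
 *
 * If the device has no IORT node, or the mapping fails, the input
 * requester ID is returned unchanged, so the call is safe on non-IORT
 * systems as well.
 */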

/**
 * iort_pmsi_get_dev_id() - Get the device ID for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a device ID was found, -ENODEV otherwise
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	for (i = 0; i < node->mapping_count; i++) {
		if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i))
			return 0;
	}

	return -ENODEV;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
	u32 *rid = data;

	*rid = alias;
	return 0;
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_BUILTIN(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

#ifdef CONFIG_IOMMU_API
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{
	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{
	int err = 0;

	if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus &&
	    !dev->iommu_group)
		err = ops->add_device(dev);

	return err;
}
#else
static inline
const struct iommu_ops *iort_fwspec_iommu_ops(struct iommu_fwspec *fwspec)
{ return NULL; }
static inline
int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{ return 0; }
#endif

static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
						struct acpi_iort_node *node,
						u32 streamid)
{
	const struct iommu_ops *ops = NULL;
	int ret = -ENODEV;
	struct fwnode_handle *iort_fwnode;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
	if (ops)
		return ops;

	if (node) {
		iort_fwnode = iort_get_fwnode(node);
		if (!iort_fwnode)
			return NULL;

		ops = iommu_ops_from_fwnode(iort_fwnode);
		/*
		 * If the ops look-up fails, it means that either
		 * the SMMU drivers have not been probed yet or that
		 * the SMMU drivers are not built into the kernel;
		 * defer the IOMMU configuration in the former case
		 * and just abort it in the latter.
		 */
		if (!ops)
			return iort_iommu_driver_enabled(node->type) ?
			       ERR_PTR(-EPROBE_DEFER) : NULL;

		ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
	}

	return ret ? NULL : ops;
}

/**
 * iort_set_dma_mask - Set up the DMA mask for a device.
 *
 * @dev: device to configure
 */
void iort_set_dma_mask(struct device *dev)
{
	/*
	 * Set default coherent_dma_mask to 32 bit. Drivers are expected to
	 * set up the correct supported mask.
	 */
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	/*
	 * Set it to coherent_dma_mask by default if the architecture
	 * code has not set it.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
}

/**
 * iort_iommu_configure - Set up the IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops = NULL;
	u32 streamid = 0;
	int err;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		u32 rid;

		pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
				       &rid);

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		parent = iort_node_map_id(node, rid, &streamid,
					  IORT_IOMMU_TYPE);

		ops = iort_iommu_xlate(dev, parent, streamid);

	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		parent = iort_node_map_platform_id(node, &streamid,
						   IORT_IOMMU_TYPE, i++);

		while (parent) {
			ops = iort_iommu_xlate(dev, parent, streamid);
			if (IS_ERR_OR_NULL(ops))
				return ops;

			parent = iort_node_map_platform_id(node, &streamid,
							   IORT_IOMMU_TYPE,
							   i++);
		}
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	err = iort_add_device_replay(ops, dev);
	if (err)
		ops = ERR_PTR(err);

	return ops;
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
		       name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * The Cavium ThunderX2 implementation doesn't support unique
	 * IRQ lines. Use a single IRQ line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size for the Cavium ThunderX2 implementation,
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}
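
/*
 * Note (assumption based on the IORT_IRQ_* macros in linux/acpi_iort.h):
 * each 64-bit interrupt descriptor read above packs the GSIV in its low
 * 32 bits and the trigger mode in its high 32 bits, e.g.:
 *
 *	u64 raw = ctx_irq[i];
 *	int hw_irq = IORT_IRQ_MASK(raw);		// raw & 0xffffffff
 *	int trigger = IORT_IRQ_TRIGGER_MASK(raw);	// (raw >> 32) & 0xffffffff
 */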

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_iommu_config {
	const char *name;
	int (*iommu_init)(struct acpi_iort_node *node);
	bool (*iommu_is_coherent)(struct acpi_iort_node *node);
	int (*iommu_count_resources)(struct acpi_iort_node *node);
	void (*iommu_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
};

static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.iommu_is_coherent = arm_smmu_v3_is_coherent,
	.iommu_count_resources = arm_smmu_v3_count_resources,
	.iommu_init_resources = arm_smmu_v3_init_resources
};

static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.iommu_is_coherent = arm_smmu_is_coherent,
	.iommu_count_resources = arm_smmu_count_resources,
	.iommu_init_resources = arm_smmu_init_resources
};

static __init
const struct iort_iommu_config *iort_get_iommu_cfg(struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
 * @node: Pointer to SMMU ACPI IORT node
 *
 * Returns: 0 on success, <0 on failure
 */
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;
	const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);

	if (!ops)
		return -ENODEV;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	count = ops->iommu_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->iommu_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the DMA masks to be equivalent for
	 * all SMMU set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->iommu_is_coherent(node) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
		    (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {

			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_smmu_platform_device(iort_node);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}