blob: 5b5630e52281fb10a570ff8538998aa445ec4fe8 [file] [log] [blame]
Tomasz Nowicki88ef16d2016-09-12 20:54:20 +02001/*
2 * Copyright (C) 2016, Semihalf
3 * Author: Tomasz Nowicki <tn@semihalf.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * This file implements early detection/parsing of I/O mapping
15 * reported to OS through firmware via I/O Remapping Table (IORT)
16 * IORT document number: ARM DEN 0049A
17 */
18
19#define pr_fmt(fmt) "ACPI: IORT: " fmt
20
21#include <linux/acpi_iort.h>
Lorenzo Pieralisi846f0e92016-11-21 10:01:41 +000022#include <linux/iommu.h>
Tomasz Nowicki88ef16d2016-09-12 20:54:20 +020023#include <linux/kernel.h>
Lorenzo Pieralisi7936df92016-11-21 10:01:35 +000024#include <linux/list.h>
Tomasz Nowicki88ef16d2016-09-12 20:54:20 +020025#include <linux/pci.h>
Lorenzo Pieralisi846f0e92016-11-21 10:01:41 +000026#include <linux/platform_device.h>
Lorenzo Pieralisi7936df92016-11-21 10:01:35 +000027#include <linux/slab.h>
Tomasz Nowicki88ef16d2016-09-12 20:54:20 +020028
Lorenzo Pieralisiea50b522016-11-21 10:01:46 +000029#define IORT_TYPE_MASK(type) (1 << (type))
30#define IORT_MSI_TYPE (1 << ACPI_IORT_NODE_ITS_GROUP)
Lorenzo Pieralisi643b8e42016-11-21 10:01:48 +000031#define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \
32 (1 << ACPI_IORT_NODE_SMMU_V3))
Lorenzo Pieralisiea50b522016-11-21 10:01:46 +000033
Robert Richter12275bf2017-06-22 21:20:54 +020034/* Until ACPICA headers cover IORT rev. C */
35#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
36#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2
37#endif
38
/*
 * Association between an ITS translation ID and the MSI domain token
 * (fwnode) registered for it; entries live on iort_msi_chip_list and
 * are protected by iort_msi_chip_lock.
 */
struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	u32			translation_id;
};
44
/*
 * Binding between an IORT table node and the fwnode created for it,
 * used to retrieve IOMMU fwnodes from IORT node references.
 */
struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
/* List of iort_fwnode bindings, protected by iort_fwnode_lock */
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);
52
53/**
54 * iort_set_fwnode() - Create iort_fwnode and use it to register
55 * iommu data in the iort_fwnode_list
56 *
57 * @node: IORT table node associated with the IOMMU
58 * @fwnode: fwnode associated with the IORT node
59 *
60 * Returns: 0 on success
61 * <0 on failure
62 */
63static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
64 struct fwnode_handle *fwnode)
65{
66 struct iort_fwnode *np;
67
68 np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);
69
70 if (WARN_ON(!np))
71 return -ENOMEM;
72
73 INIT_LIST_HEAD(&np->list);
74 np->iort_node = iort_node;
75 np->fwnode = fwnode;
76
77 spin_lock(&iort_fwnode_lock);
78 list_add_tail(&np->list, &iort_fwnode_list);
79 spin_unlock(&iort_fwnode_lock);
80
81 return 0;
82}
83
84/**
85 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
86 *
87 * @node: IORT table node to be looked-up
88 *
89 * Returns: fwnode_handle pointer on success, NULL on failure
90 */
Lorenzo Pieralisie3d49392017-09-28 14:03:33 +010091static inline struct fwnode_handle *iort_get_fwnode(
92 struct acpi_iort_node *node)
Lorenzo Pieralisi7936df92016-11-21 10:01:35 +000093{
94 struct iort_fwnode *curr;
95 struct fwnode_handle *fwnode = NULL;
96
97 spin_lock(&iort_fwnode_lock);
98 list_for_each_entry(curr, &iort_fwnode_list, list) {
99 if (curr->iort_node == node) {
100 fwnode = curr->fwnode;
101 break;
102 }
103 }
104 spin_unlock(&iort_fwnode_lock);
105
106 return fwnode;
107}
108
109/**
110 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
111 *
112 * @node: IORT table node associated with fwnode to delete
113 */
114static inline void iort_delete_fwnode(struct acpi_iort_node *node)
115{
116 struct iort_fwnode *curr, *tmp;
117
118 spin_lock(&iort_fwnode_lock);
119 list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
120 if (curr->iort_node == node) {
121 list_del(&curr->list);
122 kfree(curr);
123 break;
124 }
125 }
126 spin_unlock(&iort_fwnode_lock);
127}
128
/*
 * Callback used by iort_scan_node() to test a candidate node against
 * the caller-supplied context; returns AE_OK on a match.
 */
typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

/* Registry of ITS ID <-> MSI domain token pairs and its lock */
static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);
137
Tomasz Nowicki4bf2efd2016-09-12 20:32:21 +0200138/**
139 * iort_register_domain_token() - register domain token and related ITS ID
140 * to the list from where we can get it back later on.
141 * @trans_id: ITS ID.
142 * @fw_node: Domain token.
143 *
144 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
145 */
146int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
147{
148 struct iort_its_msi_chip *its_msi_chip;
149
150 its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
151 if (!its_msi_chip)
152 return -ENOMEM;
153
154 its_msi_chip->fw_node = fw_node;
155 its_msi_chip->translation_id = trans_id;
156
157 spin_lock(&iort_msi_chip_lock);
158 list_add(&its_msi_chip->list, &iort_msi_chip_list);
159 spin_unlock(&iort_msi_chip_lock);
160
161 return 0;
162}
163
164/**
165 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
166 * @trans_id: ITS ID.
167 *
168 * Returns: none.
169 */
170void iort_deregister_domain_token(int trans_id)
171{
172 struct iort_its_msi_chip *its_msi_chip, *t;
173
174 spin_lock(&iort_msi_chip_lock);
175 list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
176 if (its_msi_chip->translation_id == trans_id) {
177 list_del(&its_msi_chip->list);
178 kfree(its_msi_chip);
179 break;
180 }
181 }
182 spin_unlock(&iort_msi_chip_lock);
183}
184
185/**
186 * iort_find_domain_token() - Find domain token based on given ITS ID
187 * @trans_id: ITS ID.
188 *
189 * Returns: domain token when find on the list, NULL otherwise
190 */
191struct fwnode_handle *iort_find_domain_token(int trans_id)
192{
193 struct fwnode_handle *fw_node = NULL;
194 struct iort_its_msi_chip *its_msi_chip;
195
196 spin_lock(&iort_msi_chip_lock);
197 list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
198 if (its_msi_chip->translation_id == trans_id) {
199 fw_node = its_msi_chip->fw_node;
200 break;
201 }
202 }
203 spin_unlock(&iort_msi_chip_lock);
204
205 return fw_node;
206}
207
/*
 * iort_scan_node() - Linear scan of the mapped IORT table.
 *
 * Walks all node headers starting at iort->node_offset and returns the
 * first node of @type for which @callback(node, @context) succeeds,
 * or NULL if the table is absent, malformed or contains no match.
 */
static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		/* node_count inconsistent with table length: firmware bug */
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		/* Nodes are variable-length; advance by this node's length */
		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}
241
/*
 * iort_scan_node() callback: decide whether @node describes the device
 * passed in @context.
 *
 * Named components are matched by comparing the node's device name
 * against the device's full ACPI pathname; PCI root complexes are
 * matched by PCI segment number.
 */
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
		struct acpi_iort_named_component *ncomp;

		/* Without an ACPI companion there is no pathname to compare */
		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		/* buf.pointer was allocated by acpi_get_name() above */
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers maps one-to-one
		 * with root complexes. Each segment number can represent only
		 * one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}
284
285static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
286 u32 *rid_out)
287{
288 /* Single mapping does not care for input id */
289 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
290 if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
291 type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
292 *rid_out = map->output_base;
293 return 0;
294 }
295
296 pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
297 map, type);
298 return -ENXIO;
299 }
300
301 if (rid_in < map->input_base ||
302 (rid_in >= map->input_base + map->id_count))
303 return -ENXIO;
304
305 *rid_out = map->output_base + (rid_in - map->input_base);
306 return 0;
307}
308
/*
 * iort_node_get_id() - Retrieve the parent and output ID of @node's
 * @index-th ID mapping entry.
 *
 * Only SINGLE_MAPPING entries of named component and PCI root complex
 * nodes produce an ID here; on success *@id_out holds the output base
 * and the parent node is returned, otherwise NULL.
 */
static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	/* Mapping entries are laid out as an array after mapping_offset */
	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			      map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}
342
/*
 * iort_node_map_id() - Walk the ID mapping tree from @node towards a
 * node whose type is in @type_mask, translating @id_in at every hop.
 *
 * On success the matching node is returned and *@id_out (if non-NULL)
 * holds the fully translated ID; on failure NULL is returned and
 * *@id_out is set to @id_in unchanged.
 */
static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			if (!iort_id_map(map, node->type, id, &id))
				break;
		}

		/* No mapping entry of this node covered the current ID */
		if (i == node->mapping_count)
			goto fail_map;

		/* Continue the walk at the parent the matching entry names */
		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}
393
Lorenzo Pieralisie3d49392017-09-28 14:03:33 +0100394static struct acpi_iort_node *iort_node_map_platform_id(
395 struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
396 int index)
Hanjun Guo8ca4f1d2017-03-07 20:40:04 +0800397{
398 struct acpi_iort_node *parent;
399 u32 id;
400
401 /* step 1: retrieve the initial dev id */
402 parent = iort_node_get_id(node, &id, index);
403 if (!parent)
404 return NULL;
405
406 /*
407 * optional step 2: map the initial dev id if its parent is not
408 * the target type we want, map it again for the use cases such
409 * as NC (named component) -> SMMU -> ITS. If the type is matched,
410 * return the initial dev id and its parent pointer directly.
411 */
412 if (!(IORT_TYPE_MASK(parent->type) & type_mask))
413 parent = iort_node_map_id(parent, id, id_out, type_mask);
414 else
415 if (id_out)
416 *id_out = id;
417
418 return parent;
419}
420
Tomasz Nowicki88ef16d2016-09-12 20:54:20 +0200421static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
422{
423 struct pci_bus *pbus;
424
425 if (!dev_is_pci(dev))
426 return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
427 iort_match_node_callback, dev);
428
429 /* Find a PCI root bus */
430 pbus = to_pci_dev(dev)->bus;
431 while (!pci_is_root_bus(pbus))
432 pbus = pbus->parent;
433
434 return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
435 iort_match_node_callback, &pbus->dev);
436}
437
Tomasz Nowicki4bf2efd2016-09-12 20:32:21 +0200438/**
439 * iort_msi_map_rid() - Map a MSI requester ID for a device
440 * @dev: The device for which the mapping is to be done.
441 * @req_id: The device requester ID.
442 *
443 * Returns: mapped MSI RID on success, input requester ID otherwise
444 */
445u32 iort_msi_map_rid(struct device *dev, u32 req_id)
446{
447 struct acpi_iort_node *node;
448 u32 dev_id;
449
450 node = iort_find_dev_node(dev);
451 if (!node)
452 return req_id;
453
Hanjun Guo697f6092017-03-07 20:40:03 +0800454 iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
Tomasz Nowicki4bf2efd2016-09-12 20:32:21 +0200455 return dev_id;
456}
457
458/**
Hanjun Guoae7c1832017-03-07 20:40:05 +0800459 * iort_pmsi_get_dev_id() - Get the device id for a device
460 * @dev: The device for which the mapping is to be done.
461 * @dev_id: The device ID found.
462 *
463 * Returns: 0 for successful find a dev id, -ENODEV on error
464 */
465int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
466{
467 int i;
468 struct acpi_iort_node *node;
469
470 node = iort_find_dev_node(dev);
471 if (!node)
472 return -ENODEV;
473
474 for (i = 0; i < node->mapping_count; i++) {
475 if (iort_node_map_platform_id(node, dev_id, IORT_MSI_TYPE, i))
476 return 0;
477 }
478
479 return -ENODEV;
480}
481
482/**
Tomasz Nowicki4bf2efd2016-09-12 20:32:21 +0200483 * iort_dev_find_its_id() - Find the ITS identifier for a device
484 * @dev: The device.
Hanjun Guo6cb6bf52017-03-07 20:39:57 +0800485 * @req_id: Device's requester ID
Tomasz Nowicki4bf2efd2016-09-12 20:32:21 +0200486 * @idx: Index of the ITS identifier list.
487 * @its_id: ITS identifier.
488 *
489 * Returns: 0 on success, appropriate error value otherwise
490 */
491static int iort_dev_find_its_id(struct device *dev, u32 req_id,
492 unsigned int idx, int *its_id)
493{
494 struct acpi_iort_its_group *its;
495 struct acpi_iort_node *node;
496
497 node = iort_find_dev_node(dev);
498 if (!node)
499 return -ENXIO;
500
Hanjun Guo697f6092017-03-07 20:40:03 +0800501 node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
Tomasz Nowicki4bf2efd2016-09-12 20:32:21 +0200502 if (!node)
503 return -ENXIO;
504
505 /* Move to ITS specific data */
506 its = (struct acpi_iort_its_group *)node->node_data;
507 if (idx > its->its_count) {
508 dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
509 idx, its->its_count);
510 return -ENXIO;
511 }
512
513 *its_id = its->identifiers[idx];
514 return 0;
515}
516
517/**
518 * iort_get_device_domain() - Find MSI domain related to a device
519 * @dev: The device.
520 * @req_id: Requester ID for the device.
521 *
522 * Returns: the MSI domain for this device, NULL otherwise
523 */
524struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
525{
526 struct fwnode_handle *handle;
527 int its_id;
528
529 if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
530 return NULL;
531
532 handle = iort_find_domain_token(its_id);
533 if (!handle)
534 return NULL;
535
536 return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
537}
538
Hanjun Guod4f54a12017-03-07 20:40:06 +0800539/**
540 * iort_get_platform_device_domain() - Find MSI domain related to a
541 * platform device
542 * @dev: the dev pointer associated with the platform device
543 *
544 * Returns: the MSI domain for this device, NULL otherwise
545 */
546static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
547{
548 struct acpi_iort_node *node, *msi_parent;
549 struct fwnode_handle *iort_fwnode;
550 struct acpi_iort_its_group *its;
551 int i;
552
553 /* find its associated iort node */
554 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
555 iort_match_node_callback, dev);
556 if (!node)
557 return NULL;
558
559 /* then find its msi parent node */
560 for (i = 0; i < node->mapping_count; i++) {
561 msi_parent = iort_node_map_platform_id(node, NULL,
562 IORT_MSI_TYPE, i);
563 if (msi_parent)
564 break;
565 }
566
567 if (!msi_parent)
568 return NULL;
569
570 /* Move to ITS specific data */
571 its = (struct acpi_iort_its_group *)msi_parent->node_data;
572
573 iort_fwnode = iort_find_domain_token(its->identifiers[0]);
574 if (!iort_fwnode)
575 return NULL;
576
577 return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
578}
579
/* Attach the platform-MSI irq domain found via IORT to @dev, if any */
void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *domain = iort_get_platform_device_domain(dev);

	if (domain)
		dev_set_msi_domain(dev, domain);
}
588
Robin Murphybc8648d2017-08-04 17:42:06 +0100589static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
590 void *data)
Lorenzo Pieralisi643b8e42016-11-21 10:01:48 +0000591{
592 u32 *rid = data;
593
594 *rid = alias;
595 return 0;
596}
597
598static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
599 struct fwnode_handle *fwnode,
600 const struct iommu_ops *ops)
601{
602 int ret = iommu_fwspec_init(dev, fwnode, ops);
603
604 if (!ret)
605 ret = iommu_fwspec_add_ids(dev, &streamid, 1);
606
607 return ret;
608}
609
Lorenzo Pieralisi1d9029d2017-04-10 16:50:59 +0530610static inline bool iort_iommu_driver_enabled(u8 type)
611{
612 switch (type) {
613 case ACPI_IORT_NODE_SMMU_V3:
614 return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
615 case ACPI_IORT_NODE_SMMU:
616 return IS_BUILTIN(CONFIG_ARM_SMMU);
617 default:
618 pr_warn("IORT node type %u does not describe an SMMU\n", type);
619 return false;
620 }
621}
622
#ifdef CONFIG_IOMMU_API
/* Return the iommu_ops recorded in @fwspec, NULL if none were set */
static inline const struct iommu_ops *iort_fwspec_iommu_ops(
				struct iommu_fwspec *fwspec)
{
	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

/*
 * Replay the IOMMU add_device callback for @dev in case the driver
 * missed it earlier, i.e. the device is not in an iommu_group yet.
 */
static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{
	int err = 0;

	if (ops->add_device && dev->bus && !dev->iommu_group)
		err = ops->add_device(dev);

	return err;
}
#else
/* Stubs for kernels built without the IOMMU API */
static inline const struct iommu_ops *iort_fwspec_iommu_ops(
				struct iommu_fwspec *fwspec)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
					 struct device *dev)
{ return 0; }
#endif
648
/*
 * iort_iommu_xlate() - Record @streamid for @dev against the IOMMU
 * described by IORT @node in the device's iommu_fwspec.
 *
 * Returns 0 on success, -ENODEV when no IOMMU data is available, or
 * -EPROBE_DEFER when the relevant SMMU driver is built in but has not
 * probed yet.
 */
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, this means that either
	 * the SMMU drivers have not been probed yet or that
	 * the SMMU drivers are not built in the kernel;
	 * Depending on whether the SMMU drivers are built-in
	 * in the kernel or not, defer the IOMMU configuration
	 * or just abort it.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
			-EPROBE_DEFER : -ENODEV;

	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}
677
/* Context handed to iort_pci_iommu_init() via pci_for_each_dma_alias() */
struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};
682
683static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
684{
685 struct iort_pci_alias_info *info = data;
686 struct acpi_iort_node *parent;
687 u32 streamid;
688
689 parent = iort_node_map_id(info->node, alias, &streamid,
690 IORT_IOMMU_TYPE);
691 return iort_iommu_xlate(info->dev, parent, streamid);
Lorenzo Pieralisi643b8e42016-11-21 10:01:48 +0000692}
693
Lorenzo Pieralisi10d8ab22017-08-03 13:32:39 +0100694static int nc_dma_get_range(struct device *dev, u64 *size)
695{
696 struct acpi_iort_node *node;
697 struct acpi_iort_named_component *ncomp;
698
699 node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
700 iort_match_node_callback, dev);
701 if (!node)
702 return -ENODEV;
703
704 ncomp = (struct acpi_iort_named_component *)node->node_data;
705
706 *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
707 1ULL<<ncomp->memory_address_limit;
708
709 return 0;
710}
711
/**
 * iort_dma_setup() - Set-up device DMA parameters.
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
 * @size: DMA range size result pointer
 */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
	u64 mask, dmaaddr = 0, size = 0, offset = 0;
	int ret, msb;

	/*
	 * Set default coherent_dma_mask to 32 bit. Drivers are expected to
	 * setup the correct supported mask.
	 */
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	/*
	 * Set it to coherent_dma_mask by default if the architecture
	 * code has not set it.
	 */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	/*
	 * Default size is mask + 1; max() guards the all-ones mask,
	 * whose increment wraps to 0.
	 */
	size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);

	/* Firmware may narrow the default range: _DMA for PCI, the IORT
	 * named component limit otherwise. */
	if (dev_is_pci(dev))
		ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
	else
		ret = nc_dma_get_range(dev, &size);

	if (!ret) {
		msb = fls64(dmaaddr + size - 1);
		/*
		 * Round-up to the power-of-two mask or set
		 * the mask to the whole 64-bit address space
		 * in case the DMA region covers the full
		 * memory window.
		 */
		mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
		/*
		 * Limit coherent and dma mask based on size
		 * retrieved from firmware.
		 */
		dev->coherent_dma_mask = mask;
		*dev->dma_mask = mask;
	}

	*dma_addr = dmaaddr;
	*dma_size = size;

	dev->dma_pfn_offset = PFN_DOWN(offset);
	dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
768
/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
	struct acpi_iort_node *node, *parent;
	const struct iommu_ops *ops;
	u32 streamid = 0;
	int err = -ENODEV;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
	if (ops)
		return ops;

	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		/* Translate every DMA alias of the PCI device */
		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);
	} else {
		int i = 0;

		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		/* Translate every platform ID mapping until one fails */
		do {
			parent = iort_node_map_platform_id(node, &streamid,
							   IORT_IOMMU_TYPE,
							   i++);

			if (parent)
				err = iort_iommu_xlate(dev, parent, streamid);
		} while (parent && !err);
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!err) {
		ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
		err = iort_add_device_replay(ops, dev);
	}

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}
841
Lorenzo Pieralisie4dadfa2016-11-21 10:01:43 +0000842static void __init acpi_iort_register_irq(int hwirq, const char *name,
843 int trigger,
844 struct resource *res)
845{
846 int irq = acpi_register_gsi(NULL, hwirq, trigger,
847 ACPI_ACTIVE_HIGH);
848
849 if (irq <= 0) {
850 pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
851 name);
852 return;
853 }
854
855 res->start = irq;
856 res->end = irq;
857 res->flags = IORESOURCE_IRQ;
858 res->name = name;
859}
860
861static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
862{
863 struct acpi_iort_smmu_v3 *smmu;
864 /* Always present mem resource */
865 int num_res = 1;
866
867 /* Retrieve SMMUv3 specific data */
868 smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
869
870 if (smmu->event_gsiv)
871 num_res++;
872
873 if (smmu->pri_gsiv)
874 num_res++;
875
876 if (smmu->gerr_gsiv)
877 num_res++;
878
879 if (smmu->sync_gsiv)
880 num_res++;
881
882 return num_res;
883}
884
Geetha Sowjanyaf9354482017-06-23 19:04:36 +0530885static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
886{
887 /*
888 * Cavium ThunderX2 implementation doesn't not support unique
889 * irq line. Use single irq line for all the SMMUv3 interrupts.
890 */
891 if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
892 return false;
893
894 /*
895 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
896 * SPI numbers here.
897 */
898 return smmu->event_gsiv == smmu->pri_gsiv &&
899 smmu->event_gsiv == smmu->gerr_gsiv &&
900 smmu->event_gsiv == smmu->sync_gsiv;
901}
902
Linu Cherian403e8c72017-06-22 17:35:36 +0530903static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
904{
905 /*
906 * Override the size, for Cavium ThunderX2 implementation
907 * which doesn't support the page 1 SMMU register space.
908 */
909 if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
910 return SZ_64K;
911
912 return SZ_128K;
913}
914
/*
 * Fill @res with the MMIO region and wired interrupts of an SMMUv3
 * node; the number and order of entries must match what
 * arm_smmu_v3_count_resources() reported.
 */
static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		/* One line carries all interrupts (Cavium ThunderX2) */
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {

		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}
958
959static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
960{
961 struct acpi_iort_smmu_v3 *smmu;
962
963 /* Retrieve SMMUv3 specific data */
964 smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
965
966 return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
967}
968
Lorenzo Pieralisi75808132017-09-28 13:57:10 +0100969#if defined(CONFIG_ACPI_NUMA)
Ganapatrao Kulkarni5fe0ce32017-08-02 10:58:25 -0700970/*
971 * set numa proximity domain for smmuv3 device
972 */
973static void __init arm_smmu_v3_set_proximity(struct device *dev,
974 struct acpi_iort_node *node)
975{
976 struct acpi_iort_smmu_v3 *smmu;
977
978 smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
979 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
980 set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
981 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
982 smmu->base_address,
983 smmu->pxm);
984 }
985}
986#else
987#define arm_smmu_v3_set_proximity NULL
988#endif
989
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +0000990static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
991{
992 struct acpi_iort_smmu *smmu;
993
994 /* Retrieve SMMU specific data */
995 smmu = (struct acpi_iort_smmu *)node->node_data;
996
997 /*
998 * Only consider the global fault interrupt and ignore the
999 * configuration access interrupt.
1000 *
1001 * MMIO address and global fault interrupt resources are always
1002 * present so add them to the context interrupt count as a static
1003 * value.
1004 */
1005 return smmu->context_interrupt_count + 2;
1006}
1007
1008static void __init arm_smmu_init_resources(struct resource *res,
1009 struct acpi_iort_node *node)
1010{
1011 struct acpi_iort_smmu *smmu;
1012 int i, hw_irq, trigger, num_res = 0;
1013 u64 *ctx_irq, *glb_irq;
1014
1015 /* Retrieve SMMU specific data */
1016 smmu = (struct acpi_iort_smmu *)node->node_data;
1017
1018 res[num_res].start = smmu->base_address;
1019 res[num_res].end = smmu->base_address + smmu->span - 1;
1020 res[num_res].flags = IORESOURCE_MEM;
1021 num_res++;
1022
1023 glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
1024 /* Global IRQs */
1025 hw_irq = IORT_IRQ_MASK(glb_irq[0]);
1026 trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
1027
1028 acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
1029 &res[num_res++]);
1030
1031 /* Context IRQs */
1032 ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
1033 for (i = 0; i < smmu->context_interrupt_count; i++) {
1034 hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
1035 trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
1036
1037 acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
1038 &res[num_res++]);
1039 }
1040}
1041
1042static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
1043{
1044 struct acpi_iort_smmu *smmu;
1045
1046 /* Retrieve SMMU specific data */
1047 smmu = (struct acpi_iort_smmu *)node->node_data;
1048
1049 return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
1050}
1051
/*
 * Per-model hooks used by iort_add_smmu_platform_device() to turn an
 * IORT SMMU node into a platform device. iommu_set_proximity is
 * optional (NULL-checked by the caller); iommu_init is not invoked in
 * this part of the file — presumably a model-specific init hook, TODO
 * confirm against the rest of the file.
 */
struct iort_iommu_config {
	const char *name;	/* platform device name for allocation */
	int (*iommu_init)(struct acpi_iort_node *node);
	bool (*iommu_is_coherent)(struct acpi_iort_node *node);	/* coherent table walks? */
	int (*iommu_count_resources)(struct acpi_iort_node *node);	/* resource array size */
	void (*iommu_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);	/* fill resource array */
	void (*iommu_set_proximity)(struct device *dev,
				    struct acpi_iort_node *node);	/* NUMA binding (optional) */
};
1062
/* Hooks for ACPI_IORT_NODE_SMMU_V3 nodes */
static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.iommu_is_coherent = arm_smmu_v3_is_coherent,
	.iommu_count_resources = arm_smmu_v3_count_resources,
	.iommu_init_resources = arm_smmu_v3_init_resources,
	.iommu_set_proximity = arm_smmu_v3_set_proximity,
};
1070
/* Hooks for ACPI_IORT_NODE_SMMU (SMMUv1/v2) nodes; no NUMA hook */
static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.iommu_is_coherent = arm_smmu_is_coherent,
	.iommu_count_resources = arm_smmu_count_resources,
	.iommu_init_resources = arm_smmu_init_resources
};
1077
Lorenzo Pieralisie3d49392017-09-28 14:03:33 +01001078static __init const struct iort_iommu_config *iort_get_iommu_cfg(
1079 struct acpi_iort_node *node)
Lorenzo Pieralisi846f0e92016-11-21 10:01:41 +00001080{
Lorenzo Pieralisie4dadfa2016-11-21 10:01:43 +00001081 switch (node->type) {
1082 case ACPI_IORT_NODE_SMMU_V3:
1083 return &iort_arm_smmu_v3_cfg;
Lorenzo Pieralisid6fcd3b2016-11-21 10:01:45 +00001084 case ACPI_IORT_NODE_SMMU:
1085 return &iort_arm_smmu_cfg;
Lorenzo Pieralisie4dadfa2016-11-21 10:01:43 +00001086 default:
1087 return NULL;
1088 }
Lorenzo Pieralisi846f0e92016-11-21 10:01:41 +00001089}
1090
/**
 * iort_add_smmu_platform_device() - Allocate a platform device for SMMU
 * @node: Pointer to SMMU ACPI IORT node
 *
 * Looks up the per-model hooks for @node, allocates a platform device,
 * attaches its resources, platform data and fwnode, configures DMA for
 * the page table walker and registers the device. On any failure the
 * partially initialized device is released via platform_device_put().
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	enum dev_dma_attr attr;
	int ret, count;
	const struct iort_iommu_config *ops = iort_get_iommu_cfg(node);

	/* Not an SMMU node type we know how to handle */
	if (!ops)
		return -ENODEV;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	/* NUMA binding is optional (SMMUv1/v2 config leaves it NULL) */
	if (ops->iommu_set_proximity)
		ops->iommu_set_proximity(&pdev->dev, node);

	count = ops->iommu_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->iommu_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources,
	 * free their allocated memory
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Add a copy of IORT node pointer to platform_data to
	 * be used to retrieve IORT data information.
	 */
	ret = platform_device_add_data(pdev, &node, sizeof(node));
	if (ret)
		goto dev_put;

	/*
	 * We expect the dma masks to be equivalent for
	 * all SMMUs set-ups
	 */
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	fwnode = iort_get_fwnode(node);

	/* The caller is expected to have registered a fwnode first */
	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	attr = ops->iommu_is_coherent(node) ?
			     DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(&pdev->dev, attr);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	/* Undo acpi_dma_configure() before dropping the device */
	acpi_dma_deconfigure(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}
1178
/*
 * Walk every node of the IORT table and create a platform device for
 * each SMMU/SMMUv3 node found, allocating and registering a static
 * fwnode for it first. Stops at the first failure or on a malformed
 * table (node pointer running past the table end).
 */
static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;

	/*
	 * iort_table and iort both point to the start of IORT table, but
	 * have different struct types
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		/* Guard against a node_count inconsistent with length */
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
			(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {

			/* Register the fwnode before creating the device */
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_smmu_platform_device(iort_node);
			if (ret) {
				/* Roll back the fwnode registration */
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		/* Advance to the next node using this node's length */
		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}
1225
Tomasz Nowicki88ef16d2016-09-12 20:54:20 +02001226void __init acpi_iort_init(void)
1227{
1228 acpi_status status;
1229
1230 status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
Lorenzo Pieralisi34ceea22016-11-21 10:01:34 +00001231 if (ACPI_FAILURE(status)) {
1232 if (status != AE_NOT_FOUND) {
1233 const char *msg = acpi_format_exception(status);
1234
1235 pr_err("Failed to get table, %s\n", msg);
1236 }
1237
1238 return;
Tomasz Nowicki88ef16d2016-09-12 20:54:20 +02001239 }
Lorenzo Pieralisi34ceea22016-11-21 10:01:34 +00001240
Lorenzo Pieralisi846f0e92016-11-21 10:01:41 +00001241 iort_init_platform_devices();
Tomasz Nowicki88ef16d2016-09-12 20:54:20 +02001242}