/*
 * OF helpers for IOMMU
 *
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/iommu.h>
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/slab.h>

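/*
 * The sentinel below terminates the __iommu_of_table linker table; IOMMU
 * drivers add entries to that table (typically via IOMMU_OF_DECLARE()),
 * and of_iommu_init() at the bottom of this file walks them at boot.
 */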
static const struct of_device_id __iommu_of_table_sentinel
	__used __section(__iommu_of_table_end);

/**
 * of_get_dma_window - Parse the *dma-window property and return 0 if found.
 *
 * @dn: device node
 * @prefix: prefix for the property names, if any
 * @index: index of the window to parse
 * @busno: returns the bus number if the format carries one; otherwise pass NULL
 * @addr: returns the address at which the DMA window starts
 * @size: returns the size of the range the DMA window can cover
 *
 * This supports several property layouts flexibly: a "prefix" for the
 * property names can be supplied if needed, and "busno" and "index" are
 * optional. Pass NULL (or 0) for anything that is not used.
 */
int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
		      unsigned long *busno, dma_addr_t *addr, size_t *size)
{
	const __be32 *dma_window, *end;
	int bytes, cur_index = 0;
	char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];

	if (!dn || !addr || !size)
		return -EINVAL;

	if (!prefix)
		prefix = "";

	snprintf(propname, sizeof(propname), "%sdma-window", prefix);
	snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
	snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);

	dma_window = of_get_property(dn, propname, &bytes);
	if (!dma_window)
		return -ENODEV;
	end = dma_window + bytes / sizeof(*dma_window);

	while (dma_window < end) {
		u32 cells;
		const void *prop;

		/* busno is one cell if supported */
		if (busno)
			*busno = be32_to_cpup(dma_window++);

		prop = of_get_property(dn, addrname, NULL);
		if (!prop)
			prop = of_get_property(dn, "#address-cells", NULL);

		cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
		if (!cells)
			return -EINVAL;
		*addr = of_read_number(dma_window, cells);
		dma_window += cells;

		prop = of_get_property(dn, sizename, NULL);
		cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
		if (!cells)
			return -EINVAL;
		*size = of_read_number(dma_window, cells);
		dma_window += cells;

		if (cur_index++ == index)
			break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(of_get_dma_window);
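
/*
 * Illustrative sketch, not part of the original file: one way a caller might
 * read the first DMA window of a node with this helper. It assumes a plain
 * "dma-window" property laid out as <busno addr size> with one cell each;
 * real bindings may use a prefix and different cell counts, e.g.
 *
 *	dma-window = <0 0x20000000 0x10000000>;
 */
#if 0
static int example_parse_dma_window(struct device_node *dn)
{
	unsigned long busno;
	dma_addr_t addr;
	size_t size;
	int err;

	err = of_get_dma_window(dn, NULL, 0, &busno, &addr, &size);
	if (err)
		return err;

	pr_info("dma-window: bus %lu, %pad + 0x%zx\n", busno, &addr, size);
	return 0;
}
#endif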

static bool of_iommu_driver_present(struct device_node *np)
{
	/*
	 * If the IOMMU still isn't ready by the time we reach init, assume
	 * it never will be. We don't want to defer indefinitely, nor attempt
	 * to dereference __iommu_of_table after it's been freed.
	 */
	if (system_state > SYSTEM_BOOTING)
		return false;

	return of_match_node(&__iommu_of_table, np);
}

static const struct iommu_ops
*of_iommu_xlate(struct device *dev, struct of_phandle_args *iommu_spec)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode = &iommu_spec->np->fwnode;
	int err;

	ops = iommu_ops_from_fwnode(fwnode);
	if ((ops && !ops->of_xlate) ||
	    (!ops && !of_iommu_driver_present(iommu_spec->np)))
		return NULL;

	err = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
	if (err)
		return ERR_PTR(err);
	/*
	 * The otherwise-empty fwspec handily serves to indicate the specific
	 * IOMMU device we're waiting for, which will be useful if we ever get
	 * a proper probe-ordering dependency mechanism in future.
	 */
	if (!ops)
		return ERR_PTR(-EPROBE_DEFER);

	err = ops->of_xlate(dev, iommu_spec);
	if (err)
		return ERR_PTR(err);

	return ops;
}
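
/*
 * Illustrative sketch, not part of the original file: the of_xlate() callback
 * invoked above is provided by the IOMMU driver. A minimal implementation
 * typically just records the master IDs carried in the phandle arguments in
 * the device's fwspec, along these lines:
 */
#if 0
static int example_iommu_of_xlate(struct device *dev,
				  struct of_phandle_args *args)
{
	/* Stash the per-master IDs for later use at attach time */
	return iommu_fwspec_add_ids(dev, args->args, args->args_count);
}
#endif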

static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
{
	struct of_phandle_args *iommu_spec = data;

	iommu_spec->args[0] = alias;
	return iommu_spec->np == pdev->bus->dev.of_node;
}

static const struct iommu_ops
*of_pci_iommu_init(struct pci_dev *pdev, struct device_node *bridge_np)
{
	const struct iommu_ops *ops;
	struct of_phandle_args iommu_spec;
	int err;

	/*
	 * Start by tracing the RID alias down the PCI topology as
	 * far as the host bridge whose OF node we have...
	 * (we're not even attempting to handle multi-alias devices yet)
	 */
	iommu_spec.args_count = 1;
	iommu_spec.np = bridge_np;
	pci_for_each_dma_alias(pdev, __get_pci_rid, &iommu_spec);
	/*
	 * ...then find out what that becomes once it escapes the PCI
	 * bus into the system beyond, and which IOMMU it ends up at.
	 */
	iommu_spec.np = NULL;
	err = of_pci_map_rid(bridge_np, iommu_spec.args[0], "iommu-map",
			     "iommu-map-mask", &iommu_spec.np,
			     iommu_spec.args);
	if (err)
		return err == -ENODEV ? NULL : ERR_PTR(err);

	ops = of_iommu_xlate(&pdev->dev, &iommu_spec);

	of_node_put(iommu_spec.np);
	return ops;
}
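
/*
 * Illustrative devicetree fragment, not part of the original file, showing
 * the sort of "iommu-map" binding consumed above. The cell values and the
 * "smmu" label are invented for illustration; see
 * Documentation/devicetree/bindings/pci/pci-iommu.txt for the real binding.
 *
 *	pcie {
 *		iommu-map = <0x0 &smmu 0x0 0x10000>;
 *		iommu-map-mask = <0xffff>;
 *	};
 *
 * This maps every RID below the host bridge (masked to 16 bits) straight
 * through to the same IDs at the IOMMU.
 */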

static const struct iommu_ops
*of_platform_iommu_init(struct device *dev, struct device_node *np)
{
	struct of_phandle_args iommu_spec;
	const struct iommu_ops *ops = NULL;
	int idx = 0;

	/*
	 * We don't currently walk up the tree looking for a parent IOMMU.
	 * See the `Notes:' section of
	 * Documentation/devicetree/bindings/iommu/iommu.txt
	 */
	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
					   idx, &iommu_spec)) {
		ops = of_iommu_xlate(dev, &iommu_spec);
		of_node_put(iommu_spec.np);
		idx++;
		if (IS_ERR_OR_NULL(ops))
			break;
	}

	return ops;
}
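
/*
 * Illustrative devicetree fragment, not part of the original file, of the
 * "iommus" binding walked above; the names and the 0x42 master ID are
 * invented for illustration:
 *
 *	smmu: iommu@2b400000 {
 *		#iommu-cells = <1>;
 *	};
 *
 *	master@40000000 {
 *		iommus = <&smmu 0x42>;
 *	};
 *
 * Each "iommus" entry yields one of_phandle_args, which is handed to
 * of_iommu_xlate() in turn.
 */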

const struct iommu_ops *of_iommu_configure(struct device *dev,
					   struct device_node *master_np)
{
	const struct iommu_ops *ops;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (!master_np)
		return NULL;

	if (fwspec) {
		if (fwspec->ops)
			return fwspec->ops;

		/* In the deferred case, start again from scratch */
		iommu_fwspec_free(dev);
	}

	if (dev_is_pci(dev))
		ops = of_pci_iommu_init(to_pci_dev(dev), master_np);
	else
		ops = of_platform_iommu_init(dev, master_np);
	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!IS_ERR_OR_NULL(ops) && ops->add_device &&
	    dev->bus && !dev->iommu_group) {
		int err = ops->add_device(dev);

		if (err)
			ops = ERR_PTR(err);
	}

	return IS_ERR(ops) ? NULL : ops;
}
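
/*
 * Illustrative sketch, not part of the original file: of_iommu_configure()
 * is meant to be called from the generic DT DMA-configuration path when a
 * device is being set up, roughly along these lines (simplified; the real
 * caller also works out the DMA range and coherency):
 */
#if 0
static void example_dma_configure(struct device *dev, struct device_node *np)
{
	const struct iommu_ops *iommu;

	iommu = of_iommu_configure(dev, np);
	/* A non-NULL result means dev sits behind a known, ready IOMMU */
	arch_setup_dma_ops(dev, 0, 0, iommu, false);
}
#endif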

static int __init of_iommu_init(void)
{
	struct device_node *np;
	const struct of_device_id *match, *matches = &__iommu_of_table;

	for_each_matching_node_and_match(np, matches, &match) {
		const of_iommu_init_fn init_fn = match->data;

		if (init_fn(np))
			pr_err("Failed to initialise IOMMU %s\n",
			       of_node_full_name(np));
	}

	return 0;
}
postcore_initcall_sync(of_iommu_init);
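
/*
 * Illustrative sketch, not part of the original file: the __iommu_of_table
 * entries walked by of_iommu_init() above are typically registered by IOMMU
 * drivers with the IOMMU_OF_DECLARE() macro, for example:
 */
#if 0
static int __init example_iommu_of_init(struct device_node *np)
{
	/* Early, devicetree-driven set-up of the example IOMMU */
	return 0;
}
IOMMU_OF_DECLARE(example_iommu, "vendor,example-iommu", example_iommu_of_init);
#endif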