/*
 * Coherency fabric (Aurora) support for Armada 370, 375, 38x and XP
 * platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370, 375, 38x and XP SOCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and between
 * CPUs and I/O masters. This file initializes the coherency fabric and
 * supplies basic routines for configuring and controlling hardware coherency
 */
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +020021#define pr_fmt(fmt) "mvebu-coherency: " fmt
22
Gregory CLEMENT009f1312012-08-02 11:16:29 +030023#include <linux/kernel.h>
24#include <linux/init.h>
25#include <linux/of_address.h>
26#include <linux/io.h>
27#include <linux/smp.h>
Gregory CLEMENTe60304f2012-10-12 19:20:36 +020028#include <linux/dma-mapping.h>
29#include <linux/platform_device.h>
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +020030#include <linux/slab.h>
31#include <linux/mbus.h>
Thomas Petazzonib0063aa2014-05-13 18:04:30 +020032#include <linux/pci.h>
Gregory CLEMENT009f1312012-08-02 11:16:29 +030033#include <asm/smp_plat.h>
Thomas Petazzoni580ff0e2013-06-06 12:24:28 +020034#include <asm/cacheflush.h>
Thomas Petazzoni497a9232014-05-15 16:59:34 +020035#include <asm/mach/map.h>
Jisheng Zhangb12634e2013-11-07 17:02:38 +080036#include "coherency.h"
Thomas Petazzoni39438562014-05-05 17:05:26 +020037#include "mvebu-soc-id.h"
Gregory CLEMENT009f1312012-08-02 11:16:29 +030038
/*
 * Physical address of the coherency fabric registers. Exported (non-static)
 * because secondary CPUs read it very early, before they join the coherency
 * fabric (see the sync_cache_w() in armada_370_coherency_init()).
 */
unsigned long coherency_phys_base;
/* Virtual mapping of the coherency fabric control registers */
void __iomem *coherency_base;
/* Virtual mapping of the per-CPU coherency registers (I/O sync barrier) */
static void __iomem *coherency_cpu_base;

/* Coherency fabric registers */
#define IO_SYNC_BARRIER_CTL_OFFSET		   0x0
45
/*
 * Variants of the coherency fabric, as matched from the device tree
 * via of_coherency_table below.
 */
enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};
52
Gregory CLEMENT009f1312012-08-02 11:16:29 +030053static struct of_device_id of_coherency_table[] = {
Thomas Petazzoni924d38f2014-04-14 15:46:59 +020054 {.compatible = "marvell,coherency-fabric",
55 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +020056 {.compatible = "marvell,armada-375-coherency-fabric",
57 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
Thomas Petazzonid0de9322014-04-14 15:47:06 +020058 {.compatible = "marvell,armada-380-coherency-fabric",
59 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
Gregory CLEMENT009f1312012-08-02 11:16:29 +030060 { /* end of list */ },
61};
62
/*
 * Low-level helpers implemented in coherency_ll.S: add the current CPU
 * to the SMP coherency group and enable hardware coherency for it.
 */
int ll_enable_coherency(void);
void ll_add_cpu_to_smp_group(void);
Gregory CLEMENT009f1312012-08-02 11:16:29 +030066
Gregory CLEMENT952f4ca2014-04-14 17:10:07 +020067int set_cpu_coherent(void)
Gregory CLEMENT009f1312012-08-02 11:16:29 +030068{
69 if (!coherency_base) {
Gregory CLEMENTb41375f2014-04-14 17:10:06 +020070 pr_warn("Can't make current CPU cache coherent.\n");
Gregory CLEMENT009f1312012-08-02 11:16:29 +030071 pr_warn("Coherency fabric is not initialized\n");
72 return 1;
73 }
74
Gregory CLEMENT2e8a5942014-04-14 17:10:08 +020075 ll_add_cpu_to_smp_group();
76 return ll_enable_coherency();
Gregory CLEMENT009f1312012-08-02 11:16:29 +030077}
78
/*
 * Issue an I/O sync barrier in the coherency fabric: write 0x1 to the
 * barrier trigger register, then busy-poll until the hardware clears
 * the bit, indicating the barrier has completed.
 */
static inline void mvebu_hwcc_sync_io_barrier(void)
{
	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
	while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
}
84
85static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
86 unsigned long offset, size_t size,
87 enum dma_data_direction dir,
88 struct dma_attrs *attrs)
89{
90 if (dir != DMA_TO_DEVICE)
91 mvebu_hwcc_sync_io_barrier();
92 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
93}
94
95
96static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
97 size_t size, enum dma_data_direction dir,
98 struct dma_attrs *attrs)
99{
100 if (dir != DMA_TO_DEVICE)
101 mvebu_hwcc_sync_io_barrier();
102}
103
104static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
105 size_t size, enum dma_data_direction dir)
106{
107 if (dir != DMA_TO_DEVICE)
108 mvebu_hwcc_sync_io_barrier();
109}
110
/*
 * DMA operations for hardware-coherent devices: allocation, mmap and
 * scatter-gather handling are delegated to the standard ARM DMA ops,
 * while the map/unmap/sync-single hooks above only issue the I/O sync
 * barrier required by the coherency fabric.
 */
static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.map_page		= mvebu_hwcc_dma_map_page,
	.unmap_page		= mvebu_hwcc_dma_unmap_page,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= mvebu_hwcc_dma_sync,
	.sync_single_for_device	= mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
126
Thomas Petazzonib0063aa2014-05-13 18:04:30 +0200127static int mvebu_hwcc_notifier(struct notifier_block *nb,
128 unsigned long event, void *__dev)
Gregory CLEMENTe60304f2012-10-12 19:20:36 +0200129{
130 struct device *dev = __dev;
131
132 if (event != BUS_NOTIFY_ADD_DEVICE)
133 return NOTIFY_DONE;
134 set_dma_ops(dev, &mvebu_hwcc_dma_ops);
135
136 return NOTIFY_OK;
137}
138
/* Notifier registered on the platform bus (see coherency_late_init()) */
static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};
142
/* Notifier registered on the PCI bus (see coherency_pci_init()) */
static struct notifier_block mvebu_hwcc_pci_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};
146
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200147static void __init armada_370_coherency_init(struct device_node *np)
148{
149 struct resource res;
150
151 of_address_to_resource(np, 0, &res);
152 coherency_phys_base = res.start;
153 /*
154 * Ensure secondary CPUs will see the updated value,
155 * which they read before they join the coherency
156 * fabric, and therefore before they are coherent with
157 * the boot CPU cache.
158 */
159 sync_cache_w(&coherency_phys_base);
160 coherency_base = of_iomap(np, 0);
161 coherency_cpu_base = of_iomap(np, 1);
Gregory CLEMENT952f4ca2014-04-14 17:10:07 +0200162 set_cpu_coherent();
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200163}
164
Thomas Petazzoni497a9232014-05-15 16:59:34 +0200165/*
166 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
167 * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
168 * is needed as a workaround for a deadlock issue between the PCIe
169 * interface and the cache controller.
170 */
171static void __iomem *
172armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
173 unsigned int mtype, void *caller)
174{
175 struct resource pcie_mem;
176
177 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
178
179 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
180 mtype = MT_UNCACHED;
181
182 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
183}
184
Thomas Petazzonid0de9322014-04-14 15:47:06 +0200185static void __init armada_375_380_coherency_init(struct device_node *np)
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +0200186{
Thomas Petazzoni497a9232014-05-15 16:59:34 +0200187 struct device_node *cache_dn;
188
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +0200189 coherency_cpu_base = of_iomap(np, 0);
Thomas Petazzoni497a9232014-05-15 16:59:34 +0200190 arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
191
192 /*
Thomas Petazzonidcad6882015-01-28 12:55:45 +0100193 * We should switch the PL310 to I/O coherency mode only if
194 * I/O coherency is actually enabled.
195 */
196 if (!coherency_available())
197 return;
198
199 /*
Thomas Petazzoni497a9232014-05-15 16:59:34 +0200200 * Add the PL310 property "arm,io-coherent". This makes sure the
201 * outer sync operation is not used, which allows to
202 * workaround the system erratum that causes deadlocks when
203 * doing PCIe in an SMP situation on Armada 375 and Armada
204 * 38x.
205 */
206 for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
207 struct property *p;
208
209 p = kzalloc(sizeof(*p), GFP_KERNEL);
210 p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
211 of_add_property(cache_dn, p);
212 }
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +0200213}
214
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200215static int coherency_type(void)
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300216{
217 struct device_node *np;
Thomas Petazzoni5fbba082014-04-14 15:47:02 +0200218 const struct of_device_id *match;
Thomas Petazzonie5535542014-11-13 10:38:57 +0100219 int type;
220
221 /*
222 * The coherency fabric is needed:
223 * - For coherency between processors on Armada XP, so only
224 * when SMP is enabled.
225 * - For coherency between the processor and I/O devices, but
226 * this coherency requires many pre-requisites (write
227 * allocate cache policy, shareable pages, SMP bit set) that
228 * are only meant in SMP situations.
229 *
230 * Note that this means that on Armada 370, there is currently
231 * no way to use hardware I/O coherency, because even when
232 * CONFIG_SMP is enabled, is_smp() returns false due to the
233 * Armada 370 being a single-core processor. To lift this
234 * limitation, we would have to find a way to make the cache
235 * policy set to write-allocate (on all Armada SoCs), and to
236 * set the shareable attribute in page tables (on all Armada
237 * SoCs except the Armada 370). Unfortunately, such decisions
238 * are taken very early in the kernel boot process, at a point
239 * where we don't know yet on which SoC we are running.
240
241 */
242 if (!is_smp())
243 return COHERENCY_FABRIC_TYPE_NONE;
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300244
Thomas Petazzoni5fbba082014-04-14 15:47:02 +0200245 np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
Thomas Petazzonie5535542014-11-13 10:38:57 +0100246 if (!np)
247 return COHERENCY_FABRIC_TYPE_NONE;
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200248
Thomas Petazzonie5535542014-11-13 10:38:57 +0100249 type = (int) match->data;
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200250
Thomas Petazzonie5535542014-11-13 10:38:57 +0100251 of_node_put(np);
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +0200252
Thomas Petazzonie5535542014-11-13 10:38:57 +0100253 return type;
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200254}
255
/*
 * Report whether hardware I/O coherency may be used.
 *
 * As a precaution, hardware I/O coherency is currently completely
 * disabled, until enough testing is done with automatic I/O
 * synchronization barriers to validate that it is a proper solution.
 */
int coherency_available(void)
{
	return 0;
}
265
266int __init coherency_init(void)
267{
268 int type = coherency_type();
269 struct device_node *np;
270
271 np = of_find_matching_node(NULL, of_coherency_table);
272
273 if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
274 armada_370_coherency_init(np);
Thomas Petazzonid0de9322014-04-14 15:47:06 +0200275 else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
276 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
277 armada_375_380_coherency_init(np);
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200278
Thomas Petazzoni2eb04ae2014-10-27 16:32:35 +0100279 of_node_put(np);
280
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300281 return 0;
282}
Thomas Petazzoni865e0522013-06-05 09:04:55 +0200283
284static int __init coherency_late_init(void)
285{
Thomas Petazzonief01c6c2014-11-13 10:38:59 +0100286 if (coherency_available())
287 bus_register_notifier(&platform_bus_type,
288 &mvebu_hwcc_nb);
Thomas Petazzoni865e0522013-06-05 09:04:55 +0200289 return 0;
290}
291
292postcore_initcall(coherency_late_init);
Thomas Petazzonib0063aa2014-05-13 18:04:30 +0200293
Thomas Petazzoni8828ccc2014-05-20 17:13:03 +0200294#if IS_ENABLED(CONFIG_PCI)
Thomas Petazzonib0063aa2014-05-13 18:04:30 +0200295static int __init coherency_pci_init(void)
296{
297 if (coherency_available())
298 bus_register_notifier(&pci_bus_type,
Ezequiel Garciaa728b972014-07-08 10:37:37 -0300299 &mvebu_hwcc_pci_nb);
Thomas Petazzonib0063aa2014-05-13 18:04:30 +0200300 return 0;
301}
302
303arch_initcall(coherency_pci_init);
Thomas Petazzoni8828ccc2014-05-20 17:13:03 +0200304#endif