blob: 044b51185fccb2e68c1f89c4efb3822704d28488 [file] [log] [blame]
/*
 * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370 and Armada XP SOCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and between
 * CPUs and I/O masters. This file initializes the coherency fabric and
 * supplies basic routines for configuring and controlling hardware coherency.
 */
19
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +020020#define pr_fmt(fmt) "mvebu-coherency: " fmt
21
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include "armada-370-xp.h"
#include "coherency.h"
#include "mvebu-soc-id.h"
Gregory CLEMENT009f1312012-08-02 11:16:29 +030039
/*
 * Physical base of the coherency fabric registers. Read by secondary
 * CPUs in their startup path *before* they join the fabric (i.e. while
 * they are not yet cache-coherent), hence the sync_cache_w() in
 * armada_370_coherency_init().
 */
unsigned long coherency_phys_base;
/* Coherency fabric control registers (first 'reg' bank of the DT node) */
void __iomem *coherency_base;
/* Per-CPU coherency registers (second 'reg' bank); used for the I/O sync barrier */
static void __iomem *coherency_cpu_base;
Gregory CLEMENT009f1312012-08-02 11:16:29 +030043
/* Coherency fabric registers */
#define COHERENCY_FABRIC_CFG_OFFSET 0x4

/* Offset, in the per-CPU register bank, of the I/O sync barrier control */
#define IO_SYNC_BARRIER_CTL_OFFSET 0x0
48
/* Flavours of coherency fabric supported by this driver */
enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};
55
Gregory CLEMENT009f1312012-08-02 11:16:29 +030056static struct of_device_id of_coherency_table[] = {
Thomas Petazzoni924d38f2014-04-14 15:46:59 +020057 {.compatible = "marvell,coherency-fabric",
58 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +020059 {.compatible = "marvell,armada-375-coherency-fabric",
60 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
Thomas Petazzonid0de9322014-04-14 15:47:06 +020061 {.compatible = "marvell,armada-380-coherency-fabric",
62 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
Gregory CLEMENT009f1312012-08-02 11:16:29 +030063 { /* end of list */ },
64};
65
/* Functions defined in coherency_ll.S */
/* Enable coherency for the calling CPU; returns a status value */
int ll_enable_coherency(void);
/* Add the calling CPU to the fabric's SMP group */
void ll_add_cpu_to_smp_group(void);
Gregory CLEMENT009f1312012-08-02 11:16:29 +030069
Gregory CLEMENT952f4ca2014-04-14 17:10:07 +020070int set_cpu_coherent(void)
Gregory CLEMENT009f1312012-08-02 11:16:29 +030071{
72 if (!coherency_base) {
Gregory CLEMENTb41375f2014-04-14 17:10:06 +020073 pr_warn("Can't make current CPU cache coherent.\n");
Gregory CLEMENT009f1312012-08-02 11:16:29 +030074 pr_warn("Coherency fabric is not initialized\n");
75 return 1;
76 }
77
Gregory CLEMENT2e8a5942014-04-14 17:10:08 +020078 ll_add_cpu_to_smp_group();
79 return ll_enable_coherency();
Gregory CLEMENT009f1312012-08-02 11:16:29 +030080}
81
/*
 * The code below implements the I/O coherency workaround on Armada
 * 375. This workaround consists in using the two channels of the
 * first XOR engine to trigger a XOR transaction that serves as the
 * I/O coherency barrier.
 */
88
/* Register banks of the XOR engine grabbed for the workaround */
static void __iomem *xor_base, *xor_high_base;
/* Per-CPU physical address of the bounce buffer the XOR engine clears */
static dma_addr_t coherency_wa_buf_phys[CONFIG_NR_CPUS];
/* Per-CPU kernel virtual address of the same buffer */
static void *coherency_wa_buf[CONFIG_NR_CPUS];
/* Set once armada_375_coherency_init_wa() has configured the engine */
static bool coherency_wa_enabled;
93
/*
 * XOR engine register offsets (per-channel where parameterized).
 * All macro arguments are fully parenthesized so that expressions
 * such as XOR_CONFIG(i + 1) expand correctly; the original mixed
 * unparenthesized (chan * 4) with parenthesized ((chan) << 2) forms.
 */
#define XOR_CONFIG(chan)            (0x10 + ((chan) * 4))
#define XOR_ACTIVATION(chan)        (0x20 + ((chan) * 4))
#define WINDOW_BAR_ENABLE(chan)     (0x240 + ((chan) << 2))
#define WINDOW_BASE(w)              (0x250 + ((w) << 2))
#define WINDOW_SIZE(w)              (0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)        (0x290 + ((w) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)  (0x2A0 + ((chan) << 2))
#define XOR_DEST_POINTER(chan)      (0x2B0 + ((chan) * 4))
#define XOR_BLOCK_SIZE(chan)        (0x2C0 + ((chan) * 4))
#define XOR_INIT_VALUE_LOW          0x2E0
#define XOR_INIT_VALUE_HIGH         0x2E4
105
/*
 * Armada 375 Z1 I/O barrier: trigger a dummy XOR memset transaction
 * on this CPU's dedicated XOR channel and busy-wait for completion.
 * The completed DMA write serves as the I/O coherency barrier.
 *
 * NOTE(review): indexes state by smp_processor_id() — assumes the
 * caller cannot migrate to another CPU mid-sequence; confirm calling
 * context.
 */
static inline void mvebu_hwcc_armada375_sync_io_barrier_wa(void)
{
	int idx = smp_processor_id();

	/* Write '1' to the first word of the buffer */
	writel(0x1, coherency_wa_buf[idx]);

	/* Wait until the engine is idle */
	while ((readl(xor_base + XOR_ACTIVATION(idx)) >> 4) & 0x3)
		;

	/* Order the buffer write before starting the engine */
	dmb();

	/* Trigger channel */
	writel(0x1, xor_base + XOR_ACTIVATION(idx));

	/* Poll the data until it is cleared by the XOR transaction */
	while (readl(coherency_wa_buf[idx]))
		;
}
126
127static void __init armada_375_coherency_init_wa(void)
128{
129 const struct mbus_dram_target_info *dram;
130 struct device_node *xor_node;
131 struct property *xor_status;
132 struct clk *xor_clk;
133 u32 win_enable = 0;
134 int i;
135
136 pr_warn("enabling coherency workaround for Armada 375 Z1, one XOR engine disabled\n");
137
138 /*
139 * Since the workaround uses one XOR engine, we grab a
140 * reference to its Device Tree node first.
141 */
142 xor_node = of_find_compatible_node(NULL, NULL, "marvell,orion-xor");
143 BUG_ON(!xor_node);
144
145 /*
146 * Then we mark it as disabled so that the real XOR driver
147 * will not use it.
148 */
149 xor_status = kzalloc(sizeof(struct property), GFP_KERNEL);
150 BUG_ON(!xor_status);
151
152 xor_status->value = kstrdup("disabled", GFP_KERNEL);
153 BUG_ON(!xor_status->value);
154
155 xor_status->length = 8;
156 xor_status->name = kstrdup("status", GFP_KERNEL);
157 BUG_ON(!xor_status->name);
158
159 of_update_property(xor_node, xor_status);
160
161 /*
162 * And we remap the registers, get the clock, and do the
163 * initial configuration of the XOR engine.
164 */
165 xor_base = of_iomap(xor_node, 0);
166 xor_high_base = of_iomap(xor_node, 1);
167
168 xor_clk = of_clk_get_by_name(xor_node, NULL);
169 BUG_ON(!xor_clk);
170
171 clk_prepare_enable(xor_clk);
172
173 dram = mv_mbus_dram_info();
174
175 for (i = 0; i < 8; i++) {
176 writel(0, xor_base + WINDOW_BASE(i));
177 writel(0, xor_base + WINDOW_SIZE(i));
178 if (i < 4)
179 writel(0, xor_base + WINDOW_REMAP_HIGH(i));
180 }
181
182 for (i = 0; i < dram->num_cs; i++) {
183 const struct mbus_dram_window *cs = dram->cs + i;
184 writel((cs->base & 0xffff0000) |
185 (cs->mbus_attr << 8) |
186 dram->mbus_dram_target_id, xor_base + WINDOW_BASE(i));
187 writel((cs->size - 1) & 0xffff0000, xor_base + WINDOW_SIZE(i));
188
189 win_enable |= (1 << i);
190 win_enable |= 3 << (16 + (2 * i));
191 }
192
193 writel(win_enable, xor_base + WINDOW_BAR_ENABLE(0));
194 writel(win_enable, xor_base + WINDOW_BAR_ENABLE(1));
195 writel(0, xor_base + WINDOW_OVERRIDE_CTRL(0));
196 writel(0, xor_base + WINDOW_OVERRIDE_CTRL(1));
197
198 for (i = 0; i < CONFIG_NR_CPUS; i++) {
199 coherency_wa_buf[i] = kzalloc(PAGE_SIZE, GFP_KERNEL);
200 BUG_ON(!coherency_wa_buf[i]);
201
202 /*
203 * We can't use the DMA mapping API, since we don't
204 * have a valid 'struct device' pointer
205 */
206 coherency_wa_buf_phys[i] =
207 virt_to_phys(coherency_wa_buf[i]);
208 BUG_ON(!coherency_wa_buf_phys[i]);
209
210 /*
211 * Configure the XOR engine for memset operation, with
212 * a 128 bytes block size
213 */
214 writel(0x444, xor_base + XOR_CONFIG(i));
215 writel(128, xor_base + XOR_BLOCK_SIZE(i));
216 writel(coherency_wa_buf_phys[i],
217 xor_base + XOR_DEST_POINTER(i));
218 }
219
220 writel(0x0, xor_base + XOR_INIT_VALUE_LOW);
221 writel(0x0, xor_base + XOR_INIT_VALUE_HIGH);
222
223 coherency_wa_enabled = true;
224}
225
Gregory CLEMENTe60304f2012-10-12 19:20:36 +0200226static inline void mvebu_hwcc_sync_io_barrier(void)
227{
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +0200228 if (coherency_wa_enabled) {
229 mvebu_hwcc_armada375_sync_io_barrier_wa();
230 return;
231 }
232
Gregory CLEMENTe60304f2012-10-12 19:20:36 +0200233 writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
234 while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
235}
236
237static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
238 unsigned long offset, size_t size,
239 enum dma_data_direction dir,
240 struct dma_attrs *attrs)
241{
242 if (dir != DMA_TO_DEVICE)
243 mvebu_hwcc_sync_io_barrier();
244 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
245}
246
247
248static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
249 size_t size, enum dma_data_direction dir,
250 struct dma_attrs *attrs)
251{
252 if (dir != DMA_TO_DEVICE)
253 mvebu_hwcc_sync_io_barrier();
254}
255
256static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
257 size_t size, enum dma_data_direction dir)
258{
259 if (dir != DMA_TO_DEVICE)
260 mvebu_hwcc_sync_io_barrier();
261}
262
/*
 * DMA operations for hardware-coherent I/O: identical to the standard
 * ARM DMA ops except that single-page map/unmap/sync issue the fabric
 * I/O sync barrier (mvebu_hwcc_*) instead of cache maintenance.
 */
static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc = arm_dma_alloc,
	.free = arm_dma_free,
	.mmap = arm_dma_mmap,
	.map_page = mvebu_hwcc_dma_map_page,
	.unmap_page = mvebu_hwcc_dma_unmap_page,
	.get_sgtable = arm_dma_get_sgtable,
	.map_sg = arm_dma_map_sg,
	.unmap_sg = arm_dma_unmap_sg,
	.sync_single_for_cpu = mvebu_hwcc_dma_sync,
	.sync_single_for_device = mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device = arm_dma_sync_sg_for_device,
	.set_dma_mask = arm_dma_set_mask,
};
278
Thomas Petazzonib0063aa2014-05-13 18:04:30 +0200279static int mvebu_hwcc_notifier(struct notifier_block *nb,
280 unsigned long event, void *__dev)
Gregory CLEMENTe60304f2012-10-12 19:20:36 +0200281{
282 struct device *dev = __dev;
283
284 if (event != BUS_NOTIFY_ADD_DEVICE)
285 return NOTIFY_DONE;
286 set_dma_ops(dev, &mvebu_hwcc_dma_ops);
287
288 return NOTIFY_OK;
289}
290
/* Registered on the platform bus: installs the coherent DMA ops on new devices */
static struct notifier_block mvebu_hwcc_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};

/* Same handler, registered separately on the PCI bus (see coherency_pci_init) */
static struct notifier_block mvebu_hwcc_pci_nb = {
	.notifier_call = mvebu_hwcc_notifier,
};
298
/*
 * Armada 370/XP fabric setup: record the fabric's physical base for
 * the secondary-CPU startup path, map both register banks, and make
 * the boot CPU coherent.
 */
static void __init armada_370_coherency_init(struct device_node *np)
{
	struct resource res;

	/* NOTE(review): return value unchecked — assumes the matched DT
	 * node always carries a valid 'reg' property; confirm. */
	of_address_to_resource(np, 0, &res);
	coherency_phys_base = res.start;
	/*
	 * Ensure secondary CPUs will see the updated value,
	 * which they read before they join the coherency
	 * fabric, and therefore before they are coherent with
	 * the boot CPU cache.
	 */
	sync_cache_w(&coherency_phys_base);
	/* Bank 0: fabric control registers; bank 1: per-CPU registers */
	coherency_base = of_iomap(np, 0);
	coherency_cpu_base = of_iomap(np, 1);
	set_cpu_coherent();
}
316
Thomas Petazzoni497a9232014-05-15 16:59:34 +0200317/*
318 * This ioremap hook is used on Armada 375/38x to ensure that PCIe
319 * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
320 * is needed as a workaround for a deadlock issue between the PCIe
321 * interface and the cache controller.
322 */
323static void __iomem *
324armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
325 unsigned int mtype, void *caller)
326{
327 struct resource pcie_mem;
328
329 mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
330
331 if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
332 mtype = MT_UNCACHED;
333
334 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
335}
336
Thomas Petazzonid0de9322014-04-14 15:47:06 +0200337static void __init armada_375_380_coherency_init(struct device_node *np)
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +0200338{
Thomas Petazzoni497a9232014-05-15 16:59:34 +0200339 struct device_node *cache_dn;
340
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +0200341 coherency_cpu_base = of_iomap(np, 0);
Thomas Petazzoni497a9232014-05-15 16:59:34 +0200342 arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
343
344 /*
345 * Add the PL310 property "arm,io-coherent". This makes sure the
346 * outer sync operation is not used, which allows to
347 * workaround the system erratum that causes deadlocks when
348 * doing PCIe in an SMP situation on Armada 375 and Armada
349 * 38x.
350 */
351 for_each_compatible_node(cache_dn, NULL, "arm,pl310-cache") {
352 struct property *p;
353
354 p = kzalloc(sizeof(*p), GFP_KERNEL);
355 p->name = kstrdup("arm,io-coherent", GFP_KERNEL);
356 of_add_property(cache_dn, p);
357 }
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +0200358}
359
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200360static int coherency_type(void)
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300361{
362 struct device_node *np;
Thomas Petazzoni5fbba082014-04-14 15:47:02 +0200363 const struct of_device_id *match;
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300364
Thomas Petazzoni5fbba082014-04-14 15:47:02 +0200365 np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300366 if (np) {
Thomas Petazzoni5fbba082014-04-14 15:47:02 +0200367 int type = (int) match->data;
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200368
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200369 /* Armada 370/XP coherency works in both UP and SMP */
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200370 if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200371 return type;
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200372
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +0200373 /* Armada 375 coherency works only on SMP */
374 else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 && is_smp())
375 return type;
376
Thomas Petazzonid0de9322014-04-14 15:47:06 +0200377 /* Armada 380 coherency works only on SMP */
378 else if (type == COHERENCY_FABRIC_TYPE_ARMADA_380 && is_smp())
379 return type;
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300380 }
381
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200382 return COHERENCY_FABRIC_TYPE_NONE;
383}
384
385int coherency_available(void)
386{
387 return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
388}
389
390int __init coherency_init(void)
391{
392 int type = coherency_type();
393 struct device_node *np;
394
395 np = of_find_matching_node(NULL, of_coherency_table);
396
397 if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
398 armada_370_coherency_init(np);
Thomas Petazzonid0de9322014-04-14 15:47:06 +0200399 else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
400 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
401 armada_375_380_coherency_init(np);
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200402
Thomas Petazzoni2eb04ae2014-10-27 16:32:35 +0100403 of_node_put(np);
404
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300405 return 0;
406}
Thomas Petazzoni865e0522013-06-05 09:04:55 +0200407
408static int __init coherency_late_init(void)
409{
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +0200410 int type = coherency_type();
411
412 if (type == COHERENCY_FABRIC_TYPE_NONE)
413 return 0;
414
Thomas Petazzoni39438562014-05-05 17:05:26 +0200415 if (type == COHERENCY_FABRIC_TYPE_ARMADA_375) {
416 u32 dev, rev;
417
418 if (mvebu_get_soc_id(&dev, &rev) == 0 &&
419 rev == ARMADA_375_Z1_REV)
420 armada_375_coherency_init_wa();
421 }
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +0200422
423 bus_register_notifier(&platform_bus_type,
Thomas Petazzonib0063aa2014-05-13 18:04:30 +0200424 &mvebu_hwcc_nb);
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +0200425
Thomas Petazzoni865e0522013-06-05 09:04:55 +0200426 return 0;
427}
428
429postcore_initcall(coherency_late_init);
Thomas Petazzonib0063aa2014-05-13 18:04:30 +0200430
Thomas Petazzoni8828ccc2014-05-20 17:13:03 +0200431#if IS_ENABLED(CONFIG_PCI)
Thomas Petazzonib0063aa2014-05-13 18:04:30 +0200432static int __init coherency_pci_init(void)
433{
434 if (coherency_available())
435 bus_register_notifier(&pci_bus_type,
Ezequiel Garciaa728b972014-07-08 10:37:37 -0300436 &mvebu_hwcc_pci_nb);
Thomas Petazzonib0063aa2014-05-13 18:04:30 +0200437 return 0;
438}
439
440arch_initcall(coherency_pci_init);
Thomas Petazzoni8828ccc2014-05-20 17:13:03 +0200441#endif