blob: 9d5ccd372712ffa0e16e50e17585468a86ab2e09 [file] [log] [blame]
/*
 * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370 and Armada XP SOCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and between
 * CPUs and I/O masters. This file initializes the coherency fabric and
 * supplies basic routines for configuring and controlling hardware coherency.
 */
19
/* Prefix every pr_*() message emitted from this file */
#define pr_fmt(fmt) "mvebu-coherency: " fmt
21
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mbus.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include "armada-370-xp.h"
#include "coherency.h"
Gregory CLEMENT009f1312012-08-02 11:16:29 +030036
/*
 * Physical base of the coherency fabric registers. Secondary CPUs read
 * this before they join the fabric (see armada_370_coherency_init(),
 * which flushes it to memory with sync_cache_w()), so it must stay a
 * plain non-static global.
 */
unsigned long coherency_phys_base;
/* Virtual mapping of the coherency fabric control registers */
void __iomem *coherency_base;
/* Virtual mapping of the per-CPU coherency registers (I/O sync barrier) */
static void __iomem *coherency_cpu_base;

/* Coherency fabric registers */
#define COHERENCY_FABRIC_CFG_OFFSET		   0x4

#define IO_SYNC_BARRIER_CTL_OFFSET		   0x0
45
/*
 * The supported coherency fabric variants. These values are carried in
 * the .data field of the of_device_id match table below.
 */
enum {
	COHERENCY_FABRIC_TYPE_NONE,
	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
	COHERENCY_FABRIC_TYPE_ARMADA_375,
	COHERENCY_FABRIC_TYPE_ARMADA_380,
};
52
Gregory CLEMENT009f1312012-08-02 11:16:29 +030053static struct of_device_id of_coherency_table[] = {
Thomas Petazzoni924d38f2014-04-14 15:46:59 +020054 {.compatible = "marvell,coherency-fabric",
55 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +020056 {.compatible = "marvell,armada-375-coherency-fabric",
57 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_375 },
Thomas Petazzonid0de9322014-04-14 15:47:06 +020058 {.compatible = "marvell,armada-380-coherency-fabric",
59 .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_380 },
Gregory CLEMENT009f1312012-08-02 11:16:29 +030060 { /* end of list */ },
61};
62
/*
 * Low-level helpers defined in coherency_ll.S: add the calling CPU to
 * the SMP coherency group, and enable coherency for it.
 */
int ll_enable_coherency(void);
void ll_add_cpu_to_smp_group(void);
Gregory CLEMENT009f1312012-08-02 11:16:29 +030066
Gregory CLEMENT952f4ca2014-04-14 17:10:07 +020067int set_cpu_coherent(void)
Gregory CLEMENT009f1312012-08-02 11:16:29 +030068{
69 if (!coherency_base) {
Gregory CLEMENTb41375f2014-04-14 17:10:06 +020070 pr_warn("Can't make current CPU cache coherent.\n");
Gregory CLEMENT009f1312012-08-02 11:16:29 +030071 pr_warn("Coherency fabric is not initialized\n");
72 return 1;
73 }
74
Gregory CLEMENT2e8a5942014-04-14 17:10:08 +020075 ll_add_cpu_to_smp_group();
76 return ll_enable_coherency();
Gregory CLEMENT009f1312012-08-02 11:16:29 +030077}
78
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +020079/*
80 * The below code implements the I/O coherency workaround on Armada
81 * 375. This workaround consists in using the two channels of the
82 * first XOR engine to trigger a XOR transaction that serves as the
83 * I/O coherency barrier.
84 */
85
86static void __iomem *xor_base, *xor_high_base;
87static dma_addr_t coherency_wa_buf_phys[CONFIG_NR_CPUS];
88static void *coherency_wa_buf[CONFIG_NR_CPUS];
89static bool coherency_wa_enabled;
90
91#define XOR_CONFIG(chan) (0x10 + (chan * 4))
92#define XOR_ACTIVATION(chan) (0x20 + (chan * 4))
93#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))
94#define WINDOW_BASE(w) (0x250 + ((w) << 2))
95#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
96#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
97#define WINDOW_OVERRIDE_CTRL(chan) (0x2A0 + ((chan) << 2))
98#define XOR_DEST_POINTER(chan) (0x2B0 + (chan * 4))
99#define XOR_BLOCK_SIZE(chan) (0x2C0 + (chan * 4))
100#define XOR_INIT_VALUE_LOW 0x2E0
101#define XOR_INIT_VALUE_HIGH 0x2E4
102
/*
 * Armada 375 Z1 I/O barrier workaround: write a marker word into this
 * CPU's bounce buffer, trigger a XOR memset transaction over it, and
 * spin until the engine has zeroed the marker again.
 *
 * NOTE(review): indexes per-CPU state via smp_processor_id(), so it
 * presumably relies on callers running in non-migratable (atomic)
 * context — confirm against the DMA-ops call sites.
 */
static inline void mvebu_hwcc_armada375_sync_io_barrier_wa(void)
{
	int idx = smp_processor_id();

	/* Write '1' to the first word of the buffer */
	writel(0x1, coherency_wa_buf[idx]);

	/* Wait until the engine is idle */
	while ((readl(xor_base + XOR_ACTIVATION(idx)) >> 4) & 0x3)
		;

	dmb();

	/* Trigger channel */
	writel(0x1, xor_base + XOR_ACTIVATION(idx));

	/* Poll the data until it is cleared by the XOR transaction */
	while (readl(coherency_wa_buf[idx]))
		;
}
123
124static void __init armada_375_coherency_init_wa(void)
125{
126 const struct mbus_dram_target_info *dram;
127 struct device_node *xor_node;
128 struct property *xor_status;
129 struct clk *xor_clk;
130 u32 win_enable = 0;
131 int i;
132
133 pr_warn("enabling coherency workaround for Armada 375 Z1, one XOR engine disabled\n");
134
135 /*
136 * Since the workaround uses one XOR engine, we grab a
137 * reference to its Device Tree node first.
138 */
139 xor_node = of_find_compatible_node(NULL, NULL, "marvell,orion-xor");
140 BUG_ON(!xor_node);
141
142 /*
143 * Then we mark it as disabled so that the real XOR driver
144 * will not use it.
145 */
146 xor_status = kzalloc(sizeof(struct property), GFP_KERNEL);
147 BUG_ON(!xor_status);
148
149 xor_status->value = kstrdup("disabled", GFP_KERNEL);
150 BUG_ON(!xor_status->value);
151
152 xor_status->length = 8;
153 xor_status->name = kstrdup("status", GFP_KERNEL);
154 BUG_ON(!xor_status->name);
155
156 of_update_property(xor_node, xor_status);
157
158 /*
159 * And we remap the registers, get the clock, and do the
160 * initial configuration of the XOR engine.
161 */
162 xor_base = of_iomap(xor_node, 0);
163 xor_high_base = of_iomap(xor_node, 1);
164
165 xor_clk = of_clk_get_by_name(xor_node, NULL);
166 BUG_ON(!xor_clk);
167
168 clk_prepare_enable(xor_clk);
169
170 dram = mv_mbus_dram_info();
171
172 for (i = 0; i < 8; i++) {
173 writel(0, xor_base + WINDOW_BASE(i));
174 writel(0, xor_base + WINDOW_SIZE(i));
175 if (i < 4)
176 writel(0, xor_base + WINDOW_REMAP_HIGH(i));
177 }
178
179 for (i = 0; i < dram->num_cs; i++) {
180 const struct mbus_dram_window *cs = dram->cs + i;
181 writel((cs->base & 0xffff0000) |
182 (cs->mbus_attr << 8) |
183 dram->mbus_dram_target_id, xor_base + WINDOW_BASE(i));
184 writel((cs->size - 1) & 0xffff0000, xor_base + WINDOW_SIZE(i));
185
186 win_enable |= (1 << i);
187 win_enable |= 3 << (16 + (2 * i));
188 }
189
190 writel(win_enable, xor_base + WINDOW_BAR_ENABLE(0));
191 writel(win_enable, xor_base + WINDOW_BAR_ENABLE(1));
192 writel(0, xor_base + WINDOW_OVERRIDE_CTRL(0));
193 writel(0, xor_base + WINDOW_OVERRIDE_CTRL(1));
194
195 for (i = 0; i < CONFIG_NR_CPUS; i++) {
196 coherency_wa_buf[i] = kzalloc(PAGE_SIZE, GFP_KERNEL);
197 BUG_ON(!coherency_wa_buf[i]);
198
199 /*
200 * We can't use the DMA mapping API, since we don't
201 * have a valid 'struct device' pointer
202 */
203 coherency_wa_buf_phys[i] =
204 virt_to_phys(coherency_wa_buf[i]);
205 BUG_ON(!coherency_wa_buf_phys[i]);
206
207 /*
208 * Configure the XOR engine for memset operation, with
209 * a 128 bytes block size
210 */
211 writel(0x444, xor_base + XOR_CONFIG(i));
212 writel(128, xor_base + XOR_BLOCK_SIZE(i));
213 writel(coherency_wa_buf_phys[i],
214 xor_base + XOR_DEST_POINTER(i));
215 }
216
217 writel(0x0, xor_base + XOR_INIT_VALUE_LOW);
218 writel(0x0, xor_base + XOR_INIT_VALUE_HIGH);
219
220 coherency_wa_enabled = true;
221}
222
Gregory CLEMENTe60304f2012-10-12 19:20:36 +0200223static inline void mvebu_hwcc_sync_io_barrier(void)
224{
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +0200225 if (coherency_wa_enabled) {
226 mvebu_hwcc_armada375_sync_io_barrier_wa();
227 return;
228 }
229
Gregory CLEMENTe60304f2012-10-12 19:20:36 +0200230 writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
231 while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
232}
233
234static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
235 unsigned long offset, size_t size,
236 enum dma_data_direction dir,
237 struct dma_attrs *attrs)
238{
239 if (dir != DMA_TO_DEVICE)
240 mvebu_hwcc_sync_io_barrier();
241 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
242}
243
244
245static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
246 size_t size, enum dma_data_direction dir,
247 struct dma_attrs *attrs)
248{
249 if (dir != DMA_TO_DEVICE)
250 mvebu_hwcc_sync_io_barrier();
251}
252
253static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
254 size_t size, enum dma_data_direction dir)
255{
256 if (dir != DMA_TO_DEVICE)
257 mvebu_hwcc_sync_io_barrier();
258}
259
/*
 * DMA operations for hardware-coherent I/O: reuse the generic ARM DMA
 * helpers for allocation, mmap and scatter-gather, but replace the
 * per-transfer hooks with the I/O sync barrier variants above.
 */
static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.map_page		= mvebu_hwcc_dma_map_page,
	.unmap_page		= mvebu_hwcc_dma_unmap_page,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= mvebu_hwcc_dma_sync,
	.sync_single_for_device	= mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
275
276static int mvebu_hwcc_platform_notifier(struct notifier_block *nb,
277 unsigned long event, void *__dev)
278{
279 struct device *dev = __dev;
280
281 if (event != BUS_NOTIFY_ADD_DEVICE)
282 return NOTIFY_DONE;
283 set_dma_ops(dev, &mvebu_hwcc_dma_ops);
284
285 return NOTIFY_OK;
286}
287
/* Registered on the platform bus from coherency_late_init() */
static struct notifier_block mvebu_hwcc_platform_nb = {
	.notifier_call = mvebu_hwcc_platform_notifier,
};
291
/*
 * armada_370_coherency_init - record and map the Armada 370/XP
 * coherency fabric registers, then make the boot CPU coherent.
 *
 * @np: the matched coherency-fabric device tree node.
 */
static void __init armada_370_coherency_init(struct device_node *np)
{
	struct resource res;

	of_address_to_resource(np, 0, &res);
	coherency_phys_base = res.start;
	/*
	 * Ensure secondary CPUs will see the updated value,
	 * which they read before they join the coherency
	 * fabric, and therefore before they are coherent with
	 * the boot CPU cache.
	 */
	sync_cache_w(&coherency_phys_base);
	coherency_base = of_iomap(np, 0);
	coherency_cpu_base = of_iomap(np, 1);
	set_cpu_coherent();
}
309
/*
 * armada_375_380_coherency_init - map the Armada 375/380 coherency
 * registers. Only coherency_cpu_base (the I/O sync barrier window) is
 * mapped here; coherency_base is left NULL on these SoCs.
 */
static void __init armada_375_380_coherency_init(struct device_node *np)
{
	coherency_cpu_base = of_iomap(np, 0);
}
314
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200315static int coherency_type(void)
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300316{
317 struct device_node *np;
Thomas Petazzoni5fbba082014-04-14 15:47:02 +0200318 const struct of_device_id *match;
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300319
Thomas Petazzoni5fbba082014-04-14 15:47:02 +0200320 np = of_find_matching_node_and_match(NULL, of_coherency_table, &match);
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300321 if (np) {
Thomas Petazzoni5fbba082014-04-14 15:47:02 +0200322 int type = (int) match->data;
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200323
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200324 /* Armada 370/XP coherency works in both UP and SMP */
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200325 if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200326 return type;
Thomas Petazzoni924d38f2014-04-14 15:46:59 +0200327
Thomas Petazzoni77fa4b92014-04-14 15:47:04 +0200328 /* Armada 375 coherency works only on SMP */
329 else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 && is_smp())
330 return type;
331
Thomas Petazzonid0de9322014-04-14 15:47:06 +0200332 /* Armada 380 coherency works only on SMP */
333 else if (type == COHERENCY_FABRIC_TYPE_ARMADA_380 && is_smp())
334 return type;
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300335 }
336
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200337 return COHERENCY_FABRIC_TYPE_NONE;
338}
339
/*
 * coherency_available - non-zero when a usable hardware coherency
 * fabric is present for the current UP/SMP configuration.
 */
int coherency_available(void)
{
	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
}
344
345int __init coherency_init(void)
346{
347 int type = coherency_type();
348 struct device_node *np;
349
350 np = of_find_matching_node(NULL, of_coherency_table);
351
352 if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
353 armada_370_coherency_init(np);
Thomas Petazzonid0de9322014-04-14 15:47:06 +0200354 else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375 ||
355 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
356 armada_375_380_coherency_init(np);
Thomas Petazzoni501f9282014-04-14 15:47:00 +0200357
Gregory CLEMENT009f1312012-08-02 11:16:29 +0300358 return 0;
359}
Thomas Petazzoni865e0522013-06-05 09:04:55 +0200360
361static int __init coherency_late_init(void)
362{
Thomas Petazzoni5ab5afd2014-04-14 15:47:05 +0200363 int type = coherency_type();
364
365 if (type == COHERENCY_FABRIC_TYPE_NONE)
366 return 0;
367
368 if (type == COHERENCY_FABRIC_TYPE_ARMADA_375)
369 armada_375_coherency_init_wa();
370
371 bus_register_notifier(&platform_bus_type,
372 &mvebu_hwcc_platform_nb);
373
Thomas Petazzoni865e0522013-06-05 09:04:55 +0200374 return 0;
375}
376
377postcore_initcall(coherency_late_init);