blob: 6d6b25c46b1f21b2ccdfae69686a52e2e5ce5a27 [file] [log] [blame]
/*
 * GPMC support functions
 *
 * Copyright (C) 2005-2006 Nokia Corporation
 *
 * Author: Juha Yrjola
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/err.h>
15#include <linux/clk.h>
Imre Deakf37e4582006-09-25 12:41:33 +030016#include <linux/ioport.h>
17#include <linux/spinlock.h>
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -070018
19#include <asm/io.h>
Kyungmin Park7f245162006-12-29 16:48:51 -080020#include <asm/mach-types.h>
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -070021#include <asm/arch/gpmc.h>
22
23#undef DEBUG
24
25#define GPMC_BASE 0x6800a000
26#define GPMC_REVISION 0x00
27#define GPMC_SYSCONFIG 0x10
28#define GPMC_SYSSTATUS 0x14
29#define GPMC_IRQSTATUS 0x18
30#define GPMC_IRQENABLE 0x1c
31#define GPMC_TIMEOUT_CONTROL 0x40
32#define GPMC_ERR_ADDRESS 0x44
33#define GPMC_ERR_TYPE 0x48
34#define GPMC_CONFIG 0x50
35#define GPMC_STATUS 0x54
36#define GPMC_PREFETCH_CONFIG1 0x1e0
37#define GPMC_PREFETCH_CONFIG2 0x1e4
38#define GPMC_PREFETCH_CONTROL 0x1e8
39#define GPMC_PREFETCH_STATUS 0x1f0
40#define GPMC_ECC_CONFIG 0x1f4
41#define GPMC_ECC_CONTROL 0x1f8
42#define GPMC_ECC_SIZE_CONFIG 0x1fc
43
44#define GPMC_CS0 0x60
45#define GPMC_CS_SIZE 0x30
46
Imre Deakf37e4582006-09-25 12:41:33 +030047#define GPMC_CS_NUM 8
48#define GPMC_MEM_START 0x00000000
49#define GPMC_MEM_END 0x3FFFFFFF
50#define BOOT_ROM_SPACE 0x100000 /* 1MB */
51
52#define GPMC_CHUNK_SHIFT 24 /* 16 MB */
53#define GPMC_SECTION_SHIFT 28 /* 128 MB */
54
55static struct resource gpmc_mem_root;
56static struct resource gpmc_cs_mem[GPMC_CS_NUM];
Thomas Gleixner87b247c2007-05-10 22:33:04 -070057static DEFINE_SPINLOCK(gpmc_mem_lock);
Imre Deakf37e4582006-09-25 12:41:33 +030058static unsigned gpmc_cs_map;
59
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -070060static void __iomem *gpmc_base =
61 (void __iomem *) IO_ADDRESS(GPMC_BASE);
62static void __iomem *gpmc_cs_base =
63 (void __iomem *) IO_ADDRESS(GPMC_BASE) + GPMC_CS0;
64
65static struct clk *gpmc_l3_clk;
66
67static void gpmc_write_reg(int idx, u32 val)
68{
69 __raw_writel(val, gpmc_base + idx);
70}
71
72static u32 gpmc_read_reg(int idx)
73{
74 return __raw_readl(gpmc_base + idx);
75}
76
77void gpmc_cs_write_reg(int cs, int idx, u32 val)
78{
79 void __iomem *reg_addr;
80
81 reg_addr = gpmc_cs_base + (cs * GPMC_CS_SIZE) + idx;
82 __raw_writel(val, reg_addr);
83}
84
85u32 gpmc_cs_read_reg(int cs, int idx)
86{
87 return __raw_readl(gpmc_cs_base + (cs * GPMC_CS_SIZE) + idx);
88}
89
90/* TODO: Add support for gpmc_fck to clock framework and use it */
David Brownell1c22cc12006-12-06 17:13:55 -080091unsigned long gpmc_get_fclk_period(void)
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -070092{
93 /* In picoseconds */
94 return 1000000000 / ((clk_get_rate(gpmc_l3_clk)) / 1000);
95}
96
/* Convert @time_ns nanoseconds to GPMC fclk ticks, rounding up. */
unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	/* Calculate in picosecs to yield more exact results */
	unsigned long tick_ps = gpmc_get_fclk_period();
	unsigned long time_ps = time_ns * 1000;

	return (time_ps + tick_ps - 1) / tick_ps;
}
106
/*
 * Program a single timing field of a chip-select register.
 *
 * Converts @time (nanoseconds) to fclk ticks and writes the result
 * into bits [st_bit:end_bit] of register @reg on chip-select @cs,
 * preserving all other bits.  The DEBUG build additionally takes the
 * field @name so out-of-range values and programmed results can be
 * logged.
 *
 * Returns 0 on success, -1 if the tick count does not fit in the field.
 */
#ifdef DEBUG
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time, const char *name)
#else
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time)
#endif
{
	u32 l;
	int ticks, mask, nr_bits;

	if (time == 0)
		ticks = 0;
	else
		ticks = gpmc_ns_to_ticks(time);
	nr_bits = end_bit - st_bit + 1;
	/* Refuse values that would overflow the field */
	if (ticks >= 1 << nr_bits) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
				cs, name, time, ticks, 1 << nr_bits);
#endif
		return -1;
	}

	mask = (1 << nr_bits) - 1;
	/* Read-modify-write: only the target field is replaced */
	l = gpmc_cs_read_reg(cs, reg);
#ifdef DEBUG
	printk(KERN_INFO
		"GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
	       cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
			(l >> st_bit) & mask, time);
#endif
	l &= ~(mask << st_bit);
	l |= ticks << st_bit;
	gpmc_cs_write_reg(cs, reg, l);

	return 0;
}
145
/*
 * Helper for gpmc_cs_set_timings(): program one timing field and make
 * the *calling* function return -1 if the value does not fit.  The
 * DEBUG variant also passes the stringified field name for logging.
 */
#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
		return -1
#endif
156
157int gpmc_cs_calc_divider(int cs, unsigned int sync_clk)
158{
159 int div;
160 u32 l;
161
162 l = sync_clk * 1000 + (gpmc_get_fclk_period() - 1);
163 div = l / gpmc_get_fclk_period();
164 if (div > 4)
165 return -1;
David Brownell1c22cc12006-12-06 17:13:55 -0800166 if (div <= 0)
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -0700167 div = 1;
168
169 return div;
170}
171
/*
 * Program the full timing parameter set @t for chip-select @cs.
 *
 * Each GPMC_SET_ONE() converts one nanosecond field of @t to fclk
 * ticks and writes it into CONFIG2..CONFIG5; any field that does not
 * fit makes this function return -1 immediately.  If CONFIG1 marks
 * the chip-select as synchronous (read or write), the GPMC_CLK
 * divider derived from t->sync_clk is programmed as well.
 *
 * Returns 0 on success, -1 on any out-of-range timing or divider.
 */
int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
{
	int div;
	u32 l;

	div = gpmc_cs_calc_divider(cs, t->sync_clk);
	if (div < 0)
		return -1;

	GPMC_SET_ONE(GPMC_CS_CONFIG2,  0,  3, cs_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG2,  8, 12, cs_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG3,  0,  3, adv_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG3,  8, 12, adv_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG4,  0,  3, oe_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4,  8, 12, oe_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG5,  0,  4, rd_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5,  8, 12, wr_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);

	/* caller is expected to have initialized CONFIG1 to cover
	 * at least sync vs async
	 */
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
	if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
				cs, (div * gpmc_get_fclk_period()) / 1000, div);
#endif
		/* GPMCFCLKDIVIDER lives in CONFIG1 bits 1:0 */
		l &= ~0x03;
		l |= (div - 1);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
	}

	return 0;
}
216
Imre Deakf37e4582006-09-25 12:41:33 +0300217static void gpmc_cs_enable_mem(int cs, u32 base, u32 size)
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -0700218{
Imre Deakf37e4582006-09-25 12:41:33 +0300219 u32 l;
220 u32 mask;
221
222 mask = (1 << GPMC_SECTION_SHIFT) - size;
223 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
224 l &= ~0x3f;
225 l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
226 l &= ~(0x0f << 8);
227 l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
228 l |= 1 << 6; /* CSVALID */
229 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
230}
231
232static void gpmc_cs_disable_mem(int cs)
233{
234 u32 l;
235
236 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
237 l &= ~(1 << 6); /* CSVALID */
238 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
239}
240
241static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
242{
243 u32 l;
244 u32 mask;
245
246 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
247 *base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
248 mask = (l >> 8) & 0x0f;
249 *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
250}
251
252static int gpmc_cs_mem_enabled(int cs)
253{
254 u32 l;
255
256 l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
257 return l & (1 << 6);
258}
259
Tony Lindgrenc40fae952006-12-07 13:58:10 -0800260int gpmc_cs_set_reserved(int cs, int reserved)
Imre Deakf37e4582006-09-25 12:41:33 +0300261{
Tony Lindgrenc40fae952006-12-07 13:58:10 -0800262 if (cs > GPMC_CS_NUM)
263 return -ENODEV;
264
Imre Deakf37e4582006-09-25 12:41:33 +0300265 gpmc_cs_map &= ~(1 << cs);
266 gpmc_cs_map |= (reserved ? 1 : 0) << cs;
Tony Lindgrenc40fae952006-12-07 13:58:10 -0800267
268 return 0;
Imre Deakf37e4582006-09-25 12:41:33 +0300269}
270
Tony Lindgrenc40fae952006-12-07 13:58:10 -0800271int gpmc_cs_reserved(int cs)
Imre Deakf37e4582006-09-25 12:41:33 +0300272{
Tony Lindgrenc40fae952006-12-07 13:58:10 -0800273 if (cs > GPMC_CS_NUM)
274 return -ENODEV;
275
Imre Deakf37e4582006-09-25 12:41:33 +0300276 return gpmc_cs_map & (1 << cs);
277}
278
279static unsigned long gpmc_mem_align(unsigned long size)
280{
281 int order;
282
283 size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
284 order = GPMC_CHUNK_SHIFT - 1;
285 do {
286 size >>= 1;
287 order++;
288 } while (size);
289 size = 1 << order;
290 return size;
291}
292
293static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
294{
295 struct resource *res = &gpmc_cs_mem[cs];
296 int r;
297
298 size = gpmc_mem_align(size);
299 spin_lock(&gpmc_mem_lock);
300 res->start = base;
301 res->end = base + size - 1;
302 r = request_resource(&gpmc_mem_root, res);
303 spin_unlock(&gpmc_mem_lock);
304
305 return r;
306}
307
308int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
309{
310 struct resource *res = &gpmc_cs_mem[cs];
311 int r = -1;
312
313 if (cs > GPMC_CS_NUM)
314 return -ENODEV;
315
316 size = gpmc_mem_align(size);
317 if (size > (1 << GPMC_SECTION_SHIFT))
318 return -ENOMEM;
319
320 spin_lock(&gpmc_mem_lock);
321 if (gpmc_cs_reserved(cs)) {
322 r = -EBUSY;
323 goto out;
324 }
325 if (gpmc_cs_mem_enabled(cs))
326 r = adjust_resource(res, res->start & ~(size - 1), size);
327 if (r < 0)
328 r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
329 size, NULL, NULL);
330 if (r < 0)
331 goto out;
332
333 gpmc_cs_enable_mem(cs, res->start, res->end - res->start + 1);
334 *base = res->start;
335 gpmc_cs_set_reserved(cs, 1);
336out:
337 spin_unlock(&gpmc_mem_lock);
338 return r;
339}
340
/*
 * Release chip-select @cs: disable its address decode, return its
 * range to the GPMC resource tree and clear its reserved bit.
 * Freeing an invalid or non-reserved chip-select is a driver bug.
 */
void gpmc_cs_free(int cs)
{
	spin_lock(&gpmc_mem_lock);
	if (cs >= GPMC_CS_NUM || !gpmc_cs_reserved(cs)) {
		printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
		BUG();
		/* Unreachable when BUG() halts; kept so BUG-disabled
		 * builds still release the lock and bail out. */
		spin_unlock(&gpmc_mem_lock);
		return;
	}
	gpmc_cs_disable_mem(cs);
	release_resource(&gpmc_cs_mem[cs]);
	gpmc_cs_set_reserved(cs, 0);
	spin_unlock(&gpmc_mem_lock);
}
355
356void __init gpmc_mem_init(void)
357{
358 int cs;
359 unsigned long boot_rom_space = 0;
360
Kyungmin Park7f245162006-12-29 16:48:51 -0800361 /* never allocate the first page, to facilitate bug detection;
362 * even if we didn't boot from ROM.
363 */
364 boot_rom_space = BOOT_ROM_SPACE;
365 /* In apollon the CS0 is mapped as 0x0000 0000 */
366 if (machine_is_omap_apollon())
367 boot_rom_space = 0;
Imre Deakf37e4582006-09-25 12:41:33 +0300368 gpmc_mem_root.start = GPMC_MEM_START + boot_rom_space;
369 gpmc_mem_root.end = GPMC_MEM_END;
370
371 /* Reserve all regions that has been set up by bootloader */
372 for (cs = 0; cs < GPMC_CS_NUM; cs++) {
373 u32 base, size;
374
375 if (!gpmc_cs_mem_enabled(cs))
376 continue;
377 gpmc_cs_get_memconf(cs, &base, &size);
378 if (gpmc_cs_insert_mem(cs, base, size) < 0)
379 BUG();
380 }
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -0700381}
382
383void __init gpmc_init(void)
384{
385 u32 l;
386
387 gpmc_l3_clk = clk_get(NULL, "core_l3_ck");
388 BUG_ON(IS_ERR(gpmc_l3_clk));
389
390 l = gpmc_read_reg(GPMC_REVISION);
391 printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
392 /* Set smart idle mode and automatic L3 clock gating */
393 l = gpmc_read_reg(GPMC_SYSCONFIG);
394 l &= 0x03 << 3;
395 l |= (0x02 << 3) | (1 << 0);
396 gpmc_write_reg(GPMC_SYSCONFIG, l);
Imre Deakf37e4582006-09-25 12:41:33 +0300397
398 gpmc_mem_init();
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -0700399}