/*
 * GPMC support functions
 *
 * Copyright (C) 2005-2006 Nokia Corporation
 *
 * Author: Juha Yrjola
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/module.h>

#include <asm/mach-types.h>
#include <plat/gpmc.h>

#include <plat/sdrc.h>

/* GPMC register offsets */
#define GPMC_REVISION		0x00
#define GPMC_SYSCONFIG		0x10
#define GPMC_SYSSTATUS		0x14
#define GPMC_IRQSTATUS		0x18
#define GPMC_IRQENABLE		0x1c
#define GPMC_TIMEOUT_CONTROL	0x40
#define GPMC_ERR_ADDRESS	0x44
#define GPMC_ERR_TYPE		0x48
#define GPMC_CONFIG		0x50
#define GPMC_STATUS		0x54
#define GPMC_PREFETCH_CONFIG1	0x1e0
#define GPMC_PREFETCH_CONFIG2	0x1e4
#define GPMC_PREFETCH_CONTROL	0x1ec
#define GPMC_PREFETCH_STATUS	0x1f0
#define GPMC_ECC_CONFIG		0x1f4
#define GPMC_ECC_CONTROL	0x1f8
#define GPMC_ECC_SIZE_CONFIG	0x1fc
#define GPMC_ECC1_RESULT	0x200

#define GPMC_CS0_OFFSET		0x60
#define GPMC_CS_SIZE		0x30

#define GPMC_MEM_START		0x00000000
#define GPMC_MEM_END		0x3FFFFFFF
#define BOOT_ROM_SPACE		0x100000	/* 1MB */

#define GPMC_CHUNK_SHIFT	24		/* 16 MB */
#define GPMC_SECTION_SHIFT	28		/* 128 MB */

#define PREFETCH_FIFOTHRESHOLD	(0x40 << 8)
#define CS_NUM_SHIFT		24
#define ENABLE_PREFETCH		(0x1 << 7)
#define DMA_MPU_MODE		2

/* Structure to save gpmc cs context */
struct gpmc_cs_config {
	u32 config1;
	u32 config2;
	u32 config3;
	u32 config4;
	u32 config5;
	u32 config6;
	u32 config7;
	int is_valid;
};

/*
 * Structure to save/restore gpmc context
 * to support core off on OMAP3
 */
struct omap3_gpmc_regs {
	u32 sysconfig;
	u32 irqenable;
	u32 timeout_ctrl;
	u32 config;
	u32 prefetch_config1;
	u32 prefetch_config2;
	u32 prefetch_control;
	struct gpmc_cs_config cs_context[GPMC_CS_NUM];
};

static struct resource	gpmc_mem_root;
static struct resource	gpmc_cs_mem[GPMC_CS_NUM];
static DEFINE_SPINLOCK(gpmc_mem_lock);
static unsigned int gpmc_cs_map;	/* flag for cs which are initialized */
static int gpmc_ecc_used = -EINVAL;	/* cs using ecc engine */

static void __iomem *gpmc_base;

static struct clk *gpmc_l3_clk;

static void gpmc_write_reg(int idx, u32 val)
{
	__raw_writel(val, gpmc_base + idx);
}

static u32 gpmc_read_reg(int idx)
{
	return __raw_readl(gpmc_base + idx);
}

static void gpmc_cs_write_byte(int cs, int idx, u8 val)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	__raw_writeb(val, reg_addr);
}

static u8 gpmc_cs_read_byte(int cs, int idx)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	return __raw_readb(reg_addr);
}

void gpmc_cs_write_reg(int cs, int idx, u32 val)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	__raw_writel(val, reg_addr);
}

u32 gpmc_cs_read_reg(int cs, int idx)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	return __raw_readl(reg_addr);
}

/* TODO: Add support for gpmc_fck to clock framework and use it */
unsigned long gpmc_get_fclk_period(void)
{
	unsigned long rate = clk_get_rate(gpmc_l3_clk);

	if (rate == 0) {
		printk(KERN_WARNING "gpmc_l3_clk not enabled\n");
		return 0;
	}

	rate /= 1000;
	rate = 1000000000 / rate;	/* In picoseconds */

	return rate;
}

unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	unsigned long tick_ps;

	/* Calculate in picosecs to yield more exact results */
	tick_ps = gpmc_get_fclk_period();

	return (time_ns * 1000 + tick_ps - 1) / tick_ps;
}

unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	return ticks * gpmc_get_fclk_period() / 1000;
}

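/*
 * Worked example (assumed figures, not taken from any particular board):
 * with the GPMC functional clock at 100 MHz, gpmc_get_fclk_period()
 * returns 10000 ps, so
 *
 *	gpmc_ns_to_ticks(25)	== (25 * 1000 + 9999) / 10000 == 3
 *	gpmc_ticks_to_ns(3)	== 3 * 10000 / 1000 == 30
 *
 * i.e. requested delays are always rounded up to whole fclk cycles.
 */
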
unsigned int gpmc_round_ns_to_ticks(unsigned int time_ns)
{
	unsigned long ticks = gpmc_ns_to_ticks(time_ns);

	return ticks * gpmc_get_fclk_period() / 1000;
}

#ifdef DEBUG
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time, const char *name)
#else
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time)
#endif
{
	u32 l;
	int ticks, mask, nr_bits;

	if (time == 0)
		ticks = 0;
	else
		ticks = gpmc_ns_to_ticks(time);
	nr_bits = end_bit - st_bit + 1;
	if (ticks >= 1 << nr_bits) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
				cs, name, time, ticks, 1 << nr_bits);
#endif
		return -1;
	}

	mask = (1 << nr_bits) - 1;
	l = gpmc_cs_read_reg(cs, reg);
#ifdef DEBUG
	printk(KERN_INFO
		"GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
	       cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
			(l >> st_bit) & mask, time);
#endif
	l &= ~(mask << st_bit);
	l |= ticks << st_bit;
	gpmc_cs_write_reg(cs, reg, l);

	return 0;
}

#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
		return -1
#endif

int gpmc_cs_calc_divider(int cs, unsigned int sync_clk)
{
	int div;
	u32 l;

	l = sync_clk * 1000 + (gpmc_get_fclk_period() - 1);
	div = l / gpmc_get_fclk_period();
	if (div > 4)
		return -1;
	if (div <= 0)
		div = 1;

	return div;
}

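/*
 * Example with the same assumed 100 MHz fclk as above: a device asking for
 * a 20 ns GPMC_CLK period gives
 *
 *	gpmc_cs_calc_divider(cs, 20)	== (20000 + 9999) / 10000 == 2
 *
 * so GPMC_CLK would run at fclk/2; anything that needs a divider larger
 * than 4 is rejected with -1.
 */
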
int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
{
	int div;
	u32 l;

	div = gpmc_cs_calc_divider(cs, t->sync_clk);
	if (div < 0)
		return -1;

	GPMC_SET_ONE(GPMC_CS_CONFIG2,  0,  3, cs_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG2,  8, 12, cs_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG3,  0,  3, adv_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG3,  8, 12, adv_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG4,  0,  3, oe_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4,  8, 12, oe_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG5,  0,  4, rd_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5,  8, 12, wr_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);

	if (cpu_is_omap34xx()) {
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
	}

	/* caller is expected to have initialized CONFIG1 to cover
	 * at least sync vs async
	 */
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
	if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
				cs, (div * gpmc_get_fclk_period()) / 1000, div);
#endif
		l &= ~0x03;
		l |= (div - 1);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
	}

	return 0;
}

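/*
 * Minimal usage sketch (hypothetical board code; every value below is an
 * invented placeholder, real numbers must come from the attached device's
 * datasheet).  All struct gpmc_timings fields are in nanoseconds:
 *
 *	struct gpmc_timings t;
 *
 *	memset(&t, 0, sizeof(t));
 *	t.cs_on = 10;
 *	t.cs_rd_off = 100;
 *	t.cs_wr_off = 100;
 *	t.adv_on = 10;
 *	t.oe_on = 15;
 *	t.oe_off = 100;
 *	t.we_on = 15;
 *	t.we_off = 90;
 *	t.access = 95;
 *	t.rd_cycle = 120;
 *	t.wr_cycle = 120;
 *	if (gpmc_cs_set_timings(cs, &t) < 0)
 *		pr_err("GPMC CS%d: invalid timings\n", cs);
 *
 * Each field is converted with gpmc_ns_to_ticks() and the call fails if a
 * value does not fit its bitfield, so keep the numbers realistic.
 */
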
static void gpmc_cs_enable_mem(int cs, u32 base, u32 size)
{
	u32 l;
	u32 mask;

	mask = (1 << GPMC_SECTION_SHIFT) - size;
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l &= ~0x3f;
	l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
	l &= ~(0x0f << 8);
	l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
	l |= GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
}

static void gpmc_cs_disable_mem(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l &= ~GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
}

static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
{
	u32 l;
	u32 mask;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	*base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
	mask = (l >> 8) & 0x0f;
	*size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
}

static int gpmc_cs_mem_enabled(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	return l & GPMC_CONFIG7_CSVALID;
}

int gpmc_cs_set_reserved(int cs, int reserved)
{
	if (cs > GPMC_CS_NUM)
		return -ENODEV;

	gpmc_cs_map &= ~(1 << cs);
	gpmc_cs_map |= (reserved ? 1 : 0) << cs;

	return 0;
}

int gpmc_cs_reserved(int cs)
{
	if (cs > GPMC_CS_NUM)
		return -ENODEV;

	return gpmc_cs_map & (1 << cs);
}

static unsigned long gpmc_mem_align(unsigned long size)
{
	int order;

	size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
	order = GPMC_CHUNK_SHIFT - 1;
	do {
		size >>= 1;
		order++;
	} while (size);
	size = 1 << order;
	return size;
}

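/*
 * gpmc_mem_align() rounds a request up to the GPMC mapping granularity:
 * the next power of two, never less than 16 MB.  For instance
 *
 *	gpmc_mem_align(3 * SZ_1M)	returns 16 MB
 *	gpmc_mem_align(20 * SZ_1M)	returns 32 MB
 *
 * which is why gpmc_cs_request() below may reserve noticeably more of the
 * GPMC address space than the caller asked for.
 */
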
static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
{
	struct resource *res = &gpmc_cs_mem[cs];
	int r;

	size = gpmc_mem_align(size);
	spin_lock(&gpmc_mem_lock);
	res->start = base;
	res->end = base + size - 1;
	r = request_resource(&gpmc_mem_root, res);
	spin_unlock(&gpmc_mem_lock);

	return r;
}

int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
{
	struct resource *res = &gpmc_cs_mem[cs];
	int r = -1;

	if (cs > GPMC_CS_NUM)
		return -ENODEV;

	size = gpmc_mem_align(size);
	if (size > (1 << GPMC_SECTION_SHIFT))
		return -ENOMEM;

	spin_lock(&gpmc_mem_lock);
	if (gpmc_cs_reserved(cs)) {
		r = -EBUSY;
		goto out;
	}
	if (gpmc_cs_mem_enabled(cs))
		r = adjust_resource(res, res->start & ~(size - 1), size);
	if (r < 0)
		r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
				      size, NULL, NULL);
	if (r < 0)
		goto out;

	gpmc_cs_enable_mem(cs, res->start, resource_size(res));
	*base = res->start;
	gpmc_cs_set_reserved(cs, 1);
out:
	spin_unlock(&gpmc_mem_lock);
	return r;
}
EXPORT_SYMBOL(gpmc_cs_request);

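/*
 * Typical use from board setup code (sketch only; the chip-select number
 * and window size are assumptions for illustration):
 *
 *	unsigned long cs_mem_base;
 *
 *	if (gpmc_cs_request(cs, SZ_16M, &cs_mem_base) < 0) {
 *		pr_err("Failed to request GPMC mem for CS%d\n", cs);
 *		return;
 *	}
 *
 * after which cs_mem_base is handed to the device as its I/O window, and
 * gpmc_cs_free(cs) releases it again when the device goes away.
 */
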
void gpmc_cs_free(int cs)
{
	spin_lock(&gpmc_mem_lock);
	if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
		printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
		BUG();
		spin_unlock(&gpmc_mem_lock);
		return;
	}
	gpmc_cs_disable_mem(cs);
	release_resource(&gpmc_cs_mem[cs]);
	gpmc_cs_set_reserved(cs, 0);
	spin_unlock(&gpmc_mem_lock);
}
EXPORT_SYMBOL(gpmc_cs_free);

/**
 * gpmc_read_status - read one of the GPMC status values
 * @cmd: command type (which status to read)
 * @return the requested status value, or -EINVAL for an unknown command
 */
int gpmc_read_status(int cmd)
{
	int status = -EINVAL;
	u32 regval = 0;

	switch (cmd) {
	case GPMC_GET_IRQ_STATUS:
		status = gpmc_read_reg(GPMC_IRQSTATUS);
		break;

	case GPMC_PREFETCH_FIFO_CNT:
		regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
		status = GPMC_PREFETCH_STATUS_FIFO_CNT(regval);
		break;

	case GPMC_PREFETCH_COUNT:
		regval = gpmc_read_reg(GPMC_PREFETCH_STATUS);
		status = GPMC_PREFETCH_STATUS_COUNT(regval);
		break;

	case GPMC_STATUS_BUFFER:
		regval = gpmc_read_reg(GPMC_STATUS);
		/* 1 : buffer is available to write */
		status = regval & GPMC_STATUS_BUFF_EMPTY;
		break;

	default:
		printk(KERN_ERR "gpmc_read_status: Not supported\n");
	}
	return status;
}
EXPORT_SYMBOL(gpmc_read_status);

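/*
 * Example (sketch): a NAND driver draining the prefetch FIFO can poll
 *
 *	fifo_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
 *
 * and a write path can wait for the write buffer with
 *
 *	while (!gpmc_read_status(GPMC_STATUS_BUFFER))
 *		cpu_relax();
 *
 * The command codes are defined in <plat/gpmc.h>.
 */
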
/**
 * gpmc_cs_configure - write request to configure gpmc
 * @cs: chip select number
 * @cmd: command type
 * @wval: value to write
 * @return status of the operation
 */
int gpmc_cs_configure(int cs, int cmd, int wval)
{
	int err = 0;
	u32 regval = 0;

	switch (cmd) {
	case GPMC_SET_IRQ_STATUS:
		gpmc_write_reg(GPMC_IRQSTATUS, wval);
		break;

	case GPMC_CONFIG_WP:
		regval = gpmc_read_reg(GPMC_CONFIG);
		if (wval)
			regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
		else
			regval |= GPMC_CONFIG_WRITEPROTECT;  /* WP is OFF */
		gpmc_write_reg(GPMC_CONFIG, regval);
		break;

	case GPMC_CONFIG_RDY_BSY:
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		if (wval)
			regval |= WR_RD_PIN_MONITORING;
		else
			regval &= ~WR_RD_PIN_MONITORING;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_SIZE:
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		regval |= GPMC_CONFIG1_DEVICESIZE(wval);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_TYPE:
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		regval |= GPMC_CONFIG1_DEVICETYPE(wval);
		if (wval == GPMC_DEVICETYPE_NOR)
			regval |= GPMC_CONFIG1_MUXADDDATA;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	default:
		printk(KERN_ERR "gpmc_configure_cs: Not supported\n");
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL(gpmc_cs_configure);

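/*
 * Example (sketch; the chip-select number is an assumption): NAND setup
 * code can use this interface to enable ready/busy pin monitoring and to
 * release write protect before programming the device:
 *
 *	gpmc_cs_configure(cs, GPMC_CONFIG_RDY_BSY, 1);
 *	gpmc_cs_configure(cs, GPMC_CONFIG_WP, 0);
 *
 * Following the convention in the GPMC_CONFIG_WP case above, wval == 1
 * turns write protect on and wval == 0 turns it off.
 */
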
/**
 * gpmc_nand_read - nand specific read access request
 * @cs: chip select number
 * @cmd: command type
 */
int gpmc_nand_read(int cs, int cmd)
{
	int rval = -EINVAL;

	switch (cmd) {
	case GPMC_NAND_DATA:
		rval = gpmc_cs_read_byte(cs, GPMC_CS_NAND_DATA);
		break;

	default:
		printk(KERN_ERR "gpmc_read_nand_ctrl: Not supported\n");
	}
	return rval;
}
EXPORT_SYMBOL(gpmc_nand_read);

/**
 * gpmc_nand_write - nand specific write request
 * @cs: chip select number
 * @cmd: command type
 * @wval: value to write
 */
int gpmc_nand_write(int cs, int cmd, int wval)
{
	int err = 0;

	switch (cmd) {
	case GPMC_NAND_COMMAND:
		gpmc_cs_write_byte(cs, GPMC_CS_NAND_COMMAND, wval);
		break;

	case GPMC_NAND_ADDRESS:
		gpmc_cs_write_byte(cs, GPMC_CS_NAND_ADDRESS, wval);
		break;

	case GPMC_NAND_DATA:
		gpmc_cs_write_byte(cs, GPMC_CS_NAND_DATA, wval);
		break;

	default:
		printk(KERN_ERR "gpmc_write_nand_ctrl: Not supported\n");
		err = -EINVAL;
	}
	return err;
}
EXPORT_SYMBOL(gpmc_nand_write);

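/*
 * Example (sketch; the NAND_CMD_* opcodes from <linux/mtd/nand.h> are used
 * purely for illustration): a raw page-read sequence on a chip-select
 * looks roughly like
 *
 *	gpmc_nand_write(cs, GPMC_NAND_COMMAND, NAND_CMD_READ0);
 *	gpmc_nand_write(cs, GPMC_NAND_ADDRESS, column & 0xff);
 *	... further address cycles ...
 *	data = gpmc_nand_read(cs, GPMC_NAND_DATA);
 *
 * In practice the omap2 NAND driver is expected to issue these from its
 * command/read hooks rather than open-coding the sequence.
 */
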
/**
 * gpmc_prefetch_enable - configures and starts prefetch transfer
 * @cs: cs (chip select) number
 * @dma_mode: dma mode enable (1) or disable (0)
 * @u32_count: number of bytes to be transferred
 * @is_write: prefetch read(0) or write post(1) mode
 */
int gpmc_prefetch_enable(int cs, int dma_mode,
				unsigned int u32_count, int is_write)
{
	if (!(gpmc_read_reg(GPMC_PREFETCH_CONTROL))) {
		/* Set the amount of bytes to be prefetched */
		gpmc_write_reg(GPMC_PREFETCH_CONFIG2, u32_count);

		/* Set dma/mpu mode, the prefetch read/post-write mode, the
		 * requesting chip-select, and enable the engine.
		 */
		gpmc_write_reg(GPMC_PREFETCH_CONFIG1, ((cs << CS_NUM_SHIFT) |
					PREFETCH_FIFOTHRESHOLD |
					ENABLE_PREFETCH |
					(dma_mode << DMA_MPU_MODE) |
					(0x1 & is_write)));

		/* Start the prefetch engine */
		gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x1);
	} else {
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL(gpmc_prefetch_enable);

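/*
 * Example of MPU-mode use (sketch; variable names and sizes are
 * illustrative): a NAND read path could drive the engine like
 *
 *	if (gpmc_prefetch_enable(cs, 0, len, 0) == 0) {
 *		while (len) {
 *			int avail = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
 *			... copy 'avail' bytes out of the NAND data port ...
 *			len -= avail;
 *		}
 *		gpmc_prefetch_reset(cs);
 *	}
 *
 * gpmc_prefetch_reset(), below, must be called by the same chip-select
 * that started the transfer.
 */
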
/**
 * gpmc_prefetch_reset - disables and stops the prefetch engine
 * @cs: chip select number that started the transfer
 */
int gpmc_prefetch_reset(int cs)
{
	u32 config1;

	/* check if the same module/cs is trying to reset */
	config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
	if (((config1 >> CS_NUM_SHIFT) & 0x7) != cs)
		return -EINVAL;

	/* Stop the PFPW engine */
	gpmc_write_reg(GPMC_PREFETCH_CONTROL, 0x0);

	/* Reset/disable the PFPW engine */
	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, 0x0);

	return 0;
}
EXPORT_SYMBOL(gpmc_prefetch_reset);

static void __init gpmc_mem_init(void)
{
	int cs;
	unsigned long boot_rom_space = 0;

	/* never allocate the first page, to facilitate bug detection;
	 * even if we didn't boot from ROM.
	 */
	boot_rom_space = BOOT_ROM_SPACE;
	/* In apollon the CS0 is mapped as 0x0000 0000 */
	if (machine_is_omap_apollon())
		boot_rom_space = 0;
	gpmc_mem_root.start = GPMC_MEM_START + boot_rom_space;
	gpmc_mem_root.end = GPMC_MEM_END;

	/* Reserve all regions that have been set up by the bootloader */
	for (cs = 0; cs < GPMC_CS_NUM; cs++) {
		u32 base, size;

		if (!gpmc_cs_mem_enabled(cs))
			continue;
		gpmc_cs_get_memconf(cs, &base, &size);
		if (gpmc_cs_insert_mem(cs, base, size) < 0)
			BUG();
	}
}

void __init gpmc_init(void)
{
	u32 l;
	char *ck = NULL;

	if (cpu_is_omap24xx()) {
		ck = "core_l3_ck";
		if (cpu_is_omap2420())
			l = OMAP2420_GPMC_BASE;
		else
			l = OMAP34XX_GPMC_BASE;
	} else if (cpu_is_omap34xx()) {
		ck = "gpmc_fck";
		l = OMAP34XX_GPMC_BASE;
	} else if (cpu_is_omap44xx()) {
		ck = "gpmc_ck";
		l = OMAP44XX_GPMC_BASE;
	}

	if (WARN_ON(!ck))
		return;

	gpmc_l3_clk = clk_get(NULL, ck);
	if (IS_ERR(gpmc_l3_clk)) {
		printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
		BUG();
	}

	gpmc_base = ioremap(l, SZ_4K);
	if (!gpmc_base) {
		clk_put(gpmc_l3_clk);
		printk(KERN_ERR "Could not get GPMC register memory\n");
		BUG();
	}

	clk_enable(gpmc_l3_clk);

	l = gpmc_read_reg(GPMC_REVISION);
	printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
	/* Set smart idle mode and automatic L3 clock gating */
	l = gpmc_read_reg(GPMC_SYSCONFIG);
	l &= 0x03 << 3;
	l |= (0x02 << 3) | (1 << 0);
	gpmc_write_reg(GPMC_SYSCONFIG, l);
	gpmc_mem_init();
}

#ifdef CONFIG_ARCH_OMAP3
static struct omap3_gpmc_regs gpmc_context;

void omap3_gpmc_save_context(void)
{
	int i;

	gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
	gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
	gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
	gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
	gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
	gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
	gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_context.cs_context[i].config1 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
			gpmc_context.cs_context[i].config2 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
			gpmc_context.cs_context[i].config3 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
			gpmc_context.cs_context[i].config4 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
			gpmc_context.cs_context[i].config5 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
			gpmc_context.cs_context[i].config6 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
			gpmc_context.cs_context[i].config7 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
		}
	}
}

void omap3_gpmc_restore_context(void)
{
	int i;

	gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
	gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
	gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
	gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
	gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
	for (i = 0; i < GPMC_CS_NUM; i++) {
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
				gpmc_context.cs_context[i].config1);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
				gpmc_context.cs_context[i].config2);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
				gpmc_context.cs_context[i].config3);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
				gpmc_context.cs_context[i].config4);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
				gpmc_context.cs_context[i].config5);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
				gpmc_context.cs_context[i].config6);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
				gpmc_context.cs_context[i].config7);
		}
	}
}
#endif /* CONFIG_ARCH_OMAP3 */

/**
 * gpmc_enable_hwecc - enable hardware ecc functionality
 * @cs: chip select number
 * @mode: read/write mode
 * @dev_width: device bus width (1 for x16, 0 for x8)
 * @ecc_size: bytes for which ECC will be generated
 */
int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
{
	unsigned int val;

	/* check if the ECC engine is already in use */
	if (gpmc_ecc_used != -EINVAL)
		return -EINVAL;

	gpmc_ecc_used = cs;

	/* clear ecc and enable bits */
	val = ((0x00000001 << 8) | 0x00000001);
	gpmc_write_reg(GPMC_ECC_CONTROL, val);

	/* program ecc and result sizes */
	val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
	gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, val);

	switch (mode) {
	case GPMC_ECC_READ:
		gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
		break;
	case GPMC_ECC_READSYN:
		gpmc_write_reg(GPMC_ECC_CONTROL, 0x100);
		break;
	case GPMC_ECC_WRITE:
		gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
		break;
	default:
		printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
		break;
	}

	/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
	val = (dev_width << 7) | (cs << 1) | (0x1);
	gpmc_write_reg(GPMC_ECC_CONFIG, val);
	return 0;
}

/**
 * gpmc_calculate_ecc - generate non-inverted ecc bytes
 * @cs: chip select number
 * @dat: data pointer over which ecc is computed
 * @ecc_code: ecc code buffer
 *
 * Using non-inverted ECC is considered ugly since writing a blank
 * page (padding) will clear the ECC bytes. This is not a problem as long
 * as no one is trying to write data on the seemingly unused page. Reading
 * an erased page will produce an ECC mismatch between generated and read
 * ECC bytes that has to be dealt with separately.
 */
int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code)
{
	unsigned int val = 0x0;

	if (gpmc_ecc_used != cs)
		return -EINVAL;

	/* read ecc result */
	val = gpmc_read_reg(GPMC_ECC1_RESULT);
	*ecc_code++ = val;          /* P128e, ..., P1e */
	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);

	gpmc_ecc_used = -EINVAL;
	return 0;
}
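
/*
 * Example ECC sequence (sketch; buffer names are illustrative).  A NAND
 * write path arms the engine before the data phase and harvests the three
 * ECC bytes afterwards:
 *
 *	gpmc_enable_hwecc(cs, GPMC_ECC_WRITE, dev_width, ecc_size);
 *	... write ecc_size bytes of the page through the NAND data port ...
 *	gpmc_calculate_ecc(cs, dat, ecc_calc);
 *
 * gpmc_calculate_ecc() fills three bytes of ecc_calc and releases the
 * engine; only one chip-select can own it at a time, which is what the
 * gpmc_ecc_used bookkeeping above enforces.
 */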