/*
 * GPMC support functions
 *
 * Copyright (C) 2005-2006 Nokia Corporation
 *
 * Author: Juha Yrjola
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_mtd.h>
#include <linux/of_device.h>
#include <linux/omap-gpmc.h>
#include <linux/mtd/nand.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/mtd-nand-omap2.h>
#include <linux/platform_data/mtd-onenand-omap2.h>

#include "soc.h"
#include "omap_device.h"

#include <asm/mach-types.h>

#define DEVICE_NAME		"omap-gpmc"

/* GPMC register offsets */
#define GPMC_REVISION		0x00
#define GPMC_SYSCONFIG		0x10
#define GPMC_SYSSTATUS		0x14
#define GPMC_IRQSTATUS		0x18
#define GPMC_IRQENABLE		0x1c
#define GPMC_TIMEOUT_CONTROL	0x40
#define GPMC_ERR_ADDRESS	0x44
#define GPMC_ERR_TYPE		0x48
#define GPMC_CONFIG		0x50
#define GPMC_STATUS		0x54
#define GPMC_PREFETCH_CONFIG1	0x1e0
#define GPMC_PREFETCH_CONFIG2	0x1e4
#define GPMC_PREFETCH_CONTROL	0x1ec
#define GPMC_PREFETCH_STATUS	0x1f0
#define GPMC_ECC_CONFIG		0x1f4
#define GPMC_ECC_CONTROL	0x1f8
#define GPMC_ECC_SIZE_CONFIG	0x1fc
#define GPMC_ECC1_RESULT	0x200
#define GPMC_ECC_BCH_RESULT_0	0x240	/* not available on OMAP2 */
#define GPMC_ECC_BCH_RESULT_1	0x244	/* not available on OMAP2 */
#define GPMC_ECC_BCH_RESULT_2	0x248	/* not available on OMAP2 */
#define GPMC_ECC_BCH_RESULT_3	0x24c	/* not available on OMAP2 */
#define GPMC_ECC_BCH_RESULT_4	0x300	/* not available on OMAP2 */
#define GPMC_ECC_BCH_RESULT_5	0x304	/* not available on OMAP2 */
#define GPMC_ECC_BCH_RESULT_6	0x308	/* not available on OMAP2 */

/* GPMC ECC control settings */
#define GPMC_ECC_CTRL_ECCCLEAR		0x100
#define GPMC_ECC_CTRL_ECCDISABLE	0x000
#define GPMC_ECC_CTRL_ECCREG1		0x001
#define GPMC_ECC_CTRL_ECCREG2		0x002
#define GPMC_ECC_CTRL_ECCREG3		0x003
#define GPMC_ECC_CTRL_ECCREG4		0x004
#define GPMC_ECC_CTRL_ECCREG5		0x005
#define GPMC_ECC_CTRL_ECCREG6		0x006
#define GPMC_ECC_CTRL_ECCREG7		0x007
#define GPMC_ECC_CTRL_ECCREG8		0x008
#define GPMC_ECC_CTRL_ECCREG9		0x009

#define GPMC_CONFIG_LIMITEDADDRESS		BIT(1)

#define GPMC_CONFIG2_CSEXTRADELAY		BIT(7)
#define GPMC_CONFIG3_ADVEXTRADELAY		BIT(7)
#define GPMC_CONFIG4_OEEXTRADELAY		BIT(7)
#define GPMC_CONFIG4_WEEXTRADELAY		BIT(23)
#define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN	BIT(6)
#define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN	BIT(7)

#define GPMC_CS0_OFFSET		0x60
#define GPMC_CS_SIZE		0x30
#define GPMC_BCH_SIZE		0x10

#define GPMC_MEM_END		0x3FFFFFFF

#define GPMC_CHUNK_SHIFT	24		/* 16 MB */
#define GPMC_SECTION_SHIFT	28		/* 128 MB */

#define CS_NUM_SHIFT		24
#define ENABLE_PREFETCH		(0x1 << 7)
#define DMA_MPU_MODE		2

#define GPMC_REVISION_MAJOR(l)		((l >> 4) & 0xf)
#define GPMC_REVISION_MINOR(l)		(l & 0xf)

#define GPMC_HAS_WR_ACCESS		0x1
#define GPMC_HAS_WR_DATA_MUX_BUS	0x2
#define GPMC_HAS_MUX_AAD		0x4

#define GPMC_NR_WAITPINS		4

#define GPMC_CS_CONFIG1		0x00
#define GPMC_CS_CONFIG2		0x04
#define GPMC_CS_CONFIG3		0x08
#define GPMC_CS_CONFIG4		0x0c
#define GPMC_CS_CONFIG5		0x10
#define GPMC_CS_CONFIG6		0x14
#define GPMC_CS_CONFIG7		0x18
#define GPMC_CS_NAND_COMMAND	0x1c
#define GPMC_CS_NAND_ADDRESS	0x20
#define GPMC_CS_NAND_DATA	0x24

/* Control Commands */
#define GPMC_CONFIG_RDY_BSY	0x00000001
#define GPMC_CONFIG_DEV_SIZE	0x00000002
#define GPMC_CONFIG_DEV_TYPE	0x00000003
#define GPMC_SET_IRQ_STATUS	0x00000004

#define GPMC_CONFIG1_WRAPBURST_SUPP	(1 << 31)
#define GPMC_CONFIG1_READMULTIPLE_SUPP	(1 << 30)
#define GPMC_CONFIG1_READTYPE_ASYNC	(0 << 29)
#define GPMC_CONFIG1_READTYPE_SYNC	(1 << 29)
#define GPMC_CONFIG1_WRITEMULTIPLE_SUPP	(1 << 28)
#define GPMC_CONFIG1_WRITETYPE_ASYNC	(0 << 27)
#define GPMC_CONFIG1_WRITETYPE_SYNC	(1 << 27)
#define GPMC_CONFIG1_CLKACTIVATIONTIME(val)	((val & 3) << 25)
#define GPMC_CONFIG1_PAGE_LEN(val)	((val & 3) << 23)
#define GPMC_CONFIG1_WAIT_READ_MON	(1 << 22)
#define GPMC_CONFIG1_WAIT_WRITE_MON	(1 << 21)
#define GPMC_CONFIG1_WAIT_MON_IIME(val)	((val & 3) << 18)
#define GPMC_CONFIG1_WAIT_PIN_SEL(val)	((val & 3) << 16)
#define GPMC_CONFIG1_DEVICESIZE(val)	((val & 3) << 12)
#define GPMC_CONFIG1_DEVICESIZE_16	GPMC_CONFIG1_DEVICESIZE(1)
#define GPMC_CONFIG1_DEVICETYPE(val)	((val & 3) << 10)
#define GPMC_CONFIG1_DEVICETYPE_NOR	GPMC_CONFIG1_DEVICETYPE(0)
#define GPMC_CONFIG1_MUXTYPE(val)	((val & 3) << 8)
#define GPMC_CONFIG1_TIME_PARA_GRAN	(1 << 4)
#define GPMC_CONFIG1_FCLK_DIV(val)	(val & 3)
#define GPMC_CONFIG1_FCLK_DIV2		(GPMC_CONFIG1_FCLK_DIV(1))
#define GPMC_CONFIG1_FCLK_DIV3		(GPMC_CONFIG1_FCLK_DIV(2))
#define GPMC_CONFIG1_FCLK_DIV4		(GPMC_CONFIG1_FCLK_DIV(3))
#define GPMC_CONFIG7_CSVALID		(1 << 6)

#define GPMC_DEVICETYPE_NOR		0
#define GPMC_DEVICETYPE_NAND		2
#define GPMC_CONFIG_WRITEPROTECT	0x00000010
#define WR_RD_PIN_MONITORING		0x00600000

#define GPMC_ENABLE_IRQ		0x0000000d

/* ECC commands */
#define GPMC_ECC_READ		0 /* Reset Hardware ECC for read */
#define GPMC_ECC_WRITE		1 /* Reset Hardware ECC for write */
#define GPMC_ECC_READSYN	2 /* Reset before syndrome is read back */

/*
 * XXX: Only the NAND IRQs have been considered; currently these are the
 * only ones used.
 */
#define GPMC_NR_IRQ		2

struct gpmc_cs_data {
	const char *name;

#define GPMC_CS_RESERVED	(1 << 0)
	u32 flags;

	struct resource mem;
};

struct gpmc_client_irq {
	unsigned		irq;
	u32			bitmask;
};

/* Structure to save gpmc cs context */
struct gpmc_cs_config {
	u32 config1;
	u32 config2;
	u32 config3;
	u32 config4;
	u32 config5;
	u32 config6;
	u32 config7;
	int is_valid;
};

/*
 * Structure to save/restore gpmc context
 * to support core off on OMAP3
 */
struct omap3_gpmc_regs {
	u32 sysconfig;
	u32 irqenable;
	u32 timeout_ctrl;
	u32 config;
	u32 prefetch_config1;
	u32 prefetch_config2;
	u32 prefetch_control;
	struct gpmc_cs_config cs_context[GPMC_CS_NUM];
};

static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
static struct irq_chip gpmc_irq_chip;
static int gpmc_irq_start;

static struct resource gpmc_mem_root;
static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM];
static DEFINE_SPINLOCK(gpmc_mem_lock);
/* Define chip-selects as reserved by default until probe completes */
static unsigned int gpmc_cs_num = GPMC_CS_NUM;
static unsigned int gpmc_nr_waitpins;
static struct device *gpmc_dev;
static int gpmc_irq;
static resource_size_t phys_base, mem_size;
static unsigned gpmc_capability;
static void __iomem *gpmc_base;

static struct clk *gpmc_l3_clk;

static irqreturn_t gpmc_handle_irq(int irq, void *dev);

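/*
 * Register accessors: gpmc_{read,write}_reg() address the global GPMC
 * registers, while gpmc_cs_{read,write}_reg() address the per-chip-select
 * register block at GPMC_CS0_OFFSET + cs * GPMC_CS_SIZE.
 */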
static void gpmc_write_reg(int idx, u32 val)
{
	writel_relaxed(val, gpmc_base + idx);
}

static u32 gpmc_read_reg(int idx)
{
	return readl_relaxed(gpmc_base + idx);
}

void gpmc_cs_write_reg(int cs, int idx, u32 val)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	writel_relaxed(val, reg_addr);
}

static u32 gpmc_cs_read_reg(int cs, int idx)
{
	void __iomem *reg_addr;

	reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
	return readl_relaxed(reg_addr);
}

/* TODO: Add support for gpmc_fck to clock framework and use it */
static unsigned long gpmc_get_fclk_period(void)
{
	unsigned long rate = clk_get_rate(gpmc_l3_clk);

	rate /= 1000;
	rate = 1000000000 / rate;	/* In picoseconds */

	return rate;
}

static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
{
	unsigned long tick_ps;

	/* Calculate in picosecs to yield more exact results */
	tick_ps = gpmc_get_fclk_period();

	return (time_ns * 1000 + tick_ps - 1) / tick_ps;
}

static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
{
	unsigned long tick_ps;

	/* Calculate in picosecs to yield more exact results */
	tick_ps = gpmc_get_fclk_period();

	return (time_ps + tick_ps - 1) / tick_ps;
}

unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	return ticks * gpmc_get_fclk_period() / 1000;
}

static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
{
	return ticks * gpmc_get_fclk_period();
}

static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
{
	unsigned long ticks = gpmc_ps_to_ticks(time_ps);

	return ticks * gpmc_get_fclk_period();
}

static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, reg);
	if (value)
		l |= mask;
	else
		l &= ~mask;
	gpmc_cs_write_reg(cs, reg, l);
}

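/*
 * Program the boolean chip-select options (extra signal delays, timing
 * granularity and cycle2cycle enables) from a gpmc_bool_timings structure.
 */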
static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
{
	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
			   GPMC_CONFIG1_TIME_PARA_GRAN,
			   p->time_para_granularity);
	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
			   GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
			   GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
			   GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
			   GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
			   GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
			   p->cycle2cyclesamecsen);
	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
			   GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN,
			   p->cycle2cyclediffcsen);
}

#ifdef DEBUG
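/*
 * Decode bits st_bit..end_bit of a chip-select register and print them as
 * the matching device-tree property, either as a raw value or converted to
 * nanoseconds together with its rounding window.
 */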
static int get_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       bool raw, bool noval, int shift,
			       const char *name)
{
	u32 l;
	int nr_bits, max_value, mask;

	l = gpmc_cs_read_reg(cs, reg);
	nr_bits = end_bit - st_bit + 1;
	max_value = (1 << nr_bits) - 1;
	mask = max_value << st_bit;
	l = (l & mask) >> st_bit;
	if (shift)
		l = (shift << l);
	if (noval && (l == 0))
		return 0;
	if (!raw) {
		unsigned int time_ns_min, time_ns, time_ns_max;

		time_ns_min = gpmc_ticks_to_ns(l ? l - 1 : 0);
		time_ns = gpmc_ticks_to_ns(l);
		time_ns_max = gpmc_ticks_to_ns(l + 1 > max_value ?
					       max_value : l + 1);
		pr_info("gpmc,%s = <%u> (%u - %u ns, %i ticks)\n",
			name, time_ns, time_ns_min, time_ns_max, l);
	} else {
		pr_info("gpmc,%s = <%u>\n", name, l);
	}

	return l;
}

#define GPMC_PRINT_CONFIG(cs, config) \
	pr_info("cs%i %s: 0x%08x\n", cs, #config, \
		gpmc_cs_read_reg(cs, config))
#define GPMC_GET_RAW(reg, st, end, field) \
	get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 0, 0, field)
#define GPMC_GET_RAW_BOOL(reg, st, end, field) \
	get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, 0, field)
#define GPMC_GET_RAW_SHIFT(reg, st, end, shift, field) \
	get_gpmc_timing_reg(cs, (reg), (st), (end), 1, 1, (shift), field)
#define GPMC_GET_TICKS(reg, st, end, field) \
	get_gpmc_timing_reg(cs, (reg), (st), (end), 0, 0, 0, field)

static void gpmc_show_regs(int cs, const char *desc)
{
	pr_info("gpmc cs%i %s:\n", cs, desc);
	GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG1);
	GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG2);
	GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG3);
	GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG4);
	GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG5);
	GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG6);
}

/*
 * Note that gpmc,wait-pin handling wrongly assumes bit 8 is available,
 * see commit c9fb809.
 */
static void gpmc_cs_show_timings(int cs, const char *desc)
{
	gpmc_show_regs(cs, desc);

	pr_info("gpmc cs%i access configuration:\n", cs);
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
	GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
	GPMC_GET_RAW(GPMC_CS_CONFIG1, 12, 13, "device-width");
	GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
	GPMC_GET_RAW_SHIFT(GPMC_CS_CONFIG1, 23, 24, 4, "burst-length");
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 27, 27, "sync-write");
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 28, 28, "burst-write");
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 29, 29, "gpmc,sync-read");
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 30, 30, "burst-read");
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 31, 31, "burst-wrap");

	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG2, 7, 7, "cs-extra-delay");

	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG3, 7, 7, "adv-extra-delay");

	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 23, 23, "we-extra-delay");
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 7, 7, "oe-extra-delay");

	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 7, 7, "cycle2cycle-samecsen");
	GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 6, 6, "cycle2cycle-diffcsen");

	pr_info("gpmc cs%i timings configuration:\n", cs);
	GPMC_GET_TICKS(GPMC_CS_CONFIG2, 0, 3, "cs-on-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG2, 8, 12, "cs-rd-off-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG2, 16, 20, "cs-wr-off-ns");

	GPMC_GET_TICKS(GPMC_CS_CONFIG3, 0, 3, "adv-on-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG3, 8, 12, "adv-rd-off-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns");

	GPMC_GET_TICKS(GPMC_CS_CONFIG4, 0, 3, "oe-on-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG4, 8, 12, "oe-off-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns");

	GPMC_GET_TICKS(GPMC_CS_CONFIG5, 0, 4, "rd-cycle-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG5, 8, 12, "wr-cycle-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG5, 16, 20, "access-ns");

	GPMC_GET_TICKS(GPMC_CS_CONFIG5, 24, 27, "page-burst-access-ns");

	GPMC_GET_TICKS(GPMC_CS_CONFIG6, 0, 3, "bus-turnaround-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG6, 8, 11, "cycle2cycle-delay-ns");

	GPMC_GET_TICKS(GPMC_CS_CONFIG1, 18, 19, "wait-monitoring-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG1, 25, 26, "clk-activation-ns");

	GPMC_GET_TICKS(GPMC_CS_CONFIG6, 16, 19, "wr-data-mux-bus-ns");
	GPMC_GET_TICKS(GPMC_CS_CONFIG6, 24, 28, "wr-access-ns");
}
#else
static inline void gpmc_cs_show_timings(int cs, const char *desc)
{
}
#endif

static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time, const char *name)
{
	u32 l;
	int ticks, mask, nr_bits;

	if (time == 0)
		ticks = 0;
	else
		ticks = gpmc_ns_to_ticks(time);
	nr_bits = end_bit - st_bit + 1;
	mask = (1 << nr_bits) - 1;

	if (ticks > mask) {
		pr_err("%s: GPMC error! CS%d: %s: %d ns, %d ticks > %d\n",
		       __func__, cs, name, time, ticks, mask);

		return -1;
	}

	l = gpmc_cs_read_reg(cs, reg);
#ifdef DEBUG
	printk(KERN_INFO
		"GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
	       cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
			(l >> st_bit) & mask, time);
#endif
	l &= ~(mask << st_bit);
	l |= ticks << st_bit;
	gpmc_cs_write_reg(cs, reg, l);

	return 0;
}

#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1

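/*
 * Compute the GPMC_CLK divider needed to generate a clock of period
 * sync_clk (in picoseconds) from the GPMC functional clock. Returns -1 if
 * more than a divide-by-4 would be required.
 */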
int gpmc_calc_divider(unsigned int sync_clk)
{
	int div;
	u32 l;

	l = sync_clk + (gpmc_get_fclk_period() - 1);
	div = l / gpmc_get_fclk_period();
	if (div > 4)
		return -1;
	if (div <= 0)
		div = 1;

	return div;
}

int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
{
	int div;
	u32 l;

	gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings");
	div = gpmc_calc_divider(t->sync_clk);
	if (div < 0)
		return div;

	GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);

	GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
	GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);

	GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring);
	GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation);

	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
	if (gpmc_capability & GPMC_HAS_WR_ACCESS)
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);

	/* caller is expected to have initialized CONFIG1 to cover
	 * at least sync vs async
	 */
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
	if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
				cs, (div * gpmc_get_fclk_period()) / 1000, div);
#endif
		l &= ~0x03;
		l |= (div - 1);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
	}

	gpmc_cs_bool_timings(cs, &t->bool_timings);
	gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings");

	return 0;
}

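/*
 * Program the chip-select base address and size mask in CONFIG7. The base
 * must be aligned to the size, and the size is expected to be a power of
 * two of at least 16 MB (see gpmc_mem_align()).
 */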
static int gpmc_cs_set_memconf(int cs, u32 base, u32 size)
{
	u32 l;
	u32 mask;

	/*
	 * Ensure that base address is aligned on a
	 * boundary equal to or greater than size.
	 */
	if (base & (size - 1))
		return -EINVAL;

	mask = (1 << GPMC_SECTION_SHIFT) - size;
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l &= ~0x3f;
	l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
	l &= ~(0x0f << 8);
	l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
	l |= GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);

	return 0;
}

static void gpmc_cs_enable_mem(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l |= GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
}

static void gpmc_cs_disable_mem(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l &= ~GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
}

static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
{
	u32 l;
	u32 mask;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	*base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
	mask = (l >> 8) & 0x0f;
	*size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
}

static int gpmc_cs_mem_enabled(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	return l & GPMC_CONFIG7_CSVALID;
}

static void gpmc_cs_set_reserved(int cs, int reserved)
{
	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];

	if (reserved)
		gpmc->flags |= GPMC_CS_RESERVED;
	else
		gpmc->flags &= ~GPMC_CS_RESERVED;
}

static bool gpmc_cs_reserved(int cs)
{
	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];

	return gpmc->flags & GPMC_CS_RESERVED;
}

static void gpmc_cs_set_name(int cs, const char *name)
{
	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];

	gpmc->name = name;
}

const char *gpmc_cs_get_name(int cs)
{
	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];

	return gpmc->name;
}

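/*
 * Round a mapping size up to the GPMC granularity: the result is a power
 * of two, no smaller than one 16 MB chunk.
 */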
static unsigned long gpmc_mem_align(unsigned long size)
{
	int order;

	size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
	order = GPMC_CHUNK_SHIFT - 1;
	do {
		size >>= 1;
		order++;
	} while (size);
	size = 1 << order;
	return size;
}

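/*
 * Claim [base, base + size) for a chip-select in the GPMC address-space
 * resource tree so that overlapping mappings are rejected.
 */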
static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
{
	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
	struct resource *res = &gpmc->mem;
	int r;

	size = gpmc_mem_align(size);
	spin_lock(&gpmc_mem_lock);
	res->start = base;
	res->end = base + size - 1;
	r = request_resource(&gpmc_mem_root, res);
	spin_unlock(&gpmc_mem_lock);

	return r;
}

static int gpmc_cs_delete_mem(int cs)
{
	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
	struct resource *res = &gpmc->mem;
	int r;

	spin_lock(&gpmc_mem_lock);
	r = release_resource(res);
	res->start = 0;
	res->end = 0;
	spin_unlock(&gpmc_mem_lock);

	return r;
}

/**
 * gpmc_cs_remap - remaps a chip-select physical base address
 * @cs: chip-select to remap
 * @base: physical base address to re-map chip-select to
 *
 * Re-maps a chip-select to a new physical base address specified by
 * "base". Returns 0 on success and appropriate negative error code
 * on failure.
 */
static int gpmc_cs_remap(int cs, u32 base)
{
	int ret;
	u32 old_base, size;

	if (cs >= gpmc_cs_num) {
		pr_err("%s: requested chip-select is disabled\n", __func__);
		return -ENODEV;
	}

	/*
	 * Make sure we ignore any device offsets from the GPMC partition
	 * allocated for the chip select and that the new base conforms
	 * to the GPMC 16MB minimum granularity.
	 */
	base &= ~(SZ_16M - 1);

	gpmc_cs_get_memconf(cs, &old_base, &size);
	if (base == old_base)
		return 0;

	ret = gpmc_cs_delete_mem(cs);
	if (ret < 0)
		return ret;

	ret = gpmc_cs_insert_mem(cs, base, size);
	if (ret < 0)
		return ret;

	ret = gpmc_cs_set_memconf(cs, base, size);

	return ret;
}

int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
{
	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
	struct resource *res = &gpmc->mem;
	int r = -1;

	if (cs >= gpmc_cs_num) {
		pr_err("%s: requested chip-select is disabled\n", __func__);
		return -ENODEV;
	}
	size = gpmc_mem_align(size);
	if (size > (1 << GPMC_SECTION_SHIFT))
		return -ENOMEM;

	spin_lock(&gpmc_mem_lock);
	if (gpmc_cs_reserved(cs)) {
		r = -EBUSY;
		goto out;
	}
	if (gpmc_cs_mem_enabled(cs))
		r = adjust_resource(res, res->start & ~(size - 1), size);
	if (r < 0)
		r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
				      size, NULL, NULL);
	if (r < 0)
		goto out;

	/* Disable CS while changing base address and size mask */
	gpmc_cs_disable_mem(cs);

	r = gpmc_cs_set_memconf(cs, res->start, resource_size(res));
	if (r < 0) {
		release_resource(res);
		goto out;
	}

	/* Enable CS */
	gpmc_cs_enable_mem(cs);
	*base = res->start;
	gpmc_cs_set_reserved(cs, 1);
out:
	spin_unlock(&gpmc_mem_lock);
	return r;
}
EXPORT_SYMBOL(gpmc_cs_request);

void gpmc_cs_free(int cs)
{
	struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
	struct resource *res = &gpmc->mem;

	spin_lock(&gpmc_mem_lock);
	if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
		printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
		BUG();
		spin_unlock(&gpmc_mem_lock);
		return;
	}
	gpmc_cs_disable_mem(cs);
	if (res->flags)
		release_resource(res);
	gpmc_cs_set_reserved(cs, 0);
	spin_unlock(&gpmc_mem_lock);
}
EXPORT_SYMBOL(gpmc_cs_free);

/**
 * gpmc_configure - write request to configure gpmc
 * @cmd: command type
 * @wval: value to write
 * @return status of the operation
 */
int gpmc_configure(int cmd, int wval)
{
	u32 regval;

	switch (cmd) {
	case GPMC_ENABLE_IRQ:
		gpmc_write_reg(GPMC_IRQENABLE, wval);
		break;

	case GPMC_SET_IRQ_STATUS:
		gpmc_write_reg(GPMC_IRQSTATUS, wval);
		break;

	case GPMC_CONFIG_WP:
		regval = gpmc_read_reg(GPMC_CONFIG);
		if (wval)
			regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
		else
			regval |= GPMC_CONFIG_WRITEPROTECT;  /* WP is OFF */
		gpmc_write_reg(GPMC_CONFIG, regval);
		break;

	default:
		pr_err("%s: command not supported\n", __func__);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(gpmc_configure);

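/*
 * Fill @reg with the iomem addresses of the NAND command/address/data,
 * prefetch and ECC registers for chip-select @cs, for use by the OMAP2
 * NAND driver.
 */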
void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
{
	int i;

	reg->gpmc_status = gpmc_base + GPMC_STATUS;
	reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
				GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
	reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
				GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
	reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
				GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
	reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
	reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
	reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
	reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
	reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
	reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
	reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
	reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;

	for (i = 0; i < GPMC_BCH_NUM_REMAINDER; i++) {
		reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 +
					   GPMC_BCH_SIZE * i;
		reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 +
					   GPMC_BCH_SIZE * i;
		reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 +
					   GPMC_BCH_SIZE * i;
		reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 +
					   GPMC_BCH_SIZE * i;
		reg->gpmc_bch_result4[i] = gpmc_base + GPMC_ECC_BCH_RESULT_4 +
					   i * GPMC_BCH_SIZE;
		reg->gpmc_bch_result5[i] = gpmc_base + GPMC_ECC_BCH_RESULT_5 +
					   i * GPMC_BCH_SIZE;
		reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 +
					   i * GPMC_BCH_SIZE;
	}
}

int gpmc_get_client_irq(unsigned irq_config)
{
	int i;

	if (hweight32(irq_config) > 1)
		return 0;

	for (i = 0; i < GPMC_NR_IRQ; i++)
		if (gpmc_client_irq[i].bitmask & irq_config)
			return gpmc_client_irq[i].irq;

	return 0;
}

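/* Set or clear the GPMC_IRQENABLE bit backing a client's virtual IRQ. */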
static int gpmc_irq_endis(unsigned irq, bool endis)
{
	int i;
	u32 regval;

	for (i = 0; i < GPMC_NR_IRQ; i++)
		if (irq == gpmc_client_irq[i].irq) {
			regval = gpmc_read_reg(GPMC_IRQENABLE);
			if (endis)
				regval |= gpmc_client_irq[i].bitmask;
			else
				regval &= ~gpmc_client_irq[i].bitmask;
			gpmc_write_reg(GPMC_IRQENABLE, regval);
			break;
		}

	return 0;
}

static void gpmc_irq_disable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, false);
}

static void gpmc_irq_enable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, true);
}

static void gpmc_irq_noop(struct irq_data *data) { }

static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }

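/*
 * Allocate GPMC_NR_IRQ virtual interrupts, hook them up to the GPMC
 * irq_chip, mask and clear all GPMC interrupt sources and install the
 * main interrupt handler.
 */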
static int gpmc_setup_irq(void)
{
	int i;
	u32 regval;

	if (!gpmc_irq)
		return -EINVAL;

	gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
	if (gpmc_irq_start < 0) {
		pr_err("irq_alloc_descs failed\n");
		return gpmc_irq_start;
	}

	gpmc_irq_chip.name = "gpmc";
	gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
	gpmc_irq_chip.irq_enable = gpmc_irq_enable;
	gpmc_irq_chip.irq_disable = gpmc_irq_disable;
	gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
	gpmc_irq_chip.irq_ack = gpmc_irq_noop;
	gpmc_irq_chip.irq_mask = gpmc_irq_noop;
	gpmc_irq_chip.irq_unmask = gpmc_irq_noop;

	gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
	gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;

	for (i = 0; i < GPMC_NR_IRQ; i++) {
		gpmc_client_irq[i].irq = gpmc_irq_start + i;
		irq_set_chip_and_handler(gpmc_client_irq[i].irq,
					&gpmc_irq_chip, handle_simple_irq);
		set_irq_flags(gpmc_client_irq[i].irq,
				IRQF_VALID | IRQF_NOAUTOEN);
	}

	/* Disable interrupts */
	gpmc_write_reg(GPMC_IRQENABLE, 0);

	/* clear interrupts */
	regval = gpmc_read_reg(GPMC_IRQSTATUS);
	gpmc_write_reg(GPMC_IRQSTATUS, regval);

	return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
}

static int gpmc_free_irq(void)
{
	int i;

	if (gpmc_irq)
		free_irq(gpmc_irq, NULL);

	for (i = 0; i < GPMC_NR_IRQ; i++) {
		irq_set_handler(gpmc_client_irq[i].irq, NULL);
		irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
		irq_modify_status(gpmc_client_irq[i].irq, 0, 0);
	}

	irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);

	return 0;
}

static void gpmc_mem_exit(void)
{
	int cs;

	for (cs = 0; cs < gpmc_cs_num; cs++) {
		if (!gpmc_cs_mem_enabled(cs))
			continue;
		gpmc_cs_delete_mem(cs);
	}

}

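/*
 * Set up the GPMC address-space resource tree and reserve any chip-select
 * mappings already enabled by the bootloader; mappings that conflict are
 * disabled.
 */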
static void gpmc_mem_init(void)
{
	int cs;

	/*
	 * The first 1MB of GPMC address space is typically mapped to
	 * the internal ROM. Never allocate the first page, to
	 * facilitate bug detection, even if we didn't boot from ROM.
	 */
	gpmc_mem_root.start = SZ_1M;
	gpmc_mem_root.end = GPMC_MEM_END;

	/* Reserve all regions that have been set up by the bootloader */
	for (cs = 0; cs < gpmc_cs_num; cs++) {
		u32 base, size;

		if (!gpmc_cs_mem_enabled(cs))
			continue;
		gpmc_cs_get_memconf(cs, &base, &size);
		if (gpmc_cs_insert_mem(cs, base, size)) {
			pr_warn("%s: disabling cs %d mapped at 0x%x-0x%x\n",
				__func__, cs, base, base + size);
			gpmc_cs_disable_mem(cs);
		}
	}
}

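/* Round time_ps up to a whole number of sync-clock (div * fclk) periods. */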
static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
{
	u32 temp;
	int div;

	div = gpmc_calc_divider(sync_clk);
	temp = gpmc_ps_to_ticks(time_ps);
	temp = (temp + div - 1) / div;
	return gpmc_ticks_to_ps(temp * div);
}

/* XXX: can the cycles be avoided ? */
static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
				       struct gpmc_device_timings *dev_t,
				       bool mux)
{
	u32 temp;

	/* adv_rd_off */
	temp = dev_t->t_avdp_r;
	/* XXX: mux check required ? */
	if (mux) {
		/* XXX: t_avdp not to be required for sync, only added for tusb
		 * this indirectly necessitates requirement of t_avdp_r and
		 * t_avdp_w instead of having a single t_avdp
		 */
		temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	}
	gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);

	/* oe_on */
	temp = dev_t->t_oeasu; /* XXX: remove this ? */
	if (mux) {
		temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
		temp = max_t(u32, temp, gpmc_t->adv_rd_off +
			     gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
	}
	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);

	/* access */
	/* XXX: any scope for improvement ?, by combining oe_on
	 * and clk_activation, need to check whether
	 * access = clk_activation + round to sync clk ?
	 */
	temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
	temp += gpmc_t->clk_activation;
	if (dev_t->cyc_oe)
		temp = max_t(u32, temp, gpmc_t->oe_on +
			     gpmc_ticks_to_ps(dev_t->cyc_oe));
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle */
	temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
	temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
	       gpmc_t->access;
	/* XXX: barter t_ce_rdyz with t_cez_r ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}

static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t,
					bool mux)
{
	u32 temp;

	/* adv_wr_off */
	temp = dev_t->t_avdp_w;
	if (mux) {
		temp = max_t(u32, temp,
			     gpmc_t->clk_activation + dev_t->t_avdh);
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	}
	gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

	/* wr_data_mux_bus */
	temp = max_t(u32, dev_t->t_weasu,
		     gpmc_t->clk_activation + dev_t->t_rdyo);
	/* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
	 * and in that case remember to handle we_on properly
	 */
	if (mux) {
		temp = max_t(u32, temp,
			     gpmc_t->adv_wr_off + dev_t->t_aavdh);
		temp = max_t(u32, temp, gpmc_t->adv_wr_off +
			     gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
	}
	gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

	/* we_on */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
	else
		gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

	/* wr_access */
	/* XXX: gpmc_capability check reqd ? , even if not, will not harm */
	gpmc_t->wr_access = gpmc_t->access;

	/* we_off */
	temp = gpmc_t->we_on + dev_t->t_wpl;
	temp = max_t(u32, temp,
		     gpmc_t->wr_access + gpmc_ticks_to_ps(1));
	temp = max_t(u32, temp,
		     gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
	gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

	gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
						   dev_t->t_wph);

	/* wr_cycle */
	temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
	temp += gpmc_t->wr_access;
	/* XXX: barter t_ce_rdyz with t_cez_w ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp,
			     gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
	gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}

static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t,
					bool mux)
{
	u32 temp;

	/* adv_rd_off */
	temp = dev_t->t_avdp_r;
	if (mux)
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);

	/* oe_on */
	temp = dev_t->t_oeasu;
	if (mux)
		temp = max_t(u32, temp,
			     gpmc_t->adv_rd_off + dev_t->t_aavdh);
	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);

	/* access */
	temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
		     gpmc_t->oe_on + dev_t->t_oe);
	temp = max_t(u32, temp,
		     gpmc_t->cs_on + dev_t->t_ce);
	temp = max_t(u32, temp,
		     gpmc_t->adv_on + dev_t->t_aa);
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle */
	temp = max_t(u32, dev_t->t_rd_cycle,
		     gpmc_t->cs_rd_off + dev_t->t_cez_r);
	temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}

static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
					 struct gpmc_device_timings *dev_t,
					 bool mux)
{
	u32 temp;

	/* adv_wr_off */
	temp = dev_t->t_avdp_w;
	if (mux)
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

	/* wr_data_mux_bus */
	temp = dev_t->t_weasu;
	if (mux) {
		temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
		temp = max_t(u32, temp, gpmc_t->adv_wr_off +
			     gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
	}
	gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

	/* we_on */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
	else
		gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

	/* we_off */
	temp = gpmc_t->we_on + dev_t->t_wpl;
	gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

	gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
						   dev_t->t_wph);

	/* wr_cycle */
	temp = max_t(u32, dev_t->t_wr_cycle,
		     gpmc_t->cs_wr_off + dev_t->t_cez_w);
	gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}

static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
					 struct gpmc_device_timings *dev_t)
{
	u32 temp;

	gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
			   gpmc_get_fclk_period();

	gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
					dev_t->t_bacc,
					gpmc_t->sync_clk);

	temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
	gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);

	if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
		return 0;

	if (dev_t->ce_xdelay)
		gpmc_t->bool_timings.cs_extra_delay = true;
	if (dev_t->avd_xdelay)
		gpmc_t->bool_timings.adv_extra_delay = true;
	if (dev_t->oe_xdelay)
		gpmc_t->bool_timings.oe_extra_delay = true;
	if (dev_t->we_xdelay)
		gpmc_t->bool_timings.we_extra_delay = true;

	return 0;
}

static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
				    struct gpmc_device_timings *dev_t,
				    bool sync)
{
	u32 temp;

	/* cs_on */
	gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);

	/* adv_on */
	temp = dev_t->t_avdasu;
	if (dev_t->t_ce_avd)
		temp = max_t(u32, temp,
			     gpmc_t->cs_on + dev_t->t_ce_avd);
	gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);

	if (sync)
		gpmc_calc_sync_common_timings(gpmc_t, dev_t);

	return 0;
}

/* TODO: remove this function once all peripherals are confirmed to
 * work with generic timing. Simultaneously gpmc_cs_set_timings()
 * has to be modified to handle timings in ps instead of ns
 */
static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
{
	t->cs_on /= 1000;
	t->cs_rd_off /= 1000;
	t->cs_wr_off /= 1000;
	t->adv_on /= 1000;
	t->adv_rd_off /= 1000;
	t->adv_wr_off /= 1000;
	t->we_on /= 1000;
	t->we_off /= 1000;
	t->oe_on /= 1000;
	t->oe_off /= 1000;
	t->page_burst_access /= 1000;
	t->access /= 1000;
	t->rd_cycle /= 1000;
	t->wr_cycle /= 1000;
	t->bus_turnaround /= 1000;
	t->cycle2cycle_delay /= 1000;
	t->wait_monitoring /= 1000;
	t->clk_activation /= 1000;
	t->wr_access /= 1000;
	t->wr_data_mux_bus /= 1000;
}

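/*
 * Translate device (datasheet) timings into GPMC register timings, using
 * the sync/async and multiplexed variants selected by @gpmc_s.
 */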
1337int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
Jon Hunterc3be5b42013-02-21 13:46:22 -06001338 struct gpmc_settings *gpmc_s,
1339 struct gpmc_device_timings *dev_t)
Afzal Mohammed246da262012-08-02 20:02:10 +05301340{
Jon Hunterc3be5b42013-02-21 13:46:22 -06001341 bool mux = false, sync = false;
1342
1343 if (gpmc_s) {
1344 mux = gpmc_s->mux_add_data ? true : false;
1345 sync = (gpmc_s->sync_read || gpmc_s->sync_write);
1346 }
1347
Afzal Mohammed246da262012-08-02 20:02:10 +05301348 memset(gpmc_t, 0, sizeof(*gpmc_t));
1349
Jon Hunterc3be5b42013-02-21 13:46:22 -06001350 gpmc_calc_common_timings(gpmc_t, dev_t, sync);
Afzal Mohammed246da262012-08-02 20:02:10 +05301351
Jon Hunterc3be5b42013-02-21 13:46:22 -06001352 if (gpmc_s && gpmc_s->sync_read)
1353 gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux);
Afzal Mohammed246da262012-08-02 20:02:10 +05301354 else
Jon Hunterc3be5b42013-02-21 13:46:22 -06001355 gpmc_calc_async_read_timings(gpmc_t, dev_t, mux);
Afzal Mohammed246da262012-08-02 20:02:10 +05301356
Jon Hunterc3be5b42013-02-21 13:46:22 -06001357 if (gpmc_s && gpmc_s->sync_write)
1358 gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux);
Afzal Mohammed246da262012-08-02 20:02:10 +05301359 else
Jon Hunterc3be5b42013-02-21 13:46:22 -06001360 gpmc_calc_async_write_timings(gpmc_t, dev_t, mux);
Afzal Mohammed246da262012-08-02 20:02:10 +05301361
1362 /* TODO: remove, see function definition */
1363 gpmc_convert_ps_to_ns(gpmc_t);
1364
1365 return 0;
1366}
1367
Jon Hunteraa8d4762013-02-21 15:25:23 -06001368/**
1369 * gpmc_cs_program_settings - programs non-timing related settings
1370 * @cs: GPMC chip-select to program
1371 * @p: pointer to GPMC settings structure
1372 *
1373 * Programs non-timing related settings for a GPMC chip-select, such as
1374 * bus-width, burst configuration, etc. Function should be called once
1375 * for each chip-select that is being used and must be called before
1376 * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1
1377 * register will be initialised to zero by this function. Returns 0 on
1378 * success and appropriate negative error code on failure.
1379 */
1380int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
1381{
1382 u32 config1;
1383
1384 if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) {
1385 pr_err("%s: invalid width %d!", __func__, p->device_width);
1386 return -EINVAL;
1387 }
1388
1389 /* Address-data multiplexing not supported for NAND devices */
1390 if (p->device_nand && p->mux_add_data) {
1391 pr_err("%s: invalid configuration!\n", __func__);
1392 return -EINVAL;
1393 }
1394
1395 if ((p->mux_add_data > GPMC_MUX_AD) ||
1396 ((p->mux_add_data == GPMC_MUX_AAD) &&
1397 !(gpmc_capability & GPMC_HAS_MUX_AAD))) {
1398 pr_err("%s: invalid multiplex configuration!\n", __func__);
1399 return -EINVAL;
1400 }
1401
1402 /* Page/burst mode supports lengths of 4, 8 and 16 bytes */
1403 if (p->burst_read || p->burst_write) {
1404 switch (p->burst_len) {
1405 case GPMC_BURST_4:
1406 case GPMC_BURST_8:
1407 case GPMC_BURST_16:
1408 break;
1409 default:
1410 pr_err("%s: invalid page/burst-length (%d)\n",
1411 __func__, p->burst_len);
1412 return -EINVAL;
1413 }
1414 }
1415
Roger Quadros2b540572014-09-02 16:57:06 +03001416 if (p->wait_pin > gpmc_nr_waitpins) {
Jon Hunteraa8d4762013-02-21 15:25:23 -06001417 pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
1418 return -EINVAL;
1419 }
1420
1421 config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1));
1422
1423 if (p->sync_read)
1424 config1 |= GPMC_CONFIG1_READTYPE_SYNC;
1425 if (p->sync_write)
1426 config1 |= GPMC_CONFIG1_WRITETYPE_SYNC;
1427 if (p->wait_on_read)
1428 config1 |= GPMC_CONFIG1_WAIT_READ_MON;
1429 if (p->wait_on_write)
1430 config1 |= GPMC_CONFIG1_WAIT_WRITE_MON;
1431 if (p->wait_on_read || p->wait_on_write)
1432 config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin);
1433 if (p->device_nand)
1434 config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND);
1435 if (p->mux_add_data)
1436 config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data);
1437 if (p->burst_read)
1438 config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP;
1439 if (p->burst_write)
1440 config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP;
1441 if (p->burst_read || p->burst_write) {
1442 config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3);
1443 config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0;
1444 }
1445
1446 gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1);
1447
1448 return 0;
1449}
1450
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001451#ifdef CONFIG_OF
Uwe Kleine-König31957602014-09-10 10:26:17 +02001452static const struct of_device_id gpmc_dt_ids[] = {
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001453 { .compatible = "ti,omap2420-gpmc" },
1454 { .compatible = "ti,omap2430-gpmc" },
1455 { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
1456 { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
1457 { .compatible = "ti,am3352-gpmc" }, /* am335x devices */
1458 { }
1459};
1460MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
1461
Jon Hunter8c8a77712013-02-20 15:53:12 -06001462/**
1463 * gpmc_read_settings_dt - read gpmc settings from device-tree
1464 * @np: pointer to device-tree node for a gpmc child device
1465 * @p: pointer to gpmc settings structure
1466 *
1467 * Reads the GPMC settings for a GPMC child device from device-tree and
1468 * stores them in the GPMC settings structure passed. The GPMC settings
1469 * structure is initialised to zero by this function and so any
1470 * previously stored settings will be cleared.
1471 */
1472void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
1473{
1474 memset(p, 0, sizeof(struct gpmc_settings));
1475
1476 p->sync_read = of_property_read_bool(np, "gpmc,sync-read");
1477 p->sync_write = of_property_read_bool(np, "gpmc,sync-write");
Jon Hunter8c8a77712013-02-20 15:53:12 -06001478 of_property_read_u32(np, "gpmc,device-width", &p->device_width);
1479 of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data);
1480
1481 if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) {
1482 p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap");
1483 p->burst_read = of_property_read_bool(np, "gpmc,burst-read");
1484 p->burst_write = of_property_read_bool(np, "gpmc,burst-write");
1485 if (!p->burst_read && !p->burst_write)
1486 pr_warn("%s: page/burst-length set but not used!\n",
1487 __func__);
1488 }
1489
1490 if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) {
1491 p->wait_on_read = of_property_read_bool(np,
1492 "gpmc,wait-on-read");
1493 p->wait_on_write = of_property_read_bool(np,
1494 "gpmc,wait-on-write");
1495 if (!p->wait_on_read && !p->wait_on_write)
Roger Quadros2b540572014-09-02 16:57:06 +03001496 pr_debug("%s: rd/wr wait monitoring not enabled!\n",
1497 __func__);
Jon Hunter8c8a77712013-02-20 15:53:12 -06001498 }
1499}
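
/*
 * Illustrative device-tree fragment (placeholder node name and values,
 * not taken from a real board) showing the child-node properties that
 * gpmc_read_settings_dt() above parses; gpmc,device-width is the device
 * width in bytes and gpmc,mux-add-data selects AAD (1) or AD (2)
 * multiplexing:
 *
 *	nor@0,0 {
 *		gpmc,device-width = <2>;
 *		gpmc,mux-add-data = <2>;
 *		gpmc,sync-read;
 *		gpmc,burst-length = <16>;
 *		gpmc,burst-read;
 *		gpmc,burst-wrap;
 *		gpmc,wait-pin = <0>;
 *		gpmc,wait-on-read;
 *	};
 */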
1500
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001501static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
1502 struct gpmc_timings *gpmc_t)
1503{
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001504 struct gpmc_bool_timings *p;
1505
1506 if (!np || !gpmc_t)
1507 return;
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001508
1509 memset(gpmc_t, 0, sizeof(*gpmc_t));
1510
 1511	/* minimum clock period for synchronous mode */
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001512 of_property_read_u32(np, "gpmc,sync-clk-ps", &gpmc_t->sync_clk);
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001513
 1514	/* chip select timings */
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001515 of_property_read_u32(np, "gpmc,cs-on-ns", &gpmc_t->cs_on);
1516 of_property_read_u32(np, "gpmc,cs-rd-off-ns", &gpmc_t->cs_rd_off);
1517 of_property_read_u32(np, "gpmc,cs-wr-off-ns", &gpmc_t->cs_wr_off);
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001518
1519 /* ADV signal timings */
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001520 of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on);
1521 of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off);
1522 of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off);
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001523
1524 /* WE signal timings */
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001525 of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on);
1526 of_property_read_u32(np, "gpmc,we-off-ns", &gpmc_t->we_off);
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001527
1528 /* OE signal timings */
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001529 of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on);
1530 of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off);
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001531
1532 /* access and cycle timings */
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001533 of_property_read_u32(np, "gpmc,page-burst-access-ns",
1534 &gpmc_t->page_burst_access);
1535 of_property_read_u32(np, "gpmc,access-ns", &gpmc_t->access);
1536 of_property_read_u32(np, "gpmc,rd-cycle-ns", &gpmc_t->rd_cycle);
1537 of_property_read_u32(np, "gpmc,wr-cycle-ns", &gpmc_t->wr_cycle);
1538 of_property_read_u32(np, "gpmc,bus-turnaround-ns",
1539 &gpmc_t->bus_turnaround);
1540 of_property_read_u32(np, "gpmc,cycle2cycle-delay-ns",
1541 &gpmc_t->cycle2cycle_delay);
1542 of_property_read_u32(np, "gpmc,wait-monitoring-ns",
1543 &gpmc_t->wait_monitoring);
1544 of_property_read_u32(np, "gpmc,clk-activation-ns",
1545 &gpmc_t->clk_activation);
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001546
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001547 /* only applicable to OMAP3+ */
1548 of_property_read_u32(np, "gpmc,wr-access-ns", &gpmc_t->wr_access);
1549 of_property_read_u32(np, "gpmc,wr-data-mux-bus-ns",
1550 &gpmc_t->wr_data_mux_bus);
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001551
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001552 /* bool timing parameters */
1553 p = &gpmc_t->bool_timings;
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001554
Jon Hunterd36b4cd2013-02-21 18:51:27 -06001555 p->cycle2cyclediffcsen =
1556 of_property_read_bool(np, "gpmc,cycle2cycle-diffcsen");
1557 p->cycle2cyclesamecsen =
1558 of_property_read_bool(np, "gpmc,cycle2cycle-samecsen");
1559 p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay");
1560 p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay");
1561 p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay");
1562 p->cs_extra_delay = of_property_read_bool(np, "gpmc,cs-extra-delay");
1563 p->time_para_granularity =
1564 of_property_read_bool(np, "gpmc,time-para-granularity");
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001565}
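
/*
 * Note on units: every numeric timing property read above is specified
 * in nanoseconds except "gpmc,sync-clk-ps", which is in picoseconds.
 * An illustrative fragment for a simple asynchronous read (the values
 * are placeholders) might look like:
 *
 *	gpmc,cs-on-ns = <0>;
 *	gpmc,cs-rd-off-ns = <160>;
 *	gpmc,oe-on-ns = <10>;
 *	gpmc,oe-off-ns = <150>;
 *	gpmc,access-ns = <140>;
 *	gpmc,rd-cycle-ns = <160>;
 *
 * Real values come from the bootloader configuration or the device
 * datasheet.
 */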
1566
Pekon Gupta6b187b22014-01-28 11:42:40 +05301567#if IS_ENABLED(CONFIG_MTD_NAND)
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001568
Mark Jackson496c8a02013-04-19 21:08:28 +01001569static const char * const nand_xfer_types[] = {
1570 [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
1571 [NAND_OMAP_POLLED] = "polled",
1572 [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
1573 [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
1574};
1575
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001576static int gpmc_probe_nand_child(struct platform_device *pdev,
1577 struct device_node *child)
1578{
1579 u32 val;
1580 const char *s;
1581 struct gpmc_timings gpmc_t;
1582 struct omap_nand_platform_data *gpmc_nand_data;
1583
1584 if (of_property_read_u32(child, "reg", &val) < 0) {
1585 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1586 child->full_name);
1587 return -ENODEV;
1588 }
1589
1590 gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data),
1591 GFP_KERNEL);
1592 if (!gpmc_nand_data)
1593 return -ENOMEM;
1594
1595 gpmc_nand_data->cs = val;
1596 gpmc_nand_data->of_node = child;
1597
Pekon Guptaac65caf2013-10-24 18:20:17 +05301598 /* Detect availability of ELM module */
1599 gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
1600 if (gpmc_nand_data->elm_of_node == NULL)
1601 gpmc_nand_data->elm_of_node =
1602 of_parse_phandle(child, "elm_id", 0);
1603 if (gpmc_nand_data->elm_of_node == NULL)
1604 pr_warn("%s: ti,elm-id property not found\n", __func__);
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001605
Pekon Guptaac65caf2013-10-24 18:20:17 +05301606 /* select ecc-scheme for NAND */
1607 if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
1608 pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
1609 return -ENODEV;
1610 }
Roger Quadrosa3e83f02014-08-25 16:15:33 -07001611
1612 if (!strcmp(s, "sw"))
1613 gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
1614 else if (!strcmp(s, "ham1") ||
1615 !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
Pekon Guptaac65caf2013-10-24 18:20:17 +05301616 gpmc_nand_data->ecc_opt =
1617 OMAP_ECC_HAM1_CODE_HW;
1618 else if (!strcmp(s, "bch4"))
1619 if (gpmc_nand_data->elm_of_node)
1620 gpmc_nand_data->ecc_opt =
1621 OMAP_ECC_BCH4_CODE_HW;
1622 else
1623 gpmc_nand_data->ecc_opt =
1624 OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
1625 else if (!strcmp(s, "bch8"))
1626 if (gpmc_nand_data->elm_of_node)
1627 gpmc_nand_data->ecc_opt =
1628 OMAP_ECC_BCH8_CODE_HW;
1629 else
1630 gpmc_nand_data->ecc_opt =
1631 OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
pekon gupta27c9fd62014-05-19 13:24:39 +05301632 else if (!strcmp(s, "bch16"))
1633 if (gpmc_nand_data->elm_of_node)
1634 gpmc_nand_data->ecc_opt =
1635 OMAP_ECC_BCH16_CODE_HW;
1636 else
1637 pr_err("%s: BCH16 requires ELM support\n", __func__);
Pekon Guptaac65caf2013-10-24 18:20:17 +05301638 else
1639 pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
1640
1641 /* select data transfer mode for NAND controller */
Mark Jackson496c8a02013-04-19 21:08:28 +01001642 if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
1643 for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
1644 if (!strcasecmp(s, nand_xfer_types[val])) {
1645 gpmc_nand_data->xfer_type = val;
1646 break;
1647 }
1648
Ezequiel Garcíafef775c2014-09-11 12:02:08 -03001649 gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child);
1650
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001651 val = of_get_nand_bus_width(child);
1652 if (val == 16)
1653 gpmc_nand_data->devsize = NAND_BUSWIDTH_16;
1654
1655 gpmc_read_timings_dt(child, &gpmc_t);
1656 gpmc_nand_init(gpmc_nand_data, &gpmc_t);
1657
1658 return 0;
1659}
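
/*
 * Illustrative NAND child node for the path above (placeholder values,
 * not from a real board). The ti,* properties are the ones parsed by
 * gpmc_probe_nand_child(); nand-bus-width and nand-on-flash-bbt are the
 * generic NAND properties consumed by of_get_nand_bus_width() and
 * of_get_nand_on_flash_bbt():
 *
 *	nand@0,0 {
 *		reg = <0 0 4>;
 *		ti,nand-ecc-opt = "bch8";
 *		ti,nand-xfer-type = "prefetch-dma";
 *		ti,elm-id = <&elm>;
 *		nand-bus-width = <16>;
 *		nand-on-flash-bbt;
 *	};
 */
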
1660#else
1661static int gpmc_probe_nand_child(struct platform_device *pdev,
1662 struct device_node *child)
1663{
1664 return 0;
1665}
1666#endif
1667
Pekon Gupta980386d2014-01-28 11:42:41 +05301668#if IS_ENABLED(CONFIG_MTD_ONENAND)
Ezequiel Garcia75d36252013-01-25 09:23:11 -03001669static int gpmc_probe_onenand_child(struct platform_device *pdev,
1670 struct device_node *child)
1671{
1672 u32 val;
1673 struct omap_onenand_platform_data *gpmc_onenand_data;
1674
1675 if (of_property_read_u32(child, "reg", &val) < 0) {
1676 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1677 child->full_name);
1678 return -ENODEV;
1679 }
1680
1681 gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
1682 GFP_KERNEL);
1683 if (!gpmc_onenand_data)
1684 return -ENOMEM;
1685
1686 gpmc_onenand_data->cs = val;
1687 gpmc_onenand_data->of_node = child;
1688 gpmc_onenand_data->dma_channel = -1;
1689
1690 if (!of_property_read_u32(child, "dma-channel", &val))
1691 gpmc_onenand_data->dma_channel = val;
1692
1693 gpmc_onenand_init(gpmc_onenand_data);
1694
1695 return 0;
1696}
1697#else
1698static int gpmc_probe_onenand_child(struct platform_device *pdev,
1699 struct device_node *child)
1700{
1701 return 0;
1702}
1703#endif
1704
Jon Huntercdd69282013-02-08 16:46:13 -06001705/**
Javier Martinez Canillas3af91cf2013-03-14 16:09:21 +01001706 * gpmc_probe_generic_child - configures the gpmc for a child device
Jon Huntercdd69282013-02-08 16:46:13 -06001707 * @pdev: pointer to gpmc platform device
Javier Martinez Canillas3af91cf2013-03-14 16:09:21 +01001708 * @child: pointer to device-tree node for child device
Jon Huntercdd69282013-02-08 16:46:13 -06001709 *
Javier Martinez Canillas3af91cf2013-03-14 16:09:21 +01001710 * Allocates and configures a GPMC chip-select for a child device.
Jon Huntercdd69282013-02-08 16:46:13 -06001711 * Returns 0 on success and appropriate negative error code on failure.
1712 */
Javier Martinez Canillas3af91cf2013-03-14 16:09:21 +01001713static int gpmc_probe_generic_child(struct platform_device *pdev,
Jon Huntercdd69282013-02-08 16:46:13 -06001714 struct device_node *child)
1715{
1716 struct gpmc_settings gpmc_s;
1717 struct gpmc_timings gpmc_t;
1718 struct resource res;
1719 unsigned long base;
Tony Lindgren9ed7a772014-11-03 17:45:01 -08001720 const char *name;
Jon Huntercdd69282013-02-08 16:46:13 -06001721 int ret, cs;
Roger Quadrose378d222014-08-29 19:11:52 +03001722 u32 val;
Jon Huntercdd69282013-02-08 16:46:13 -06001723
1724 if (of_property_read_u32(child, "reg", &cs) < 0) {
1725 dev_err(&pdev->dev, "%s has no 'reg' property\n",
1726 child->full_name);
1727 return -ENODEV;
1728 }
1729
1730 if (of_address_to_resource(child, 0, &res) < 0) {
1731 dev_err(&pdev->dev, "%s has malformed 'reg' property\n",
1732 child->full_name);
1733 return -ENODEV;
1734 }
1735
Tony Lindgren9ed7a772014-11-03 17:45:01 -08001736 /*
1737 * Check if we have multiple instances of the same device
1738 * on a single chip select. If so, use the already initialized
1739 * timings.
1740 */
1741 name = gpmc_cs_get_name(cs);
1742 if (name && child->name && of_node_cmp(child->name, name) == 0)
1743 goto no_timings;
1744
Jon Huntercdd69282013-02-08 16:46:13 -06001745 ret = gpmc_cs_request(cs, resource_size(&res), &base);
1746 if (ret < 0) {
1747 dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
1748 return ret;
1749 }
Tony Lindgren9ed7a772014-11-03 17:45:01 -08001750 gpmc_cs_set_name(cs, child->name);
Jon Huntercdd69282013-02-08 16:46:13 -06001751
Tony Lindgren35ac0512014-11-03 17:45:01 -08001752 gpmc_read_settings_dt(child, &gpmc_s);
1753 gpmc_read_timings_dt(child, &gpmc_t);
1754
1755 /*
1756 * For some GPMC devices we still need to rely on the bootloader
1757 * timings because the devices can be connected via FPGA.
1758 * REVISIT: Add timing support from slls644g.pdf.
1759 */
1760 if (!gpmc_t.cs_rd_off) {
1761 WARN(1, "enable GPMC debug to configure .dts timings for CS%i\n",
1762 cs);
1763 gpmc_cs_show_timings(cs,
1764 "please add GPMC bootloader timings to .dts");
1765 goto no_timings;
1766 }
1767
Roger Quadros4cf27d22014-08-29 19:11:53 +03001768 /* CS must be disabled while making changes to gpmc configuration */
1769 gpmc_cs_disable_mem(cs);
1770
Jon Huntercdd69282013-02-08 16:46:13 -06001771 /*
 1772	 * FIXME: gpmc_cs_request() will map the CS to an arbitrary
1773 * location in the gpmc address space. When booting with
1774 * device-tree we want the NOR flash to be mapped to the
1775 * location specified in the device-tree blob. So remap the
 1776	 * CS to this location. Once DT migration is complete, this should
 1777	 * simply make gpmc_cs_request() map a specific address.
1778 */
1779 ret = gpmc_cs_remap(cs, res.start);
1780 if (ret < 0) {
Fabio Estevamf70bf2a2013-09-18 12:01:59 -07001781 dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
1782 cs, &res.start);
Jon Huntercdd69282013-02-08 16:46:13 -06001783 goto err;
1784 }
1785
Jon Huntercdd69282013-02-08 16:46:13 -06001786 ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width);
1787 if (ret < 0)
1788 goto err;
1789
1790 ret = gpmc_cs_program_settings(cs, &gpmc_s);
1791 if (ret < 0)
1792 goto err;
1793
Roger Quadros7604baf2014-08-29 19:11:51 +03001794 ret = gpmc_cs_set_timings(cs, &gpmc_t);
1795 if (ret) {
1796 dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n",
1797 child->name);
1798 goto err;
1799 }
Jon Huntercdd69282013-02-08 16:46:13 -06001800
Roger Quadrose378d222014-08-29 19:11:52 +03001801 /* Clear limited address i.e. enable A26-A11 */
1802 val = gpmc_read_reg(GPMC_CONFIG);
1803 val &= ~GPMC_CONFIG_LIMITEDADDRESS;
1804 gpmc_write_reg(GPMC_CONFIG, val);
1805
Roger Quadros4cf27d22014-08-29 19:11:53 +03001806 /* Enable CS region */
1807 gpmc_cs_enable_mem(cs);
Jon Huntercdd69282013-02-08 16:46:13 -06001808
Tony Lindgrenfd4446f2013-11-14 15:25:09 -08001809no_timings:
Jon Huntercdd69282013-02-08 16:46:13 -06001810 if (of_platform_device_create(child, NULL, &pdev->dev))
1811 return 0;
1812
1813 dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
Javier Martinez Canillase8ffd6f2013-03-14 16:09:20 +01001814 ret = -ENODEV;
Jon Huntercdd69282013-02-08 16:46:13 -06001815
1816err:
1817 gpmc_cs_free(cs);
1818
1819 return ret;
1820}
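
/*
 * Illustrative generic child node (placeholder name and values) handled
 * by gpmc_probe_generic_child(); "reg" is <chip-select offset size> and
 * "bank-width" is the device width in bytes. At least gpmc,cs-rd-off-ns
 * must be present, otherwise the WARN() above triggers and the
 * bootloader-programmed timings are left untouched:
 *
 *	ethernet@5,0 {
 *		reg = <5 0 0xff>;
 *		bank-width = <2>;
 *		gpmc,cs-on-ns = <0>;
 *		gpmc,cs-rd-off-ns = <186>;
 *		gpmc,rd-cycle-ns = <186>;
 *	};
 */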
1821
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001822static int gpmc_probe_dt(struct platform_device *pdev)
1823{
1824 int ret;
1825 struct device_node *child;
1826 const struct of_device_id *of_id =
1827 of_match_device(gpmc_dt_ids, &pdev->dev);
1828
1829 if (!of_id)
1830 return 0;
1831
Gupta Pekonf34f3712013-05-31 17:31:30 +05301832 ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-cs",
1833 &gpmc_cs_num);
1834 if (ret < 0) {
1835 pr_err("%s: number of chip-selects not defined\n", __func__);
1836 return ret;
1837 } else if (gpmc_cs_num < 1) {
1838 pr_err("%s: all chip-selects are disabled\n", __func__);
1839 return -EINVAL;
1840 } else if (gpmc_cs_num > GPMC_CS_NUM) {
1841 pr_err("%s: number of supported chip-selects cannot be > %d\n",
1842 __func__, GPMC_CS_NUM);
1843 return -EINVAL;
1844 }
1845
Jon Hunter9f833152013-02-20 15:53:38 -06001846 ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins",
1847 &gpmc_nr_waitpins);
1848 if (ret < 0) {
1849 pr_err("%s: number of wait pins not found!\n", __func__);
1850 return ret;
1851 }
1852
Guido Martínez68e2eb52014-07-02 10:35:18 -03001853 for_each_available_child_of_node(pdev->dev.of_node, child) {
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001854
Javier Martinez Canillasf2b09f62013-04-17 22:34:11 +02001855 if (!child->name)
1856 continue;
Jon Huntercdd69282013-02-08 16:46:13 -06001857
Javier Martinez Canillasf2b09f62013-04-17 22:34:11 +02001858 if (of_node_cmp(child->name, "nand") == 0)
1859 ret = gpmc_probe_nand_child(pdev, child);
1860 else if (of_node_cmp(child->name, "onenand") == 0)
1861 ret = gpmc_probe_onenand_child(pdev, child);
1862 else if (of_node_cmp(child->name, "ethernet") == 0 ||
Tony Lindgrenfd4446f2013-11-14 15:25:09 -08001863 of_node_cmp(child->name, "nor") == 0 ||
1864 of_node_cmp(child->name, "uart") == 0)
Javier Martinez Canillasf2b09f62013-04-17 22:34:11 +02001865 ret = gpmc_probe_generic_child(pdev, child);
Jon Huntercdd69282013-02-08 16:46:13 -06001866
Javier Martinez Canillasb327b362013-04-17 22:34:12 +02001867 if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
1868 __func__, child->full_name))
Javier Martinez Canillas5330dc12013-03-14 22:54:11 +01001869 of_node_put(child);
Javier Martinez Canillas5330dc12013-03-14 22:54:11 +01001870 }
1871
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001872 return 0;
1873}
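
/*
 * The GPMC controller node itself must supply the two properties read
 * above. An illustrative fragment (unit address and counts are
 * placeholders modelled on an AM335x-style integration):
 *
 *	gpmc: gpmc@50000000 {
 *		compatible = "ti,am3352-gpmc";
 *		gpmc,num-cs = <7>;
 *		gpmc,num-waitpins = <2>;
 *		...
 *	};
 */
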
1874#else
1875static int gpmc_probe_dt(struct platform_device *pdev)
1876{
1877 return 0;
1878}
1879#endif
1880
Greg Kroah-Hartman351a1022012-12-21 14:02:24 -08001881static int gpmc_probe(struct platform_device *pdev)
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -07001882{
Jon Hunter81190242012-10-17 09:41:25 -05001883 int rc;
Afzal Mohammed6b6c32f2012-08-30 12:53:23 -07001884 u32 l;
Afzal Mohammedda496872012-09-23 17:28:25 -06001885 struct resource *res;
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -07001886
Afzal Mohammedda496872012-09-23 17:28:25 -06001887 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1888 if (res == NULL)
1889 return -ENOENT;
Paul Walmsleyfd1dc872008-10-06 15:49:17 +03001890
Afzal Mohammedda496872012-09-23 17:28:25 -06001891 phys_base = res->start;
1892 mem_size = resource_size(res);
Kevin Hilman8d084362010-01-29 14:20:06 -08001893
Thierry Reding5857bd92013-01-21 11:08:55 +01001894 gpmc_base = devm_ioremap_resource(&pdev->dev, res);
1895 if (IS_ERR(gpmc_base))
1896 return PTR_ERR(gpmc_base);
Afzal Mohammedda496872012-09-23 17:28:25 -06001897
1898 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1899 if (res == NULL)
1900 dev_warn(&pdev->dev, "Failed to get resource: irq\n");
1901 else
1902 gpmc_irq = res->start;
1903
Roger Quadros8bf9be52014-09-01 15:18:56 +03001904 gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
Afzal Mohammedda496872012-09-23 17:28:25 -06001905 if (IS_ERR(gpmc_l3_clk)) {
Roger Quadros8bf9be52014-09-01 15:18:56 +03001906 dev_err(&pdev->dev, "Failed to get GPMC fck\n");
Afzal Mohammedda496872012-09-23 17:28:25 -06001907 gpmc_irq = 0;
1908 return PTR_ERR(gpmc_l3_clk);
Paul Walmsleyfd1dc872008-10-06 15:49:17 +03001909 }
1910
Roger Quadros8bf9be52014-09-01 15:18:56 +03001911 if (!clk_get_rate(gpmc_l3_clk)) {
1912 dev_err(&pdev->dev, "Invalid GPMC fck clock rate\n");
1913 return -EINVAL;
1914 }
1915
avinash philipb3f55252013-06-12 16:30:56 +05301916 pm_runtime_enable(&pdev->dev);
1917 pm_runtime_get_sync(&pdev->dev);
Olof Johansson1daa8c12010-01-20 22:39:29 +00001918
Afzal Mohammedda496872012-09-23 17:28:25 -06001919 gpmc_dev = &pdev->dev;
1920
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -07001921 l = gpmc_read_reg(GPMC_REVISION);
Jon Hunteraa8d4762013-02-21 15:25:23 -06001922
1923 /*
1924 * FIXME: Once device-tree migration is complete the below flags
1925 * should be populated based upon the device-tree compatible
1926 * string. For now just use the IP revision. OMAP3+ devices have
1927 * the wr_access and wr_data_mux_bus register fields. OMAP4+
1928 * devices support the addr-addr-data multiplex protocol.
1929 *
1930 * GPMC IP revisions:
1931 * - OMAP24xx = 2.0
1932 * - OMAP3xxx = 5.0
1933 * - OMAP44xx/54xx/AM335x = 6.0
1934 */
Afzal Mohammedda496872012-09-23 17:28:25 -06001935 if (GPMC_REVISION_MAJOR(l) > 0x4)
1936 gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
Jon Hunteraa8d4762013-02-21 15:25:23 -06001937 if (GPMC_REVISION_MAJOR(l) > 0x5)
1938 gpmc_capability |= GPMC_HAS_MUX_AAD;
Afzal Mohammedda496872012-09-23 17:28:25 -06001939 dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
1940 GPMC_REVISION_MINOR(l));
1941
Jon Hunter84b00f02013-03-06 14:36:47 -06001942 gpmc_mem_init();
Sukumar Ghoraidb97eb7d2011-01-28 15:42:05 +05301943
Russell King71856842013-03-13 20:44:21 +00001944 if (gpmc_setup_irq() < 0)
Afzal Mohammedda496872012-09-23 17:28:25 -06001945 dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");
1946
Gupta Pekonf34f3712013-05-31 17:31:30 +05301947 if (!pdev->dev.of_node) {
1948 gpmc_cs_num = GPMC_CS_NUM;
Jon Hunter9f833152013-02-20 15:53:38 -06001949 gpmc_nr_waitpins = GPMC_NR_WAITPINS;
Gupta Pekonf34f3712013-05-31 17:31:30 +05301950 }
Jon Hunter9f833152013-02-20 15:53:38 -06001951
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001952 rc = gpmc_probe_dt(pdev);
1953 if (rc < 0) {
avinash philipb3f55252013-06-12 16:30:56 +05301954 pm_runtime_put_sync(&pdev->dev);
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001955 dev_err(gpmc_dev, "failed to probe DT parameters\n");
1956 return rc;
1957 }
1958
Afzal Mohammedda496872012-09-23 17:28:25 -06001959 return 0;
Sukumar Ghoraidb97eb7d2011-01-28 15:42:05 +05301960}
Afzal Mohammedda496872012-09-23 17:28:25 -06001961
Greg Kroah-Hartman351a1022012-12-21 14:02:24 -08001962static int gpmc_remove(struct platform_device *pdev)
Afzal Mohammedda496872012-09-23 17:28:25 -06001963{
1964 gpmc_free_irq();
1965 gpmc_mem_exit();
avinash philipb3f55252013-06-12 16:30:56 +05301966 pm_runtime_put_sync(&pdev->dev);
1967 pm_runtime_disable(&pdev->dev);
Afzal Mohammedda496872012-09-23 17:28:25 -06001968 gpmc_dev = NULL;
1969 return 0;
1970}
1971
avinash philipb536dd42013-06-18 00:16:38 +05301972#ifdef CONFIG_PM_SLEEP
1973static int gpmc_suspend(struct device *dev)
1974{
1975 omap3_gpmc_save_context();
1976 pm_runtime_put_sync(dev);
1977 return 0;
1978}
1979
1980static int gpmc_resume(struct device *dev)
1981{
1982 pm_runtime_get_sync(dev);
1983 omap3_gpmc_restore_context();
1984 return 0;
1985}
1986#endif
1987
1988static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume);
1989
Afzal Mohammedda496872012-09-23 17:28:25 -06001990static struct platform_driver gpmc_driver = {
1991 .probe = gpmc_probe,
Greg Kroah-Hartman351a1022012-12-21 14:02:24 -08001992 .remove = gpmc_remove,
Afzal Mohammedda496872012-09-23 17:28:25 -06001993 .driver = {
1994 .name = DEVICE_NAME,
1995 .owner = THIS_MODULE,
Daniel Mackbc6b1e72012-12-14 11:36:44 +01001996 .of_match_table = of_match_ptr(gpmc_dt_ids),
avinash philipb536dd42013-06-18 00:16:38 +05301997 .pm = &gpmc_pm_ops,
Afzal Mohammedda496872012-09-23 17:28:25 -06001998 },
1999};
2000
2001static __init int gpmc_init(void)
2002{
2003 return platform_driver_register(&gpmc_driver);
2004}
2005
2006static __exit void gpmc_exit(void)
2007{
2008 platform_driver_unregister(&gpmc_driver);
2009
2010}
2011
Tony Lindgrenb76c8b12013-01-11 11:24:18 -08002012omap_postcore_initcall(gpmc_init);
Afzal Mohammedda496872012-09-23 17:28:25 -06002013module_exit(gpmc_exit);
Sukumar Ghoraidb97eb7d2011-01-28 15:42:05 +05302014
Afzal Mohammed4be48fd2012-09-23 17:28:24 -06002015static int __init omap_gpmc_init(void)
2016{
2017 struct omap_hwmod *oh;
2018 struct platform_device *pdev;
2019 char *oh_name = "gpmc";
2020
Daniel Mack2f98ca82012-12-14 11:36:40 +01002021 /*
2022 * if the board boots up with a populated DT, do not
2023 * manually add the device from this initcall
2024 */
2025 if (of_have_populated_dt())
2026 return -ENODEV;
2027
Afzal Mohammed4be48fd2012-09-23 17:28:24 -06002028 oh = omap_hwmod_lookup(oh_name);
2029 if (!oh) {
2030 pr_err("Could not look up %s\n", oh_name);
2031 return -ENODEV;
2032 }
2033
Paul Walmsleyc1d1cd52013-01-26 00:48:53 -07002034 pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0);
Afzal Mohammed4be48fd2012-09-23 17:28:24 -06002035 WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);
2036
Thomas Meyer12616742013-06-01 11:44:44 +02002037 return PTR_RET(pdev);
Afzal Mohammed4be48fd2012-09-23 17:28:24 -06002038}
Tony Lindgrenb76c8b12013-01-11 11:24:18 -08002039omap_postcore_initcall(omap_gpmc_init);
Afzal Mohammed4be48fd2012-09-23 17:28:24 -06002040
Sukumar Ghoraidb97eb7d2011-01-28 15:42:05 +05302041static irqreturn_t gpmc_handle_irq(int irq, void *dev)
2042{
Afzal Mohammed6b6c32f2012-08-30 12:53:23 -07002043 int i;
2044 u32 regval;
Sukumar Ghoraidb97eb7d2011-01-28 15:42:05 +05302045
Afzal Mohammed6b6c32f2012-08-30 12:53:23 -07002046 regval = gpmc_read_reg(GPMC_IRQSTATUS);
2047
2048 if (!regval)
2049 return IRQ_NONE;
2050
2051 for (i = 0; i < GPMC_NR_IRQ; i++)
2052 if (regval & gpmc_client_irq[i].bitmask)
2053 generic_handle_irq(gpmc_client_irq[i].irq);
2054
2055 gpmc_write_reg(GPMC_IRQSTATUS, regval);
Sukumar Ghoraidb97eb7d2011-01-28 15:42:05 +05302056
2057 return IRQ_HANDLED;
Juha Yrjola4bbbc1a2006-06-26 16:16:16 -07002058}
Rajendra Nayaka2d3e7b2008-09-26 17:47:33 +05302059
Rajendra Nayaka2d3e7b2008-09-26 17:47:33 +05302060static struct omap3_gpmc_regs gpmc_context;
2061
Felipe Balbib2fa3b72010-02-15 10:03:33 -08002062void omap3_gpmc_save_context(void)
Rajendra Nayaka2d3e7b2008-09-26 17:47:33 +05302063{
2064 int i;
Felipe Balbib2fa3b72010-02-15 10:03:33 -08002065
Rajendra Nayaka2d3e7b2008-09-26 17:47:33 +05302066 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
2067 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
2068 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
2069 gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
2070 gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
2071 gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
2072 gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);
Gupta Pekonf34f3712013-05-31 17:31:30 +05302073 for (i = 0; i < gpmc_cs_num; i++) {
Rajendra Nayaka2d3e7b2008-09-26 17:47:33 +05302074 gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
2075 if (gpmc_context.cs_context[i].is_valid) {
2076 gpmc_context.cs_context[i].config1 =
2077 gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
2078 gpmc_context.cs_context[i].config2 =
2079 gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
2080 gpmc_context.cs_context[i].config3 =
2081 gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
2082 gpmc_context.cs_context[i].config4 =
2083 gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
2084 gpmc_context.cs_context[i].config5 =
2085 gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
2086 gpmc_context.cs_context[i].config6 =
2087 gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
2088 gpmc_context.cs_context[i].config7 =
2089 gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
2090 }
2091 }
2092}
2093
Felipe Balbib2fa3b72010-02-15 10:03:33 -08002094void omap3_gpmc_restore_context(void)
Rajendra Nayaka2d3e7b2008-09-26 17:47:33 +05302095{
2096 int i;
Felipe Balbib2fa3b72010-02-15 10:03:33 -08002097
Rajendra Nayaka2d3e7b2008-09-26 17:47:33 +05302098 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
2099 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
2100 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
2101 gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
2102 gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
2103 gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
2104 gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);
Gupta Pekonf34f3712013-05-31 17:31:30 +05302105 for (i = 0; i < gpmc_cs_num; i++) {
Rajendra Nayaka2d3e7b2008-09-26 17:47:33 +05302106 if (gpmc_context.cs_context[i].is_valid) {
2107 gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
2108 gpmc_context.cs_context[i].config1);
2109 gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
2110 gpmc_context.cs_context[i].config2);
2111 gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
2112 gpmc_context.cs_context[i].config3);
2113 gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
2114 gpmc_context.cs_context[i].config4);
2115 gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
2116 gpmc_context.cs_context[i].config5);
2117 gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
2118 gpmc_context.cs_context[i].config6);
2119 gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
2120 gpmc_context.cs_context[i].config7);
2121 }
2122 }
2123}