/*
 * drivers/mtd/devices/goldfish_nand.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (C) 2012 Intel, Inc.
 * Copyright (C) 2013 Intel, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/io.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/goldfish.h>
#include <asm/div64.h>
#include <linux/dma-mapping.h>

#include "goldfish_nand_reg.h"

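/*
 * Per-controller state. One mtd_info is appended for each NAND device the
 * emulated controller reports; cmd_params points at a DMA-coherent buffer
 * shared with the device when the "command params" capability is present.
 */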
struct goldfish_nand {
	/* lock protects access to the device registers */
	struct mutex lock;
	unsigned char __iomem *base;
	struct cmd_params *cmd_params;
	size_t mtd_count;
	struct mtd_info mtd[0];
};

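/*
 * Fast path: describe the whole transfer in the shared cmd_params buffer
 * and issue a single *_WITH_PARAMS command write. Returns 0 and stores the
 * device's result in *rv, or -1 if the buffer is missing or the command has
 * no "with params" variant. Called with nand->lock held.
 */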
static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd,
					 enum nand_cmd cmd, u64 addr, u32 len,
					 void *ptr, u32 *rv)
{
	u32 cmdp;
	struct goldfish_nand *nand = mtd->priv;
	struct cmd_params *cps = nand->cmd_params;
	unsigned char __iomem *base = nand->base;

	if (!cps)
		return -1;

	switch (cmd) {
	case NAND_CMD_ERASE:
		cmdp = NAND_CMD_ERASE_WITH_PARAMS;
		break;
	case NAND_CMD_READ:
		cmdp = NAND_CMD_READ_WITH_PARAMS;
		break;
	case NAND_CMD_WRITE:
		cmdp = NAND_CMD_WRITE_WITH_PARAMS;
		break;
	default:
		return -1;
	}
	cps->dev = mtd - nand->mtd;
	cps->addr_high = (u32)(addr >> 32);
	cps->addr_low = (u32)addr;
	cps->transfer_size = len;
	cps->data = (unsigned long)ptr;
	writel(cmdp, base + NAND_COMMAND);
	*rv = cps->result;
	return 0;
}

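/*
 * Issue one command to the emulated controller under nand->lock, using the
 * cmd_params fast path when available and otherwise programming the
 * individual device/address/size/data registers. Returns the value the
 * device reports, which callers treat as the number of bytes transferred.
 */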
static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd,
			     u64 addr, u32 len, void *ptr)
{
	struct goldfish_nand *nand = mtd->priv;
	u32 rv;
	unsigned char __iomem *base = nand->base;

	mutex_lock(&nand->lock);
	if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) {
		writel(mtd - nand->mtd, base + NAND_DEV);
		writel((u32)(addr >> 32), base + NAND_ADDR_HIGH);
		writel((u32)addr, base + NAND_ADDR_LOW);
		writel(len, base + NAND_TRANSFER_SIZE);
		gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH);
		writel(cmd, base + NAND_COMMAND);
		rv = readl(base + NAND_RESULT);
	}
	mutex_unlock(&nand->lock);
	return rv;
}

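/*
 * The mtd_info callbacks below all translate offsets the same way: offsets
 * must be page aligned, and each page in the device's backing store is
 * followed by its OOB bytes, so a logical page offset is rescaled by
 * (writesize + oobsize) before being handed to the device.
 */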
static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	loff_t ofs = instr->addr;
	u32 len = instr->len;
	s32 rem;

	if (ofs + len > mtd->size)
		goto invalid_arg;
	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (len % mtd->writesize)
		goto invalid_arg;
	len = len / mtd->writesize * (mtd->writesize + mtd->oobsize);

	if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) {
		pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n",
		       ofs, len, mtd->size, mtd->erasesize);
		return -EIO;
	}

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;

invalid_arg:
	pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n",
	       ofs, len, mtd->size, mtd->erasesize);
	return -EINVAL;
}

static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs,
				  struct mtd_oob_ops *ops)
{
	s32 rem;

	if (ofs + ops->len > mtd->size)
		goto invalid_arg;
	if (ops->datbuf && ops->len && ops->len != mtd->writesize)
		goto invalid_arg;
	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
		goto invalid_arg;

	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (ops->datbuf)
		ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
						ops->len, ops->datbuf);
	ofs += mtd->writesize + ops->ooboffs;
	if (ops->oobbuf)
		ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs,
						   ops->ooblen, ops->oobbuf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
	       ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
	return -EINVAL;
}

static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs,
				   struct mtd_oob_ops *ops)
{
	s32 rem;

	if (ofs + ops->len > mtd->size)
		goto invalid_arg;
	if (ops->len && ops->len != mtd->writesize)
		goto invalid_arg;
	if (ops->ooblen + ops->ooboffs > mtd->oobsize)
		goto invalid_arg;

	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (ops->datbuf)
		ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
						ops->len, ops->datbuf);
	ofs += mtd->writesize + ops->ooboffs;
	if (ops->oobbuf)
		ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs,
						   ops->ooblen, ops->oobbuf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n",
	       ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize);
	return -EINVAL;
}

static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
			      size_t *retlen, u_char *buf)
{
	s32 rem;

	if (from + len > mtd->size)
		goto invalid_arg;

	from = div_s64_rem(from, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	from *= (mtd->writesize + mtd->oobsize);

	*retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n",
	       from, len, mtd->size, mtd->writesize);
	return -EINVAL;
}

static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			       size_t *retlen, const u_char *buf)
{
	s32 rem;

	if (to + len > mtd->size)
		goto invalid_arg;

	to = div_s64_rem(to, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	to *= (mtd->writesize + mtd->oobsize);

	*retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf);
	return 0;

invalid_arg:
	pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n",
	       to, len, mtd->size, mtd->writesize);
	return -EINVAL;
}

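/*
 * Bad-block state is kept by the device itself; these helpers rescale the
 * block offset into the page+OOB layout and use the
 * NAND_CMD_BLOCK_BAD_GET/SET commands to query or mark a block.
 */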
static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	s32 rem;

	if (ofs >= mtd->size)
		goto invalid_arg;

	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= mtd->erasesize / mtd->writesize;
	ofs *= (mtd->writesize + mtd->oobsize);

	return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL);

invalid_arg:
	pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
	       ofs, mtd->size, mtd->writesize);
	return -EINVAL;
}

static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	s32 rem;

	if (ofs >= mtd->size)
		goto invalid_arg;

	ofs = div_s64_rem(ofs, mtd->writesize, &rem);
	if (rem)
		goto invalid_arg;
	ofs *= mtd->erasesize / mtd->writesize;
	ofs *= (mtd->writesize + mtd->oobsize);

	if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1)
		return -EIO;
	return 0;

invalid_arg:
	pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n",
	       ofs, mtd->size, mtd->writesize);
	return -EINVAL;
}

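/*
 * Allocate the DMA-coherent cmd_params buffer and hand its bus address to
 * the device, enabling the single-write command path used by
 * goldfish_nand_cmd_with_params().
 */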
static int nand_setup_cmd_params(struct platform_device *pdev,
				 struct goldfish_nand *nand)
{
	dma_addr_t dma_handle;
	unsigned char __iomem *base = nand->base;

	nand->cmd_params = dmam_alloc_coherent(&pdev->dev,
					       sizeof(struct cmd_params),
					       &dma_handle, GFP_KERNEL);
	if (!nand->cmd_params) {
		dev_err(&pdev->dev, "allocate buffer failed\n");
		return -ENOMEM;
	}
	writel((u32)((u64)dma_handle >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH);
	writel((u32)dma_handle, base + NAND_CMD_PARAMS_ADDR_LOW);
	return 0;
}

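/*
 * Read the geometry of NAND device 'id' from the control registers, derive
 * the usable size and erase size in data-only bytes, fetch the device name,
 * and register the resulting mtd_info with the callbacks above.
 */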
static int goldfish_nand_init_device(struct platform_device *pdev,
				     struct goldfish_nand *nand, int id)
{
	u32 name_len;
	u32 result;
	u32 flags;
	unsigned char __iomem *base = nand->base;
	struct mtd_info *mtd = &nand->mtd[id];
	char *name;

	mutex_lock(&nand->lock);
	writel(id, base + NAND_DEV);
	flags = readl(base + NAND_DEV_FLAGS);
	name_len = readl(base + NAND_DEV_NAME_LEN);
	mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE);
	mtd->size = readl(base + NAND_DEV_SIZE_LOW);
	mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32;
	mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE);
	mtd->oobavail = mtd->oobsize;
	mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) /
			(mtd->writesize + mtd->oobsize) * mtd->writesize;
	mtd->size = div_s64(mtd->size, mtd->writesize + mtd->oobsize);
	mtd->size *= mtd->writesize;
	dev_dbg(&pdev->dev,
		"goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n",
		id, mtd->size, mtd->writesize,
		mtd->oobsize, mtd->erasesize);
	mutex_unlock(&nand->lock);

	mtd->priv = nand;

	name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	mtd->name = name;

	result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len,
				   name);
	if (result != name_len) {
		dev_err(&pdev->dev,
			"goldfish_nand_init_device failed to get dev name %d != %d\n",
			result, name_len);
		return -ENODEV;
	}
	((char *)mtd->name)[name_len] = '\0';

	/* Setup the MTD structure */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	if (flags & NAND_DEV_FLAG_READ_ONLY)
		mtd->flags &= ~MTD_WRITEABLE;
	if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP)
		nand_setup_cmd_params(pdev, nand);

	mtd->owner = THIS_MODULE;
	mtd->_erase = goldfish_nand_erase;
	mtd->_read = goldfish_nand_read;
	mtd->_write = goldfish_nand_write;
	mtd->_read_oob = goldfish_nand_read_oob;
	mtd->_write_oob = goldfish_nand_write_oob;
	mtd->_block_isbad = goldfish_nand_block_isbad;
	mtd->_block_markbad = goldfish_nand_block_markbad;

	if (mtd_device_register(mtd, NULL, 0))
		return -EIO;

	return 0;
}

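/*
 * Map the controller's MMIO window, verify the interface version, then
 * allocate one goldfish_nand with a trailing mtd_info per reported device
 * and initialise each of them; probe succeeds if at least one device
 * comes up.
 */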
static int goldfish_nand_probe(struct platform_device *pdev)
{
	u32 num_dev;
	int i;
	int err;
	u32 num_dev_working;
	u32 version;
	struct resource *r;
	struct goldfish_nand *nand;
	unsigned char __iomem *base;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;

	base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
	if (!base)
		return -ENOMEM;

	version = readl(base + NAND_VERSION);
	if (version != NAND_VERSION_CURRENT) {
		dev_err(&pdev->dev,
			"goldfish_nand_init: version mismatch, got %d, expected %d\n",
			version, NAND_VERSION_CURRENT);
		return -ENODEV;
	}
	num_dev = readl(base + NAND_NUM_DEV);
	if (num_dev == 0)
		return -ENODEV;

	nand = devm_kzalloc(&pdev->dev, sizeof(*nand) +
			    sizeof(struct mtd_info) * num_dev, GFP_KERNEL);
	if (!nand)
		return -ENOMEM;

	mutex_init(&nand->lock);
	nand->base = base;
	nand->mtd_count = num_dev;
	platform_set_drvdata(pdev, nand);

	num_dev_working = 0;
	for (i = 0; i < num_dev; i++) {
		err = goldfish_nand_init_device(pdev, nand, i);
		if (err == 0)
			num_dev_working++;
	}
	if (num_dev_working == 0)
		return -ENODEV;
	return 0;
}

static int goldfish_nand_remove(struct platform_device *pdev)
{
	struct goldfish_nand *nand = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < nand->mtd_count; i++) {
		if (nand->mtd[i].name)
			mtd_device_unregister(&nand->mtd[i]);
	}
	return 0;
}

static struct platform_driver goldfish_nand_driver = {
	.probe = goldfish_nand_probe,
	.remove = goldfish_nand_remove,
	.driver = {
		.name = "goldfish_nand"
	}
};

module_platform_driver(goldfish_nand_driver);
MODULE_LICENSE("GPL");