1/*
2 * Copyright (C) 2007 Google, Inc.
3 * Copyright (c) 2012-2017, 2020, The Linux Foundation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16#include "msm_qpic_nand.h"
17
18#define QPIC_BAM_DEFAULT_IPC_LOGLVL 2
19
20/* The driver supports devices with page sizes up to 4K */
21#define MAX_CW_PER_PAGE 8
22/*
23 * Max descriptors needed for erase, read, write operations.
24 * Usually, this is (2 * MAX_CW_PER_PAGE).
25 */
26#define MAX_DESC 16
27
28static bool enable_euclean;
29static bool enable_perfstats;
30
31static ssize_t msm_nand_attr_perf_stats_show(struct device *dev,
32 struct device_attribute *attr,
33 char *buf);
34static ssize_t msm_nand_attr_perf_stats_store(struct device *dev,
35 struct device_attribute *attr,
36 const char *buf, size_t count);
37
38static struct device_attribute dev_msm_nand_perf_stats =
39 __ATTR(perf_stats, 0644,
40 msm_nand_attr_perf_stats_show, msm_nand_attr_perf_stats_store);
41
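/*
 * Note: scnprintf() returns the number of characters actually written
 * (never more than the space remaining), so 'count' below can never
 * advance past PAGE_SIZE.
 */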
42#define print_sysfs(fmt, ...) \
43{ \
44 count += scnprintf(buf + count, PAGE_SIZE - count, \
45 fmt, ##__VA_ARGS__); \
46}
47
48static ssize_t msm_nand_attr_perf_stats_show(struct device *dev,
49 struct device_attribute *attr,
50 char *buf)
51{
52 ssize_t count = 0;
53 struct msm_nand_info *info = dev_get_drvdata(dev);
54
55 if (!enable_perfstats) {
56		print_sysfs("Performance stats are disabled\n");
57 return count;
58 }
59
60 spin_lock(&info->perf.lock);
61 print_sysfs("total_read_size = %llu\n", info->perf.total_read_size);
62 print_sysfs("total_write_size = %llu\n", info->perf.total_write_size);
63 print_sysfs("total_erase_blks = %llu\n\n", info->perf.total_erase_blks);
64
65 print_sysfs("total_read_time_us = %lld\n",
66 ktime_to_us(info->perf.total_read_time));
67 print_sysfs("total_write_time_us = %lld\n",
68 ktime_to_us(info->perf.total_write_time));
69 print_sysfs("total_erase_time_us = %lld\n\n",
70 ktime_to_us(info->perf.total_erase_time));
71
72 print_sysfs("min_read_time_us = %lld\n",
73 ktime_to_us(info->perf.min_read_time));
74 print_sysfs("min_write_time_us = %lld\n",
75 ktime_to_us(info->perf.min_write_time));
76 print_sysfs("min_erase_time_us = %lld\n\n",
77 ktime_to_us(info->perf.min_erase_time));
78
79 print_sysfs("max_read_time_us = %lld\n",
80 ktime_to_us(info->perf.max_read_time));
81 print_sysfs("max_write_time_us = %lld\n",
82 ktime_to_us(info->perf.max_write_time));
83 print_sysfs("max_erase_time_us = %lld\n\n",
84 ktime_to_us(info->perf.max_erase_time));
85
86 spin_unlock(&info->perf.lock);
87 return count;
88}
89
90static ssize_t msm_nand_attr_perf_stats_store(struct device *dev,
91 struct device_attribute *attr,
92 const char *buf, size_t count)
93{
94 struct msm_nand_info *info = dev_get_drvdata(dev);
95
96 if (!enable_perfstats) {
97		pr_err("couldn't write as perf stats are disabled\n");
98 return -EPERM;
99 }
100
101 if (count > 1 || (count == 1 && *buf != '\n')) {
102 pr_err("write not permitted\n");
103 return -EPERM;
104 }
105
106 spin_lock(&info->perf.lock);
107 info->perf.min_read_time = ktime_set(KTIME_MAX, 0);
108 info->perf.min_write_time = ktime_set(KTIME_MAX, 0);
109 info->perf.min_erase_time = ktime_set(KTIME_MAX, 0);
110
111 info->perf.max_read_time = ktime_set(0, 0);
112 info->perf.max_write_time = ktime_set(0, 0);
113 info->perf.max_erase_time = ktime_set(0, 0);
114
115 info->perf.total_read_time = ktime_set(0, 0);
116 info->perf.total_write_time = ktime_set(0, 0);
117 info->perf.total_erase_time = ktime_set(0, 0);
118
119 info->perf.total_read_size = 0;
120 info->perf.total_write_size = 0;
121 info->perf.total_erase_blks = 0;
122 spin_unlock(&info->perf.lock);
123
124 return count;
125}
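/*
 * Example usage from user space (the exact sysfs path is illustrative and
 * depends on the platform device name):
 *
 *   cat /sys/devices/platform/<qpic_nand>/perf_stats    - dump the counters
 *   echo > /sys/devices/platform/<qpic_nand>/perf_stats - reset the counters
 *
 * Any write other than an empty string/newline is rejected with -EPERM.
 */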
126
127static void msm_nand_init_perf_stats(struct msm_nand_info *info)
128{
129 spin_lock_init(&info->perf.lock);
130 info->perf.min_read_time = ktime_set(KTIME_MAX, 0);
131 info->perf.min_write_time = ktime_set(KTIME_MAX, 0);
132 info->perf.min_erase_time = ktime_set(KTIME_MAX, 0);
133}
134
135static void msm_nand_init_sysfs(struct device *dev)
136{
137 sysfs_attr_init(&dev_msm_nand_perf_stats);
138 if (device_create_file(dev, &dev_msm_nand_perf_stats))
139		pr_err("Sysfs entry creation failed\n");
140}
141
142static void msm_nand_cleanup_sysfs(struct device *dev)
143{
144 device_remove_file(dev, &dev_msm_nand_perf_stats);
145}
146
147static void msm_nand_update_read_perf_stats(struct msm_nand_info *info,
148 ktime_t start, u32 size)
149{
150 ktime_t time_delta;
151
152 time_delta = ktime_sub(ktime_get(), start);
153
154 spin_lock(&info->perf.lock);
155 info->perf.total_read_size += size;
156 info->perf.total_read_time = ktime_add(info->perf.total_read_time,
157 time_delta);
158 if (ktime_after(time_delta, info->perf.max_read_time))
159 info->perf.max_read_time = time_delta;
160
161 if (ktime_before(time_delta, info->perf.min_read_time))
162 info->perf.min_read_time = time_delta;
163
164 spin_unlock(&info->perf.lock);
165}
166
167static void msm_nand_update_write_perf_stats(struct msm_nand_info *info,
168 ktime_t start, u32 size)
169{
170 ktime_t time_delta;
171
172 time_delta = ktime_sub(ktime_get(), start);
173
174 spin_lock(&info->perf.lock);
175 info->perf.total_write_size += size;
176 info->perf.total_write_time = ktime_add(info->perf.total_write_time,
177 time_delta);
178 if (ktime_after(time_delta, info->perf.max_write_time))
179 info->perf.max_write_time = time_delta;
180
181 if (ktime_before(time_delta, info->perf.min_write_time))
182 info->perf.min_write_time = time_delta;
183
184 spin_unlock(&info->perf.lock);
185}
186
187static void msm_nand_update_erase_perf_stats(struct msm_nand_info *info,
188 ktime_t start, u32 count)
189{
190 ktime_t time_delta;
191
192 time_delta = ktime_sub(ktime_get(), start);
193
194 spin_lock(&info->perf.lock);
195 info->perf.total_erase_blks += count;
196 info->perf.total_erase_time = ktime_add(info->perf.total_erase_time,
197 time_delta);
198 if (ktime_after(time_delta, info->perf.max_erase_time))
199 info->perf.max_erase_time = time_delta;
200
201 if (ktime_before(time_delta, info->perf.min_erase_time))
202 info->perf.min_erase_time = time_delta;
203
204 spin_unlock(&info->perf.lock);
205}
206
207/*
208 * Get DMA memory of the requested size. It returns a pointer to the free
209 * memory available from the allocated pool. Returns NULL if there
210 * is no free memory.
211 */
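/*
 * Allocation is lock-free: the pool is carved into slots of
 * MSM_NAND_DMA_BUFFER_SLOT_SZ bytes tracked by the dma_buffer_busy bitmap.
 * A request spanning N slots builds need_mask with the N low bits set,
 * shifts it up to the first free slot and claims the range with
 * atomic_cmpxchg(). For example, with a busy bitmap of 0b0011 and a
 * two-slot request, free_index is 2 and current_need_mask is 0b1100.
 */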
212static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
213{
214 uint32_t bitmask, free_bitmask, old_bitmask;
215 uint32_t need_mask, current_need_mask;
216 int free_index;
217
218 need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
219 - 1;
220 bitmask = atomic_read(&chip->dma_buffer_busy);
221 free_bitmask = ~bitmask;
222 if (free_bitmask == 0)
223 return NULL;
224
225 do {
226 free_index = __ffs(free_bitmask);
227 current_need_mask = need_mask << free_index;
228
229 if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
230 MSM_NAND_DMA_BUFFER_SIZE)
231 return NULL;
232
233 if ((bitmask & current_need_mask) == 0) {
234 old_bitmask =
235 atomic_cmpxchg(&chip->dma_buffer_busy,
236 bitmask,
237 bitmask | current_need_mask);
238 if (old_bitmask == bitmask)
239 return chip->dma_virt_addr +
240 free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
241 free_bitmask = 0;/* force return */
242 }
243 /* current free range was too small, clear all free bits */
244 /* below the top busy bit within current_need_mask */
245 free_bitmask &=
246 ~(~0U >> (32 - fls(bitmask & current_need_mask)));
247 } while (free_bitmask);
248
249 return NULL;
250}
251
252/*
253 * Releases the DMA memory back to the free pool and also wakes up any user
254 * thread waiting on the wait queue for free memory to become available.
255 */
256static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
257 void *buffer, size_t size)
258{
259 int index;
260 uint32_t used_mask;
261
262 used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
263 - 1;
264 index = ((uint8_t *)buffer - chip->dma_virt_addr) /
265 MSM_NAND_DMA_BUFFER_SLOT_SZ;
266 atomic_sub(used_mask << index, &chip->dma_buffer_busy);
267
268 wake_up(&chip->dma_wait_queue);
269}
270
271/*
272 * Calculates the page address of the buffer passed and the offset of the
273 * buffer within that page, and then maps it for DMA by calling dma_map_page().
274 */
275static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
276 enum dma_data_direction dir)
277{
278 struct page *page;
279 unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
280
281 if (virt_addr_valid(addr))
282 page = virt_to_page(addr);
283 else {
284 if (WARN_ON(size + offset > PAGE_SIZE))
285 return ~0;
286 page = vmalloc_to_page(addr);
287 }
288 return dma_map_page(dev, page, offset, size, dir);
289}
290
291#ifdef CONFIG_QCOM_BUS_SCALING
292static int msm_nand_bus_set_vote(struct msm_nand_info *info,
293 unsigned int vote)
294{
295 int ret = 0;
296
297 ret = msm_bus_scale_client_update_request(info->clk_data.client_handle,
298 vote);
299 if (ret)
300 pr_err("msm_bus_scale_client_update_request() failed, bus_client_handle=0x%x, vote=%d, err=%d\n",
301 info->clk_data.client_handle, vote, ret);
302 return ret;
303}
304
305static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info,
306 bool vote)
307{
308 int ret = 0;
309
310	if (!info->clk_data.rpmh_clk) {
311 if (IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
312 ret = -EINVAL;
313 goto out;
314 }
315	}
316 if (atomic_read(&info->clk_data.clk_enabled) == vote)
317 goto out;
318 if (!atomic_read(&info->clk_data.clk_enabled) && vote) {
319 ret = msm_nand_bus_set_vote(info, 1);
320 if (ret) {
321 pr_err("Failed to vote for bus with %d\n", ret);
322 goto out;
323 }
324		if (!info->clk_data.rpmh_clk) {
325 ret = clk_prepare_enable(info->clk_data.qpic_clk);
326 if (ret) {
327 pr_err("Failed to enable the bus-clock with error %d\n",
328 ret);
329 msm_nand_bus_set_vote(info, 0);
330 goto out;
331 }
332		}
333 } else if (atomic_read(&info->clk_data.clk_enabled) && !vote) {
334		if (!info->clk_data.rpmh_clk)
335 clk_disable_unprepare(info->clk_data.qpic_clk);
336		msm_nand_bus_set_vote(info, 0);
337 }
338 atomic_set(&info->clk_data.clk_enabled, vote);
339out:
340 return ret;
341}
342#else
343static int msm_nand_setup_clocks_and_bus_bw(struct msm_nand_info *info,
344 bool vote)
345{
346 return 0;
347}
348#endif
349
350#ifdef CONFIG_PM
351static int msm_nand_runtime_suspend(struct device *dev)
352{
353 int ret = 0;
354 struct msm_nand_info *info = dev_get_drvdata(dev);
355
356 ret = msm_nand_setup_clocks_and_bus_bw(info, false);
357
358 return ret;
359}
360
361static int msm_nand_runtime_resume(struct device *dev)
362{
363 int ret = 0;
364 struct msm_nand_info *info = dev_get_drvdata(dev);
365
366 ret = msm_nand_setup_clocks_and_bus_bw(info, true);
367
368 return ret;
369}
370
371static void msm_nand_print_rpm_info(struct device *dev)
372{
373 pr_err("RPM: runtime_status=%d, usage_count=%d, is_suspended=%d, disable_depth=%d, runtime_error=%d, request_pending=%d, request=%d\n",
374 dev->power.runtime_status, atomic_read(&dev->power.usage_count),
375 dev->power.is_suspended, dev->power.disable_depth,
376 dev->power.runtime_error, dev->power.request_pending,
377 dev->power.request);
378}
379#else
380static int msm_nand_runtime_suspend(struct device *dev)
381{
382 return 0;
383}
384
385static int msm_nand_runtime_resume(struct device *dev)
386{
387 return 0;
388}
389
390static void msm_nand_print_rpm_info(struct device *dev)
391{
392}
393#endif
394
395#ifdef CONFIG_PM
396static int msm_nand_suspend(struct device *dev)
397{
398 int ret = 0;
399
400 if (!pm_runtime_suspended(dev))
401 ret = msm_nand_runtime_suspend(dev);
402
403 return ret;
404}
405
406static int msm_nand_resume(struct device *dev)
407{
408 int ret = 0;
409
410 if (!pm_runtime_suspended(dev))
411 ret = msm_nand_runtime_resume(dev);
412
413 return ret;
414}
415#else
416static int msm_nand_suspend(struct device *dev)
417{
418 return 0;
419}
420
421static int msm_nand_resume(struct device *dev)
422{
423 return 0;
424}
425#endif
426
427#ifdef CONFIG_PM
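/*
 * pm_runtime_get_sync() returns 0 or 1 on success (1 if the device was
 * already active) and a negative errno on failure. Success is therefore
 * normalized to 0 before returning to the caller.
 */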
428static int msm_nand_get_device(struct device *dev)
429{
430 int ret = 0;
431
432 ret = pm_runtime_get_sync(dev);
433 if (ret < 0) {
434 pr_err("Failed to resume with %d\n", ret);
435 msm_nand_print_rpm_info(dev);
436 } else { /* Reset to success */
437 ret = 0;
438 }
439 return ret;
440}
441
442static int msm_nand_put_device(struct device *dev)
443{
444 int ret = 0;
445
446 pm_runtime_mark_last_busy(dev);
447 ret = pm_runtime_put_autosuspend(dev);
448 if (ret < 0) {
449 pr_err("Failed to suspend with %d\n", ret);
450 msm_nand_print_rpm_info(dev);
451 } else { /* Reset to success */
452 ret = 0;
453 }
454 return ret;
455}
456#else
457static int msm_nand_get_device(struct device *dev)
458{
459 return 0;
460}
461
462static int msm_nand_put_device(struct device *dev)
463{
464 return 0;
465}
466#endif
467
468#ifdef CONFIG_QCOM_BUS_SCALING
469static int msm_nand_bus_register(struct platform_device *pdev,
470 struct msm_nand_info *info)
471{
472 int ret = 0;
473
474 info->clk_data.use_cases = msm_bus_cl_get_pdata(pdev);
475 if (!info->clk_data.use_cases) {
476 ret = -EINVAL;
477 pr_err("msm_bus_cl_get_pdata failed\n");
478 goto out;
479 }
480 info->clk_data.client_handle =
481 msm_bus_scale_register_client(info->clk_data.use_cases);
482 if (!info->clk_data.client_handle) {
483 ret = -EINVAL;
484 pr_err("msm_bus_scale_register_client failed\n");
485 }
486out:
487 return ret;
488}
489
490static void msm_nand_bus_unregister(struct msm_nand_info *info)
491{
492 if (info->clk_data.client_handle)
493 msm_bus_scale_unregister_client(info->clk_data.client_handle);
494}
495#else
496static int msm_nand_bus_register(struct platform_device *pdev,
497 struct msm_nand_info *info)
498{
499	pr_info("couldn't register due to missing config option\n");
500	return 0;
501}
502
503static void msm_nand_bus_unregister(struct msm_nand_info *info)
504{
505}
506#endif
507
508/*
509 * Wrapper function to prepare a single SPS command element with the data
510 * that is passed to this function.
511 */
512static inline void msm_nand_prep_ce(struct sps_command_element *ce,
513 uint32_t addr, uint32_t command, uint32_t data)
514{
515 ce->addr = addr;
516 ce->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND :
517 (uint32_t) SPS_READ_COMMAND;
518 ce->data = data;
519 ce->mask = 0xFFFFFFFF;
520}
521
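/*
 * Poll the BAM pipe until 'cnt' completed descriptors have been consumed.
 * sps_get_iovec() fills in a zeroed iovec when no descriptor has completed
 * yet, so the inner loop spins until a valid entry (or an error) is seen.
 */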
522static int msm_nand_sps_get_iovec(struct sps_pipe *pipe, uint32_t indx,
523 unsigned int cnt, struct sps_iovec *iovec)
524{
525 int ret = 0;
526
527 do {
528 do {
529 ret = sps_get_iovec((pipe), (iovec));
530 } while (((iovec)->addr == 0x0) && ((iovec)->size == 0x0));
531 if (ret)
532 return ret;
533 } while (--(cnt));
534 return ret;
535}
536
537/*
538 * Wrapper function to prepare a single command descriptor with a single
539 * SPS command element with the data that is passed to this function.
540 *
541 * Since for any command element it is a must to have this flag
542 * SPS_IOVEC_FLAG_CMD, this function by default updates this flag for a
543 * command element that is passed and thus, the caller need not explicitly
544 * pass this flag. The other flags must be passed based on the need. If a
545 * command element doesn't have any other flag, then 0 can be passed to flags.
546 */
547static inline void msm_nand_prep_single_desc(struct msm_nand_sps_cmd *sps_cmd,
548 uint32_t addr, uint32_t command,
549 uint32_t data, uint32_t flags)
550{
551 msm_nand_prep_ce(&sps_cmd->ce, addr, command, data);
552 sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags;
553}
554/*
555 * Read a single NANDc register as mentioned by its parameter addr. The return
556 * value indicates whether read is successful or not. The register value read
557 * is stored in val.
558 */
559static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
560 uint32_t *val)
561{
562 int ret = 0, submitted_num_desc = 1;
563 struct msm_nand_sps_cmd *cmd;
564 struct msm_nand_chip *chip = &info->nand_chip;
565 struct {
566 struct msm_nand_sps_cmd cmd;
567 uint32_t data;
568 } *dma_buffer;
569 struct sps_iovec iovec_temp;
570
571 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
572 chip, sizeof(*dma_buffer))));
573 cmd = &dma_buffer->cmd;
574 msm_nand_prep_single_desc(cmd, addr, READ, msm_virt_to_dma(chip,
575 &dma_buffer->data), SPS_IOVEC_FLAG_INT);
576
577 mutex_lock(&info->lock);
578 ret = msm_nand_get_device(chip->dev);
579 if (ret)
580 goto out;
581 ret = sps_transfer_one(info->sps.cmd_pipe.handle,
582 msm_virt_to_dma(chip, &cmd->ce),
583 sizeof(struct sps_command_element), NULL, cmd->flags);
584 if (ret) {
585 pr_err("failed to submit command %x ret %d\n", addr, ret);
586 msm_nand_put_device(chip->dev);
587 goto out;
588 }
589 ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
590 info->sps.cmd_pipe.index, submitted_num_desc,
591 &iovec_temp);
592 if (ret) {
593 pr_err("Failed to get iovec for pipe %d: (ret%d)\n",
594 (info->sps.cmd_pipe.index), ret);
595 goto out;
596 }
597 ret = msm_nand_put_device(chip->dev);
598 if (ret)
599 goto out;
600 *val = dma_buffer->data;
601out:
602 mutex_unlock(&info->lock);
603 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
604 return ret;
605}
606
607/*
608 * Read the Flash ID from the Nand Flash Device. The return value < 0
609 * indicates failure. When successful, the Flash ID is stored in parameter
610 * read_id.
611 */
612#define READID_CMDS 5
613static int msm_nand_flash_read_id(struct msm_nand_info *info,
614 bool read_onfi_signature, uint32_t *read_id,
615 uint32_t *read_id2)
616{
617 int err = 0, i = 0;
618 struct msm_nand_sps_cmd *cmd;
619 struct sps_iovec *iovec;
620 struct sps_iovec iovec_temp;
621 struct msm_nand_chip *chip = &info->nand_chip;
622 /*
623 * The following 5 commands are required to read id -
624 * write commands - addr0, flash, exec
625 * read_commands - read_id, read_id2
626 */
627 struct {
628 struct sps_transfer xfer;
629 struct sps_iovec cmd_iovec[READID_CMDS];
630 struct msm_nand_sps_cmd cmd[READID_CMDS];
631 uint32_t data[READID_CMDS];
632 } *dma_buffer;
633
634 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
635 (chip, sizeof(*dma_buffer))));
636 if (read_onfi_signature)
637 dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
638 else
639 dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;
640
641 dma_buffer->data[1] = EXTENDED_FETCH_ID | MSM_NAND_CMD_FETCH_ID;
642 dma_buffer->data[2] = 1;
643 dma_buffer->data[3] = 0xeeeeeeee;
644 dma_buffer->data[4] = 0xeeeeeeee;
645
646 cmd = dma_buffer->cmd;
647 msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR0(info), WRITE,
648 dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
649 cmd++;
650
651 msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
652 dma_buffer->data[1], 0);
653 cmd++;
654
655 msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
656 dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
657 cmd++;
658
659 msm_nand_prep_single_desc(cmd, MSM_NAND_READ_ID(info), READ,
660 msm_virt_to_dma(chip, &dma_buffer->data[3]), 0);
661 cmd++;
662
663 msm_nand_prep_single_desc(cmd, MSM_NAND_READ_ID2(info), READ,
664 msm_virt_to_dma(chip, &dma_buffer->data[4]),
665 SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
666 cmd++;
667
668 WARN_ON(cmd - dma_buffer->cmd > READID_CMDS);
669 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
670 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
671 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
672 &dma_buffer->cmd_iovec);
673 iovec = dma_buffer->xfer.iovec;
674
675 for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
676 iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
677 iovec->size = sizeof(struct sps_command_element);
678 iovec->flags = dma_buffer->cmd[i].flags;
679 iovec++;
680 }
681
682 mutex_lock(&info->lock);
683 err = msm_nand_get_device(chip->dev);
684 if (err)
685 goto out;
686 err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
687 if (err) {
688 pr_err("Failed to submit commands %d\n", err);
689 msm_nand_put_device(chip->dev);
690 goto out;
691 }
692 err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
693 info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
694 &iovec_temp);
695
696 if (err) {
697 pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
698 (info->sps.cmd_pipe.index), err);
699 goto out;
700 }
701 pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
702 if (!read_onfi_signature)
703 pr_debug("nandid: %x maker %02x device %02x\n",
704 dma_buffer->data[3], dma_buffer->data[3] & 0xff,
705 (dma_buffer->data[3] >> 8) & 0xff);
706 *read_id = dma_buffer->data[3];
707 if (read_id2) {
708 pr_debug("Extended Read ID register value 0x%x\n",
709 dma_buffer->data[4]);
710 *read_id2 = dma_buffer->data[4];
711 }
712 err = msm_nand_put_device(chip->dev);
713out:
714 mutex_unlock(&info->lock);
715 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
716 return err;
717}
718
719/*
720 * Contains data for common configuration registers that must be programmed
721 * for every NANDc operation.
722 */
723struct msm_nand_common_cfgs {
724 uint32_t cmd;
725 uint32_t addr0;
726 uint32_t addr1;
727 uint32_t cfg0;
728 uint32_t cfg1;
729};
730
731/*
732 * Function to prepare SPS command elements to write into NANDc configuration
733 * registers as per the data defined in struct msm_nand_common_cfgs. This is
734 * required for the following NANDc operations - Erase, Bad Block checking
735 * and for reading ONFI parameter page.
736 */
737static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
738 struct msm_nand_common_cfgs data,
739 struct msm_nand_sps_cmd **curr_cmd)
740{
741 struct msm_nand_sps_cmd *cmd;
742
743 cmd = *curr_cmd;
744 msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
745 data.cmd, SPS_IOVEC_FLAG_LOCK);
746 cmd++;
747
748 msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR0(info), WRITE,
749 data.addr0, 0);
750 cmd++;
751
752 msm_nand_prep_single_desc(cmd, MSM_NAND_ADDR1(info), WRITE,
753 data.addr1, 0);
754 cmd++;
755
756 msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
757 data.cfg0, 0);
758 cmd++;
759
760 msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
761 data.cfg1, 0);
762 cmd++;
763 *curr_cmd = cmd;
764}
765
766/*
767 * Function to check the CRC integrity check on ONFI parameter page read.
768 * For ONFI parameter page read, the controller ECC will be disabled. Hence,
769 * it is mandatory to manually compute CRC and check it against the value
770 * stored within ONFI page.
771 */
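/*
 * The kernel's crc16() implements the reflected (LSB-first) variant of the
 * CRC, whereas ONFI specifies an MSB-first CRC-16 seeded with 0x4F4E.
 * Bit-reversing the seed, the data and the result maps one onto the other;
 * the buffer is reversed back afterwards so the caller's copy is unchanged.
 */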
772static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
773{
774 int i;
775 uint16_t result;
776
777 for (i = 0; i < count; i++)
778 buffer[i] = bitrev8(buffer[i]);
779
780 result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
781
782 for (i = 0; i < count; i++)
783 buffer[i] = bitrev8(buffer[i]);
784
785 return result;
786}
787
788/*
789 * Structure that contains NANDc register data for commands required
790 * for reading ONFI parameter page.
791 */
792struct msm_nand_flash_onfi_data {
793 struct msm_nand_common_cfgs cfg;
794 uint32_t exec;
795 uint32_t ecc_bch_cfg;
796};
797
798struct version {
799 uint16_t nand_major;
800 uint16_t nand_minor;
801 uint16_t qpic_major;
802 uint16_t qpic_minor;
803};
804
805static int msm_nand_version_check(struct msm_nand_info *info,
806 struct version *nandc_version)
807{
808 uint32_t qpic_ver = 0, nand_ver = 0;
809 int err = 0;
810
811 /* Lookup the version to identify supported features */
812 err = msm_nand_flash_rd_reg(info, MSM_NAND_VERSION(info),
813 &nand_ver);
814 if (err) {
815 pr_err("Failed to read NAND_VERSION, err=%d\n", err);
816 goto out;
817 }
818 nandc_version->nand_major = (nand_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
819 MSM_NAND_VERSION_MAJOR_SHIFT;
820 nandc_version->nand_minor = (nand_ver & MSM_NAND_VERSION_MINOR_MASK) >>
821 MSM_NAND_VERSION_MINOR_SHIFT;
822
823 err = msm_nand_flash_rd_reg(info, MSM_NAND_QPIC_VERSION(info),
824 &qpic_ver);
825 if (err) {
826 pr_err("Failed to read QPIC_VERSION, err=%d\n", err);
827 goto out;
828 }
829 nandc_version->qpic_major = (qpic_ver & MSM_NAND_VERSION_MAJOR_MASK) >>
830 MSM_NAND_VERSION_MAJOR_SHIFT;
831 nandc_version->qpic_minor = (qpic_ver & MSM_NAND_VERSION_MINOR_MASK) >>
832 MSM_NAND_VERSION_MINOR_SHIFT;
833 pr_info("nand_major:%d, nand_minor:%d, qpic_major:%d, qpic_minor:%d\n",
834 nandc_version->nand_major, nandc_version->nand_minor,
835 nandc_version->qpic_major, nandc_version->qpic_minor);
836out:
837 return err;
838}
839
840/*
841 * Function to identify whether the attached NAND flash device is
842 * compliant with the ONFI spec or not. If yes, then it reads the ONFI parameter
843 * page to get the device parameters.
844 */
845#define ONFI_CMDS 9
846static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
847{
848 struct msm_nand_chip *chip = &info->nand_chip;
849 struct flash_identification *flash = &info->flash_dev;
850 uint32_t crc_chk_count = 0, page_address = 0;
851 int ret = 0, i = 0, submitted_num_desc = 1;
852
853 /* SPS parameters */
854 struct msm_nand_sps_cmd *cmd, *curr_cmd;
855 struct sps_iovec *iovec;
856 struct sps_iovec iovec_temp;
857 uint32_t rdata;
858
859 /* ONFI Identifier/Parameter Page parameters */
860 uint8_t *onfi_param_info_buf = NULL;
861 dma_addr_t dma_addr_param_info = 0;
862 struct onfi_param_page *onfi_param_page_ptr;
863 struct msm_nand_flash_onfi_data data;
864 uint32_t onfi_signature = 0;
865
866 /*
867 * The following 9 commands are required to get onfi parameters -
868 * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg,
869 * read_loc_0, exec, flash_status (read cmd).
870 */
871 struct {
872 struct sps_transfer xfer;
873 struct sps_iovec cmd_iovec[ONFI_CMDS];
874 struct msm_nand_sps_cmd cmd[ONFI_CMDS];
875 uint32_t flash_status;
876 } *dma_buffer;
877
878
879 /* Lookup the version to identify supported features */
880 struct version nandc_version = {0};
881
882 ret = msm_nand_version_check(info, &nandc_version);
883 if (!ret && !(nandc_version.nand_major == 1 &&
884 nandc_version.nand_minor >= 5 &&
885 nandc_version.qpic_major == 1 &&
886 nandc_version.qpic_minor >= 5)) {
887 ret = -EPERM;
888 goto out;
889 }
890 wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
891 msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
892 dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
893
894 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
895 (chip, sizeof(*dma_buffer))));
896
897 ret = msm_nand_flash_read_id(info, 1, &onfi_signature, NULL);
898 if (ret < 0) {
899 pr_err("Failed to read ONFI signature\n");
900 goto free_dma;
901 }
902 if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
903 pr_info("Found a non ONFI device\n");
904 ret = -EIO;
905 goto free_dma;
906 }
907
908 memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));
909
910	/* Look up the partition to which apps have access */
911 for (i = 0; i < FLASH_PTABLE_MAX_PARTS_V4; i++) {
912 if (mtd_part[i].name && !strcmp("boot", mtd_part[i].name)) {
913 page_address = mtd_part[i].offset << 6;
914 break;
915 }
916 }
917 if (!page_address) {
918 pr_info("%s: no apps partition found in smem\n", __func__);
919 ret = -EPERM;
920 goto free_dma;
921 }
922 data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ONFI;
923 data.exec = 1;
924 data.cfg.addr0 = (page_address << 16) |
925 FLASH_READ_ONFI_PARAMETERS_ADDRESS;
926 data.cfg.addr1 = (page_address >> 16) & 0xFF;
927 data.cfg.cfg0 = MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
928 data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
929 data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
930 dma_buffer->flash_status = 0xeeeeeeee;
931
932 curr_cmd = cmd = dma_buffer->cmd;
933 msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
934
935 cmd = curr_cmd;
936 msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
937 data.ecc_bch_cfg, 0);
938 cmd++;
939
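	/*
	 * READ_LOCATION_0 encoding, as used below: the low bits hold the byte
	 * offset into the codeword buffer, bits 16 and up the number of bytes
	 * to transfer, and bit 31 marks this as the last read location.
	 */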
940 rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
941 msm_nand_prep_single_desc(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
942 rdata, 0);
943 cmd++;
944
945 msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
946 data.exec, SPS_IOVEC_FLAG_NWD);
947 cmd++;
948
949 msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
950 msm_virt_to_dma(chip, &dma_buffer->flash_status),
951 SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
952 cmd++;
953
954 WARN_ON(cmd - dma_buffer->cmd > ONFI_CMDS);
955 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
956 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
957 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
958 &dma_buffer->cmd_iovec);
959 iovec = dma_buffer->xfer.iovec;
960
961 for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
962 iovec->addr = msm_virt_to_dma(chip,
963 &dma_buffer->cmd[i].ce);
964 iovec->size = sizeof(struct sps_command_element);
965 iovec->flags = dma_buffer->cmd[i].flags;
966 iovec++;
967 }
968 mutex_lock(&info->lock);
969 ret = msm_nand_get_device(chip->dev);
970 if (ret)
971 goto unlock_mutex;
972 /* Submit data descriptor */
973 ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
974 ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
975 if (ret) {
976 pr_err("Failed to submit data descriptors %d\n", ret);
977 goto put_dev;
978 }
979 /* Submit command descriptors */
980 ret = sps_transfer(info->sps.cmd_pipe.handle,
981 &dma_buffer->xfer);
982 if (ret) {
983 pr_err("Failed to submit commands %d\n", ret);
984 goto put_dev;
985 }
986
987 ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
988 info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
989 &iovec_temp);
990
991 if (ret) {
992 pr_err("Failed to get iovec for pipe %d: (ret:%d)\n",
993 (info->sps.cmd_pipe.index), ret);
994 goto put_dev;
995 }
996 ret = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
997 info->sps.data_prod.index, submitted_num_desc,
998 &iovec_temp);
999 if (ret) {
1000 pr_err("Failed to get iovec for pipe %d: (ret:%d)\n",
1001 (info->sps.data_prod.index), ret);
1002 goto put_dev;
1003 }
1004
1005 ret = msm_nand_put_device(chip->dev);
1006 mutex_unlock(&info->lock);
1007 if (ret)
1008 goto free_dma;
1009
1010 /* Check for flash status errors */
1011 if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
1012 pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
1013 ret = -EIO;
1014 goto free_dma;
1015 }
1016
1017 for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
1018 / ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
1019 onfi_param_page_ptr =
1020 (struct onfi_param_page *)
1021 (&(onfi_param_info_buf
1022 [ONFI_PARAM_PAGE_LENGTH *
1023 crc_chk_count]));
1024 if (msm_nand_flash_onfi_crc_check(
1025 (uint8_t *)onfi_param_page_ptr,
1026 ONFI_PARAM_PAGE_LENGTH - 2) ==
1027 onfi_param_page_ptr->integrity_crc) {
1028 break;
1029 }
1030 }
1031 if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
1032 / ONFI_PARAM_PAGE_LENGTH) {
1033 pr_err("CRC Check failed on param page\n");
1034 ret = -EIO;
1035 goto free_dma;
1036 }
1037 ret = msm_nand_flash_read_id(info, 0, &flash->flash_id, NULL);
1038 if (ret < 0) {
1039 pr_err("Failed to read flash ID\n");
1040 goto free_dma;
1041 }
1042 flash->widebus = onfi_param_page_ptr->features_supported & 0x01;
1043 flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
1044 flash->blksize = onfi_param_page_ptr->number_of_pages_per_block *
1045 flash->pagesize;
1046 flash->oobsize = onfi_param_page_ptr->number_of_spare_bytes_per_page;
1047 flash->density = onfi_param_page_ptr->number_of_blocks_per_logical_unit
1048 * flash->blksize;
1049 flash->ecc_correctability = onfi_param_page_ptr->
1050 number_of_bits_ecc_correctability;
1051
1052 pr_info("Found an ONFI compliant device %s\n",
1053 onfi_param_page_ptr->device_model);
1054 /*
1055 * Temporary hack for MT29F4G08ABC device.
1056	 * Since the device does not properly adhere
1057	 * to the ONFI specification, it reports itself
1058	 * as a 16-bit device though it is an 8-bit device.
1059 */
1060 if (!strcmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC"))
1061 flash->widebus = 0;
1062 goto free_dma;
1063put_dev:
1064 msm_nand_put_device(chip->dev);
1065unlock_mutex:
1066 mutex_unlock(&info->lock);
1067free_dma:
1068 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1069 msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
1070 ONFI_PARAM_INFO_LENGTH);
1071out:
1072 return ret;
1073}
1074
1075/*
1076 * Structure that contains read/write parameters required for reading/writing
1077 * from/to a page.
1078 */
1079struct msm_nand_rw_params {
1080 uint32_t page;
1081 uint32_t page_count;
1082 uint32_t sectordatasize;
1083 uint32_t sectoroobsize;
1084 uint32_t cwperpage;
1085 uint32_t oob_len_cmd;
1086 uint32_t oob_len_data;
1087 uint32_t start_sector;
1088 uint32_t oob_col;
1089 dma_addr_t data_dma_addr;
1090 dma_addr_t oob_dma_addr;
1091 dma_addr_t ecc_dma_addr;
1092 dma_addr_t data_dma_addr_curr;
1093 dma_addr_t oob_dma_addr_curr;
1094 dma_addr_t ecc_dma_addr_curr;
1095 bool read;
1096};
1097
1098/*
1099 * Structure that contains NANDc register data required for reading/writing
1100 * from/to a page.
1101 */
1102struct msm_nand_rw_reg_data {
1103 uint32_t cmd;
1104 uint32_t addr0;
1105 uint32_t addr1;
1106 uint32_t cfg0;
1107 uint32_t cfg1;
1108 uint32_t ecc_bch_cfg;
1109 uint32_t exec;
1110 uint32_t ecc_cfg;
1111 uint32_t clrfstatus;
1112 uint32_t clrrstatus;
1113};
1114
1115/*
1116 * Function that validates page read/write MTD parameters received from upper
1117 * layers such as MTD/YAFFS2 and returns error for any unsupported operations
1118 * by the driver. In case of success, it also maps the data and oob buffer
1119 * received for DMA.
1120 */
1121static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
1122 loff_t offset,
1123 struct mtd_oob_ops *ops,
1124 struct msm_nand_rw_params *args)
1125{
1126 struct msm_nand_info *info = mtd->priv;
1127 struct msm_nand_chip *chip = &info->nand_chip;
1128 int err = 0;
1129
1130 pr_debug("========================================================\n");
1131 pr_debug("offset 0x%llx mode %d\ndatbuf 0x%pK datlen 0x%x\n",
1132 offset, ops->mode, ops->datbuf, ops->len);
1133 pr_debug("oobbuf 0x%pK ooblen 0x%x\n", ops->oobbuf, ops->ooblen);
1134
1135 if (ops->mode == MTD_OPS_PLACE_OOB) {
1136 pr_err("MTD_OPS_PLACE_OOB is not supported\n");
1137 err = -EINVAL;
1138 goto out;
1139 }
1140
1141 if (mtd->writesize == PAGE_SIZE_2K)
1142 args->page = offset >> 11;
1143
1144 if (mtd->writesize == PAGE_SIZE_4K)
1145 args->page = offset >> 12;
1146
1147 args->oob_len_cmd = ops->ooblen;
1148 args->oob_len_data = ops->ooblen;
1149 args->cwperpage = (mtd->writesize >> 9);
1150 args->read = (read ? true : false);
1151
1152 if (offset & (mtd->writesize - 1)) {
1153 pr_err("unsupported offset 0x%llx\n", offset);
1154 err = -EINVAL;
1155 goto out;
1156 }
1157
1158 if (!read && !ops->datbuf) {
1159 pr_err("No data buffer provided for write!!\n");
1160 err = -EINVAL;
1161 goto out;
1162 }
1163
1164 if (ops->mode == MTD_OPS_RAW) {
1165 if (!ops->datbuf) {
1166 pr_err("No data buffer provided for RAW mode\n");
1167 err = -EINVAL;
1168 goto out;
1169 } else if ((ops->len % (mtd->writesize +
1170 mtd->oobsize)) != 0) {
1171 pr_err("unsupported data len %d for RAW mode\n",
1172 ops->len);
1173 err = -EINVAL;
1174 goto out;
1175 }
1176 args->page_count = ops->len / (mtd->writesize + mtd->oobsize);
1177
1178 } else if (ops->mode == MTD_OPS_AUTO_OOB) {
1179 if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
1180 /* when ops->datbuf is NULL, ops->len can be ooblen */
1181 pr_err("unsupported data len %d for AUTO mode\n",
1182 ops->len);
1183 err = -EINVAL;
1184 goto out;
1185 }
1186 if (read && ops->oobbuf && !ops->datbuf) {
1187 args->start_sector = args->cwperpage - 1;
1188 args->page_count = ops->ooblen / mtd->oobavail;
1189 if ((args->page_count == 0) && (ops->ooblen))
1190 args->page_count = 1;
1191 } else if (ops->datbuf) {
1192 args->page_count = ops->len / mtd->writesize;
1193 }
1194 }
1195
1196 if (ops->datbuf) {
1197 if (read)
1198 memset(ops->datbuf, 0xFF, ops->len);
1199 args->data_dma_addr_curr = args->data_dma_addr =
1200 msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
1201 (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
1202 if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
1203 pr_err("dma mapping failed for 0x%pK\n", ops->datbuf);
1204 err = -EIO;
1205 goto out;
1206 }
1207 }
1208 if (ops->oobbuf) {
1209 if (read)
1210 memset(ops->oobbuf, 0xFF, ops->ooblen);
1211 args->oob_dma_addr_curr = args->oob_dma_addr =
1212 msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
1213 (read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
1214 if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
1215 pr_err("dma mapping failed for 0x%pK\n", ops->oobbuf);
1216 err = -EIO;
1217 goto dma_map_oobbuf_failed;
1218 }
1219 }
1220 goto out;
1221dma_map_oobbuf_failed:
1222 if (ops->datbuf)
1223 dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
1224 (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
1225out:
1226 return err;
1227}
1228
1229/*
1230 * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
1231 * required for page read/write.
1232 */
1233static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
1234 struct mtd_oob_ops *ops,
1235 struct msm_nand_rw_params *args,
1236 struct msm_nand_rw_reg_data *data)
1237{
1238 if (args->read) {
1239 if (ops->mode != MTD_OPS_RAW) {
1240 data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
1241 data->cfg0 =
1242 (chip->cfg0 & ~(7U << CW_PER_PAGE)) |
1243 (((args->cwperpage-1) - args->start_sector)
1244 << CW_PER_PAGE);
1245 data->cfg1 = chip->cfg1;
1246 data->ecc_bch_cfg = chip->ecc_bch_cfg;
1247 } else {
1248 data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
1249 data->cfg0 =
1250 (chip->cfg0_raw & ~(7U << CW_PER_PAGE)) |
1251 (((args->cwperpage-1) - args->start_sector)
1252 << CW_PER_PAGE);
1253 data->cfg1 = chip->cfg1_raw;
1254 data->ecc_bch_cfg = chip->ecc_cfg_raw;
1255 }
1256
1257 } else {
1258 if (ops->mode != MTD_OPS_RAW) {
1259 data->cmd = MSM_NAND_CMD_PRG_PAGE;
1260 data->cfg0 = chip->cfg0;
1261 data->cfg1 = chip->cfg1;
1262 data->ecc_bch_cfg = chip->ecc_bch_cfg;
1263 } else {
1264 data->cmd = MSM_NAND_CMD_PRG_PAGE_ALL;
1265 data->cfg0 = chip->cfg0_raw;
1266 data->cfg1 = chip->cfg1_raw;
1267 data->ecc_bch_cfg = chip->ecc_cfg_raw;
1268 }
1269 data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
1270 data->clrrstatus = MSM_NAND_RESET_READ_STS;
1271 }
1272 data->exec = 1;
1273 data->ecc_cfg = chip->ecc_buf_cfg;
1274}
1275
1276/*
1277 * Function to prepare series of SPS command descriptors required for a page
1278 * read/write operation.
1279 */
1280static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
1281 struct msm_nand_rw_params *args,
1282 struct msm_nand_rw_reg_data *data,
1283 struct msm_nand_info *info,
1284 uint32_t curr_cw,
1285 struct msm_nand_rw_cmd_desc *cmd_list,
1286 uint32_t *cw_desc_cnt,
1287 uint32_t ecc_parity_bytes)
1288{
1289 struct msm_nand_chip *chip = &info->nand_chip;
1290 uint32_t rdata;
1291 /* read_location register parameters */
1292 uint32_t offset, size, last_read;
1293 struct sps_command_element *curr_ce, *start_ce;
1294 uint32_t *flags_ptr, *num_ce_ptr;
1295
1296 if (curr_cw == args->start_sector) {
1297 curr_ce = start_ce = &cmd_list->setup_desc.ce[0];
1298 num_ce_ptr = &cmd_list->setup_desc.num_ce;
1299 flags_ptr = &cmd_list->setup_desc.flags;
1300 *flags_ptr = CMD_LCK;
1301 cmd_list->count = 1;
1302 msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_CMD(info), WRITE,
1303 data->cmd);
1304 curr_ce++;
1305
1306 msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR0(info), WRITE,
1307 data->addr0);
1308 curr_ce++;
1309
1310 msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR1(info), WRITE,
1311 data->addr1);
1312 curr_ce++;
1313
1314 msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG0(info), WRITE,
1315 data->cfg0);
1316 curr_ce++;
1317
1318 msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG1(info), WRITE,
1319 data->cfg1);
1320 curr_ce++;
1321
1322 msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
1323 data->ecc_bch_cfg);
1324 curr_ce++;
1325
1326 msm_nand_prep_ce(curr_ce, MSM_NAND_EBI2_ECC_BUF_CFG(info),
1327 WRITE, data->ecc_cfg);
1328 curr_ce++;
1329
1330 if (!args->read) {
1331 msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
1332 WRITE, data->clrfstatus);
1333 curr_ce++;
1334 goto sub_exec_cmd;
1335 } else {
1336 msm_nand_prep_ce(curr_ce,
1337 MSM_NAND_ERASED_CW_DETECT_CFG(info),
1338 WRITE, CLR_ERASED_PAGE_DET);
1339 curr_ce++;
1340 msm_nand_prep_ce(curr_ce,
1341 MSM_NAND_ERASED_CW_DETECT_CFG(info),
1342 WRITE, SET_ERASED_PAGE_DET);
1343 curr_ce++;
1344 }
1345 } else {
1346 curr_ce = start_ce = &cmd_list->cw_desc[*cw_desc_cnt].ce[0];
1347 num_ce_ptr = &cmd_list->cw_desc[*cw_desc_cnt].num_ce;
1348 flags_ptr = &cmd_list->cw_desc[*cw_desc_cnt].flags;
1349 *cw_desc_cnt += 1;
1350 *flags_ptr = CMD;
1351 cmd_list->count++;
1352 }
1353 if (!args->read)
1354 goto sub_exec_cmd;
1355
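	/*
	 * For a raw read issued only to fetch the ECC parity (the erased-page
	 * check), the read location starts at offset BYTES_517 within the raw
	 * codeword and covers just the parity bytes; otherwise the whole raw
	 * codeword (chip->cw_size bytes) is read from offset 0.
	 */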
1356 if (ops->mode == MTD_OPS_RAW) {
1357 if (ecc_parity_bytes) {
1358 rdata = (BYTES_517 << 0) | (ecc_parity_bytes << 16)
1359 | (1 << 31);
1360 msm_nand_prep_ce(curr_ce,
1361 MSM_NAND_READ_LOCATION_0(info),
1362 WRITE, rdata);
1363 curr_ce++;
1364 } else {
1365 rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
1366 msm_nand_prep_ce(curr_ce,
1367 MSM_NAND_READ_LOCATION_0(info),
1368 WRITE, rdata);
1369 curr_ce++;
1370 }
1371 }
1372 if (ops->mode == MTD_OPS_AUTO_OOB) {
1373 if (ops->datbuf) {
1374 offset = 0;
1375 size = (curr_cw < (args->cwperpage - 1)) ? 516 :
1376 (512 - ((args->cwperpage - 1) << 2));
1377 last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
1378 (ops->oobbuf ? 0 : 1);
1379 rdata = (offset << 0) | (size << 16) |
1380 (last_read << 31);
1381
1382 msm_nand_prep_ce(curr_ce,
1383 MSM_NAND_READ_LOCATION_0(info),
1384 WRITE,
1385 rdata);
1386 curr_ce++;
1387 }
1388 if (curr_cw == (args->cwperpage - 1) && ops->oobbuf) {
1389 offset = 512 - ((args->cwperpage - 1) << 2);
1390 size = (args->cwperpage) << 2;
1391 if (size > args->oob_len_cmd)
1392 size = args->oob_len_cmd;
1393 args->oob_len_cmd -= size;
1394 last_read = 1;
1395 rdata = (offset << 0) | (size << 16) |
1396 (last_read << 31);
1397
1398 if (!ops->datbuf)
1399 msm_nand_prep_ce(curr_ce,
1400 MSM_NAND_READ_LOCATION_0(info),
1401 WRITE, rdata);
1402 else
1403 msm_nand_prep_ce(curr_ce,
1404 MSM_NAND_READ_LOCATION_1(info),
1405 WRITE, rdata);
1406 curr_ce++;
1407 }
1408 }
1409sub_exec_cmd:
1410 *flags_ptr |= NWD;
1411 msm_nand_prep_ce(curr_ce, MSM_NAND_EXEC_CMD(info), WRITE, data->exec);
1412 curr_ce++;
1413
1414 *num_ce_ptr = curr_ce - start_ce;
1415}
1416
1417/*
1418 * Function to prepare and submit SPS data descriptors required for a page
1419 * read/write operation.
1420 */
1421static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
1422 struct msm_nand_rw_params *args,
1423 struct msm_nand_info *info,
1424 uint32_t curr_cw,
1425 uint32_t ecc_parity_bytes)
1426{
1427 struct msm_nand_chip *chip = &info->nand_chip;
1428 struct sps_pipe *data_pipe_handle;
1429 uint32_t sectordatasize, sectoroobsize;
1430 uint32_t sps_flags = 0;
1431 int err = 0;
1432
1433 if (args->read)
1434 data_pipe_handle = info->sps.data_prod.handle;
1435 else
1436 data_pipe_handle = info->sps.data_cons.handle;
1437
1438 if (ops->mode == MTD_OPS_RAW) {
1439 if (ecc_parity_bytes && args->read) {
1440 if (curr_cw == (args->cwperpage - 1))
1441 sps_flags |= SPS_IOVEC_FLAG_INT;
1442
1443 /* read only ecc bytes */
1444 err = sps_transfer_one(data_pipe_handle,
1445 args->ecc_dma_addr_curr,
1446 ecc_parity_bytes, NULL,
1447 sps_flags);
1448 if (err)
1449 goto out;
1450 args->ecc_dma_addr_curr += ecc_parity_bytes;
1451 } else {
1452 sectordatasize = chip->cw_size;
1453 if (!args->read)
1454 sps_flags = SPS_IOVEC_FLAG_EOT;
1455 if (curr_cw == (args->cwperpage - 1))
1456 sps_flags |= SPS_IOVEC_FLAG_INT;
1457
1458 err = sps_transfer_one(data_pipe_handle,
1459 args->data_dma_addr_curr,
1460 sectordatasize, NULL,
1461 sps_flags);
1462 if (err)
1463 goto out;
1464 args->data_dma_addr_curr += sectordatasize;
1465 }
1466 } else if (ops->mode == MTD_OPS_AUTO_OOB) {
1467 if (ops->datbuf) {
1468 sectordatasize = (curr_cw < (args->cwperpage - 1))
1469 ? 516 : (512 - ((args->cwperpage - 1) << 2));
1470
1471 if (!args->read) {
1472 sps_flags = SPS_IOVEC_FLAG_EOT;
1473 if (curr_cw == (args->cwperpage - 1) &&
1474 ops->oobbuf)
1475 sps_flags = 0;
1476 }
1477 if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
1478 sps_flags |= SPS_IOVEC_FLAG_INT;
1479
1480 err = sps_transfer_one(data_pipe_handle,
1481 args->data_dma_addr_curr,
1482 sectordatasize, NULL,
1483 sps_flags);
1484 if (err)
1485 goto out;
1486 args->data_dma_addr_curr += sectordatasize;
1487 }
1488
1489 if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
1490 sectoroobsize = args->cwperpage << 2;
1491 if (sectoroobsize > args->oob_len_data)
1492 sectoroobsize = args->oob_len_data;
1493
1494 if (!args->read)
1495 sps_flags |= SPS_IOVEC_FLAG_EOT;
1496 sps_flags |= SPS_IOVEC_FLAG_INT;
1497 err = sps_transfer_one(data_pipe_handle,
1498 args->oob_dma_addr_curr,
1499 sectoroobsize, NULL,
1500 sps_flags);
1501 if (err)
1502 goto out;
1503 args->oob_dma_addr_curr += sectoroobsize;
1504 args->oob_len_data -= sectoroobsize;
1505 }
1506 }
1507out:
1508 return err;
1509}
1510
1511/*
1512 * Read ECC bytes and check whether page is erased or not.
1513 *
1514 * The NAND devices manufactured with newer process node technology are
1515 * susceptible to bit-flips. These bit-flips are easily fixable with the
1516 * ECC engine and ECC information stored on the NAND device. This device
1517 * specific information is found in the data sheet for the NAND device
1518 * and is usually specified as a "number of bit-flips expected per code-
1519 * word". For example, "a single bit-flip per codeword". Also this means
1520 * that the number of ECC errors doesn't increase over a period of time as in
1521 * the past and can't be used to predict a "bad-block about to happen"
1522 * situation anymore.
1523 *
1524 * So what this means to erased pages:
1525 * Since ECC data for an erased page is all 0xFF's, the ECC engine would
1526 * not be able to correct any bit-flips that occur in these newer parts.
1527 * If the NAND controller is unable to identify the erased page due to
1528 * the bit-flips, then there would be "uncorrectable ECC errors" detected
1529 * and would get reported to file system layer (YAFFS2/UBIFS etc) and would
1530 * result in a good block being marked as a bad block and also lead to
1531 * error scenarios.
1532 *
1533 * So to handle this, the following will be done by software until newer
1534 * NAND controller hardware is available that can detect erased pages
1535 * with bit-flips successfully.
1536 *
1537 * 1. msm_nand_read_oob() calls this function when "uncorrectable ECC
1538 * errors" occur.
1539 * 2. This function then performs a raw read of the page.
1540 * 3. This read is done to extract ECC bytes and not data from that page.
1541 * 4. For each codeword’s ECC data, the following is done
1542 * a. Count number of zero bits
1543 * b. If that count is greater than <BIT-FLIPS-EXPECTED>, then it is
1544 * not an erased page.
1545 * c. Else repeat for next codeword’s ECC data
1546 * d. If all codewords have less than <BIT-FLIPS-EXPECTED> bits of
1547 * zeros, then it’s considered an erased page.
1548 *
1549 * Since "uncorrectable ECC errors" do not occur except for either an
1550 * erased page or in the case of an actual error, this solution would
1551 * work.
1552 *
1553 */
1554static int msm_nand_is_erased_page(struct mtd_info *mtd, loff_t from,
1555 struct mtd_oob_ops *ops,
1556 struct msm_nand_rw_params *rw_params,
1557 bool *erased_page)
1558{
1559 struct msm_nand_info *info = mtd->priv;
1560 struct msm_nand_chip *chip = &info->nand_chip;
1561 uint32_t cwperpage = (mtd->writesize >> 9);
1562 int err, submitted_num_desc = 0;
1563 uint32_t n = 0, num_zero_bits = 0, total_ecc_byte_cnt;
1564 struct msm_nand_rw_reg_data data;
1565 struct sps_iovec *iovec;
1566 struct sps_iovec iovec_temp;
1567 struct mtd_oob_ops raw_ops;
1568
1569 /*
1570 * The following 6 commands will be sent only once for the first
1571 * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
1572 * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
1573 * be sent for every CW - flash, read_location_0, read_location_1,
1574 * exec, flash_status and buffer_status.
1575 */
1576 struct msm_nand_rw_cmd_desc *cmd_list = NULL;
1577 uint32_t cw_desc_cnt = 0;
1578 struct {
1579 struct sps_transfer xfer;
1580 struct sps_iovec cmd_iovec[MAX_DESC];
1581 struct {
1582 uint32_t count;
1583 struct msm_nand_cmd_setup_desc setup_desc;
1584 struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC - 1];
1585 } cmd_list;
1586 struct {
1587 uint32_t flash_status;
1588 uint32_t buffer_status;
1589 uint32_t erased_cw_status;
1590 } result[MAX_CW_PER_PAGE];
1591 } *dma_buffer;
1592 uint8_t *ecc;
1593
1594 pr_debug("========================================================\n");
1595 total_ecc_byte_cnt = (chip->ecc_parity_bytes * cwperpage);
1596 memcpy(&raw_ops, ops, sizeof(struct mtd_oob_ops));
1597 raw_ops.mode = MTD_OPS_RAW;
1598 ecc = kzalloc(total_ecc_byte_cnt, GFP_KERNEL);
1599
1600 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
1601 chip, sizeof(*dma_buffer))));
1602
1603 memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
1604 msm_nand_update_rw_reg_data(chip, &raw_ops, rw_params, &data);
1605 cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
1606
1607 /* map the ecc for dma operations */
1608 rw_params->ecc_dma_addr_curr = rw_params->ecc_dma_addr =
1609 dma_map_single(chip->dev, ecc, total_ecc_byte_cnt,
1610 DMA_FROM_DEVICE);
1611
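	/*
	 * ADDR0 carries the low 16 bits of the page address in its upper half
	 * and the column (byte offset) in its lower half; ADDR1 carries the
	 * remaining high page-address bits.
	 */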
1612 data.addr0 = (rw_params->page << 16) | rw_params->oob_col;
1613 data.addr1 = (rw_params->page >> 16) & 0xff;
1614 for (n = rw_params->start_sector; n < cwperpage; n++) {
1615 struct sps_command_element *curr_ce, *start_ce;
1616
1617 dma_buffer->result[n].flash_status = 0xeeeeeeee;
1618 dma_buffer->result[n].buffer_status = 0xeeeeeeee;
1619 dma_buffer->result[n].erased_cw_status = 0xeeeeee00;
1620
1621 msm_nand_prep_rw_cmd_desc(&raw_ops, rw_params, &data, info,
1622 n, cmd_list, &cw_desc_cnt,
1623 chip->ecc_parity_bytes);
1624
1625 start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
1626 curr_ce = start_ce;
1627 cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
1628 if (n == (cwperpage - 1))
1629 cmd_list->cw_desc[cw_desc_cnt].flags |=
1630 INT_UNLCK;
1631 cmd_list->count++;
1632
1633 msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
1634 READ, msm_virt_to_dma(chip,
1635 &dma_buffer->result[n].flash_status));
1636 curr_ce++;
1637
1638 msm_nand_prep_ce(curr_ce, MSM_NAND_BUFFER_STATUS(info),
1639 READ, msm_virt_to_dma(chip,
1640 &dma_buffer->result[n].buffer_status));
1641 curr_ce++;
1642
1643 msm_nand_prep_ce(curr_ce,
1644 MSM_NAND_ERASED_CW_DETECT_STATUS(info),
1645 READ, msm_virt_to_dma(chip,
1646 &dma_buffer->result[n].erased_cw_status));
1647 curr_ce++;
1648 cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce -
1649 start_ce;
1650 }
1651
1652 dma_buffer->xfer.iovec_count = cmd_list->count;
1653 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
1654 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
1655 &dma_buffer->cmd_iovec);
1656 iovec = dma_buffer->xfer.iovec;
1657
1658 iovec->addr = msm_virt_to_dma(chip,
1659 &cmd_list->setup_desc.ce[0]);
1660 iovec->size = sizeof(struct sps_command_element) *
1661 cmd_list->setup_desc.num_ce;
1662 iovec->flags = cmd_list->setup_desc.flags;
1663 iovec++;
1664 for (n = 0; n < (cmd_list->count - 1); n++) {
1665 iovec->addr = msm_virt_to_dma(chip,
1666 &cmd_list->cw_desc[n].ce[0]);
1667 iovec->size = sizeof(struct sps_command_element) *
1668 cmd_list->cw_desc[n].num_ce;
1669 iovec->flags = cmd_list->cw_desc[n].flags;
1670 iovec++;
1671 }
1672 mutex_lock(&info->lock);
1673 err = msm_nand_get_device(chip->dev);
1674 if (err)
1675 goto unlock_mutex;
1676 /* Submit data descriptors */
1677 for (n = rw_params->start_sector; n < cwperpage; n++) {
1678 err = msm_nand_submit_rw_data_desc(&raw_ops,
1679 rw_params, info, n,
1680 chip->ecc_parity_bytes);
1681 if (err) {
1682 pr_err("Failed to submit data descs %d\n", err);
1683 panic("error in nand driver\n");
1684 goto put_dev;
1685 }
1686 }
1687 submitted_num_desc = cwperpage - rw_params->start_sector;
1688
1689 /* Submit command descriptors */
1690 err = sps_transfer(info->sps.cmd_pipe.handle,
1691 &dma_buffer->xfer);
1692 if (err) {
1693 pr_err("Failed to submit commands %d\n", err);
1694 goto put_dev;
1695 }
1696
1697 err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
1698 info->sps.cmd_pipe.index,
1699 dma_buffer->xfer.iovec_count,
1700 &iovec_temp);
1701 if (err) {
1702 pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
1703 (info->sps.cmd_pipe.index), err);
1704 goto put_dev;
1705 }
1706 err = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
1707 info->sps.data_prod.index, submitted_num_desc,
1708 &iovec_temp);
1709 if (err) {
1710 pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
1711 (info->sps.data_prod.index), err);
1712 goto put_dev;
1713 }
1714
1715 err = msm_nand_put_device(chip->dev);
1716 mutex_unlock(&info->lock);
1717 if (err)
1718 goto free_dma;
1719
1720 pr_debug("addr0: 0x%08x, addr1: 0x%08x\n", data.addr0, data.addr1);
1721 for (n = rw_params->start_sector; n < cwperpage; n++)
1722 pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x\n",
1723 n, dma_buffer->result[n].flash_status,
1724 dma_buffer->result[n].buffer_status,
1725 dma_buffer->result[n].erased_cw_status);
1726
1727 goto free_dma;
1728put_dev:
1729 msm_nand_put_device(chip->dev);
1730unlock_mutex:
1731 mutex_unlock(&info->lock);
1732free_dma:
1733 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
1734 /* umap ecc dma memory */
1735 dma_unmap_single(chip->dev, rw_params->ecc_dma_addr,
1736 total_ecc_byte_cnt, DMA_FROM_DEVICE);
1737 /* check for bit flips in ecc data */
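	/*
	 * An erased codeword should read back all 0xFF in its ECC bytes.
	 * Tolerate up to 4 zero bits (i.e. bit-flips) per codeword; any more
	 * than that and the page is treated as programmed rather than erased.
	 */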
1738 for (n = rw_params->start_sector; n < cwperpage; n++) {
1739 uint8_t *ecc_temp = ecc;
1740 int last_pos = 0, next_pos = 0;
1741 int ecc_bytes_percw_in_bits = (chip->ecc_parity_bytes * 8);
1742
1743 do {
1744 last_pos = find_next_zero_bit(ecc_temp,
1745 ecc_bytes_percw_in_bits, next_pos);
1746
1747 if (last_pos < ecc_bytes_percw_in_bits)
1748 num_zero_bits++;
1749
1750 if (num_zero_bits > 4) {
1751 *erased_page = false;
1752 goto free_mem;
1753 }
1754
1755 next_pos = last_pos + 1;
1756 } while (last_pos < ecc_bytes_percw_in_bits);
1757
1758 num_zero_bits = last_pos = next_pos = 0;
1759 ecc_temp += chip->ecc_parity_bytes;
1760 }
1761
1762 if ((n == cwperpage) && (num_zero_bits <= 4))
1763 *erased_page = true;
1764free_mem:
1765 kfree(ecc);
1766 pr_debug("========================================================\n");
1767 return err;
1768}
1769
1770/*
1771 * Function that gets called from upper layers such as MTD/YAFFS2 to read a
1772 * page with main or/and spare data.
1773 */
1774static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
1775 struct mtd_oob_ops *ops)
1776{
1777 struct msm_nand_info *info = mtd->priv;
1778 struct msm_nand_chip *chip = &info->nand_chip;
1779 struct flash_identification *flash_dev = &info->flash_dev;
1780 uint32_t cwperpage = (mtd->writesize >> 9);
1781 int err, pageerr = 0, rawerr = 0, submitted_num_desc = 0;
1782 uint32_t n = 0, pages_read = 0;
1783 uint32_t ecc_errors = 0, total_ecc_errors = 0, ecc_capability;
1784 struct msm_nand_rw_params rw_params;
1785 struct msm_nand_rw_reg_data data;
1786 struct sps_iovec *iovec;
1787 struct sps_iovec iovec_temp;
1788 bool erased_page;
1789 uint64_t fix_data_in_pages = 0;
1790	ktime_t start;
1791
1792 /*
1793 * The following 6 commands will be sent only once for the first
1794 * codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
1795 * dev0_ecc_cfg, ebi2_ecc_buf_cfg. The following 6 commands will
1796 * be sent for every CW - flash, read_location_0, read_location_1,
1797 * exec, flash_status and buffer_status.
1798 */
1799 struct {
1800 struct sps_transfer xfer;
1801 struct sps_iovec cmd_iovec[MAX_DESC];
1802 struct {
1803 uint32_t count;
1804 struct msm_nand_cmd_setup_desc setup_desc;
1805 struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC - 1];
1806 } cmd_list;
1807 struct {
1808 uint32_t flash_status;
1809 uint32_t buffer_status;
1810 uint32_t erased_cw_status;
1811 } result[MAX_CW_PER_PAGE];
1812 } *dma_buffer;
1813 struct msm_nand_rw_cmd_desc *cmd_list = NULL;
1814
Sahitya Tummaladd8caf42017-10-11 10:12:19 +05301815 if (unlikely(enable_perfstats))
1816 start = ktime_get();
1817
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05301818 memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
1819 err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
1820 if (err)
1821 goto validate_mtd_params_failed;
1822
1823 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
1824 chip, sizeof(*dma_buffer))));
1825
1826 rw_params.oob_col = rw_params.start_sector * chip->cw_size;
1827 if (chip->cfg1 & (1 << WIDE_FLASH))
1828 rw_params.oob_col >>= 1;
1829
1830 memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
1831 msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
1832 cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
1833
1834 ecc_capability = flash_dev->ecc_capability;
1835
1836 while (rw_params.page_count-- > 0) {
1837 uint32_t cw_desc_cnt = 0;
1838
1839 erased_page = false;
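		/*
		 * addr0 carries the low 16 bits of the page number plus the
		 * column (byte) offset; addr1 carries the remaining high
		 * page bits.
		 */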
1840 data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
1841 data.addr1 = (rw_params.page >> 16) & 0xff;
1842
1843 for (n = rw_params.start_sector; n < cwperpage; n++) {
1844 struct sps_command_element *curr_ce, *start_ce;
1845
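			/*
			 * Seed the status words with a poison pattern so a
			 * codeword whose status is never updated by the
			 * controller stands out in the debug prints below.
			 */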
1846 dma_buffer->result[n].flash_status = 0xeeeeeeee;
1847 dma_buffer->result[n].buffer_status = 0xeeeeeeee;
1848 dma_buffer->result[n].erased_cw_status = 0xeeeeee00;
1849
1850 msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info,
1851 n, cmd_list, &cw_desc_cnt, 0);
1852
1853 start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
1854 curr_ce = start_ce;
1855 cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
1856 if (n == (cwperpage - 1))
1857 cmd_list->cw_desc[cw_desc_cnt].flags |=
1858 INT_UNLCK;
1859 cmd_list->count++;
1860
1861 msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
1862 READ, msm_virt_to_dma(chip,
1863 &dma_buffer->result[n].flash_status));
1864 curr_ce++;
1865
1866 msm_nand_prep_ce(curr_ce, MSM_NAND_BUFFER_STATUS(info),
1867 READ, msm_virt_to_dma(chip,
1868 &dma_buffer->result[n].buffer_status));
1869 curr_ce++;
1870
1871 msm_nand_prep_ce(curr_ce,
1872 MSM_NAND_ERASED_CW_DETECT_STATUS(info),
1873 READ, msm_virt_to_dma(chip,
1874 &dma_buffer->result[n].erased_cw_status));
1875 curr_ce++;
1876 cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce -
1877 start_ce;
1878 }
1879
1880 dma_buffer->xfer.iovec_count = cmd_list->count;
1881 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
1882 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
1883 &dma_buffer->cmd_iovec);
1884 iovec = dma_buffer->xfer.iovec;
1885
1886 iovec->addr = msm_virt_to_dma(chip,
1887 &cmd_list->setup_desc.ce[0]);
1888 iovec->size = sizeof(struct sps_command_element) *
1889 cmd_list->setup_desc.num_ce;
1890 iovec->flags = cmd_list->setup_desc.flags;
1891 iovec++;
1892 for (n = 0; n < (cmd_list->count - 1); n++) {
1893 iovec->addr = msm_virt_to_dma(chip,
1894 &cmd_list->cw_desc[n].ce[0]);
1895 iovec->size = sizeof(struct sps_command_element) *
1896 cmd_list->cw_desc[n].num_ce;
1897 iovec->flags = cmd_list->cw_desc[n].flags;
1898 iovec++;
1899 }
1900 mutex_lock(&info->lock);
1901 err = msm_nand_get_device(chip->dev);
1902 if (err)
1903 goto unlock_mutex;
1904 /* Submit data descriptors */
1905 for (n = rw_params.start_sector; n < cwperpage; n++) {
1906 err = msm_nand_submit_rw_data_desc(ops,
1907 &rw_params, info, n, 0);
1908 if (err) {
1909 pr_err("Failed to submit data descs %d\n", err);
1910 panic("error in nand driver\n");
1911 goto put_dev;
1912 }
1913 }
1914
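		/*
		 * Number of data descriptors queued above: one per codeword
		 * read, plus one extra for the oob buffer in AUTO_OOB mode.
		 */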
1915 if (ops->mode == MTD_OPS_RAW) {
1916 submitted_num_desc = cwperpage - rw_params.start_sector;
1917 } else if (ops->mode == MTD_OPS_AUTO_OOB) {
1918 if (ops->datbuf)
1919 submitted_num_desc = cwperpage -
1920 rw_params.start_sector;
1921 if (ops->oobbuf)
1922 submitted_num_desc++;
1923 }
1924
1925 /* Submit command descriptors */
1926 err = sps_transfer(info->sps.cmd_pipe.handle,
1927 &dma_buffer->xfer);
1928 if (err) {
1929 pr_err("Failed to submit commands %d\n", err);
1930 goto put_dev;
1931 }
1932
1933 err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
1934 info->sps.cmd_pipe.index,
1935 dma_buffer->xfer.iovec_count,
1936 &iovec_temp);
1937 if (err) {
1938 pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
1939 (info->sps.cmd_pipe.index), err);
1940 goto put_dev;
1941 }
1942 err = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
1943 info->sps.data_prod.index, submitted_num_desc,
1944 &iovec_temp);
1945 if (err) {
1946 pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
1947 (info->sps.data_prod.index), err);
1948 goto put_dev;
1949 }
1950
1951 err = msm_nand_put_device(chip->dev);
1952 mutex_unlock(&info->lock);
1953 if (err)
1954 goto free_dma;
1955 /* Check for flash status errors */
1956 pageerr = rawerr = 0;
1957 for (n = rw_params.start_sector; n < cwperpage; n++) {
1958 if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
1959 FS_MPU_ERR)) {
1960 rawerr = -EIO;
1961 /*
1962 * Check if ECC error was due to an erased
1963 * codeword. If so, ignore the error.
1964 *
1965 * NOTE: There is a bug in erased page
1966 * detection hardware block when reading
1967 * only spare data. In order to work around
1968 * this issue, instead of using PAGE_ALL_ERASED
1969 * bit to check for whether a whole page is
1970 * erased or not, we use CODEWORD_ALL_ERASED
1971 * and CODEWORD_ERASED bits together and check
1972 * each codeword that has FP_OP_ERR bit set is
1973 				 * whether each codeword that has the FS_OP_ERR
1974 				 * bit set is an erased codeword or not.
1975 if ((dma_buffer->result[n].erased_cw_status &
1976 ERASED_CW) == ERASED_CW) {
1977 /*
1978 * At least one code word is detected
1979 * as an erased code word.
1980 */
1981 pr_debug("erased codeword detected - ignore ecc error\n");
1982 continue;
1983 }
1984 pageerr = rawerr;
1985 break;
1986 }
1987 }
1988 /* check for uncorrectable errors */
1989 if (pageerr) {
1990 for (n = rw_params.start_sector; n < cwperpage; n++) {
1991 if (dma_buffer->result[n].buffer_status &
1992 BS_UNCORRECTABLE_BIT) {
1993 /*
1994 * Check if page is actually
1995 * erased or not.
1996 */
1997 err = msm_nand_is_erased_page(mtd,
1998 from, ops,
1999 &rw_params,
2000 &erased_page);
2001 if (err)
2002 goto free_dma;
2003 if (!erased_page) {
2004 mtd->ecc_stats.failed++;
2005 pageerr = -EBADMSG;
2006 break;
2007 }
2008 pageerr = 0;
2009 				pr_debug("Uncorrectable ECC errors detected on an erased page and have been fixed.\n");
2010 break;
2011 }
2012 }
2013 }
2014
2015 if (rawerr && !pageerr && erased_page) {
2016 /*
2017 * This means an erased page had bit flips and now
2018 * those bit-flips need to be cleared in the data
2019 * being sent to upper layers. This will keep track
2020 * of those pages and at the end, the data will be
2021 * fixed before this function returns.
2022 * Note that a whole page worth of data will be fixed
2023 			 * and this will only handle up to 64 pages being read
2024 			 * at a time i.e. one erase block worth of pages.
2025 */
2026 fix_data_in_pages |= BIT(rw_params.page_count);
2027 }
2028 /* check for correctable errors */
2029 if (!rawerr) {
2030 for (n = rw_params.start_sector; n < cwperpage; n++) {
2031 ecc_errors =
2032 dma_buffer->result[n].buffer_status
2033 & BS_CORRECTABLE_ERR_MSK;
2034 if (ecc_errors) {
2035 total_ecc_errors += ecc_errors;
2036 mtd->ecc_stats.corrected += ecc_errors;
2037 /*
2038 					 * The nand device can have ecc
2039 					 * errors even on the very first
2040 					 * write, so reporting EUCLEAN when
2041 					 * there are fewer errors than the
2042 					 * ecc capability of the device is
2043 					 * not useful.
2044 					 *
2045 					 * Also don't report EUCLEAN unless
2046 					 * enable_euclean is set.
2047 					 */
2048 if (enable_euclean &&
2049 ecc_errors >= ecc_capability)
2050 pageerr = -EUCLEAN;
2051 }
2052 }
2053 }
2054 if (pageerr && (pageerr != -EUCLEAN || err == 0))
2055 err = pageerr;
2056
2057 if (rawerr && !pageerr) {
2058 pr_debug("%llx %x %x empty page\n",
2059 (loff_t)rw_params.page * mtd->writesize,
2060 ops->len, ops->ooblen);
2061 } else {
2062 for (n = rw_params.start_sector; n < cwperpage; n++)
2063 pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x, pageerr: %d, rawerr: %d\n",
2064 n, dma_buffer->result[n].flash_status,
2065 dma_buffer->result[n].buffer_status,
2066 dma_buffer->result[n].erased_cw_status,
2067 pageerr, rawerr);
2068 }
2069 if (err && err != -EUCLEAN && err != -EBADMSG)
2070 goto free_dma;
2071 pages_read++;
2072 rw_params.page++;
2073 }
2074 goto free_dma;
2075put_dev:
2076 msm_nand_put_device(chip->dev);
2077unlock_mutex:
2078 mutex_unlock(&info->lock);
2079free_dma:
2080 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
2081 if (ops->oobbuf)
2082 dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
2083 ops->ooblen, DMA_FROM_DEVICE);
2084 if (ops->datbuf)
2085 dma_unmap_page(chip->dev, rw_params.data_dma_addr,
2086 ops->len, DMA_BIDIRECTIONAL);
2087 /*
2088 * If there were any erased pages detected with ECC errors, then
2089 * it is most likely that the data is not all 0xff. So memset that
2090 * page to all 0xff.
2091 */
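	/*
	 * Walk the bitmap from the least significant bit; "count" tracks
	 * which page offset within this request gets memset back to 0xff.
	 */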
2092 while (fix_data_in_pages) {
2093 int temp_page = 0, oobsize = rw_params.cwperpage << 2;
2094 int count = 0, offset = 0;
2095
2096 temp_page = fix_data_in_pages & BIT_MASK(0);
2097 fix_data_in_pages = fix_data_in_pages >> 1;
2098 count++;
2099
2100 if (!temp_page)
2101 continue;
2102
2103 offset = (count - 1) * mtd->writesize;
2104 if (ops->datbuf)
2105 memset((ops->datbuf + offset), 0xff, mtd->writesize);
2106
2107 offset = (count - 1) * oobsize;
2108 if (ops->oobbuf)
2109 memset(ops->oobbuf + offset, 0xff, oobsize);
2110 }
2111validate_mtd_params_failed:
2112 if (ops->mode != MTD_OPS_RAW)
2113 ops->retlen = mtd->writesize * pages_read;
2114 else
2115 ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
2116 ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
2117 if (err)
2118 pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
2119 from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
2120 total_ecc_errors);
2121 pr_debug("ret %d, retlen %d oobretlen %d\n",
2122 err, ops->retlen, ops->oobretlen);
2123
2124 pr_debug("========================================================\n");
Sahitya Tummaladd8caf42017-10-11 10:12:19 +05302125 if (unlikely(enable_perfstats) && likely(!err))
2126 msm_nand_update_read_perf_stats(info, start, ops->retlen);
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05302127 return err;
2128}
2129
2130/**
2131 * msm_nand_read_partial_page() - read partial page
2132 * @mtd: pointer to mtd info
2133 * @from: start address of the page
2134 * @ops: pointer to mtd_oob_ops
2135 *
2136  * Reads a page into a bounce buffer and copies the required
2137  * number of bytes to the actual buffer. Page-aligned reads do
2138  * not use the bounce buffer.
2139 */
2140static int msm_nand_read_partial_page(struct mtd_info *mtd,
2141 loff_t from, struct mtd_oob_ops *ops)
2142{
2143 int err = 0;
2144 unsigned char *actual_buf;
2145 unsigned char *bounce_buf;
2146 loff_t aligned_from;
2147 loff_t offset;
2148 size_t len;
2149 size_t actual_len, ret_len;
2150 int is_euclean = 0;
2151 int is_ebadmsg = 0;
2152
2153 actual_len = ops->len;
2154 ret_len = 0;
2155 actual_buf = ops->datbuf;
2156
2157 bounce_buf = kmalloc(mtd->writesize, GFP_KERNEL);
2158 if (!bounce_buf) {
2159 err = -ENOMEM;
2160 goto out;
2161 }
2162
2163 /* Get start address of page to read from */
2164 ops->len = mtd->writesize;
2165 offset = from & (mtd->writesize - 1);
2166 aligned_from = from - offset;
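	/*
	 * Example with a 2048-byte page: a read starting at 0x1234 gives
	 * offset = 0x234 within the page starting at aligned_from = 0x1000.
	 */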
2167
2168 for (;;) {
2169 bool no_copy = false;
2170
2171 len = mtd->writesize - offset;
2172 if (len > actual_len)
2173 len = actual_len;
2174
2175 if (offset == 0 && len == mtd->writesize)
2176 no_copy = true;
2177
2178 if (!virt_addr_valid(actual_buf) &&
2179 !is_buffer_in_page(actual_buf, ops->len))
2180 no_copy = false;
2181
2182 ops->datbuf = no_copy ? actual_buf : bounce_buf;
2183 err = msm_nand_read_oob(mtd, aligned_from, ops);
2184 if (err == -EUCLEAN) {
2185 is_euclean = 1;
2186 err = 0;
2187 }
2188
2189 if (err == -EBADMSG) {
2190 is_ebadmsg = 1;
2191 err = 0;
2192 }
2193
2194 if (err < 0) {
2195 /* Clear previously set EUCLEAN / EBADMSG */
2196 is_euclean = 0;
2197 is_ebadmsg = 0;
2198 ret_len = ops->retlen;
2199 break;
2200 }
2201
2202 if (!no_copy)
2203 memcpy(actual_buf, bounce_buf + offset, len);
2204
2205 actual_len -= len;
2206 ret_len += len;
2207
2208 if (actual_len == 0)
2209 break;
2210
2211 actual_buf += len;
2212 offset = 0;
2213 aligned_from += mtd->writesize;
2214 }
2215
2216 ops->retlen = ret_len;
2217 kfree(bounce_buf);
2218out:
2219 if (is_euclean == 1)
2220 err = -EUCLEAN;
2221
2222 /* Snub EUCLEAN if we also have EBADMSG */
2223 if (is_ebadmsg == 1)
2224 err = -EBADMSG;
2225 return err;
2226}
2227
2228/*
2229 * Function that gets called from upper layers such as MTD/YAFFS2 to read a
2230 * page with only main data.
2231 */
2232static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
2233 size_t *retlen, u_char *buf)
2234{
2235 int ret;
2236 int is_euclean = 0;
2237 int is_ebadmsg = 0;
2238 struct mtd_oob_ops ops;
2239 unsigned char *bounce_buf = NULL;
2240
2241 ops.mode = MTD_OPS_AUTO_OOB;
2242 ops.retlen = 0;
2243 ops.ooblen = 0;
2244 ops.oobbuf = NULL;
2245 *retlen = 0;
2246
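	/* Offset is page aligned and length is a whole number of pages */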
2247 if (!(from & (mtd->writesize - 1)) && !(len % mtd->writesize)) {
2248 /*
2249 		 * Handle a large read buffer in vmalloc address space
2250 		 * that does not fit within a single MMU page.
2251 */
2252 if (!virt_addr_valid(buf) && !is_buffer_in_page(buf, len)) {
2253 ops.len = mtd->writesize;
2254
2255 bounce_buf = kmalloc(ops.len, GFP_KERNEL);
2256 if (!bounce_buf) {
2257 ret = -ENOMEM;
2258 goto out;
2259 }
2260
2261 for (;;) {
2262 bool no_copy = false;
2263
2264 if (!is_buffer_in_page(buf, ops.len)) {
2265 memcpy(bounce_buf, buf, ops.len);
2266 ops.datbuf = (uint8_t *) bounce_buf;
2267 } else {
2268 ops.datbuf = (uint8_t *) buf;
2269 no_copy = true;
2270 }
2271 ret = msm_nand_read_oob(mtd, from, &ops);
2272 if (ret == -EUCLEAN) {
2273 is_euclean = 1;
2274 ret = 0;
2275 }
2276 if (ret == -EBADMSG) {
2277 is_ebadmsg = 1;
2278 ret = 0;
2279 }
2280 if (ret < 0) {
2281 /* Clear previously set errors */
2282 is_euclean = 0;
2283 is_ebadmsg = 0;
2284 break;
2285 }
2286
2287
2288 if (!no_copy)
2289 memcpy(buf, bounce_buf, ops.retlen);
2290
2291 len -= ops.retlen;
2292 *retlen += ops.retlen;
2293 if (len == 0)
2294 break;
2295 buf += ops.retlen;
2296 from += ops.retlen;
2297
2298 if (len < mtd->writesize) {
2299 ops.len = len;
2300 ops.datbuf = buf;
2301 ret = msm_nand_read_partial_page(
2302 mtd, from, &ops);
2303 *retlen += ops.retlen;
2304 break;
2305 }
2306 }
2307 kfree(bounce_buf);
2308 } else {
2309 ops.len = len;
2310 ops.datbuf = (uint8_t *)buf;
2311 ret = msm_nand_read_oob(mtd, from, &ops);
2312 *retlen = ops.retlen;
2313 }
2314 } else {
2315 ops.len = len;
2316 ops.datbuf = (uint8_t *)buf;
2317 ret = msm_nand_read_partial_page(mtd, from, &ops);
2318 *retlen = ops.retlen;
2319 }
2320out:
2321 if (is_euclean == 1)
2322 ret = -EUCLEAN;
2323
2324 /* Snub EUCLEAN if we also have EBADMSG */
2325 if (is_ebadmsg == 1)
2326 ret = -EBADMSG;
2327
2328 return ret;
2329}
2330
2331/*
2332 * Function that gets called from upper layers such as MTD/YAFFS2 to write a
2333 * page with both main and spare data.
2334 */
2335static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
2336 struct mtd_oob_ops *ops)
2337{
2338 struct msm_nand_info *info = mtd->priv;
2339 struct msm_nand_chip *chip = &info->nand_chip;
2340 uint32_t cwperpage = (mtd->writesize >> 9);
2341 uint32_t n, flash_sts, pages_written = 0;
2342 int err = 0, submitted_num_desc = 0;
2343 struct msm_nand_rw_params rw_params;
2344 struct msm_nand_rw_reg_data data;
2345 struct sps_iovec *iovec;
2346 struct sps_iovec iovec_temp;
Sahitya Tummaladd8caf42017-10-11 10:12:19 +05302347 ktime_t start;
2348
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05302349 /*
2350 * The following 7 commands will be sent only once :
2351 * For first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
2352 * dev0_ecc_cfg, ebi2_ecc_buf_cfg.
2353 * For last codeword (CW) - read_status(write)
2354 *
2355 * The following 4 commands will be sent for every CW :
2356 * flash, exec, flash_status (read), flash_status (write).
2357 */
2358 struct {
2359 struct sps_transfer xfer;
2360 struct sps_iovec cmd_iovec[MAX_DESC + 1];
2361 struct {
2362 uint32_t count;
2363 struct msm_nand_cmd_setup_desc setup_desc;
2364 struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC];
2365 } cmd_list;
2366 struct {
2367 uint32_t flash_status;
2368 } data[MAX_CW_PER_PAGE];
2369 } *dma_buffer;
2370 struct msm_nand_rw_cmd_desc *cmd_list = NULL;
2371
Sahitya Tummaladd8caf42017-10-11 10:12:19 +05302372 if (unlikely(enable_perfstats))
2373 start = ktime_get();
2374
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05302375 memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
2376 err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
2377 if (err)
2378 goto validate_mtd_params_failed;
2379
2380 wait_event(chip->dma_wait_queue, (dma_buffer =
2381 msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
2382
2383 memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
2384 msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
2385 cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
2386
2387 while (rw_params.page_count-- > 0) {
2388 uint32_t cw_desc_cnt = 0;
2389 struct sps_command_element *curr_ce, *start_ce;
2390
2391 data.addr0 = (rw_params.page << 16);
2392 data.addr1 = (rw_params.page >> 16) & 0xff;
2393
2394 for (n = 0; n < cwperpage ; n++) {
2395 dma_buffer->data[n].flash_status = 0xeeeeeeee;
2396
2397 msm_nand_prep_rw_cmd_desc(ops, &rw_params, &data, info,
2398 n, cmd_list, &cw_desc_cnt, 0);
2399
2400 curr_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
2401 cmd_list->cw_desc[cw_desc_cnt].flags = CMD;
2402 cmd_list->count++;
2403
2404 msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
2405 READ, msm_virt_to_dma(chip,
2406 &dma_buffer->data[n].flash_status));
2407 cmd_list->cw_desc[cw_desc_cnt++].num_ce = 1;
2408 }
2409
2410 start_ce = &cmd_list->cw_desc[cw_desc_cnt].ce[0];
2411 curr_ce = start_ce;
2412 cmd_list->cw_desc[cw_desc_cnt].flags = CMD_INT_UNLCK;
2413 cmd_list->count++;
2414 msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_STATUS(info),
2415 WRITE, data.clrfstatus);
2416 curr_ce++;
2417
2418 msm_nand_prep_ce(curr_ce, MSM_NAND_READ_STATUS(info),
2419 WRITE, data.clrrstatus);
2420 curr_ce++;
2421 cmd_list->cw_desc[cw_desc_cnt++].num_ce = curr_ce - start_ce;
2422
2423 dma_buffer->xfer.iovec_count = cmd_list->count;
2424 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
2425 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
2426 &dma_buffer->cmd_iovec);
2427 iovec = dma_buffer->xfer.iovec;
2428
2429 iovec->addr = msm_virt_to_dma(chip,
2430 &cmd_list->setup_desc.ce[0]);
2431 iovec->size = sizeof(struct sps_command_element) *
2432 cmd_list->setup_desc.num_ce;
2433 iovec->flags = cmd_list->setup_desc.flags;
2434 iovec++;
2435 for (n = 0; n < (cmd_list->count - 1); n++) {
2436 iovec->addr = msm_virt_to_dma(chip,
2437 &cmd_list->cw_desc[n].ce[0]);
2438 iovec->size = sizeof(struct sps_command_element) *
2439 cmd_list->cw_desc[n].num_ce;
2440 iovec->flags = cmd_list->cw_desc[n].flags;
2441 iovec++;
2442 }
2443 mutex_lock(&info->lock);
2444 err = msm_nand_get_device(chip->dev);
2445 if (err)
2446 goto unlock_mutex;
2447 /* Submit data descriptors */
2448 for (n = 0; n < cwperpage; n++) {
2449 err = msm_nand_submit_rw_data_desc(ops,
2450 &rw_params, info, n, 0);
2451 if (err) {
2452 pr_err("Failed to submit data descs %d\n", err);
2453 panic("Error in nand driver\n");
2454 goto put_dev;
2455 }
2456 }
2457
2458 if (ops->mode == MTD_OPS_RAW) {
2459 submitted_num_desc = n;
2460 } else if (ops->mode == MTD_OPS_AUTO_OOB) {
2461 if (ops->datbuf)
2462 submitted_num_desc = n;
2463 if (ops->oobbuf)
2464 submitted_num_desc++;
2465 }
2466
2467 /* Submit command descriptors */
2468 err = sps_transfer(info->sps.cmd_pipe.handle,
2469 &dma_buffer->xfer);
2470 if (err) {
2471 pr_err("Failed to submit commands %d\n", err);
2472 goto put_dev;
2473 }
2474
2475 err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
2476 info->sps.cmd_pipe.index,
2477 dma_buffer->xfer.iovec_count,
2478 &iovec_temp);
2479 if (err) {
2480 pr_err("Failed to get iovec for pipe %d (err:%d)\n",
2481 (info->sps.cmd_pipe.index), err);
2482 goto put_dev;
2483 }
2484 err = msm_nand_sps_get_iovec(info->sps.data_cons.handle,
2485 info->sps.data_cons.index, submitted_num_desc,
2486 &iovec_temp);
2487 if (err) {
2488 pr_err("Failed to get iovec for pipe %d (err:%d)\n",
2489 (info->sps.data_cons.index), err);
2490 goto put_dev;
2491 }
2492
2493 err = msm_nand_put_device(chip->dev);
2494 mutex_unlock(&info->lock);
2495 if (err)
2496 goto free_dma;
2497
2498 for (n = 0; n < cwperpage; n++)
2499 pr_debug("write pg %d: flash_status[%d] = %x\n",
2500 rw_params.page, n,
2501 dma_buffer->data[n].flash_status);
2502
2503 /* Check for flash status errors */
2504 for (n = 0; n < cwperpage; n++) {
2505 flash_sts = dma_buffer->data[n].flash_status;
2506 if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
2507 pr_err("MPU/OP err (0x%x) set\n", flash_sts);
2508 err = -EIO;
2509 goto free_dma;
2510 }
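			/*
			 * Write-protect and device status bits are checked
			 * only against the last codeword's status word.
			 */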
2511 if (n == (cwperpage - 1)) {
2512 if (!(flash_sts & FS_DEVICE_WP) ||
2513 (flash_sts & FS_DEVICE_STS_ERR)) {
2514 pr_err("Dev sts err 0x%x\n", flash_sts);
2515 err = -EIO;
2516 goto free_dma;
2517 }
2518 }
2519 }
2520 pages_written++;
2521 rw_params.page++;
2522 }
2523 goto free_dma;
2524put_dev:
2525 msm_nand_put_device(chip->dev);
2526unlock_mutex:
2527 mutex_unlock(&info->lock);
2528free_dma:
2529 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
2530 if (ops->oobbuf)
2531 dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
2532 ops->ooblen, DMA_TO_DEVICE);
2533 if (ops->datbuf)
2534 dma_unmap_page(chip->dev, rw_params.data_dma_addr,
2535 ops->len, DMA_TO_DEVICE);
2536validate_mtd_params_failed:
2537 if (ops->mode != MTD_OPS_RAW)
2538 ops->retlen = mtd->writesize * pages_written;
2539 else
2540 ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
2541
2542 ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
2543 if (err)
2544 pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
2545 to, ops->len, ops->ooblen, err);
2546 pr_debug("ret %d, retlen %d oobretlen %d\n",
2547 err, ops->retlen, ops->oobretlen);
2548
2549 pr_debug("================================================\n");
Sahitya Tummaladd8caf42017-10-11 10:12:19 +05302550 if (unlikely(enable_perfstats) && likely(!err))
2551 msm_nand_update_write_perf_stats(info, start, ops->retlen);
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05302552 return err;
2553}
2554
2555/*
2556 * Function that gets called from upper layers such as MTD/YAFFS2 to write a
2557 * page with only main data.
2558 */
2559static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
2560 size_t *retlen, const u_char *buf)
2561{
2562 int ret;
2563 struct mtd_oob_ops ops;
2564 unsigned char *bounce_buf = NULL;
2565
2566 ops.mode = MTD_OPS_AUTO_OOB;
2567 ops.retlen = 0;
2568 ops.ooblen = 0;
2569 ops.oobbuf = NULL;
2570
2571 /* partial page writes are not supported */
2572 if ((to & (mtd->writesize - 1)) || (len % mtd->writesize)) {
2573 ret = -EINVAL;
2574 *retlen = ops.retlen;
2575 pr_err("%s: partial page writes are not supported\n", __func__);
2576 goto out;
2577 }
2578
2579 /*
2580 	 * Handle a large write buffer in vmalloc address space
2581 	 * that does not fit within a single MMU page.
2582 */
2583 if (!virt_addr_valid(buf) && !is_buffer_in_page(buf, len)) {
2584 ops.len = mtd->writesize;
2585
2586 bounce_buf = kmalloc(ops.len, GFP_KERNEL);
2587 if (!bounce_buf) {
2588 ret = -ENOMEM;
2589 goto out;
2590 }
2591
2592 for (;;) {
2593 if (!is_buffer_in_page(buf, ops.len)) {
2594 memcpy(bounce_buf, buf, ops.len);
2595 ops.datbuf = (uint8_t *) bounce_buf;
2596 } else {
2597 ops.datbuf = (uint8_t *) buf;
2598 }
2599 ret = msm_nand_write_oob(mtd, to, &ops);
2600 if (ret < 0)
2601 break;
2602
2603 len -= mtd->writesize;
2604 *retlen += mtd->writesize;
2605 if (len == 0)
2606 break;
2607
2608 buf += mtd->writesize;
2609 to += mtd->writesize;
2610 }
2611 kfree(bounce_buf);
2612 } else {
2613 ops.len = len;
2614 ops.datbuf = (uint8_t *)buf;
2615 ret = msm_nand_write_oob(mtd, to, &ops);
2616 *retlen = ops.retlen;
2617 }
2618out:
2619 return ret;
2620}
2621
2622/*
2623 * Structure that contains NANDc register data for commands required
2624 * for Erase operation.
2625 */
2626struct msm_nand_erase_reg_data {
2627 struct msm_nand_common_cfgs cfg;
2628 uint32_t exec;
2629 uint32_t flash_status;
2630 uint32_t clrfstatus;
2631 uint32_t clrrstatus;
2632};
2633
2634/*
2635 * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
2636 * block within NAND device.
2637 */
2638#define ERASE_CMDS 9
2639static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
2640{
2641 int i = 0, err = 0;
2642 struct msm_nand_info *info = mtd->priv;
2643 struct msm_nand_chip *chip = &info->nand_chip;
2644 uint32_t page = 0;
2645 struct msm_nand_sps_cmd *cmd, *curr_cmd;
2646 struct msm_nand_erase_reg_data data;
2647 struct sps_iovec *iovec;
2648 struct sps_iovec iovec_temp;
Sahitya Tummaladd8caf42017-10-11 10:12:19 +05302649 ktime_t start;
2650
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05302651 /*
2652 * The following 9 commands are required to erase a page -
2653 * flash, addr0, addr1, cfg0, cfg1, exec, flash_status(read),
2654 * flash_status(write), read_status.
2655 */
2656 struct {
2657 struct sps_transfer xfer;
2658 struct sps_iovec cmd_iovec[ERASE_CMDS];
2659 struct msm_nand_sps_cmd cmd[ERASE_CMDS];
2660 uint32_t flash_status;
2661 } *dma_buffer;
2662
Sahitya Tummaladd8caf42017-10-11 10:12:19 +05302663 if (unlikely(enable_perfstats))
2664 start = ktime_get();
2665
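	/*
	 * Convert the byte address to a page number, e.g. with a 2K page
	 * an erase address of 0x40000 maps to page 0x80.
	 */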
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05302666 if (mtd->writesize == PAGE_SIZE_2K)
2667 page = instr->addr >> 11;
2668
2669 if (mtd->writesize == PAGE_SIZE_4K)
2670 page = instr->addr >> 12;
2671
2672 if (instr->addr & (mtd->erasesize - 1)) {
2673 pr_err("unsupported erase address, 0x%llx\n", instr->addr);
2674 err = -EINVAL;
2675 goto out;
2676 }
2677 if (instr->len != mtd->erasesize) {
2678 pr_err("unsupported erase len, %lld\n", instr->len);
2679 err = -EINVAL;
2680 goto out;
2681 }
2682
2683 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
2684 chip, sizeof(*dma_buffer))));
2685 cmd = dma_buffer->cmd;
2686
2687 memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
2688 data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
2689 data.cfg.addr0 = page;
2690 data.cfg.addr1 = 0;
2691 data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
2692 data.cfg.cfg1 = chip->cfg1;
2693 data.exec = 1;
2694 dma_buffer->flash_status = 0xeeeeeeee;
2695 data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
2696 data.clrrstatus = MSM_NAND_RESET_READ_STS;
2697
2698 curr_cmd = cmd;
2699 msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
2700
2701 cmd = curr_cmd;
2702 msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
2703 data.exec, SPS_IOVEC_FLAG_NWD);
2704 cmd++;
2705
2706 msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
2707 msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
2708 cmd++;
2709
2710 msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
2711 data.clrfstatus, 0);
2712 cmd++;
2713
2714 msm_nand_prep_single_desc(cmd, MSM_NAND_READ_STATUS(info), WRITE,
2715 data.clrrstatus,
2716 SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
2717 cmd++;
2718
2719 WARN_ON((cmd - dma_buffer->cmd) > ERASE_CMDS);
2720 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
2721 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
2722 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
2723 &dma_buffer->cmd_iovec);
2724 iovec = dma_buffer->xfer.iovec;
2725
2726 for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
2727 iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
2728 iovec->size = sizeof(struct sps_command_element);
2729 iovec->flags = dma_buffer->cmd[i].flags;
2730 iovec++;
2731 }
2732 mutex_lock(&info->lock);
2733 err = msm_nand_get_device(chip->dev);
2734 if (err)
2735 goto unlock_mutex;
2736
2737 err = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
2738 if (err) {
2739 pr_err("Failed to submit commands %d\n", err);
2740 goto put_dev;
2741 }
2742 err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
2743 info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
2744 &iovec_temp);
2745 if (err) {
2746 pr_err("Failed to get iovec for pipe %d (err: %d)\n",
2747 (info->sps.cmd_pipe.index), err);
2748 goto put_dev;
2749 }
2750 err = msm_nand_put_device(chip->dev);
2751 if (err)
2752 goto unlock_mutex;
2753
2754 /* Check for flash status errors */
2755 if (dma_buffer->flash_status & (FS_OP_ERR |
2756 FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
2757 pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
2758 err = -EIO;
2759 }
2760 if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
2761 pr_err("Device is write protected\n");
2762 err = -EIO;
2763 }
2764 if (err) {
2765 pr_err("Erase failed, 0x%llx\n", instr->addr);
2766 instr->fail_addr = instr->addr;
2767 instr->state = MTD_ERASE_FAILED;
2768 } else {
2769 instr->state = MTD_ERASE_DONE;
2770 instr->fail_addr = 0xffffffff;
2771 mtd_erase_callback(instr);
2772 }
2773 goto unlock_mutex;
2774put_dev:
2775 msm_nand_put_device(chip->dev);
2776unlock_mutex:
2777 mutex_unlock(&info->lock);
2778 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
2779out:
Sahitya Tummaladd8caf42017-10-11 10:12:19 +05302780 if (unlikely(enable_perfstats) && likely(!err))
2781 msm_nand_update_erase_perf_stats(info, start, 1);
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05302782 return err;
2783}
2784
2785/*
2786 * Structure that contains NANDc register data for commands required
2787 * for checking if a block is bad.
2788 */
2789struct msm_nand_blk_isbad_data {
2790 struct msm_nand_common_cfgs cfg;
2791 uint32_t ecc_bch_cfg;
2792 uint32_t exec;
2793 uint32_t read_offset;
2794};
2795
2796/*
2797 * Function that gets called from upper layers such as MTD/YAFFS2 to check if
2798 * a block is bad. This is done by reading the first page within a block and
2799 * checking whether the bad block byte location contains 0xFF or not. If it
2800  * doesn't contain 0xFF, then it is considered a bad block.
2801 */
2802#define ISBAD_CMDS 9
2803static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
2804{
2805 struct msm_nand_info *info = mtd->priv;
2806 struct msm_nand_chip *chip = &info->nand_chip;
2807 int i = 0, ret = 0, bad_block = 0, submitted_num_desc = 1;
2808 uint8_t *buf;
2809 uint32_t page = 0, rdata, cwperpage;
2810 struct msm_nand_sps_cmd *cmd, *curr_cmd;
2811 struct msm_nand_blk_isbad_data data;
2812 struct sps_iovec *iovec;
2813 struct sps_iovec iovec_temp;
2814 /*
2815 * The following 9 commands are required to check bad block -
2816 * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
2817 * exec, flash_status(read).
2818 */
2819 struct {
2820 struct sps_transfer xfer;
2821 struct sps_iovec cmd_iovec[ISBAD_CMDS];
2822 struct msm_nand_sps_cmd cmd[ISBAD_CMDS];
2823 uint32_t flash_status;
2824 } *dma_buffer;
2825
2826 if (mtd->writesize == PAGE_SIZE_2K)
2827 page = ofs >> 11;
2828
2829 if (mtd->writesize == PAGE_SIZE_4K)
2830 page = ofs >> 12;
2831
2832 cwperpage = (mtd->writesize >> 9);
2833
2834 if (ofs > mtd->size) {
2835 pr_err("Invalid offset 0x%llx\n", ofs);
2836 bad_block = -EINVAL;
2837 goto out;
2838 }
2839 if (ofs & (mtd->erasesize - 1)) {
2840 pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
2841 bad_block = -EINVAL;
2842 goto out;
2843 }
2844
2845 wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
2846 chip, sizeof(*dma_buffer) + 4)));
2847 buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
2848
2849 cmd = dma_buffer->cmd;
2850 memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data));
2851 data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
2852 data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
2853 data.cfg.cfg1 = chip->cfg1_raw;
2854
2855 if (chip->cfg1 & (1 << WIDE_FLASH))
2856 data.cfg.addr0 = (page << 16) |
2857 ((chip->cw_size * (cwperpage-1)) >> 1);
2858 else
2859 data.cfg.addr0 = (page << 16) |
2860 (chip->cw_size * (cwperpage-1));
2861
2862 data.cfg.addr1 = (page >> 16) & 0xff;
2863 data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
2864 data.exec = 1;
2865 data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1)));
2866 dma_buffer->flash_status = 0xeeeeeeee;
2867
2868 curr_cmd = cmd;
2869 msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
2870
2871 cmd = curr_cmd;
2872 msm_nand_prep_single_desc(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
2873 data.ecc_bch_cfg, 0);
2874 cmd++;
2875
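	/*
	 * Read location: start offset in the low bits, a 4-byte transfer
	 * size at bit 16, and bit 31 presumably flagging the last read
	 * request for this codeword.
	 */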
2876 rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
2877 msm_nand_prep_single_desc(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
2878 rdata, 0);
2879 cmd++;
2880
2881 msm_nand_prep_single_desc(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
2882 data.exec, SPS_IOVEC_FLAG_NWD);
2883 cmd++;
2884
2885 msm_nand_prep_single_desc(cmd, MSM_NAND_FLASH_STATUS(info), READ,
2886 msm_virt_to_dma(chip, &dma_buffer->flash_status),
2887 SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
2888 cmd++;
2889
2890 WARN_ON(cmd - dma_buffer->cmd > ISBAD_CMDS);
2891 dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
2892 dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
2893 dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
2894 &dma_buffer->cmd_iovec);
2895 iovec = dma_buffer->xfer.iovec;
2896
2897 for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
2898 iovec->addr = msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
2899 iovec->size = sizeof(struct sps_command_element);
2900 iovec->flags = dma_buffer->cmd[i].flags;
2901 iovec++;
2902 }
2903 mutex_lock(&info->lock);
2904 ret = msm_nand_get_device(chip->dev);
2905 if (ret) {
2906 mutex_unlock(&info->lock);
2907 goto free_dma;
2908 }
2909 /* Submit data descriptor */
2910 ret = sps_transfer_one(info->sps.data_prod.handle,
2911 msm_virt_to_dma(chip, buf),
2912 4, NULL, SPS_IOVEC_FLAG_INT);
2913
2914 if (ret) {
2915 pr_err("Failed to submit data desc %d\n", ret);
2916 goto put_dev;
2917 }
2918 /* Submit command descriptor */
2919 ret = sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
2920 if (ret) {
2921 pr_err("Failed to submit commands %d\n", ret);
2922 goto put_dev;
2923 }
2924
2925 ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
2926 info->sps.cmd_pipe.index, dma_buffer->xfer.iovec_count,
2927 &iovec_temp);
2928 if (ret) {
2929 pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
2930 (info->sps.cmd_pipe.index), ret);
2931 goto put_dev;
2932 }
2933 ret = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
2934 info->sps.data_prod.index, submitted_num_desc,
2935 &iovec_temp);
2936 if (ret) {
2937 pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
2938 (info->sps.data_prod.index), ret);
2939 goto put_dev;
2940 }
2941
2942 ret = msm_nand_put_device(chip->dev);
2943 mutex_unlock(&info->lock);
2944 if (ret)
2945 goto free_dma;
2946
2947 /* Check for flash status errors */
2948 if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
2949 pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
2950 bad_block = -EIO;
2951 goto free_dma;
2952 }
2953
2954 /* Check for bad block marker byte */
2955 if (chip->cfg1 & (1 << WIDE_FLASH)) {
2956 if (buf[0] != 0xFF || buf[1] != 0xFF)
2957 bad_block = 1;
2958 } else {
2959 if (buf[0] != 0xFF)
2960 bad_block = 1;
2961 }
2962 goto free_dma;
2963put_dev:
2964 msm_nand_put_device(chip->dev);
2965 mutex_unlock(&info->lock);
2966free_dma:
2967 msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
2968out:
2969 return ret ? ret : bad_block;
2970}
2971
2972/*
2973 * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
2974  * block as bad. This is done by writing the first page within the block with
2975  * zeroes, which also sets the bad block byte location to 0.
2976 */
2977static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2978{
2979 struct mtd_oob_ops ops;
2980 int ret;
2981 uint8_t *buf;
2982 size_t len;
2983
2984 if (ofs > mtd->size) {
2985 pr_err("Invalid offset 0x%llx\n", ofs);
2986 ret = -EINVAL;
2987 goto out;
2988 }
2989 if (ofs & (mtd->erasesize - 1)) {
2990 pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
2991 ret = -EINVAL;
2992 goto out;
2993 }
2994 len = mtd->writesize + mtd->oobsize;
2995 buf = kzalloc(len, GFP_KERNEL);
2996 if (!buf) {
2997 		pr_err("unable to allocate memory for 0x%zx size\n", len);
2998 ret = -ENOMEM;
2999 goto out;
3000 }
3001 ops.mode = MTD_OPS_RAW;
3002 ops.len = len;
3003 ops.retlen = 0;
3004 ops.ooblen = 0;
3005 ops.datbuf = buf;
3006 ops.oobbuf = NULL;
3007 ret = msm_nand_write_oob(mtd, ofs, &ops);
3008 kfree(buf);
3009out:
3010 return ret;
3011}
3012
3013/*
3014 * Function that scans for the attached NAND device. This fills out all
3015 * the uninitialized function pointers with the defaults. The flash ID is
3016 * read and the mtd/chip structures are filled with the appropriate values.
3017 */
3018static int msm_nand_scan(struct mtd_info *mtd)
3019{
3020 struct msm_nand_info *info = mtd->priv;
3021 struct msm_nand_chip *chip = &info->nand_chip;
3022 struct flash_identification *supported_flash = &info->flash_dev;
3023 int err = 0;
3024 uint32_t i, j, mtd_writesize;
3025 uint8_t dev_found = 0, wide_bus;
3026 uint32_t manid, devid, devcfg;
3027 uint32_t flash_id = 0, flash_id2 = 0;
3028 uint8_t id_byte[NAND_MAX_ID_LEN];
3029 uint32_t bad_block_byte, spare_bytes;
3030 struct nand_flash_dev *flashdev = NULL;
3031 struct nand_manufacturers *flashman = NULL;
3032
3033 /* Probe the Flash device for ONFI compliance */
3034 if (!msm_nand_flash_onfi_probe(info)) {
3035 dev_found = 1;
3036 } else {
3037 err = msm_nand_flash_read_id(info, 0, &flash_id, &flash_id2);
3038 if (err < 0) {
3039 pr_err("Failed to read Flash ID\n");
3040 err = -EINVAL;
3041 goto out;
3042 }
3043 manid = id_byte[0] = flash_id & 0xFF;
3044 devid = id_byte[1] = (flash_id >> 8) & 0xFF;
3045 devcfg = id_byte[3] = (flash_id >> 24) & 0xFF;
3046 id_byte[2] = (flash_id >> 16) & 0xFF;
3047 id_byte[4] = flash_id2 & 0xFF;
3048 id_byte[5] = (flash_id2 >> 8) & 0xFF;
3049 id_byte[6] = (flash_id2 >> 16) & 0xFF;
3050 id_byte[7] = (flash_id2 >> 24) & 0xFF;
3051
3052 for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
3053 if (nand_manuf_ids[i].id == manid)
3054 flashman = &nand_manuf_ids[i];
Shiraz Hashim4e297f42020-02-04 13:34:29 +05303055 for (i = 0; !flashdev && (nand_flash_ids[i].id != NULL); ++i) {
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05303056 /*
3057 * If id_len is specified for an entry in the nand ids
3058 			 * array, then at least 4 bytes of the nand id are
3059 * present in the nand ids array - use that to identify
3060 * the nand device first. If that is not present, only
3061 * then fall back to searching the legacy or extended
3062 * ids in the nand ids array.
3063 * The id_len number of bytes in the nand id read from
3064 * the device are checked against those in the nand id
3065 * table for exact match.
3066 */
3067 if (nand_flash_ids[i].id_len) {
3068 for (j = 0; j < nand_flash_ids[i].id_len; j++) {
3069 if (nand_flash_ids[i].id[j] ==
3070 id_byte[j])
3071 continue;
3072 else
3073 break;
3074 }
3075 if (j == nand_flash_ids[i].id_len)
3076 flashdev = &nand_flash_ids[i];
3077 } else if (!nand_flash_ids[i].id_len &&
3078 nand_flash_ids[i].dev_id == devid)
3079 flashdev = &nand_flash_ids[i];
3080 }
3081 if (!flashdev || !flashman) {
3082 pr_err("unknown nand flashid=%x manuf=%x devid=%x\n",
3083 flash_id, manid, devid);
3084 err = -ENOENT;
3085 goto out;
3086 }
3087 dev_found = 1;
3088 if (!flashdev->pagesize) {
3089 			pr_err("missing page size info - extracting from NAND ID\n");
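			/*
			 * Decode geometry from the 4th ID byte, e.g. devcfg
			 * 0x95 gives a 2048-byte page (1024 << 1), a 128KB
			 * block (64KB << 1) and 64 bytes of spare.
			 */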
3090 supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0;
3091 supported_flash->pagesize = 1024 << (devcfg & 0x3);
3092 supported_flash->blksize = (64 * 1024) <<
3093 ((devcfg >> 4) & 0x3);
3094 supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) *
3095 (supported_flash->pagesize >> 9);
3096 } else {
3097 supported_flash->widebus = flashdev->options &
3098 NAND_BUSWIDTH_16 ? 1 : 0;
3099 supported_flash->pagesize = flashdev->pagesize;
3100 supported_flash->blksize = flashdev->erasesize;
3101 supported_flash->oobsize = flashdev->oobsize;
3102 supported_flash->ecc_correctability =
3103 flashdev->ecc.strength_ds;
3104 if (!flashdev->ecc.strength_ds)
3105 				pr_err("number of correctable ecc bits not specified, defaulting to 4 bit BCH\n");
3106 }
3107 supported_flash->flash_id = flash_id;
3108 supported_flash->density = ((uint64_t)flashdev->chipsize) << 20;
3109 }
3110
3111 if (dev_found) {
3112 wide_bus = supported_flash->widebus;
3113 mtd->size = supported_flash->density;
3114 mtd->writesize = supported_flash->pagesize;
3115 mtd->oobsize = supported_flash->oobsize;
3116 mtd->erasesize = supported_flash->blksize;
3117 mtd->writebufsize = mtd->writesize;
3118 mtd_writesize = mtd->writesize;
3119
3120 		/* Check whether NAND device supports 8-bit ECC */
3121 if (supported_flash->ecc_correctability >= 8) {
3122 chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH;
3123 supported_flash->ecc_capability = 8;
3124 } else {
3125 chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH;
3126 supported_flash->ecc_capability = 4;
3127 }
3128
3129 pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n",
3130 supported_flash->flash_id, (wide_bus) ? 16 : 8,
3131 (mtd->size >> 20));
3132 pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n",
3133 mtd->writesize, mtd->erasesize, mtd->oobsize);
3134 pr_info("BCH ECC: %d Bit\n", supported_flash->ecc_capability);
3135 }
3136
3137 chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528;
3138 chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
3139 | (516 << UD_SIZE_BYTES)
3140 | (0 << DISABLE_STATUS_AFTER_WRITE)
3141 | (5 << NUM_ADDR_CYCLES);
3142
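	/* Bad block marker byte position, programmed into CFG1's BAD_BLOCK_BYTE_NUM field below */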
3143 bad_block_byte = (mtd_writesize - (chip->cw_size * (
3144 (mtd_writesize >> 9) - 1)) + 1);
3145 chip->cfg1 = (7 << NAND_RECOVERY_CYCLES)
3146 | (0 << CS_ACTIVE_BSY)
3147 | (bad_block_byte << BAD_BLOCK_BYTE_NUM)
3148 | (0 << BAD_BLOCK_IN_SPARE_AREA)
3149 | (2 << WR_RD_BSY_GAP)
3150 | ((wide_bus ? 1 : 0) << WIDE_FLASH)
3151 | (1 << ENABLE_BCH_ECC);
3152
3153 /*
3154 * For 4bit BCH ECC (default ECC), parity bytes = 7(x8) or 8(x16 I/O)
3155 * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
3156 */
3157 chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ?
3158 (wide_bus ? 14 : 13) : (wide_bus ? 8 : 7);
3159
3160 spare_bytes = chip->cw_size - (BYTES_512 + chip->ecc_parity_bytes);
3161 chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
3162 | (5 << NUM_ADDR_CYCLES)
3163 | (spare_bytes << SPARE_SIZE_BYTES)
3164 | (BYTES_512 << UD_SIZE_BYTES);
3165
3166 chip->cfg1_raw = (2 << WR_RD_BSY_GAP)
3167 | (1 << BAD_BLOCK_IN_SPARE_AREA)
3168 | (21 << BAD_BLOCK_BYTE_NUM)
3169 | (0 << CS_ACTIVE_BSY)
3170 | (7 << NAND_RECOVERY_CYCLES)
3171 | ((wide_bus ? 1 : 0) << WIDE_FLASH)
3172 | (1 << DEV0_CFG1_ECC_DISABLE);
3173
3174 chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE)
3175 | (0 << ECC_SW_RESET)
3176 | (516 << ECC_NUM_DATA_BYTES)
3177 | (chip->ecc_parity_bytes << ECC_PARITY_SIZE_BYTES)
3178 | (1 << ECC_FORCE_CLK_OPEN);
3179
3180 chip->ecc_cfg_raw = (1 << ECC_FORCE_CLK_OPEN)
3181 | (BYTES_512 << ECC_NUM_DATA_BYTES)
3182 | (chip->ecc_parity_bytes << ECC_PARITY_SIZE_BYTES)
3183 | (0 << ECC_SW_RESET)
3184 | (1 << ECC_CFG_ECC_DISABLE);
3185
3186 if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) {
3187 chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES :
3188 2 << SPARE_SIZE_BYTES);
3189 chip->ecc_bch_cfg |= (1 << ECC_MODE);
3190 chip->ecc_cfg_raw |= (1 << ECC_MODE);
3191 } else {
3192 chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES :
3193 4 << SPARE_SIZE_BYTES);
3194 chip->ecc_bch_cfg |= (0 << ECC_MODE);
3195 chip->ecc_cfg_raw |= (0 << ECC_MODE);
3196 }
3197
3198 chip->ecc_buf_cfg = 0x203; /* No of bytes covered by ECC - 516 bytes */
3199
3200 pr_info("CFG0: 0x%08x, CFG1: 0x%08x\n"
3201 " RAWCFG0: 0x%08x, RAWCFG1: 0x%08x\n"
3202 " ECCBUFCFG: 0x%08x, ECCBCHCFG: 0x%08x\n"
3203 " RAWECCCFG: 0x%08x, BAD BLOCK BYTE: 0x%08x\n",
3204 chip->cfg0, chip->cfg1, chip->cfg0_raw, chip->cfg1_raw,
3205 chip->ecc_buf_cfg, chip->ecc_bch_cfg,
3206 chip->ecc_cfg_raw, bad_block_byte);
3207
3208 if (mtd->writesize == 2048)
3209 mtd->oobavail = 16;
3210 else if (mtd->writesize == 4096)
3211 mtd->oobavail = 32;
3212 else {
3213 pr_err("Unsupported NAND pagesize: 0x%x\n", mtd->writesize);
3214 err = -ENODEV;
3215 goto out;
3216 }
3217
3218 /* Fill in remaining MTD driver data */
3219 mtd->type = MTD_NANDFLASH;
3220 mtd->flags = MTD_CAP_NANDFLASH;
3221 mtd->_erase = msm_nand_erase;
3222 mtd->_block_isbad = msm_nand_block_isbad;
3223 mtd->_block_markbad = msm_nand_block_markbad;
3224 mtd->_read = msm_nand_read;
3225 mtd->_write = msm_nand_write;
3226 mtd->_read_oob = msm_nand_read_oob;
3227 mtd->_write_oob = msm_nand_write_oob;
3228 mtd->owner = THIS_MODULE;
3229out:
3230 return err;
3231}
3232
3233#define BAM_APPS_PIPE_LOCK_GRP0 0
3234#define BAM_APPS_PIPE_LOCK_GRP1 1
3235/*
3236  * This function allocates, configures and connects an end point, and
3237  * registers event notification for it. It also allocates DMA memory
3238  * for the descriptor FIFO of the pipe.
3239 */
3240static int msm_nand_init_endpoint(struct msm_nand_info *info,
3241 struct msm_nand_sps_endpt *end_point,
3242 uint32_t pipe_index)
3243{
3244 int rc = 0;
3245 struct sps_pipe *pipe_handle;
3246 struct sps_connect *sps_config = &end_point->config;
3247 struct sps_register_event *sps_event = &end_point->event;
3248
3249 pipe_handle = sps_alloc_endpoint();
3250 if (!pipe_handle) {
3251 pr_err("sps_alloc_endpoint() failed\n");
3252 rc = -ENOMEM;
3253 goto out;
3254 }
3255
3256 rc = sps_get_config(pipe_handle, sps_config);
3257 if (rc) {
3258 pr_err("sps_get_config() failed %d\n", rc);
3259 goto free_endpoint;
3260 }
3261
3262 if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) {
3263 /* READ CASE: source - BAM; destination - system memory */
3264 sps_config->source = info->sps.bam_handle;
3265 sps_config->destination = SPS_DEV_HANDLE_MEM;
3266 sps_config->mode = SPS_MODE_SRC;
3267 sps_config->src_pipe_index = pipe_index;
3268 } else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX ||
3269 pipe_index == SPS_CMD_CONS_PIPE_INDEX) {
3270 /* WRITE CASE: source - system memory; destination - BAM */
3271 sps_config->source = SPS_DEV_HANDLE_MEM;
3272 sps_config->destination = info->sps.bam_handle;
3273 sps_config->mode = SPS_MODE_DEST;
3274 sps_config->dest_pipe_index = pipe_index;
3275 }
3276
3277 sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_POLL |
3278 SPS_O_ACK_TRANSFERS;
3279
3280 if (pipe_index == SPS_DATA_PROD_PIPE_INDEX ||
3281 pipe_index == SPS_DATA_CONS_PIPE_INDEX)
3282 sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP0;
3283 else if (pipe_index == SPS_CMD_CONS_PIPE_INDEX)
3284 sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP1;
3285
3286 /*
3287 * Descriptor FIFO is a cyclic FIFO. If SPS_MAX_DESC_NUM descriptors
3288 * are allowed to be submitted before we get any ack for any of them,
3289 * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
3290 * sizeof(struct sps_iovec).
3291 */
3292 sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) *
3293 sizeof(struct sps_iovec);
3294 sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev,
3295 sps_config->desc.size,
3296 &sps_config->desc.phys_base,
3297 GFP_KERNEL);
3298 if (!sps_config->desc.base) {
3299 pr_err("dmam_alloc_coherent() failed for size %x\n",
3300 sps_config->desc.size);
3301 rc = -ENOMEM;
3302 goto free_endpoint;
3303 }
3304 memset(sps_config->desc.base, 0x00, sps_config->desc.size);
3305
3306 rc = sps_connect(pipe_handle, sps_config);
3307 if (rc) {
3308 pr_err("sps_connect() failed %d\n", rc);
3309 goto free_endpoint;
3310 }
3311
3312 sps_event->options = SPS_O_EOT;
3313 sps_event->mode = SPS_TRIGGER_WAIT;
3314 sps_event->user = (void *)info;
3315
3316 rc = sps_register_event(pipe_handle, sps_event);
3317 if (rc) {
3318 pr_err("sps_register_event() failed %d\n", rc);
3319 goto sps_disconnect;
3320 }
3321 end_point->index = pipe_index;
3322 end_point->handle = pipe_handle;
3323 pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle,
3324 pipe_index);
3325 goto out;
3326sps_disconnect:
3327 sps_disconnect(pipe_handle);
3328free_endpoint:
3329 sps_free_endpoint(pipe_handle);
3330out:
3331 return rc;
3332}
3333
3334/* This function disconnects and frees an end point */
3335static void msm_nand_deinit_endpoint(struct msm_nand_info *info,
3336 struct msm_nand_sps_endpt *end_point)
3337{
3338 sps_disconnect(end_point->handle);
3339 sps_free_endpoint(end_point->handle);
3340}
3341
3342/*
3343 * This function registers BAM device and initializes its end points for
3344 * the following pipes -
3345 * system consumer pipe for data (pipe#0),
3346 * system producer pipe for data (pipe#1),
3347 * system consumer pipe for commands (pipe#2).
3348 */
3349static int msm_nand_bam_init(struct msm_nand_info *nand_info)
3350{
3351 struct sps_bam_props bam = {0};
3352 int rc = 0;
3353
3354 bam.phys_addr = nand_info->bam_phys;
3355 bam.virt_addr = nand_info->bam_base;
3356 bam.irq = nand_info->bam_irq;
3357 /*
3358 * NAND device is accessible from both Apps and Modem processor and
3359 * thus, NANDc and BAM are shared between both the processors. But BAM
3360 * must be enabled and instantiated only once during boot up by
3361 * Trustzone before Modem/Apps is brought out from reset.
3362 *
3363 * This is indicated to SPS driver on Apps by marking flag
3364 * SPS_BAM_MGR_DEVICE_REMOTE. The following are the global
3365 * initializations that will be done by Trustzone - Execution
3366 * Environment, Pipes assignment to Apps/Modem, Pipe Super groups and
3367 * Descriptor summing threshold.
3368 *
3369 * NANDc BAM device supports 2 execution environments - Modem and Apps
3370 * and thus the flag SPS_BAM_MGR_MULTI_EE is set.
3371 */
3372 bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
3373 bam.ipc_loglevel = QPIC_BAM_DEFAULT_IPC_LOGLVL;
3374
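	/*
	 * If the BAM was already registered (it is shared with other
	 * execution environments), sps_phy2h() returns its existing
	 * handle and registration is skipped.
	 */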
3375 rc = sps_phy2h(bam.phys_addr, &nand_info->sps.bam_handle);
3376 if (!rc)
3377 goto init_sps_ep;
3378 rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
3379 if (rc) {
3380 pr_err("%s: sps_register_bam_device() failed with %d\n",
3381 __func__, rc);
3382 goto out;
3383 }
3384 pr_info("%s: BAM device registered: bam_handle 0x%lx\n",
3385 __func__, nand_info->sps.bam_handle);
3386init_sps_ep:
3387 rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
3388 SPS_DATA_PROD_PIPE_INDEX);
3389 if (rc)
3390 goto out;
3391 rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
3392 SPS_DATA_CONS_PIPE_INDEX);
3393 if (rc)
3394 goto deinit_data_prod;
3395
3396 rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe,
3397 SPS_CMD_CONS_PIPE_INDEX);
3398 if (rc)
3399 goto deinit_data_cons;
3400 goto out;
3401deinit_data_cons:
3402 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
3403deinit_data_prod:
3404 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
3405out:
3406 return rc;
3407}
3408
3409/*
3410 * This function disconnects and frees its end points for all the pipes.
3411  * Since the BAM is a shared resource, it is not deregistered, as its handle
3412 * might be in use with LCDC.
3413 */
3414static void msm_nand_bam_free(struct msm_nand_info *nand_info)
3415{
3416 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
3417 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
3418 msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
3419}
3420
3421/* This function enables DMA support for the NANDc in BAM mode. */
3422static int msm_nand_enable_dma(struct msm_nand_info *info)
3423{
3424 struct msm_nand_sps_cmd *sps_cmd;
3425 struct msm_nand_chip *chip = &info->nand_chip;
3426 int ret, submitted_num_desc = 1;
3427 struct sps_iovec iovec_temp;
3428
3429 wait_event(chip->dma_wait_queue,
3430 (sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd))));
3431
3432 msm_nand_prep_single_desc(sps_cmd, MSM_NAND_CTRL(info), WRITE,
3433 (1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT);
3434
3435 mutex_lock(&info->lock);
3436 ret = msm_nand_get_device(chip->dev);
3437 if (ret) {
3438 mutex_unlock(&info->lock);
3439 goto out;
3440 }
3441 ret = sps_transfer_one(info->sps.cmd_pipe.handle,
3442 msm_virt_to_dma(chip, &sps_cmd->ce),
3443 sizeof(struct sps_command_element), NULL,
3444 sps_cmd->flags);
3445 if (ret) {
3446 pr_err("Failed to submit command: %d\n", ret);
3447 goto put_dev;
3448 }
3449 ret = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
3450 info->sps.cmd_pipe.index, submitted_num_desc,
3451 &iovec_temp);
3452 if (ret) {
3453 pr_err("Failed to get iovec for pipe %d (ret: %d)\n",
3454 (info->sps.cmd_pipe.index), ret);
3455 goto put_dev;
3456 }
3457put_dev:
3458 ret = msm_nand_put_device(chip->dev);
3459out:
3460 mutex_unlock(&info->lock);
3461 msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd));
3462 return ret;
3463
3464}
3465
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05303466static int msm_nand_parse_smem_ptable(int *nr_parts)
3467{
3468
3469 uint32_t i, j;
3470 uint32_t len = FLASH_PTABLE_HDR_LEN;
3471 struct flash_partition_entry *pentry;
3472 char *delimiter = ":";
3473 void *temp_ptable = NULL;
3474
3475 pr_info("Parsing partition table info from SMEM\n");
3476 temp_ptable = smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len, 0,
3477 SMEM_ANY_HOST_FLAG);
3478
Sahitya Tummala6cd4b252017-10-09 11:15:44 +05303479 if (IS_ERR_OR_NULL(temp_ptable)) {
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05303480 pr_err("Error reading partition table header\n");
3481 goto out;
3482 }
3483
3484 /* Read only the header portion of ptable */
3485 ptable = *(struct flash_partition_table *)temp_ptable;
3486
3487 /* Verify ptable magic */
3488 if (ptable.magic1 != FLASH_PART_MAGIC1 ||
3489 ptable.magic2 != FLASH_PART_MAGIC2) {
3490 pr_err("Partition table magic verification failed\n");
3491 goto out;
3492 }
3493 /* Ensure that # of partitions is less than the max we have allocated */
3494 if (ptable.numparts > FLASH_PTABLE_MAX_PARTS_V4) {
3495 pr_err("Partition numbers exceed the max limit\n");
3496 goto out;
3497 }
3498 /* Find out length of partition data based on table version. */
3499 if (ptable.version <= FLASH_PTABLE_V3) {
3500 len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V3 *
3501 sizeof(struct flash_partition_entry);
3502 } else if (ptable.version == FLASH_PTABLE_V4) {
3503 len = FLASH_PTABLE_HDR_LEN + FLASH_PTABLE_MAX_PARTS_V4 *
3504 sizeof(struct flash_partition_entry);
3505 } else {
3506 pr_err("Unknown ptable version (%d)", ptable.version);
3507 goto out;
3508 }
3509
3510 *nr_parts = ptable.numparts;
3511
3512 /*
3513 * Now that the partition table header has been parsed, verified
3514 * and the length of the partition table calculated, read the
3515 * complete partition table.
3516 */
3517 temp_ptable = smem_get_entry(SMEM_AARM_PARTITION_TABLE, &len, 0,
3518 SMEM_ANY_HOST_FLAG);
Sahitya Tummala6cd4b252017-10-09 11:15:44 +05303519 if (IS_ERR_OR_NULL(temp_ptable)) {
Sahitya Tummala3f3f2f92017-06-05 09:03:42 +05303520 pr_err("Error reading partition table\n");
3521 goto out;
3522 }
3523
3524 /* Read only the header portion of ptable */
3525 ptable = *(struct flash_partition_table *)temp_ptable;
3526
3527 for (i = 0; i < ptable.numparts; i++) {
3528 pentry = &ptable.part_entry[i];
3529 if (pentry->name[0] == '\0')
3530 continue;
3531 /* Convert name to lower case and discard the initial chars */
		mtd_part[i].name = pentry->name;
		strsep(&(mtd_part[i].name), delimiter);
		if (!mtd_part[i].name)
			mtd_part[i].name = pentry->name;
		for (j = 0; j < strlen(mtd_part[i].name); j++)
			*(mtd_part[i].name + j) =
				tolower(*(mtd_part[i].name + j));
		mtd_part[i].offset = pentry->offset;
		mtd_part[i].mask_flags = pentry->attr;
		mtd_part[i].size = pentry->length;
		pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
			i, pentry->name, pentry->offset, pentry->length,
			pentry->attr);
	}
	pr_info("SMEM partition table found: ver: %d len: %d\n",
		ptable.version, ptable.numparts);
	return 0;
out:
	return -EINVAL;
}

#define BOOT_DEV_MASK	0x1E
#define BOOT_DEV_NAND	0x4

/*
 * This function gets called when the device named msm-nand is added to the
 * device tree (.dts) file with all of its resources, such as the physical
 * addresses of NANDc and BAM, and the BAM IRQ.
 *
 * It also expects the NAND flash partition information to be passed in the
 * .dts file so that it can register the partitions by calling the MTD
 * function mtd_device_parse_register().
 */
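/*
 * A minimal sketch of such a device tree node, assembled only from the
 * resource, property and clock names this probe function looks up; the unit
 * address, reg values, IRQ number and clock phandle below are hypothetical
 * placeholders, not taken from any real board file:
 *
 *	nand@79b0000 {
 *		compatible = "qcom,msm-nand";
 *		reg = <0x79b0000 0x10000>,
 *		      <0x7984000 0x1a000>;
 *		reg-names = "nand_phys", "bam_phys";
 *		qcom,reg-adjustment-offset = <0x0>;
 *		interrupts = <0 247 0>;
 *		interrupt-names = "bam_irq";
 *		clocks = <&rpmcc QPIC_CLK>;
 *		clock-names = "core_clk";
 *	};
 */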
static int msm_nand_probe(struct platform_device *pdev)
{
	struct msm_nand_info *info;
	struct resource *res;
	int i, err, nr_parts;
	struct device *dev;
	u32 adjustment_offset;
	void __iomem *boot_cfg_base;
	u32 boot_dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "boot_cfg");
	if (res && res->start) {
		boot_cfg_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
		if (!boot_cfg_base) {
			pr_err("ioremap() failed for addr 0x%x size 0x%x\n",
				res->start, resource_size(res));
			return -ENOMEM;
		}
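		/*
		 * Bits [4:1] of the boot config register encode the boot
		 * device. For example, a raw register value of 0x08 decodes
		 * as (0x08 & 0x1E) >> 1 = 0x4, i.e. BOOT_DEV_NAND.
		 */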
		boot_dev = (readl_relaxed(boot_cfg_base) & BOOT_DEV_MASK) >> 1;
		if (boot_dev != BOOT_DEV_NAND) {
			pr_err("disabling nand as boot device (%x) is not NAND\n",
					boot_dev);
			return -ENODEV;
		}
	}
	/*
	 * The partition information can also be passed from the kernel command
	 * line. Also, the MTD core layer supports adding the whole device as
	 * one MTD device when no partition information is available at all.
	 */
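	/*
	 * Hypothetical cmdline example only (partition names and sizes are
	 * illustrative, not from any real target):
	 *   mtdparts=<mtd-id>:0x140000@0x0(boot),-(system)
	 * where <mtd-id> must match info->mtd.name assigned later in this
	 * function.
	 */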
	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
				GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto out;
	}
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "nand_phys");
	if (!res || !res->start) {
		pr_err("NAND phys address range is not provided\n");
		err = -ENODEV;
		goto out;
	}
	info->nand_phys = res->start;

	err = of_property_read_u32(pdev->dev.of_node,
				   "qcom,reg-adjustment-offset",
				   &adjustment_offset);
	if (err) {
		pr_err("adjustment_offset not found, err = %d\n", err);
		WARN_ON(1);
		return err;
	}

	info->nand_phys_adjusted = info->nand_phys + adjustment_offset;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "bam_phys");
	if (!res || !res->start) {
		pr_err("BAM phys address range is not provided\n");
		err = -ENODEV;
		goto out;
	}
	info->bam_phys = res->start;
	info->bam_base = devm_ioremap(&pdev->dev, res->start,
				      resource_size(res));
	if (!info->bam_base) {
		pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
			res->start, resource_size(res));
		err = -ENOMEM;
		goto out;
	}

	info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
	if (info->bam_irq < 0) {
		pr_err("BAM IRQ is not provided\n");
		err = -ENODEV;
		goto out;
	}

	info->mtd.name = dev_name(&pdev->dev);
	info->mtd.priv = info;
	info->mtd.owner = THIS_MODULE;
	info->nand_chip.dev = &pdev->dev;
	init_waitqueue_head(&info->nand_chip.dma_wait_queue);
	mutex_init(&info->lock);

	dev = &pdev->dev;
	if (dma_supported(dev, DMA_BIT_MASK(32))) {
		info->dma_mask = DMA_BIT_MASK(32);
		dev->coherent_dma_mask = info->dma_mask;
	}
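	/*
	 * Note: the controller is treated as 32-bit DMA capable here, so only
	 * a 32-bit coherent mask is set; dma_set_mask_and_coherent() would be
	 * the more common idiom on newer kernels (observation only, behaviour
	 * unchanged).
	 */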

	info->nand_chip.dma_virt_addr =
		dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
			&info->nand_chip.dma_phys_addr, GFP_KERNEL);
	if (!info->nand_chip.dma_virt_addr) {
		pr_err("No memory for DMA buffer size %x\n",
			MSM_NAND_DMA_BUFFER_SIZE);
		err = -ENOMEM;
		goto out;
	}
	err = msm_nand_bus_register(pdev, info);
	if (err)
		goto out;

	if (of_property_read_bool(pdev->dev.of_node, "qcom,qpic-clk-rpmh"))
		info->clk_data.rpmh_clk = true;

	if (!info->clk_data.rpmh_clk) {
		info->clk_data.qpic_clk = devm_clk_get(&pdev->dev, "core_clk");
		if (!IS_ERR_OR_NULL(info->clk_data.qpic_clk)) {
			err = clk_set_rate(info->clk_data.qpic_clk,
				MSM_NAND_BUS_VOTE_MAX_RATE);
		} else {
			err = PTR_ERR(info->clk_data.qpic_clk);
			pr_err("Failed to get clock handle, err=%d\n", err);
		}
		if (err)
			goto bus_unregister;
	}

	err = msm_nand_setup_clocks_and_bus_bw(info, true);
	if (err)
		goto bus_unregister;
	dev_set_drvdata(&pdev->dev, info);
	err = pm_runtime_set_active(&pdev->dev);
	if (err)
		pr_err("pm_runtime_set_active() failed with error %d\n", err);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_NAND_IDLE_TIMEOUT);
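	/*
	 * With autosuspend enabled, the controller may runtime-suspend once it
	 * has been idle for MSM_NAND_IDLE_TIMEOUT; the I/O paths release their
	 * runtime PM reference through msm_nand_put_device(), as in the
	 * command-pipe teardown earlier in this file.
	 */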

	err = msm_nand_bam_init(info);
	if (err) {
		pr_err("msm_nand_bam_init() failed %d\n", err);
		goto clk_rpm_disable;
	}
	err = msm_nand_enable_dma(info);
	if (err) {
		pr_err("Failed to enable DMA in NANDc\n");
		goto free_bam;
	}
	err = msm_nand_parse_smem_ptable(&nr_parts);
	if (err < 0) {
		pr_err("Failed to parse partition table in SMEM\n");
		goto free_bam;
	}
	if (msm_nand_scan(&info->mtd)) {
		pr_err("No nand device found\n");
		err = -ENXIO;
		goto free_bam;
	}
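	/*
	 * SMEM stores partition offset/length in erase-block units; the loop
	 * below converts them to bytes. For example (figures illustrative
	 * only), with a 128 KiB erase block an offset of 4 blocks and a length
	 * of 80 blocks become a 512 KiB offset and a 10 MiB partition size.
	 */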
	for (i = 0; i < nr_parts; i++) {
		mtd_part[i].offset *= info->mtd.erasesize;
		mtd_part[i].size *= info->mtd.erasesize;
	}
	err = mtd_device_parse_register(&info->mtd, NULL, NULL,
			&mtd_part[0], nr_parts);
	if (err < 0) {
		pr_err("Unable to register MTD partitions %d\n", err);
		goto free_bam;
	}

	pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
			info->nand_phys, info->bam_phys, info->bam_irq);
	pr_info("Allocated DMA buffer at virt_addr 0x%pK, phys_addr 0x%x\n",
		info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
	msm_nand_init_sysfs(dev);
	msm_nand_init_perf_stats(info);
	goto out;
free_bam:
	msm_nand_bam_free(info);
clk_rpm_disable:
	msm_nand_setup_clocks_and_bus_bw(info, false);
	pm_runtime_disable(&(pdev)->dev);
	pm_runtime_set_suspended(&(pdev)->dev);
bus_unregister:
	msm_nand_bus_unregister(info);
out:
	return err;
}

/*
 * This function gets called when the msm-nand driver/device is removed;
 * it tears down what the probe path set up.
 */
static int msm_nand_remove(struct platform_device *pdev)
{
	struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);

	msm_nand_cleanup_sysfs(&pdev->dev);
	if (pm_runtime_suspended(&(pdev)->dev))
		pm_runtime_resume(&(pdev)->dev);

	pm_runtime_disable(&(pdev)->dev);
	pm_runtime_set_suspended(&(pdev)->dev);

	dev_set_drvdata(&pdev->dev, NULL);

	if (info) {
		msm_nand_setup_clocks_and_bus_bw(info, false);
		if (info->clk_data.client_handle)
			msm_nand_bus_unregister(info);
		mtd_device_unregister(&info->mtd);
		msm_nand_bam_free(info);
	}
	return 0;
}

#define DRIVER_NAME "msm_qpic_nand"
static const struct of_device_id msm_nand_match_table[] = {
	{ .compatible = "qcom,msm-nand", },
	{},
};

static const struct dev_pm_ops msm_nand_pm_ops = {
	.suspend = msm_nand_suspend,
	.resume = msm_nand_resume,
	.runtime_suspend = msm_nand_runtime_suspend,
	.runtime_resume = msm_nand_runtime_resume,
};

static struct platform_driver msm_nand_driver = {
	.probe = msm_nand_probe,
	.remove = msm_nand_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = msm_nand_match_table,
		.pm = &msm_nand_pm_ops,
	},
};

module_param(enable_euclean, bool, 0644);
MODULE_PARM_DESC(enable_euclean, "Set this parameter to enable reporting EUCLEAN to upper layer when the correctable bitflips are equal to the max correctable limit.");

module_param(enable_perfstats, bool, 0644);
MODULE_PARM_DESC(enable_perfstats, "Set this parameter to enable collection and reporting of performance data.");
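/*
 * Illustrative usage only (the parameter prefix assumes the module is named
 * msm_qpic_nand, matching this file): boot with
 * msm_qpic_nand.enable_perfstats=1 on the kernel command line, or pass
 * enable_perfstats=1 at insmod time, then read the perf_stats sysfs attribute
 * that msm_nand_init_sysfs() registers during probe.
 */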

module_platform_driver(msm_nand_driver);

MODULE_ALIAS(DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM QPIC NAND flash driver");