/*
 * Copyright (C) 2015 Matias Bjorling. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 */

#include <linux/lightnvm.h>

#define MAX_SYSBLKS 3	/* remember to update mapping scheme on change */
#define MAX_BLKS_PR_SYSBLK 2 /* 2 blks with 256 pages and 3000 erases
			      * enables ~1.5M updates per sysblk unit
			      */

struct sysblk_scan {
        /* A row is a collection of flash blocks for a system block. */
        int nr_rows;
        int row;
        int act_blk[MAX_SYSBLKS];

        int nr_ppas;
        struct ppa_addr ppas[MAX_SYSBLKS * MAX_BLKS_PR_SYSBLK];/* all sysblks */
};

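/* Map a (row, block) pair to its slot in the flat sysblk_scan.ppas[] array */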
static inline int scan_ppa_idx(int row, int blkid)
{
        return (row * MAX_BLKS_PR_SYSBLK) + blkid;
}

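/*
 * The system block is stored on media in big-endian byte order. These two
 * helpers convert between the on-media layout (struct nvm_system_block) and
 * the host-native view (struct nvm_sb_info).
 */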
void nvm_sysblk_to_cpu(struct nvm_sb_info *info, struct nvm_system_block *sb)
{
        info->seqnr = be32_to_cpu(sb->seqnr);
        info->erase_cnt = be32_to_cpu(sb->erase_cnt);
        info->version = be16_to_cpu(sb->version);
        strncpy(info->mmtype, sb->mmtype, NVM_MMTYPE_LEN);
        info->fs_ppa.ppa = be64_to_cpu(sb->fs_ppa);
}

void nvm_cpu_to_sysblk(struct nvm_system_block *sb, struct nvm_sb_info *info)
{
        sb->magic = cpu_to_be32(NVM_SYSBLK_MAGIC);
        sb->seqnr = cpu_to_be32(info->seqnr);
        sb->erase_cnt = cpu_to_be32(info->erase_cnt);
        sb->version = cpu_to_be16(info->version);
        strncpy(sb->mmtype, info->mmtype, NVM_MMTYPE_LEN);
        sb->fs_ppa = cpu_to_be64(info->fs_ppa.ppa);
}

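/*
 * Initialize one candidate PPA per sysblk row and return the number of rows
 * in use (at most MAX_SYSBLKS, capped by the device's channel count).
 */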
static int nvm_setup_sysblks(struct nvm_dev *dev, struct ppa_addr *sysblk_ppas)
{
        int nr_rows = min_t(int, MAX_SYSBLKS, dev->nr_chnls);
        int i;

        for (i = 0; i < nr_rows; i++)
                sysblk_ppas[i].ppa = 0;

        /* if possible, place sysblks at the first, middle and last channel
         * of the device. If not, create only one or two sysblks.
         */
        switch (dev->nr_chnls) {
        case 2:
                sysblk_ppas[1].g.ch = 1;
                /* fall-through */
        case 1:
                sysblk_ppas[0].g.ch = 0;
                break;
        default:
                sysblk_ppas[0].g.ch = 0;
                sysblk_ppas[1].g.ch = dev->nr_chnls / 2;
                sysblk_ppas[2].g.ch = dev->nr_chnls - 1;
                break;
        }

        return nr_rows;
}

void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
                           struct ppa_addr *sysblk_ppas)
{
        memset(s, 0, sizeof(struct sysblk_scan));
        s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}

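/*
 * get_bb_tbl() callback: fold the raw block state table for one LUN and
 * record every block already marked NVM_BLK_T_HOST as a sysblk candidate
 * for the current scan row.
 */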
static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                                u8 *blks, int nr_blks, void *private)
{
        struct sysblk_scan *s = private;
        int i, nr_sysblk = 0;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        for (i = 0; i < nr_blks; i++) {
                if (blks[i] != NVM_BLK_T_HOST)
                        continue;

                if (s->nr_ppas == MAX_BLKS_PR_SYSBLK * MAX_SYSBLKS) {
                        pr_err("nvm: too many host blks\n");
                        return -EINVAL;
                }

                ppa.g.blk = i;

                s->ppas[scan_ppa_idx(s->row, nr_sysblk)] = ppa;
                s->nr_ppas++;
                nr_sysblk++;
        }

        return 0;
}

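/*
 * For each sysblk row, translate the row's PPA to device format and hand
 * that LUN's block state table to the callback @fn.
 */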
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
                               struct ppa_addr *ppas, nvm_bb_update_fn *fn)
{
        struct ppa_addr dppa;
        int i, ret = 0;

        s->nr_ppas = 0;

        for (i = 0; i < s->nr_rows; i++) {
                dppa = generic_to_dev_addr(dev, ppas[i]);
                s->row = i;

                ret = dev->ops->get_bb_tbl(dev, dppa, fn, s);
                if (ret) {
                        pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
                               ppas[i].g.ch, ppas[i].g.blk);
                        return ret;
                }
        }

        return ret;
}

/*
 * scans a block for the latest sysblk.
 * Returns:
 * 0 - newer sysblk not found. PPA is updated to latest page.
 * 1 - newer sysblk found and stored in *sblk. PPA is updated to
 *     next valid page.
 * <0 - error.
 */
static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
                          struct nvm_system_block *sblk)
{
        struct nvm_system_block *cur;
        int pg, ret, found = 0;

        /* the full buffer for a flash page is allocated. Only the first
         * part of it contains the system block information.
         */
        cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
        if (!cur)
                return -ENOMEM;

        /* perform linear scan through the block */
        for (pg = 0; pg < dev->lps_per_blk; pg++) {
                ppa->g.pg = ppa_to_slc(dev, pg);

                ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
                                     cur, dev->pfpg_size);
                if (ret) {
                        if (ret == NVM_RSP_ERR_EMPTYPAGE) {
                                pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
                                         ppa->g.ch, ppa->g.lun,
                                         ppa->g.blk, ppa->g.pg);
                                break;
                        }
                        pr_err("nvm: read failed (%x) for ppa (%u %u %u %u)\n",
                               ret, ppa->g.ch, ppa->g.lun,
                               ppa->g.blk, ppa->g.pg);
                        break; /* if we can't read a page, continue to the
                                * next blk
                                */
                }

                if (be32_to_cpu(cur->magic) != NVM_SYSBLK_MAGIC) {
                        pr_debug("nvm: scan break for ppa (%u %u %u %u)\n",
                                 ppa->g.ch, ppa->g.lun,
                                 ppa->g.blk, ppa->g.pg);
                        break; /* last valid page already found */
                }

                if (be32_to_cpu(cur->seqnr) < be32_to_cpu(sblk->seqnr))
                        continue;

                memcpy(sblk, cur, sizeof(struct nvm_system_block));
                found = 1;
        }

        kfree(cur);

        return found;
}

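/* Mark every PPA collected in @s with block type @type in one atomic call */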
static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
{
        struct nvm_rq rqd;
        int ret;

        if (s->nr_ppas > dev->ops->max_phys_sect) {
                pr_err("nvm: unable to update all sysblocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));

        nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
        nvm_generic_to_addr_mode(dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd, type);
        nvm_free_rqd_ppalist(dev, &rqd);
        if (ret) {
                pr_err("nvm: sysblk failed bb mark\n");
                return -EINVAL;
        }

        return 0;
}

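/*
 * get_bb_tbl() callback: claim the first MAX_BLKS_PR_SYSBLK free blocks in
 * the LUN as new sysblks for the current row. Fails if a host-reserved
 * block already exists or if too few free blocks are available.
 */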
static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                                u8 *blks, int nr_blks, void *private)
{
        struct sysblk_scan *s = private;
        struct ppa_addr *sppa;
        int i, blkid = 0;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        for (i = 0; i < nr_blks; i++) {
                if (blks[i] == NVM_BLK_T_HOST)
                        return -EEXIST;

                if (blks[i] != NVM_BLK_T_FREE)
                        continue;

                sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
                sppa->g.ch = ppa.g.ch;
                sppa->g.lun = ppa.g.lun;
                sppa->g.blk = i;
                s->nr_ppas++;
                blkid++;

                pr_debug("nvm: use (%u %u %u) as sysblk\n",
                         sppa->g.ch, sppa->g.lun, sppa->g.blk);
                if (blkid >= MAX_BLKS_PR_SYSBLK)
                        return 0;
        }

        pr_err("nvm: sysblk failed to find enough free blocks\n");
        return -EINVAL;
}

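/*
 * Write the system block to the active block of each row, then read it
 * back and compare against the in-memory copy to verify that the program
 * operation succeeded.
 */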
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
                                struct sysblk_scan *s)
{
        struct nvm_system_block nvmsb;
        void *buf;
        int i, sect, ret = 0;
        struct ppa_addr *ppas;

        nvm_cpu_to_sysblk(&nvmsb, info);

        buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));

        ppas = kcalloc(dev->sec_per_pg, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!ppas) {
                ret = -ENOMEM;
                goto err;
        }

        /* Write and verify */
        for (i = 0; i < s->nr_rows; i++) {
                ppas[0] = s->ppas[scan_ppa_idx(i, s->act_blk[i])];

                pr_debug("nvm: writing sysblk to ppa (%u %u %u %u)\n",
                         ppas[0].g.ch, ppas[0].g.lun,
                         ppas[0].g.blk, ppas[0].g.pg);

                /* Expand to all sectors within a flash page */
                if (dev->sec_per_pg > 1) {
                        for (sect = 1; sect < dev->sec_per_pg; sect++) {
                                ppas[sect].ppa = ppas[0].ppa;
                                ppas[sect].g.sec = sect;
                        }
                }

                ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
                                     NVM_IO_SLC_MODE, buf, dev->pfpg_size);
                if (ret) {
                        pr_err("nvm: sysblk failed program (%u %u %u)\n",
                               ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
                        break;
                }

                ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
                                     NVM_IO_SLC_MODE, buf, dev->pfpg_size);
                if (ret) {
                        pr_err("nvm: sysblk failed read (%u %u %u)\n",
                               ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
                        break;
                }

                if (memcmp(buf, &nvmsb, sizeof(struct nvm_system_block))) {
                        pr_err("nvm: sysblk failed verify (%u %u %u)\n",
                               ppas[0].g.ch, ppas[0].g.lun, ppas[0].g.blk);
                        ret = -EINVAL;
                        break;
                }
        }

        kfree(ppas);
err:
        kfree(buf);

        return ret;
}

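/*
 * Advance each row to its next block (round-robin within the row) and
 * erase it, so that the following sysblk writes start from page 0.
 */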
static int nvm_prepare_new_sysblks(struct nvm_dev *dev, struct sysblk_scan *s)
{
        int i, ret;
        unsigned long nxt_blk;
        struct ppa_addr *ppa;

        for (i = 0; i < s->nr_rows; i++) {
                nxt_blk = (s->act_blk[i] + 1) % MAX_BLKS_PR_SYSBLK;
                ppa = &s->ppas[scan_ppa_idx(i, nxt_blk)];
                ppa->g.pg = ppa_to_slc(dev, 0);

                ret = nvm_erase_ppa(dev, ppa, 1);
                if (ret)
                        return ret;

                s->act_blk[i] = nxt_blk;
        }

        return 0;
}

int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        struct nvm_system_block *cur;
        int i, j, found = 0;
        int ret = -ENOMEM;

        /*
         * 1. setup sysblk locations
         * 2. get bad block list
         * 3. filter on host-specific (type 3)
         * 4. iterate through all and find the highest seq nr.
         * 5. return superblock information
         */

        if (!dev->ops->get_bb_tbl)
                return -EINVAL;

        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
        if (ret)
                goto err_sysblk;

        /* no sysblocks initialized */
        if (!s.nr_ppas)
                goto err_sysblk;

        cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
        if (!cur)
                goto err_sysblk;

        /* find the latest block across all sysblocks */
        for (i = 0; i < s.nr_rows; i++) {
                for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
                        struct ppa_addr ppa = s.ppas[scan_ppa_idx(i, j)];

                        ret = nvm_scan_block(dev, &ppa, cur);
                        if (ret > 0)
                                found = 1;
                        else if (ret < 0)
                                break;
                }
        }

        nvm_sysblk_to_cpu(info, cur);

        kfree(cur);
err_sysblk:
        mutex_unlock(&dev->mlock);

        if (found)
                return 1;
        return ret;
}

int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
{
        /* 1. for each latest superblock
         * 2. if room
         *    a. write new flash page entry with the updated information
         * 3. if no room
         *    a. find next available block on lun (linear search)
         *       if none, continue to next lun
         *       if none at all, report error. also report that it wasn't
         *       possible to write to all superblocks.
         *    b. write data to block.
         */
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        struct nvm_system_block *cur;
        int i, j, ppaidx, found = 0;
        int ret = -ENOMEM;

        if (!dev->ops->get_bb_tbl)
                return -EINVAL;

        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
        if (ret)
                goto err_sysblk;

        cur = kzalloc(sizeof(struct nvm_system_block), GFP_KERNEL);
        if (!cur)
                goto err_sysblk;

        /* Get the latest sysblk for each sysblk row */
        for (i = 0; i < s.nr_rows; i++) {
                found = 0;
                for (j = 0; j < MAX_BLKS_PR_SYSBLK; j++) {
                        ppaidx = scan_ppa_idx(i, j);
                        ret = nvm_scan_block(dev, &s.ppas[ppaidx], cur);
                        if (ret > 0) {
                                s.act_blk[i] = j;
                                found = 1;
                        } else if (ret < 0)
                                break;
                }
        }

        if (!found) {
                pr_err("nvm: no valid sysblks found to update\n");
                ret = -EINVAL;
                goto err_cur;
        }

        /*
         * All sysblocks found. Check that they have the same page id in
         * their flash blocks.
         */
        for (i = 1; i < s.nr_rows; i++) {
                struct ppa_addr l = s.ppas[scan_ppa_idx(0, s.act_blk[0])];
                struct ppa_addr r = s.ppas[scan_ppa_idx(i, s.act_blk[i])];

                if (l.g.pg != r.g.pg) {
                        pr_err("nvm: sysblks not on same page. Previous update failed.\n");
                        ret = -EINVAL;
                        goto err_cur;
                }
        }

        /*
         * Check that there hasn't been another update to the seqnr since we
         * began.
         */
        if ((new->seqnr - 1) != be32_to_cpu(cur->seqnr)) {
                pr_err("nvm: seq is not sequential\n");
                ret = -EINVAL;
                goto err_cur;
        }

        /*
         * When all pages in a block have been written, a new block is
         * selected and writing is performed on the new block.
         */
        if (s.ppas[scan_ppa_idx(0, s.act_blk[0])].g.pg ==
            dev->lps_per_blk - 1) {
                ret = nvm_prepare_new_sysblks(dev, &s);
                if (ret)
                        goto err_cur;
        }

        ret = nvm_write_and_verify(dev, new, &s);
err_cur:
        kfree(cur);
err_sysblk:
        mutex_unlock(&dev->mlock);

        return ret;
}

int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
{
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;
        int ret;

        /*
         * 1. select master blocks and select first available blks
         * 2. get bad block list
         * 3. mark MAX_SYSBLKS blocks as host-based device allocated.
         * 4. write and verify data to block
         */

        if (!dev->ops->get_bb_tbl || !dev->ops->set_bb_tbl)
                return -EINVAL;

        if (!(dev->mccap & NVM_ID_CAP_SLC) || !dev->lps_per_blk) {
                pr_err("nvm: memory does not support SLC access\n");
                return -EINVAL;
        }

        /* Index all sysblocks and mark them as host-driven */
        nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);

        mutex_lock(&dev->mlock);
        ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
        if (ret)
                goto err_mark;

        ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_HOST);
        if (ret)
                goto err_mark;

        /* Write to the first block of each row */
        ret = nvm_write_and_verify(dev, info, &s);
err_mark:
        mutex_unlock(&dev->mlock);
        return ret;
}

struct factory_blks {
        struct nvm_dev *dev;
        int flags;
        unsigned long *blks;
};

static int factory_nblks(int nblks)
{
        /* Round up to nearest BITS_PER_LONG */
        return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}

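/* Offset, in longs, of the per-(ch, lun) bitmap within the flat blks[] array */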
static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
{
        int nblks = factory_nblks(dev->blks_per_lun);

        return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
                        BITS_PER_LONG;
}

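/*
 * get_bb_tbl() callback: set, in the per-LUN bitmap, every block that must
 * NOT be erased according to the factory flags; bits left unset identify
 * the blocks to erase.
 */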
static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
                            u8 *blks, int nr_blks, void *private)
{
        struct factory_blks *f = private;
        int i, lunoff;

        nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
        if (nr_blks < 0)
                return nr_blks;

        lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);

        /* bits left unset correspond to blocks that must be erased */
        for (i = 0; i < nr_blks; i++) {
                switch (blks[i]) {
                case NVM_BLK_T_FREE:
                        if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
                                set_bit(i, &f->blks[lunoff]);
                        break;
                case NVM_BLK_T_HOST:
                        if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
                                set_bit(i, &f->blks[lunoff]);
                        break;
                case NVM_BLK_T_GRWN_BAD:
                        if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
                                set_bit(i, &f->blks[lunoff]);
                        break;
                default:
                        set_bit(i, &f->blks[lunoff]);
                        break;
                }
        }

        return 0;
}

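/*
 * Fill @erase_list with up to @max_ppas blocks still pending erase. One
 * block is taken from each (channel, lun) per pass, which spreads the
 * erases across the device instead of draining one LUN at a time.
 */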
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
                             int max_ppas, struct factory_blks *f)
{
        struct ppa_addr ppa;
        int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
        unsigned long *offset;

        while (!done) {
                done = 1;
                for (ch = 0; ch < dev->nr_chnls; ch++) {
                        for (lun = 0; lun < dev->luns_per_chnl; lun++) {
                                idx = factory_blk_offset(dev, ch, lun);
                                offset = &f->blks[idx];

                                blkid = find_first_zero_bit(offset,
                                                            dev->blks_per_lun);
                                if (blkid >= dev->blks_per_lun)
                                        continue;
                                set_bit(blkid, offset);

                                ppa.ppa = 0;
                                ppa.g.ch = ch;
                                ppa.g.lun = lun;
                                ppa.g.blk = blkid;
                                pr_debug("nvm: erase ppa (%u %u %u)\n",
                                         ppa.g.ch, ppa.g.lun, ppa.g.blk);

                                erase_list[ppa_cnt] = ppa;
                                ppa_cnt++;
                                done = 0;

                                if (ppa_cnt == max_ppas)
                                        return ppa_cnt;
                        }
                }
        }

        return ppa_cnt;
}

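/* Fetch the bad block table for one LUN and feed it to the callback @fn */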
static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
                               nvm_bb_update_fn *fn, void *priv)
{
        struct ppa_addr dev_ppa;
        int ret;

        dev_ppa = generic_to_dev_addr(dev, ppa);

        ret = dev->ops->get_bb_tbl(dev, dev_ppa, fn, priv);
        if (ret)
                pr_err("nvm: failed bb tbl for ch%u lun%u\n",
                       ppa.g.ch, ppa.g.lun);
        return ret;
}

static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
{
        int ch, lun, ret;
        struct ppa_addr ppa;

        ppa.ppa = 0;
        for (ch = 0; ch < dev->nr_chnls; ch++) {
                for (lun = 0; lun < dev->luns_per_chnl; lun++) {
                        ppa.g.ch = ch;
                        ppa.g.lun = lun;

                        ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
                                                  f);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

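/*
 * Factory-reset the device: erase every block selected by @flags and, when
 * NVM_FACTORY_RESET_HOST_BLKS is set, return the host-reserved system
 * blocks to the free pool.
 */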
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
        struct factory_blks f;
        struct ppa_addr *ppas;
        int ppa_cnt, ret = -ENOMEM;
        int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
        struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
        struct sysblk_scan s;

        f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
                         GFP_KERNEL);
        if (!f.blks)
                return ret;

        ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!ppas)
                goto err_blks;

        f.dev = dev;
        f.flags = flags;

        /* create list of blks to be erased */
        ret = nvm_fact_select_blks(dev, &f);
        if (ret)
                goto err_ppas;

        /* erase blocks until the list is empty */
        while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
                nvm_erase_ppa(dev, ppas, ppa_cnt);

        /* mark host-reserved blocks free */
        if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
                nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
                mutex_lock(&dev->mlock);
                ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
                                          sysblk_get_host_blks);
                if (!ret)
                        ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
                mutex_unlock(&dev->mlock);
        }
err_ppas:
        kfree(ppas);
err_blks:
        kfree(f.blks);
        return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);