/*
 * Copyright (C) 2016 CNEX Labs
 * Initial: Javier Gonzalez <javier@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-recovery.c - pblk's recovery path
 */

#include "pblk.h"

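/* Requeue the sectors of a failed write: count the sectors flagged in the
 * ppa_status bitmap, pull the corresponding entries back out of the write
 * buffer into a fresh bio and resubmit them as a new write request.
 */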
void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	int max_secs = nvm_max_phys_sects(dev);
	struct bio *bio;
	unsigned int nr_rec_secs;
	unsigned int pgs_read;
	int ret;

	nr_rec_secs = bitmap_weight((unsigned long int *)&rqd->ppa_status,
								max_secs);

	bio = bio_alloc(GFP_KERNEL, nr_rec_secs);
	if (!bio) {
		pr_err("pblk: not able to create recovery bio\n");
		return;
	}

	bio->bi_iter.bi_sector = 0;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;
	rqd->nr_ppas = nr_rec_secs;

	pgs_read = pblk_rb_read_to_bio_list(&pblk->rwb, bio, &recovery->failed,
								nr_rec_secs);
	if (pgs_read != nr_rec_secs) {
		pr_err("pblk: could not read recovery entries\n");
		goto err;
	}

	if (pblk_setup_w_rec_rq(pblk, rqd, c_ctx)) {
		pr_err("pblk: could not setup recovery request\n");
		goto err;
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_rec_secs, &pblk->recov_writes);
#endif

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		goto err;
	}

	mempool_free(recovery, pblk->rec_pool);
	return;

err:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, WRITE);
}

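/* Prepare a write recovery request for the entries of @c_ctx that did not
 * complete: re-base the completion bitmap on the first failed entry and split
 * the valid/padded counters between the original context and the new one.
 */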
int pblk_recov_setup_rq(struct pblk *pblk, struct pblk_c_ctx *c_ctx,
			struct pblk_rec_ctx *recovery, u64 *comp_bits,
			unsigned int comp)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	int max_secs = nvm_max_phys_sects(dev);
	struct nvm_rq *rec_rqd;
	struct pblk_c_ctx *rec_ctx;
	int nr_entries = c_ctx->nr_valid + c_ctx->nr_padded;

	rec_rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rec_rqd)) {
		pr_err("pblk: could not create recovery req.\n");
		return -ENOMEM;
	}

	rec_ctx = nvm_rq_to_pdu(rec_rqd);

	/* Copy completion bitmap, but exclude the first X completed entries */
	bitmap_shift_right((unsigned long int *)&rec_rqd->ppa_status,
				(unsigned long int *)comp_bits,
				comp, max_secs);
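	/* Illustrative example: with comp == 2, bit 0 of the shifted
	 * ppa_status corresponds to the entry at c_ctx->sentry + 2, which is
	 * exactly where rec_ctx->sentry is re-based below.
	 */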

	/* Save the context for the entries that need to be re-written and
	 * update current context with the completed entries.
	 */
	rec_ctx->sentry = pblk_rb_wrap_pos(&pblk->rwb, c_ctx->sentry + comp);
	if (comp >= c_ctx->nr_valid) {
		rec_ctx->nr_valid = 0;
		rec_ctx->nr_padded = nr_entries - comp;

		c_ctx->nr_padded = comp - c_ctx->nr_valid;
	} else {
		rec_ctx->nr_valid = c_ctx->nr_valid - comp;
		rec_ctx->nr_padded = c_ctx->nr_padded;

		c_ctx->nr_valid = comp;
		c_ctx->nr_padded = 0;
	}

	recovery->rqd = rec_rqd;
	recovery->pblk = pblk;

	return 0;
}

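/* Return the lba list stored in the emeta buffer, or NULL if the emeta CRC
 * or the pblk magic identifier does not check out.
 */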
__le64 *pblk_recov_get_lba_list(struct pblk *pblk, struct line_emeta *emeta_buf)
{
	u32 crc;

	crc = pblk_calc_emeta_crc(pblk, emeta_buf);
	if (le32_to_cpu(emeta_buf->crc) != crc)
		return NULL;

	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC)
		return NULL;

	return emeta_to_lbas(pblk, emeta_buf);
}

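/* Rebuild the L2P entries of a written line from the lba list kept in its
 * emeta: walk the data sectors, skip bad blocks, account invalidated
 * (ADDR_EMPTY) sectors against the valid sector count and map the rest.
 */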
static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	__le64 *lba_list;
	int data_start;
	int nr_data_lbas, nr_valid_lbas, nr_lbas = 0;
	int i;

	lba_list = pblk_recov_get_lba_list(pblk, emeta_buf);
	if (!lba_list)
		return 1;

	data_start = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
	nr_data_lbas = lm->sec_per_line - lm->emeta_sec[0];
	nr_valid_lbas = le64_to_cpu(emeta_buf->nr_valid_lbas);

	for (i = data_start; i < nr_data_lbas && nr_lbas < nr_valid_lbas; i++) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_pblk_ppa(pblk, i, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		/* Do not update bad blocks */
		if (test_bit(pos, line->blk_bitmap))
			continue;

		if (le64_to_cpu(lba_list[i]) == ADDR_EMPTY) {
			spin_lock(&line->lock);
			if (test_and_set_bit(i, line->invalid_bitmap))
				WARN_ONCE(1, "pblk: rec. double invalidate:\n");
			else
				le32_add_cpu(line->vsc, -1);
			spin_unlock(&line->lock);

			continue;
		}

		pblk_update_map(pblk, le64_to_cpu(lba_list[i]), ppa);
		nr_lbas++;
	}

	if (nr_valid_lbas != nr_lbas)
		pr_err("pblk: line %d - inconsistent lba list(%llu/%d)\n",
				line->id, emeta_buf->nr_valid_lbas, nr_lbas);

	line->left_msecs = 0;

	return 0;
}

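/* Number of sectors in the line that can hold user data: everything except
 * smeta, emeta and the sectors belonging to bad blocks.
 */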
static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);

	return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
				nr_bb * geo->sec_per_blk;
}

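/* Resources (ppa/metadata DMA buffers, request and data buffer) allocated
 * once and handed around between the OOB recovery steps.
 */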
struct pblk_recov_alloc {
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	void *data;
	dma_addr_t dma_ppa_list;
	dma_addr_t dma_meta_list;
};

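/* Second pass after padding: read the sectors between @r_ptr and the line's
 * current write pointer and recover their L2P mappings from the per-sector
 * OOB metadata.
 */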
static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, u64 r_ptr)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 r_ptr_int;
	int left_ppas;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	left_ppas = line->cur_sec - r_ptr;
	if (!left_ppas)
		return 0;

	r_ptr_int = r_ptr;

next_read_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->flags = pblk_set_read_mode(pblk);
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			r_ptr_int += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, r_ptr_int, line->id);
	}

	/* If read fails, more padding is needed */
	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
		return -EINTR;
	}

	reinit_completion(&wait);

	/* At this point, the read should not fail. If it does, it is a problem
	 * we cannot recover from here. Need FTL log.
	 */
	if (rqd->error) {
		pr_err("pblk: L2P recovery failed (%d)\n", rqd->error);
		return -EINTR;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_read_rq;

	return 0;
}

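/* Pad @left_ppas sectors from the line's write pointer onwards: map each
 * padded sector as invalid, mark its lba as ADDR_EMPTY in both the OOB
 * metadata and the emeta lba list, and write the padding to the device.
 */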
static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
			      struct pblk_recov_alloc p, int left_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	u64 w_ptr = line->cur_sec;
	int left_line_ppas = line->left_msecs;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

next_pad_rq:
	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	memset(rqd, 0, pblk_g_rq_size);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PWRITE;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
		pos = pblk_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_pblk_ppa(pblk, w_ptr, line->id);
			pos = pblk_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
			struct ppa_addr dev_ppa;
			u64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);

			pblk_map_invalidate(pblk, dev_ppa);
			lba_list[w_ptr] = meta_list[i].lba = addr_empty;
			rqd->ppa_list[i] = dev_ppa;
		}
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery write timed out\n");
	}
	reinit_completion(&wait);

	left_line_ppas -= rq_ppas;
	left_ppas -= rq_ppas;
	if (left_ppas > 0 && left_line_ppas)
		goto next_pad_rq;

	return 0;
}

/* When this function is called, it means that not all upper pages have been
 * written in a page that contains valid data. In order to recover this data,
 * we first find the write pointer on the device, then we pad all necessary
 * sectors, and finally attempt to read the valid data.
 */
static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_recov_alloc p)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 w_ptr = 0, r_ptr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int rec_round;
	int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	/* we could recover up until the line write pointer */
	r_ptr = line->cur_sec;
	rec_round = 0;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->flags = pblk_set_read_mode(pblk);
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			w_ptr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, w_ptr, line->id);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
	}
	reinit_completion(&wait);

	/* This should not happen since the read failed during normal recovery,
	 * but the media works funny sometimes...
	 */
	if (!rec_round++ && !rqd->error) {
		rec_round = 0;
		for (i = 0; i < rqd->nr_ppas; i++, r_ptr++) {
			u64 lba = le64_to_cpu(meta_list[i].lba);

			if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
				continue;

			pblk_update_map(pblk, lba, rqd->ppa_list[i]);
		}
	}

	/* Reached the end of the written line */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		int pad_secs, nr_error_bits, bit;
		int ret;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		pad_secs = pblk_pad_distance(pblk);
		if (pad_secs > line->left_msecs)
			pad_secs = line->left_msecs;

		ret = pblk_recov_pad_oob(pblk, line, p, pad_secs);
		if (ret)
			pr_err("pblk: OOB padding failed (err:%d)\n", ret);

		ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
		if (ret)
			pr_err("pblk: OOB read failed (err:%d)\n", ret);

		line->left_ssecs = line->left_msecs;
		left_ppas = 0;
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}

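/* First OOB scan pass: walk the mapped sectors of the line and rebuild the
 * L2P table from the OOB lbas. If the scan stops on an error other than an
 * empty page, the line was only partially programmed and *done is cleared so
 * that the caller falls back to pblk_recov_scan_all_oob().
 */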
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
			       struct pblk_recov_alloc p, int *done)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct nvm_rq *rqd;
	struct bio *bio;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	u64 paddr;
	int rq_ppas, rq_len;
	int i, j;
	int ret = 0;
	int left_ppas = pblk_calc_sec_in_line(pblk, line);
	DECLARE_COMPLETION_ONSTACK(wait);

	ppa_list = p.ppa_list;
	meta_list = p.meta_list;
	rqd = p.rqd;
	data = p.data;
	dma_ppa_list = p.dma_ppa_list;
	dma_meta_list = p.dma_meta_list;

	*done = 1;

next_rq:
	memset(rqd, 0, pblk_g_rq_size);

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	if (!rq_ppas)
		rq_ppas = pblk->min_write_pgs;
	rq_len = rq_ppas * geo->sec_size;

	bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	rqd->bio = bio;
	rqd->opcode = NVM_OP_PREAD;
	rqd->flags = pblk_set_read_mode(pblk);
	rqd->meta_list = meta_list;
	rqd->nr_ppas = rq_ppas;
	rqd->ppa_list = ppa_list;
	rqd->dma_ppa_list = dma_ppa_list;
	rqd->dma_meta_list = dma_meta_list;
	rqd->end_io = pblk_end_io_sync;
	rqd->private = &wait;

	for (i = 0; i < rqd->nr_ppas; ) {
		struct ppa_addr ppa;
		int pos;

		paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
		ppa = addr_to_gen_ppa(pblk, paddr, line->id);
		pos = pblk_dev_ppa_to_pos(geo, ppa);

		while (test_bit(pos, line->blk_bitmap)) {
			paddr += pblk->min_write_pgs;
			ppa = addr_to_gen_ppa(pblk, paddr, line->id);
			pos = pblk_dev_ppa_to_pos(geo, ppa);
		}

		for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
			rqd->ppa_list[i] =
				addr_to_gen_ppa(pblk, paddr, line->id);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: I/O submission failed: %d\n", ret);
		bio_put(bio);
		return ret;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: L2P recovery read timed out\n");
	}
	reinit_completion(&wait);

	/* Reached the end of the written line */
	if (rqd->error) {
		int nr_error_bits, bit;

		bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
		nr_error_bits = rqd->nr_ppas - bit;

		/* Roll back failed sectors */
		line->cur_sec -= nr_error_bits;
		line->left_msecs += nr_error_bits;
		line->left_ssecs = line->left_msecs;
		bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);

		left_ppas = 0;
		rqd->nr_ppas = bit;

		if (rqd->error != NVM_RSP_ERR_EMPTYPAGE)
			*done = 0;
	}

	for (i = 0; i < rqd->nr_ppas; i++) {
		u64 lba = le64_to_cpu(meta_list[i].lba);

		if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
			continue;

		pblk_update_map(pblk, lba, rqd->ppa_list[i]);
	}

	left_ppas -= rq_ppas;
	if (left_ppas > 0)
		goto next_rq;

	return ret;
}

/* Scan the line for lbas stored in the out-of-band (OOB) area */
static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_rq *rqd;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	struct pblk_recov_alloc p;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;
	int done, ret = 0;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd))
		return PTR_ERR(rqd);

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list) {
		ret = -ENOMEM;
		goto free_rqd;
	}

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	data = kcalloc(pblk->max_write_pgs, geo->sec_size, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto free_meta_list;
	}

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	ret = pblk_recov_scan_oob(pblk, line, p, &done);
	if (ret) {
		pr_err("pblk: could not recover L2P from OOB\n");
		goto out;
	}

	if (!done) {
		ret = pblk_recov_scan_all_oob(pblk, line, p);
		if (ret) {
			pr_err("pblk: could not recover L2P from OOB\n");
			goto out;
		}
	}

	if (pblk_line_is_full(line))
		pblk_line_recov_close(pblk, line);

out:
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
free_rqd:
	pblk_free_rqd(pblk, rqd, READ);

	return ret;
}

/* Insert lines ordered by sequence number (seq_nr) on list */
static void pblk_recov_line_add_ordered(struct list_head *head,
					struct pblk_line *line)
{
	struct pblk_line *t = NULL;

	list_for_each_entry(t, head, list)
		if (t->seq_nr > line->seq_nr)
			break;

	__list_add(&line->list, t->list.prev, &t->list);
}

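/* Scan-based L2P recovery entry point: read smeta from every line to find the
 * written lines, order them by sequence number, rebuild the mappings of each
 * line from emeta (or from the OOB area when emeta cannot be read) and return
 * the line that was left open for user data, if any, so writing can resume.
 */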
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line, *tline, *data_line = NULL;
	struct pblk_smeta *smeta;
	struct pblk_emeta *emeta;
	struct line_smeta *smeta_buf;
	int found_lines = 0, recovered_lines = 0, open_lines = 0;
	int is_next = 0;
	int meta_line;
	int i, valid_uuid = 0;
	LIST_HEAD(recov_list);

	/* TODO: Implement FTL snapshot */

	/* Scan recovery - takes place when FTL snapshot fails */
	spin_lock(&l_mg->free_lock);
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	set_bit(meta_line, &l_mg->meta_bitmap);
	smeta = l_mg->sline_meta[meta_line];
	emeta = l_mg->eline_meta[meta_line];
	smeta_buf = smeta->buf;
	spin_unlock(&l_mg->free_lock);

	/* Order data lines using their sequence number */
	for (i = 0; i < l_mg->nr_lines; i++) {
		u32 crc;

		line = &pblk->lines[i];

		memset(smeta, 0, lm->smeta_len);
		line->smeta = smeta;
		line->lun_bitmap = ((void *)(smeta_buf)) +
						sizeof(struct line_smeta);

		/* Lines that cannot be read are assumed as not written here */
		if (pblk_line_read_smeta(pblk, line))
			continue;

		crc = pblk_calc_smeta_crc(pblk, smeta_buf);
		if (le32_to_cpu(smeta_buf->crc) != crc)
			continue;

		if (le32_to_cpu(smeta_buf->header.identifier) != PBLK_MAGIC)
			continue;

		if (le16_to_cpu(smeta_buf->header.version) != 1) {
			pr_err("pblk: found incompatible line version %u\n",
					smeta_buf->header.version);
			return ERR_PTR(-EINVAL);
		}

		/* The first valid instance uuid is used for initialization */
		if (!valid_uuid) {
			memcpy(pblk->instance_uuid, smeta_buf->header.uuid, 16);
			valid_uuid = 1;
		}

		if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 16)) {
			pr_debug("pblk: ignore line %u due to uuid mismatch\n",
					i);
			continue;
		}

		/* Update line metadata */
		spin_lock(&line->lock);
		line->id = le32_to_cpu(smeta_buf->header.id);
		line->type = le16_to_cpu(smeta_buf->header.type);
		line->seq_nr = le64_to_cpu(smeta_buf->seq_nr);
		spin_unlock(&line->lock);

		/* Update general metadata */
		spin_lock(&l_mg->free_lock);
		if (line->seq_nr >= l_mg->d_seq_nr)
			l_mg->d_seq_nr = line->seq_nr + 1;
		l_mg->nr_free_lines--;
		spin_unlock(&l_mg->free_lock);

		if (pblk_line_recov_alloc(pblk, line))
			goto out;

		pblk_recov_line_add_ordered(&recov_list, line);
		found_lines++;
		pr_debug("pblk: recovering data line %d, seq:%llu\n",
				line->id, smeta_buf->seq_nr);
	}

	if (!found_lines) {
		pblk_setup_uuid(pblk);

		spin_lock(&l_mg->free_lock);
		WARN_ON_ONCE(!test_and_clear_bit(meta_line,
							&l_mg->meta_bitmap));
		spin_unlock(&l_mg->free_lock);

		goto out;
	}

	/* Verify closed blocks and recover this portion of L2P table */
	list_for_each_entry_safe(line, tline, &recov_list, list) {
		int off, nr_bb;

		recovered_lines++;
		/* Calculate where emeta starts based on the line bb */
		off = lm->sec_per_line - lm->emeta_sec[0];
		nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
		off -= nr_bb * geo->sec_per_pl;

		memset(emeta->buf, 0, lm->emeta_len[0]);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200879 line->emeta = emeta;
880 line->emeta_ssec = off;
881
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200882 if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200883 pblk_recov_l2p_from_oob(pblk, line);
884 goto next;
885 }
886
887 if (pblk_recov_l2p_from_emeta(pblk, line))
888 pblk_recov_l2p_from_oob(pblk, line);
889
890next:
891 if (pblk_line_is_full(line)) {
892 struct list_head *move_list;
893
894 spin_lock(&line->lock);
895 line->state = PBLK_LINESTATE_CLOSED;
896 move_list = pblk_line_gc_list(pblk, line);
897 spin_unlock(&line->lock);
898
899 spin_lock(&l_mg->gc_lock);
900 list_move_tail(&line->list, move_list);
901 spin_unlock(&l_mg->gc_lock);
902
903 mempool_free(line->map_bitmap, pblk->line_meta_pool);
904 line->map_bitmap = NULL;
905 line->smeta = NULL;
906 line->emeta = NULL;
907 } else {
908 if (open_lines > 1)
909 pr_err("pblk: failed to recover L2P\n");
910
911 open_lines++;
912 line->meta_line = meta_line;
913 data_line = line;
914 }
915 }
916
917 spin_lock(&l_mg->free_lock);
918 if (!open_lines) {
919 WARN_ON_ONCE(!test_and_clear_bit(meta_line,
920 &l_mg->meta_bitmap));
921 pblk_line_replace_data(pblk);
922 } else {
923 /* Allocate next line for preparation */
924 l_mg->data_next = pblk_line_get(pblk);
925 if (l_mg->data_next) {
926 l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
927 l_mg->data_next->type = PBLK_LINETYPE_DATA;
928 is_next = 1;
929 }
930 }
931 spin_unlock(&l_mg->free_lock);
932
933 if (is_next) {
934 pblk_line_erase(pblk, l_mg->data_next);
935 pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);
936 }
937
938out:
939 if (found_lines != recovered_lines)
940 pr_err("pblk: failed to recover all found lines %d/%d\n",
941 found_lines, recovered_lines);
942
943 return data_line;
944}
945
/*
 * Pad until smeta can be read on current data line
 */
void pblk_recov_pad(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *line;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_rq *rqd;
	struct pblk_recov_alloc p;
	struct ppa_addr *ppa_list;
	struct pblk_sec_meta *meta_list;
	void *data;
	dma_addr_t dma_ppa_list, dma_meta_list;

	spin_lock(&l_mg->free_lock);
	line = l_mg->data_line;
	spin_unlock(&l_mg->free_lock);

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd))
		return;

	meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
	if (!meta_list)
		goto free_rqd;

	ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
	dma_ppa_list = dma_meta_list + pblk_dma_meta_size;

	data = kcalloc(pblk->max_write_pgs, geo->sec_size, GFP_KERNEL);
	if (!data)
		goto free_meta_list;

	p.ppa_list = ppa_list;
	p.meta_list = meta_list;
	p.rqd = rqd;
	p.data = data;
	p.dma_ppa_list = dma_ppa_list;
	p.dma_meta_list = dma_meta_list;

	if (pblk_recov_pad_oob(pblk, line, p, line->left_msecs)) {
		pr_err("pblk: Tear down padding failed\n");
		goto free_data;
	}

	pblk_line_close(pblk, line);

free_data:
	kfree(data);
free_meta_list:
	nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
free_rqd:
	pblk_free_rqd(pblk, rqd, READ);
}