/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#include "pblk.h"
#include <linux/time.h>

static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
			 struct ppa_addr *ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	int pos = pblk_dev_ppa_to_pos(geo, *ppa);

	pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos);
	atomic_long_inc(&pblk->erase_failed);

	atomic_dec(&line->blk_in_line);
	if (test_and_set_bit(pos, line->blk_bitmap))
		pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n",
							line->id, pos);

	pblk_line_run_ws(pblk, NULL, ppa, pblk_line_mark_bb);
}

static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_line *line;

	line = &pblk->lines[pblk_dev_ppa_to_line(rqd->ppa_addr)];
	atomic_dec(&line->left_seblks);

	if (rqd->error) {
		struct ppa_addr *ppa;

		ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
		if (!ppa)
			return;

		*ppa = rqd->ppa_addr;
		pblk_mark_bb(pblk, line, ppa);
	}
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;

	__pblk_end_io_erase(pblk, rqd);
	mempool_free(rqd, pblk->g_rq_pool);
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
			   u64 paddr)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;

	/* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
	 * table is modified with reclaimed sectors, a check is done to ensure
	 * that newer updates are not overwritten.
	 */
	spin_lock(&line->lock);
	if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
		spin_unlock(&line->lock);
		return;
	}

	if (test_and_set_bit(paddr, line->invalid_bitmap)) {
		WARN_ONCE(1, "pblk: double invalidate\n");
		spin_unlock(&line->lock);
		return;
	}
	le32_add_cpu(line->vsc, -1);

	if (line->state == PBLK_LINESTATE_CLOSED)
		move_list = pblk_line_gc_list(pblk, line);
	spin_unlock(&line->lock);

	if (move_list) {
		spin_lock(&l_mg->gc_lock);
		spin_lock(&line->lock);
		/* Prevent moving a line that has just been chosen for GC */
		if (line->state == PBLK_LINESTATE_GC ||
					line->state == PBLK_LINESTATE_FREE) {
			spin_unlock(&line->lock);
			spin_unlock(&l_mg->gc_lock);
			return;
		}
		spin_unlock(&line->lock);

		list_move_tail(&line->list, move_list);
		spin_unlock(&l_mg->gc_lock);
	}
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
	struct pblk_line *line;
	u64 paddr;
	int line_id;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
	BUG_ON(pblk_ppa_empty(ppa));
#endif

	line_id = pblk_tgt_ppa_to_line(ppa);
	line = &pblk->lines[line_id];
	paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

	__pblk_map_invalidate(pblk, line, paddr);
}

static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
				  unsigned int nr_secs)
{
	sector_t lba;

	spin_lock(&pblk->trans_lock);
	for (lba = slba; lba < slba + nr_secs; lba++) {
		struct ppa_addr ppa;

		ppa = pblk_trans_map_get(pblk, lba);

		if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);

		pblk_ppa_set_empty(&ppa);
		pblk_trans_map_set(pblk, lba, ppa);
	}
	spin_unlock(&pblk->trans_lock);
}

struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int rw)
{
	mempool_t *pool;
	struct nvm_rq *rqd;
	int rq_size;

	if (rw == WRITE) {
		pool = pblk->w_rq_pool;
		rq_size = pblk_w_rq_size;
	} else {
		pool = pblk->g_rq_pool;
		rq_size = pblk_g_rq_size;
	}

	rqd = mempool_alloc(pool, GFP_KERNEL);
	memset(rqd, 0, rq_size);

	return rqd;
}

void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int rw)
{
	mempool_t *pool;

	if (rw == WRITE)
		pool = pblk->w_rq_pool;
	else
		pool = pblk->g_rq_pool;

	mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
			 int nr_pages)
{
	struct bio_vec bv;
	int i;

	WARN_ON(off + nr_pages != bio->bi_vcnt);

	bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
	for (i = off; i < nr_pages + off; i++) {
		bv = bio->bi_io_vec[i];
		mempool_free(bv.bv_page, pblk->page_pool);
	}
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
		       int nr_pages)
{
	struct request_queue *q = pblk->dev->q;
	struct page *page;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		page = mempool_alloc(pblk->page_pool, flags);
		if (!page)
			goto err;

		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			mempool_free(page, pblk->page_pool);
			goto err;
		}
	}

	return 0;
err:
	/* i pages were successfully added before the failure; free them all */
	pblk_bio_free_pages(pblk, bio, 0, i);
	return -1;
}

static void pblk_write_kick(struct pblk *pblk)
{
	wake_up_process(pblk->writer_ts);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(unsigned long data)
{
	struct pblk *pblk = (struct pblk *)data;

	/* kick the write thread every tick to flush outstanding data */
	pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
	unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

	if (secs_avail >= pblk->min_write_pgs)
		pblk_write_kick(pblk);
}

void pblk_end_bio_sync(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	complete(waiting);
}

void pblk_end_io_sync(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

void pblk_flush_writer(struct pblk *pblk)
{
	struct bio *bio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio)
		return;

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_OP_FLUSH);
	bio->bi_private = &wait;
	bio->bi_end_io = pblk_end_bio_sync;

	ret = pblk_write_to_cache(pblk, bio, 0);
	if (ret == NVM_IO_OK) {
		if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
			pr_err("pblk: flush cache timed out\n");
		}
	} else if (ret != NVM_IO_DONE) {
		pr_err("pblk: tear down bio failed\n");
	}

	if (bio->bi_status)
		pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);

	bio_put(bio);
}

struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct list_head *move_list = NULL;
	int vsc = le32_to_cpu(*line->vsc);

	if (!vsc) {
		if (line->gc_group != PBLK_LINEGC_FULL) {
			line->gc_group = PBLK_LINEGC_FULL;
			move_list = &l_mg->gc_full_list;
		}
	} else if (vsc < lm->mid_thrs) {
		if (line->gc_group != PBLK_LINEGC_HIGH) {
			line->gc_group = PBLK_LINEGC_HIGH;
			move_list = &l_mg->gc_high_list;
		}
	} else if (vsc < lm->high_thrs) {
		if (line->gc_group != PBLK_LINEGC_MID) {
			line->gc_group = PBLK_LINEGC_MID;
			move_list = &l_mg->gc_mid_list;
		}
	} else if (vsc < line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_LOW) {
			line->gc_group = PBLK_LINEGC_LOW;
			move_list = &l_mg->gc_low_list;
		}
	} else if (vsc == line->sec_in_line) {
		if (line->gc_group != PBLK_LINEGC_EMPTY) {
			line->gc_group = PBLK_LINEGC_EMPTY;
			move_list = &l_mg->gc_empty_list;
		}
	} else {
		line->state = PBLK_LINESTATE_CORRUPT;
		line->gc_group = PBLK_LINEGC_NONE;
		move_list = &l_mg->corrupt_list;
		pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
						line->id, vsc,
						line->sec_in_line,
						lm->high_thrs, lm->mid_thrs);
	}

	return move_list;
}

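/*
 * Bucketing example (illustrative numbers only): with sec_in_line = 1000,
 * mid_thrs = 250 and high_thrs = 500, a closed line with vsc = 0 moves to
 * gc_full_list, vsc = 100 to gc_high_list (mostly invalid, cheapest to GC),
 * vsc = 400 to gc_mid_list, vsc = 800 to gc_low_list, and vsc = 1000 (all
 * sectors still valid) to gc_empty_list; vsc > sec_in_line can only be the
 * result of broken accounting and marks the line corrupt.
 */
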
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	sector_t slba = pblk_get_lba(bio);
	sector_t nr_secs = pblk_get_secs(bio);

	pblk_invalidate_range(pblk, slba, nr_secs);
}

struct ppa_addr pblk_get_lba_map(struct pblk *pblk, sector_t lba)
{
	struct ppa_addr ppa;

	spin_lock(&pblk->trans_lock);
	ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	return ppa;
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
	/* Empty page read is not necessarily an error (e.g., L2P recovery) */
	if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
		atomic_long_inc(&pblk->read_empty);
		return;
	}

	switch (rqd->error) {
	case NVM_RSP_WARN_HIGHECC:
		atomic_long_inc(&pblk->read_high_ecc);
		break;
	case NVM_RSP_ERR_FAILECC:
	case NVM_RSP_ERR_FAILCRC:
		atomic_long_inc(&pblk->read_failed);
		break;
	default:
		pr_err("pblk: unknown read error:%d\n", rqd->error);
	}
#ifdef CONFIG_NVM_DEBUG
	pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
	pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

#ifdef CONFIG_NVM_DEBUG
	struct ppa_addr *ppa_list;

	ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
	if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (rqd->opcode == NVM_OP_PWRITE) {
		struct pblk_line *line;
		struct ppa_addr ppa;
		int i;

		for (i = 0; i < rqd->nr_ppas; i++) {
			ppa = ppa_list[i];
			line = &pblk->lines[pblk_dev_ppa_to_line(ppa)];

			spin_lock(&line->lock);
			if (line->state != PBLK_LINESTATE_OPEN) {
				pr_err("pblk: bad ppa: line:%d,state:%d\n",
							line->id, line->state);
				WARN_ON(1);
				spin_unlock(&line->lock);
				return -EINVAL;
			}
			spin_unlock(&line->lock);
		}
	}
#endif
	return nvm_submit_io(dev, rqd);
}

struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
			      unsigned int nr_secs, unsigned int len,
			      gfp_t gfp_mask)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	void *kaddr = data;
	struct page *page;
	struct bio *bio;
	int i, ret;

	if (l_mg->emeta_alloc_type == PBLK_KMALLOC_META)
		return bio_map_kern(dev->q, kaddr, len, gfp_mask);

	bio = bio_kmalloc(gfp_mask, nr_secs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nr_secs; i++) {
		page = vmalloc_to_page(kaddr);
		if (!page) {
			pr_err("pblk: could not map vmalloc bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
		if (ret != PAGE_SIZE) {
			pr_err("pblk: could not add page to bio\n");
			bio_put(bio);
			bio = ERR_PTR(-ENOMEM);
			goto out;
		}

		kaddr += PAGE_SIZE;
	}
out:
	return bio;
}

int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
		   unsigned long secs_to_flush)
{
	int max = pblk->sec_per_write;
	int min = pblk->min_write_pgs;
	int secs_to_sync = 0;

	if (secs_avail >= max)
		secs_to_sync = max;
	else if (secs_avail >= min)
		secs_to_sync = min * (secs_avail / min);
	else if (secs_to_flush)
		secs_to_sync = min;

	return secs_to_sync;
}

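/*
 * Worked example (hypothetical geometry): with min_write_pgs = 8 and
 * sec_per_write = 64, secs_avail = 100 syncs the full 64 sectors,
 * secs_avail = 27 rounds down to 8 * (27 / 8) = 24 sectors, and
 * secs_avail = 5 syncs nothing unless secs_to_flush forces the minimum
 * write size of 8 sectors (the write path pads the missing sectors).
 */
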
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	line->cur_sec = addr - nr_secs;

	for (i = 0; i < nr_secs; i++, line->cur_sec--)
		WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;
	int i;

	/* logic error: ppa out-of-bounds. Prevent generating bad address */
	if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
		WARN(1, "pblk: page allocation out of bounds\n");
		nr_secs = pblk->lm.sec_per_line - line->cur_sec;
	}

	line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	for (i = 0; i < nr_secs; i++, line->cur_sec++)
		WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

	return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
	u64 addr;

	/* Lock needed in case a write fails and a recovery needs to remap
	 * failed write buffer entries
	 */
	spin_lock(&line->lock);
	addr = __pblk_alloc_page(pblk, line, nr_secs);
	line->left_msecs -= nr_secs;
	WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
	spin_unlock(&line->lock);

	return addr;
}

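/*
 * Allocation sketch (hypothetical line state): if map_bitmap has sectors
 * 0..99 set and cur_sec = 100, __pblk_alloc_page(pblk, line, 4) returns
 * addr = 100, sets bits 100..103 and leaves cur_sec = 104, so the next
 * allocation continues right after the last mapped sector.
 */
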
u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
	u64 paddr;

	spin_lock(&line->lock);
	paddr = find_next_zero_bit(line->map_bitmap,
					pblk->lm.sec_per_line, line->cur_sec);
	spin_unlock(&line->lock);

	return paddr;
}

/*
 * Submit emeta to one LUN in the raid line at a time to avoid a deadlock when
 * taking the per-LUN semaphore.
 */
static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
				     void *emeta_buf, u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	struct ppa_addr *ppa_list;
	dma_addr_t dma_ppa_list;
	int min = pblk->min_write_pgs;
	int left_ppas = lm->emeta_sec[0];
	int id = line->id;
	int rq_ppas, rq_len;
	int cmd_op, bio_op;
	int flags;
	int i, j;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, WRITE);
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk);
	} else
		return -EINVAL;

	ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_ppa_list);
	if (!ppa_list)
		return -ENOMEM;

next_rq:
	memset(&rqd, 0, sizeof(struct nvm_rq));

	rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
	rq_len = rq_ppas * geo->sec_size;

	bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_rqd_dma;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = rq_ppas;
	rqd.ppa_list = ppa_list;
	rqd.dma_ppa_list = dma_ppa_list;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	if (dir == WRITE) {
		for (i = 0; i < rqd.nr_ppas; ) {
			spin_lock(&line->lock);
			paddr = __pblk_alloc_page(pblk, line, min);
			spin_unlock(&line->lock);
			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, id);
		}
	} else {
		for (i = 0; i < rqd.nr_ppas; ) {
			struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
			int pos = pblk_dev_ppa_to_pos(geo, ppa);

			while (test_bit(pos, line->blk_bitmap)) {
				paddr += min;
				if (pblk_boundary_paddr_checks(pblk, paddr)) {
					pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
					bio_put(bio);
					ret = -EINTR;
					goto free_rqd_dma;
				}

				ppa = addr_to_gen_ppa(pblk, paddr, id);
				pos = pblk_dev_ppa_to_pos(geo, ppa);
			}

			if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
				pr_err("pblk: corrupt emeta line:%d\n",
								line->id);
				bio_put(bio);
				ret = -EINTR;
				goto free_rqd_dma;
			}

			for (j = 0; j < min; j++, i++, paddr++)
				rqd.ppa_list[i] =
					addr_to_gen_ppa(pblk, paddr, line->id);
		}
	}

	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_rqd_dma;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: emeta I/O timed out\n");
	}
	reinit_completion(&wait);

	bio_put(bio);

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

	emeta_buf += rq_len;
	left_ppas -= rq_ppas;
	if (left_ppas)
		goto next_rq;
free_rqd_dma:
	nvm_dev_dma_free(dev->parent, ppa_list, dma_ppa_list);
	return ret;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	int bit;

	/* This usually only happens on bad lines */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (bit >= lm->blk_per_line)
		return -1;

	return bit * geo->sec_per_pl;
}

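/*
 * Example (made-up geometry): with sec_per_pl = 16, if blocks 0 and 1 of a
 * line are bad in blk_bitmap, the first good block is bit 2 and smeta
 * starts at sector 2 * 16 = 32 of the line.
 */
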
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;
	int flags;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (dir == WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		flags = pblk_set_progr_mode(pblk, WRITE);
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
		flags = pblk_set_read_mode(pblk);
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	rqd.ppa_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_ppa_list);
	if (!rqd.ppa_list)
		return -ENOMEM;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.flags = flags;
	rqd.nr_ppas = lm->smeta_sec;
	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
		if (dir == WRITE)
			lba_list[paddr] = cpu_to_le64(ADDR_EMPTY);
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		pr_err("pblk: smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: smeta I/O timed out\n");
	}

	if (rqd.error) {
		if (dir == WRITE)
			pblk_log_write_err(pblk, &rqd);
		else
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.ppa_list, rqd.dma_ppa_list);

	return ret;
}

int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
{
	u64 bpaddr = pblk_line_smeta_start(pblk, line);

	return pblk_line_submit_smeta_io(pblk, line, bpaddr, READ);
}

int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
			 void *emeta_buf)
{
	return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
						line->emeta_ssec, READ);
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
			    struct ppa_addr ppa)
{
	rqd->opcode = NVM_OP_ERASE;
	rqd->ppa_addr = ppa;
	rqd->nr_ppas = 1;
	rqd->flags = pblk_set_progr_mode(pblk, ERASE);
	rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd;
	int ret;
	DECLARE_COMPLETION_ONSTACK(wait);

	memset(&rqd, 0, sizeof(struct nvm_rq));

	pblk_setup_e_rq(pblk, &rqd, ppa);

	rqd.end_io = pblk_end_io_sync;
	rqd.private = &wait;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io(pblk, &rqd);
	if (ret) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not sync erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));

		rqd.error = ret;
		goto out;
	}

	if (!wait_for_completion_io_timeout(&wait,
				msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) {
		pr_err("pblk: sync erase timed out\n");
	}

out:
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return 0;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.g.blk = line->id;

		atomic_dec(&line->left_eblks);
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		if (pblk_blk_erase_sync(pblk, ppa)) {
			pr_err("pblk: failed to erase line %d\n", line->id);
			return -ENOMEM;
		}
	} while (1);

	return 0;
}

static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				   struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pr_debug("pblk: line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version = cpu_to_le16(1);

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->nr_luns);

	/* Fill metadata among lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));
	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}

/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int nr_bb = 0;
	u64 off;
	int bit = -1;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->sec_per_pl;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->sec_per_blk;
		if (bit >= lm->emeta_bb)
			nr_bb++;
	}

	/* Mark smeta metadata sectors as bad sectors */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->sec_per_pl;
retry_smeta:
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, WRITE)) {
		pr_debug("pblk: line smeta I/O failed. Retry\n");
		off += geo->sec_per_pl;
		goto retry_smeta;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	bit = lm->sec_per_line;
	off = lm->sec_per_line - lm->emeta_sec[0];
	bitmap_set(line->invalid_bitmap, off, lm->emeta_sec[0]);
	while (nr_bb) {
		off -= geo->sec_per_pl;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->sec_per_pl);
			nr_bb--;
		}
	}

	line->sec_in_line -= lm->emeta_sec[0];
	line->emeta_ssec = off;
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pr_err("pblk: unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}

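/*
 * Accounting sketch (invented geometry): bb_template holds the sector
 * pattern of a single block within the line, so for a bad block at bit 3
 * with sec_per_pl = 16 the template is shifted by 3 * 16 = 48 bits and
 * OR'ed into map_bitmap; every sector of that block then looks "mapped"
 * and is never allocated, and with sec_per_blk = 256 the line loses 256
 * usable sectors on top of the smeta and emeta regions.
 */
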
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);

	line->map_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->map_bitmap)
		return -ENOMEM;
	memset(line->map_bitmap, 0, lm->sec_bitmap_len);

	/* invalid_bitmap is special since it is used when the line is closed.
	 * No need to zero it; it will be initialized using bb info from
	 * map_bitmap
	 */
	line->invalid_bitmap = mempool_alloc(pblk->line_meta_pool, GFP_ATOMIC);
	if (!line->invalid_bitmap) {
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
		return -ENOMEM;
	}

	spin_lock(&line->lock);
	if (line->state != PBLK_LINESTATE_FREE) {
		spin_unlock(&line->lock);
		WARN(1, "pblk: corrupted line state\n");
		return -EINTR;
	}
	line->state = PBLK_LINESTATE_OPEN;

	atomic_set(&line->left_eblks, blk_in_line);
	atomic_set(&line->left_seblks, blk_in_line);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	kref_init(&line->ref);

	return 0;
}

int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line);

	if (!pblk_line_init_bb(pblk, line, 0)) {
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	return 0;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line = NULL;
	int bit;

	lockdep_assert_held(&l_mg->free_lock);

retry_get:
	if (list_empty(&l_mg->free_list)) {
		pr_err("pblk: no free lines\n");
		goto out;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pr_debug("pblk: line %d is bad\n", line->id);
		goto retry_get;
	}

	if (pblk_line_prepare(pblk, line)) {
		pr_err("pblk: failed to prepare line %d\n", line->id);
		list_add(&line->list, &l_mg->free_list);
		return NULL;
	}

out:
	return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_free(pblk, line);
	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_erase(pblk, retry_line)) {
		spin_lock(&l_mg->free_lock);
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	pblk_rl_free_lines_dec(&pblk->rl, retry_line);

	return retry_line;
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int is_next = 0;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (l_mg->data_next) {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line);
	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

	if (pblk_line_erase(pblk, line))
		return NULL;

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	return line;
}

struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new;
	unsigned int left_seblks;
	int is_next = 0;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		return NULL;
	l_mg->data_line = new;

retry_line:
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				return NULL;
		} else {
			io_schedule();
		}
		goto retry_line;
	}

	spin_lock(&l_mg->free_lock);
	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (l_mg->data_next) {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
		is_next = 1;
	}

	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

	if (is_next)
		pblk_rl_free_lines_dec(&pblk->rl, l_mg->data_next);

retry_setup:
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			return NULL;

		goto retry_setup;
	}

	return new;
}

void pblk_line_free(struct pblk *pblk, struct pblk_line *line)
{
	if (line->map_bitmap)
		mempool_free(line->map_bitmap, pblk->line_meta_pool);
	if (line->invalid_bitmap)
		mempool_free(line->invalid_bitmap, pblk->line_meta_pool);

	*line->vsc = cpu_to_le32(EMPTY_ENTRY);

	line->map_bitmap = NULL;
	line->invalid_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;
}

void pblk_line_put(struct kref *ref)
{
	struct pblk_line *line = container_of(ref, struct pblk_line, ref);
	struct pblk *pblk = line->pblk;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(pblk, line);
	spin_unlock(&line->lock);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}

int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq *rqd;
	int err;

	rqd = mempool_alloc(pblk->g_rq_pool, GFP_KERNEL);
	memset(rqd, 0, pblk_g_rq_size);

	pblk_setup_e_rq(pblk, rqd, ppa);

	rqd->end_io = pblk_end_io_erase;
	rqd->private = pblk;

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		struct nvm_tgt_dev *dev = pblk->dev;
		struct nvm_geo *geo = &dev->geo;

		pr_err("pblk: could not async erase line:%d,blk:%d\n",
					pblk_dev_ppa_to_line(ppa),
					pblk_dev_ppa_to_pos(geo, ppa));
	}

	return err;
}

struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}

/* For now, always erase next line */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}

int pblk_line_is_full(struct pblk_line *line)
{
	return (line->left_msecs == 0);
}

void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct list_head *move_list;

	WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt closed line %d\n", line->id);

	spin_lock(&l_mg->free_lock);
	WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
	spin_unlock(&l_mg->free_lock);

	spin_lock(&l_mg->gc_lock);
	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_OPEN);
	line->state = PBLK_LINESTATE_CLOSED;
	move_list = pblk_line_gc_list(pblk, line);

	list_add_tail(&line->list, move_list);

	mempool_free(line->map_bitmap, pblk->line_meta_pool);
	line->map_bitmap = NULL;
	line->smeta = NULL;
	line->emeta = NULL;

	spin_unlock(&line->lock);
	spin_unlock(&l_mg->gc_lock);
}

void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;

	/* No need for exact vsc value; avoid a big line lock and take approx. */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);
	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);
}

void pblk_line_close_ws(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct pblk_line *line = line_ws->line;

	pblk_line_close(pblk, line);
	mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_mark_bb(struct work_struct *work)
{
	struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
									ws);
	struct pblk *pblk = line_ws->pblk;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct ppa_addr *ppa = line_ws->priv;
	int ret;

	ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
	if (ret) {
		struct pblk_line *line;
		int pos;

		line = &pblk->lines[pblk_dev_ppa_to_line(*ppa)];
		pos = pblk_dev_ppa_to_pos(&dev->geo, *ppa);

		pr_err("pblk: failed to mark bb, line:%d, pos:%d\n",
				line->id, pos);
	}

	kfree(ppa);
	mempool_free(line_ws, pblk->line_ws_pool);
}

void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *))
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(pblk->line_ws_pool, GFP_ATOMIC);
	if (!line_ws)
		return;

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(pblk->kw_wq, &line_ws->ws);
}

void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		  unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
	int ret;

	/*
	 * Only send one inflight I/O per LUN. Since we map at a page
	 * granularity, all ppas in the I/O will map to the same LUN
	 */
#ifdef CONFIG_NVM_DEBUG
	int i;

	for (i = 1; i < nr_ppas; i++)
		WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun ||
				ppa_list[0].g.ch != ppa_list[i].g.ch);
#endif
	/* If the LUN has been locked for this same request, do not attempt to
	 * lock it again
	 */
	if (test_and_set_bit(pos, lun_bitmap))
		return;

	rlun = &pblk->luns[pos];
	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
	if (ret) {
		switch (ret) {
		case -ETIME:
			pr_err("pblk: lun semaphore timed out\n");
			break;
		case -EINTR:
			pr_err("pblk: lun semaphore interrupted\n");
			break;
		}
	}
}

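/*
 * lun_bitmap sketch (hypothetical request): a write that maps entirely to
 * LUN 5 may call pblk_down_rq() once per partial submission. The first
 * call takes the LUN 5 write semaphore and sets bit 5 in lun_bitmap;
 * later calls see the bit and return immediately. pblk_up_rq() releases
 * every semaphore recorded in the bitmap once the request completes.
 */
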
void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
		unsigned long *lun_bitmap)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int nr_luns = geo->nr_luns;
	int bit = -1;

	while ((bit = find_next_bit(lun_bitmap, nr_luns, bit + 1)) < nr_luns) {
		rlun = &pblk->luns[bit];
		up(&rlun->wr_sem);
	}

	kfree(lun_bitmap);
}

void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
	struct ppa_addr l2p_ppa;

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	if (!pblk_addr_in_cache(l2p_ppa) && !pblk_ppa_empty(l2p_ppa))
		pblk_map_invalidate(pblk, l2p_ppa);

	pblk_trans_map_set(pblk, lba, ppa);
	spin_unlock(&pblk->trans_lock);
}

void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{
#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}

int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
		       struct pblk_line *gc_line)
{
	struct ppa_addr l2p_ppa;
	int ret = 1;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);

	/* Prevent updated entries from being overwritten by GC */
	if (pblk_addr_in_cache(l2p_ppa) || pblk_ppa_empty(l2p_ppa) ||
				pblk_tgt_ppa_to_line(l2p_ppa) != gc_line->id) {
		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}

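/*
 * Race sketch (hypothetical interleaving): GC reads lba 7 from line 3,
 * then a user write redirects lba 7 to a cache address before GC calls
 * pblk_update_map_gc(). The check above then sees that the L2P entry no
 * longer points into line 3 (or already sits in the write cache) and
 * drops the GC update, so the newer user data wins.
 */
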
void pblk_update_map_dev(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
			 struct ppa_addr entry_line)
{
	struct ppa_addr l2p_line;

#ifdef CONFIG_NVM_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
#ifdef CONFIG_NVM_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		pblk_map_invalidate(pblk, ppa);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	l2p_line = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (l2p_line.ppa != entry_line.ppa) {
		if (!pblk_ppa_empty(ppa))
			pblk_map_invalidate(pblk, ppa);
		goto out;
	}

#ifdef CONFIG_NVM_DEBUG
	WARN_ON(!pblk_addr_in_cache(l2p_line) && !pblk_ppa_empty(l2p_line));
#endif

	pblk_trans_map_set(pblk, lba, ppa);
out:
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
			 sector_t blba, int nr_secs)
{
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++)
		ppas[i] = pblk_trans_map_get(pblk, blba + i);
	spin_unlock(&pblk->trans_lock);
}

void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
			  u64 *lba_list, int nr_secs)
{
	sector_t lba;
	int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_secs; i++) {
		lba = lba_list[i];
		if (lba == ADDR_EMPTY) {
			ppas[i].ppa = ADDR_EMPTY;
		} else {
			/* logic error: lba out-of-bounds. Ignore update */
			if (!(lba < pblk->rl.nr_secs)) {
				WARN(1, "pblk: corrupted L2P map request\n");
				continue;
			}
			ppas[i] = pblk_trans_map_get(pblk, lba);
		}
	}
	spin_unlock(&pblk->trans_lock);
}