blob: 84f3b4912b923d74694f0a1b6918348a71323bb4 [file] [log] [blame]
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001/*
2 * Copyright (C) 2016 CNEX Labs
3 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
4 * Matias Bjorling <matias@cnexlabs.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version
8 * 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * pblk-core.c - pblk's core functionality
16 *
17 */
18
Hans Holmberg4c44abf2018-10-09 13:11:52 +020019#define CREATE_TRACE_POINTS
20
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020021#include "pblk.h"
Hans Holmberg4c44abf2018-10-09 13:11:52 +020022#include "pblk-trace.h"
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020023
Javier González8bd40022017-10-13 14:46:44 +020024static void pblk_line_mark_bb(struct work_struct *work)
25{
26 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
27 ws);
28 struct pblk *pblk = line_ws->pblk;
29 struct nvm_tgt_dev *dev = pblk->dev;
30 struct ppa_addr *ppa = line_ws->priv;
31 int ret;
32
Matias Bjørlingaff3fb12018-10-09 13:11:36 +020033 ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
Javier González8bd40022017-10-13 14:46:44 +020034 if (ret) {
35 struct pblk_line *line;
36 int pos;
37
Javier Gonzálezcb216652018-10-09 13:11:42 +020038 line = pblk_ppa_to_line(pblk, *ppa);
Javier Gonzálezb1bcfda2018-01-05 14:16:06 +010039 pos = pblk_ppa_to_pos(&dev->geo, *ppa);
Javier González8bd40022017-10-13 14:46:44 +020040
Matias Bjørling4e495a42018-07-13 10:48:42 +020041 pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
Javier González8bd40022017-10-13 14:46:44 +020042 line->id, pos);
43 }
44
45 kfree(ppa);
Kent Overstreetb906bbb2018-05-20 18:25:50 -040046 mempool_free(line_ws, &pblk->gen_ws_pool);
Javier González8bd40022017-10-13 14:46:44 +020047}
48
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020049static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
Javier González32ef9412018-03-30 00:05:20 +020050 struct ppa_addr ppa_addr)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020051{
52 struct nvm_tgt_dev *dev = pblk->dev;
53 struct nvm_geo *geo = &dev->geo;
Javier González32ef9412018-03-30 00:05:20 +020054 struct ppa_addr *ppa;
55 int pos = pblk_ppa_to_pos(geo, ppa_addr);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020056
Matias Bjørling4e495a42018-07-13 10:48:42 +020057 pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020058 atomic_long_inc(&pblk->erase_failed);
59
Javier Gonzáleza44f53f2017-04-22 01:32:49 +020060 atomic_dec(&line->blk_in_line);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020061 if (test_and_set_bit(pos, line->blk_bitmap))
Matias Bjørling4e495a42018-07-13 10:48:42 +020062 pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020063 line->id, pos);
64
Javier González32ef9412018-03-30 00:05:20 +020065 /* Not necessary to mark bad blocks on 2.0 spec. */
66 if (geo->version == NVM_OCSSD_SPEC_20)
67 return;
68
69 ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
70 if (!ppa)
71 return;
72
73 *ppa = ppa_addr;
Javier Gonzálezb84ae4a82017-10-13 14:46:07 +020074 pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
75 GFP_ATOMIC, pblk->bb_wq);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020076}
77
78static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
79{
Javier González32ef9412018-03-30 00:05:20 +020080 struct nvm_tgt_dev *dev = pblk->dev;
81 struct nvm_geo *geo = &dev->geo;
82 struct nvm_chk_meta *chunk;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020083 struct pblk_line *line;
Javier González32ef9412018-03-30 00:05:20 +020084 int pos;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020085
Javier Gonzálezcb216652018-10-09 13:11:42 +020086 line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
Javier González32ef9412018-03-30 00:05:20 +020087 pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
88 chunk = &line->chks[pos];
89
Javier Gonzáleza4bd2172017-04-15 20:55:50 +020090 atomic_dec(&line->left_seblks);
91
92 if (rqd->error) {
Hans Holmberg4209c312018-10-09 13:11:55 +020093 trace_pblk_chunk_reset(pblk_disk_name(pblk),
94 &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
95
Javier González32ef9412018-03-30 00:05:20 +020096 chunk->state = NVM_CHK_ST_OFFLINE;
97 pblk_mark_bb(pblk, line, rqd->ppa_addr);
98 } else {
Hans Holmberg4209c312018-10-09 13:11:55 +020099 trace_pblk_chunk_reset(pblk_disk_name(pblk),
100 &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
101
Javier González32ef9412018-03-30 00:05:20 +0200102 chunk->state = NVM_CHK_ST_FREE;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200103 }
Javier González588726d32017-06-26 11:57:29 +0200104
Hans Holmberg4c44abf2018-10-09 13:11:52 +0200105 trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
106 chunk->state);
107
Javier González588726d32017-06-26 11:57:29 +0200108 atomic_dec(&pblk->inflight_io);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200109}
110
111/* Erase completion assumes that only one block is erased at the time */
112static void pblk_end_io_erase(struct nvm_rq *rqd)
113{
114 struct pblk *pblk = rqd->private;
115
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200116 __pblk_end_io_erase(pblk, rqd);
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400117 mempool_free(rqd, &pblk->e_rq_pool);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200118}
119
Javier González32ef9412018-03-30 00:05:20 +0200120/*
121 * Get information for all chunks from the device.
122 *
123 * The caller is responsible for freeing the returned structure
124 */
Matias Bjørlingaff3fb12018-10-09 13:11:36 +0200125struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
Javier González32ef9412018-03-30 00:05:20 +0200126{
127 struct nvm_tgt_dev *dev = pblk->dev;
128 struct nvm_geo *geo = &dev->geo;
129 struct nvm_chk_meta *meta;
130 struct ppa_addr ppa;
131 unsigned long len;
132 int ret;
133
134 ppa.ppa = 0;
135
136 len = geo->all_chunks * sizeof(*meta);
137 meta = kzalloc(len, GFP_KERNEL);
138 if (!meta)
139 return ERR_PTR(-ENOMEM);
140
Matias Bjørlingaff3fb12018-10-09 13:11:36 +0200141 ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
Javier González32ef9412018-03-30 00:05:20 +0200142 if (ret) {
143 kfree(meta);
144 return ERR_PTR(-EIO);
145 }
146
147 return meta;
148}
149
150struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
151 struct nvm_chk_meta *meta,
152 struct ppa_addr ppa)
153{
154 struct nvm_tgt_dev *dev = pblk->dev;
155 struct nvm_geo *geo = &dev->geo;
156 int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
157 int lun_off = ppa.m.pu * geo->num_chk;
158 int chk_off = ppa.m.chk;
159
160 return meta + ch_off + lun_off + chk_off;
161}
162
Javier González0880a9a2017-06-26 11:57:19 +0200163void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
164 u64 paddr)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200165{
166 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
167 struct list_head *move_list = NULL;
168
169 /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
170 * table is modified with reclaimed sectors, a check is done to endure
171 * that newer updates are not overwritten.
172 */
173 spin_lock(&line->lock);
Javier Gonzálezd3401212017-10-13 14:46:14 +0200174 WARN_ON(line->state == PBLK_LINESTATE_FREE);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200175
176 if (test_and_set_bit(paddr, line->invalid_bitmap)) {
177 WARN_ONCE(1, "pblk: double invalidate\n");
178 spin_unlock(&line->lock);
179 return;
180 }
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200181 le32_add_cpu(line->vsc, -1);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200182
183 if (line->state == PBLK_LINESTATE_CLOSED)
184 move_list = pblk_line_gc_list(pblk, line);
185 spin_unlock(&line->lock);
186
187 if (move_list) {
188 spin_lock(&l_mg->gc_lock);
189 spin_lock(&line->lock);
190 /* Prevent moving a line that has just been chosen for GC */
Javier Gonzálezd3401212017-10-13 14:46:14 +0200191 if (line->state == PBLK_LINESTATE_GC) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200192 spin_unlock(&line->lock);
193 spin_unlock(&l_mg->gc_lock);
194 return;
195 }
196 spin_unlock(&line->lock);
197
198 list_move_tail(&line->list, move_list);
199 spin_unlock(&l_mg->gc_lock);
200 }
201}
202
203void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
204{
205 struct pblk_line *line;
206 u64 paddr;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200207
Matias Bjørling880eda52018-07-13 10:48:37 +0200208#ifdef CONFIG_NVM_PBLK_DEBUG
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200209 /* Callers must ensure that the ppa points to a device address */
210 BUG_ON(pblk_addr_in_cache(ppa));
211 BUG_ON(pblk_ppa_empty(ppa));
212#endif
213
Javier Gonzálezcb216652018-10-09 13:11:42 +0200214 line = pblk_ppa_to_line(pblk, ppa);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200215 paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
216
217 __pblk_map_invalidate(pblk, line, paddr);
218}
219
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200220static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
221 unsigned int nr_secs)
222{
223 sector_t lba;
224
225 spin_lock(&pblk->trans_lock);
226 for (lba = slba; lba < slba + nr_secs; lba++) {
227 struct ppa_addr ppa;
228
229 ppa = pblk_trans_map_get(pblk, lba);
230
231 if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
232 pblk_map_invalidate(pblk, ppa);
233
234 pblk_ppa_set_empty(&ppa);
235 pblk_trans_map_set(pblk, lba, ppa);
236 }
237 spin_unlock(&pblk->trans_lock);
238}
239
Javier González67bf26a2017-10-13 14:46:20 +0200240/* Caller must guarantee that the request is a valid type */
241struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200242{
243 mempool_t *pool;
244 struct nvm_rq *rqd;
245 int rq_size;
246
Javier González67bf26a2017-10-13 14:46:20 +0200247 switch (type) {
248 case PBLK_WRITE:
249 case PBLK_WRITE_INT:
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400250 pool = &pblk->w_rq_pool;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200251 rq_size = pblk_w_rq_size;
Javier González67bf26a2017-10-13 14:46:20 +0200252 break;
253 case PBLK_READ:
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400254 pool = &pblk->r_rq_pool;
Javier González084ec9b2017-06-26 16:27:13 -0600255 rq_size = pblk_g_rq_size;
Javier González67bf26a2017-10-13 14:46:20 +0200256 break;
257 default:
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400258 pool = &pblk->e_rq_pool;
Javier González67bf26a2017-10-13 14:46:20 +0200259 rq_size = pblk_g_rq_size;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200260 }
261
262 rqd = mempool_alloc(pool, GFP_KERNEL);
263 memset(rqd, 0, rq_size);
264
265 return rqd;
266}
267
Javier González67bf26a2017-10-13 14:46:20 +0200268/* Typically used on completion path. Cannot guarantee request consistency */
269void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200270{
Javier González67bf26a2017-10-13 14:46:20 +0200271 struct nvm_tgt_dev *dev = pblk->dev;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200272 mempool_t *pool;
273
Javier González67bf26a2017-10-13 14:46:20 +0200274 switch (type) {
275 case PBLK_WRITE:
276 kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
Gustavo A. R. Silva884b0312018-07-13 10:48:43 +0200277 /* fall through */
Javier González67bf26a2017-10-13 14:46:20 +0200278 case PBLK_WRITE_INT:
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400279 pool = &pblk->w_rq_pool;
Javier González67bf26a2017-10-13 14:46:20 +0200280 break;
281 case PBLK_READ:
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400282 pool = &pblk->r_rq_pool;
Javier González67bf26a2017-10-13 14:46:20 +0200283 break;
284 case PBLK_ERASE:
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400285 pool = &pblk->e_rq_pool;
Javier González67bf26a2017-10-13 14:46:20 +0200286 break;
287 default:
Matias Bjørling4e495a42018-07-13 10:48:42 +0200288 pblk_err(pblk, "trying to free unknown rqd type\n");
Javier González67bf26a2017-10-13 14:46:20 +0200289 return;
290 }
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200291
Igor Konopkof142ac02018-06-01 16:41:08 +0200292 if (rqd->meta_list)
293 nvm_dev_dma_free(dev->parent, rqd->meta_list,
294 rqd->dma_meta_list);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200295 mempool_free(rqd, pool);
296}
297
298void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
299 int nr_pages)
300{
301 struct bio_vec bv;
302 int i;
303
304 WARN_ON(off + nr_pages != bio->bi_vcnt);
305
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200306 for (i = off; i < nr_pages + off; i++) {
307 bv = bio->bi_io_vec[i];
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400308 mempool_free(bv.bv_page, &pblk->page_bio_pool);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200309 }
310}
311
312int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
313 int nr_pages)
314{
315 struct request_queue *q = pblk->dev->q;
316 struct page *page;
317 int i, ret;
318
319 for (i = 0; i < nr_pages; i++) {
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400320 page = mempool_alloc(&pblk->page_bio_pool, flags);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200321
322 ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
323 if (ret != PBLK_EXPOSED_PAGE_SIZE) {
Matias Bjørling4e495a42018-07-13 10:48:42 +0200324 pblk_err(pblk, "could not add page to bio\n");
Kent Overstreetb906bbb2018-05-20 18:25:50 -0400325 mempool_free(page, &pblk->page_bio_pool);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200326 goto err;
327 }
328 }
329
330 return 0;
331err:
Igor Konopkof142ac02018-06-01 16:41:08 +0200332 pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200333 return -1;
334}
335
Hans Holmbergcc9c9a02018-06-01 16:41:13 +0200336void pblk_write_kick(struct pblk *pblk)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200337{
338 wake_up_process(pblk->writer_ts);
339 mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
340}
341
Kees Cook87c1d2d2017-10-17 21:10:19 -0700342void pblk_write_timer_fn(struct timer_list *t)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200343{
Kees Cook87c1d2d2017-10-17 21:10:19 -0700344 struct pblk *pblk = from_timer(pblk, t, wtimer);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200345
346 /* kick the write thread every tick to flush outstanding data */
347 pblk_write_kick(pblk);
348}
349
350void pblk_write_should_kick(struct pblk *pblk)
351{
352 unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
353
354 if (secs_avail >= pblk->min_write_pgs)
355 pblk_write_kick(pblk);
356}
357
Javier González8bd40022017-10-13 14:46:44 +0200358static void pblk_wait_for_meta(struct pblk *pblk)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200359{
Javier González588726d32017-06-26 11:57:29 +0200360 do {
361 if (!atomic_read(&pblk->inflight_io))
362 break;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200363
Javier González588726d32017-06-26 11:57:29 +0200364 schedule();
365 } while (1);
366}
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200367
Javier González588726d32017-06-26 11:57:29 +0200368static void pblk_flush_writer(struct pblk *pblk)
369{
370 pblk_rb_flush(&pblk->rwb);
371 do {
Javier Gonzálezee8d5c12017-06-30 17:56:40 +0200372 if (!pblk_rb_sync_count(&pblk->rwb))
Javier González588726d32017-06-26 11:57:29 +0200373 break;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200374
Javier Gonzálezee8d5c12017-06-30 17:56:40 +0200375 pblk_write_kick(pblk);
Javier González588726d32017-06-26 11:57:29 +0200376 schedule();
377 } while (1);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200378}
379
380struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
381{
382 struct pblk_line_meta *lm = &pblk->lm;
383 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
384 struct list_head *move_list = NULL;
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200385 int vsc = le32_to_cpu(*line->vsc);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200386
Javier González476118c2017-06-26 11:57:26 +0200387 lockdep_assert_held(&line->lock);
388
Hans Holmberg48b8d202018-06-01 16:41:06 +0200389 if (line->w_err_gc->has_write_err) {
390 if (line->gc_group != PBLK_LINEGC_WERR) {
391 line->gc_group = PBLK_LINEGC_WERR;
392 move_list = &l_mg->gc_werr_list;
393 pblk_rl_werr_line_in(&pblk->rl);
394 }
395 } else if (!vsc) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200396 if (line->gc_group != PBLK_LINEGC_FULL) {
397 line->gc_group = PBLK_LINEGC_FULL;
398 move_list = &l_mg->gc_full_list;
399 }
Javier Gonzálezb20ba1b2017-06-26 11:57:27 +0200400 } else if (vsc < lm->high_thrs) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200401 if (line->gc_group != PBLK_LINEGC_HIGH) {
402 line->gc_group = PBLK_LINEGC_HIGH;
403 move_list = &l_mg->gc_high_list;
404 }
Javier Gonzálezb20ba1b2017-06-26 11:57:27 +0200405 } else if (vsc < lm->mid_thrs) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200406 if (line->gc_group != PBLK_LINEGC_MID) {
407 line->gc_group = PBLK_LINEGC_MID;
408 move_list = &l_mg->gc_mid_list;
409 }
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200410 } else if (vsc < line->sec_in_line) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200411 if (line->gc_group != PBLK_LINEGC_LOW) {
412 line->gc_group = PBLK_LINEGC_LOW;
413 move_list = &l_mg->gc_low_list;
414 }
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200415 } else if (vsc == line->sec_in_line) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200416 if (line->gc_group != PBLK_LINEGC_EMPTY) {
417 line->gc_group = PBLK_LINEGC_EMPTY;
418 move_list = &l_mg->gc_empty_list;
419 }
420 } else {
421 line->state = PBLK_LINESTATE_CORRUPT;
Hans Holmbergf2937232018-10-09 13:11:53 +0200422 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
423 line->state);
424
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200425 line->gc_group = PBLK_LINEGC_NONE;
426 move_list = &l_mg->corrupt_list;
Matias Bjørling4e495a42018-07-13 10:48:42 +0200427 pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200428 line->id, vsc,
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200429 line->sec_in_line,
430 lm->high_thrs, lm->mid_thrs);
431 }
432
433 return move_list;
434}
435
/* Handle a discard bio by invalidating the L2P range it covers. */
void pblk_discard(struct pblk *pblk, struct bio *bio)
{
	pblk_invalidate_range(pblk, pblk_get_lba(bio), pblk_get_secs(bio));
}
443
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200444void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
445{
446 atomic_long_inc(&pblk->write_failed);
Matias Bjørling880eda52018-07-13 10:48:37 +0200447#ifdef CONFIG_NVM_PBLK_DEBUG
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200448 pblk_print_failed_rqd(pblk, rqd, rqd->error);
449#endif
450}
451
452void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
453{
454 /* Empty page read is not necessarily an error (e.g., L2P recovery) */
455 if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
456 atomic_long_inc(&pblk->read_empty);
457 return;
458 }
459
460 switch (rqd->error) {
461 case NVM_RSP_WARN_HIGHECC:
462 atomic_long_inc(&pblk->read_high_ecc);
463 break;
464 case NVM_RSP_ERR_FAILECC:
465 case NVM_RSP_ERR_FAILCRC:
466 atomic_long_inc(&pblk->read_failed);
467 break;
468 default:
Matias Bjørling4e495a42018-07-13 10:48:42 +0200469 pblk_err(pblk, "unknown read error:%d\n", rqd->error);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200470 }
Matias Bjørling880eda52018-07-13 10:48:37 +0200471#ifdef CONFIG_NVM_PBLK_DEBUG
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200472 pblk_print_failed_rqd(pblk, rqd, rqd->error);
473#endif
474}
475
Javier Gonzálezc2e9f5d2017-06-26 11:57:14 +0200476void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
477{
478 pblk->sec_per_write = sec_per_write;
479}
480
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200481int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
482{
483 struct nvm_tgt_dev *dev = pblk->dev;
484
Javier González588726d32017-06-26 11:57:29 +0200485 atomic_inc(&pblk->inflight_io);
486
Matias Bjørling880eda52018-07-13 10:48:37 +0200487#ifdef CONFIG_NVM_PBLK_DEBUG
Javier Gonzálezb6730dd42018-06-01 15:04:20 +0200488 if (pblk_check_io(pblk, rqd))
489 return NVM_IO_ERR;
490#endif
491
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200492 return nvm_submit_io(dev, rqd);
493}
494
Hans Holmberg4c44abf2018-10-09 13:11:52 +0200495void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
496{
497 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
498
499 int i;
500
501 for (i = 0; i < rqd->nr_ppas; i++) {
502 struct ppa_addr *ppa = &ppa_list[i];
503 struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
504 u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
505
506 if (caddr == 0)
507 trace_pblk_chunk_state(pblk_disk_name(pblk),
508 ppa, NVM_CHK_ST_OPEN);
509 else if (caddr == chunk->cnlb)
510 trace_pblk_chunk_state(pblk_disk_name(pblk),
511 ppa, NVM_CHK_ST_CLOSED);
512 }
513}
514
Javier González1a94b2d2017-10-13 14:46:47 +0200515int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
516{
517 struct nvm_tgt_dev *dev = pblk->dev;
Hans Holmberg4c44abf2018-10-09 13:11:52 +0200518 int ret;
Javier González1a94b2d2017-10-13 14:46:47 +0200519
Javier González1a94b2d2017-10-13 14:46:47 +0200520 atomic_inc(&pblk->inflight_io);
521
Matias Bjørling880eda52018-07-13 10:48:37 +0200522#ifdef CONFIG_NVM_PBLK_DEBUG
Javier Gonzálezb6730dd42018-06-01 15:04:20 +0200523 if (pblk_check_io(pblk, rqd))
524 return NVM_IO_ERR;
525#endif
526
Hans Holmberg4c44abf2018-10-09 13:11:52 +0200527 ret = nvm_submit_io_sync(dev, rqd);
528
529 if (trace_pblk_chunk_state_enabled() && !ret &&
530 rqd->opcode == NVM_OP_PWRITE)
531 pblk_check_chunk_state_update(pblk, rqd);
532
533 return ret;
Javier González1a94b2d2017-10-13 14:46:47 +0200534}
535
/* End-io for bios built by pblk_bio_map_addr(): just drop the reference */
static void pblk_bio_map_addr_endio(struct bio *bio)
{
	bio_put(bio);
}
540
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200541struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
542 unsigned int nr_secs, unsigned int len,
Javier Gonzálezde54e702017-06-30 17:56:39 +0200543 int alloc_type, gfp_t gfp_mask)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200544{
545 struct nvm_tgt_dev *dev = pblk->dev;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200546 void *kaddr = data;
547 struct page *page;
548 struct bio *bio;
549 int i, ret;
550
Javier Gonzálezde54e702017-06-30 17:56:39 +0200551 if (alloc_type == PBLK_KMALLOC_META)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200552 return bio_map_kern(dev->q, kaddr, len, gfp_mask);
553
554 bio = bio_kmalloc(gfp_mask, nr_secs);
555 if (!bio)
556 return ERR_PTR(-ENOMEM);
557
558 for (i = 0; i < nr_secs; i++) {
559 page = vmalloc_to_page(kaddr);
560 if (!page) {
Matias Bjørling4e495a42018-07-13 10:48:42 +0200561 pblk_err(pblk, "could not map vmalloc bio\n");
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200562 bio_put(bio);
563 bio = ERR_PTR(-ENOMEM);
564 goto out;
565 }
566
567 ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0);
568 if (ret != PAGE_SIZE) {
Matias Bjørling4e495a42018-07-13 10:48:42 +0200569 pblk_err(pblk, "could not add page to bio\n");
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200570 bio_put(bio);
571 bio = ERR_PTR(-ENOMEM);
572 goto out;
573 }
574
575 kaddr += PAGE_SIZE;
576 }
Javier González55e836d2017-10-13 14:46:16 +0200577
578 bio->bi_end_io = pblk_bio_map_addr_endio;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200579out:
580 return bio;
581}
582
583int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
584 unsigned long secs_to_flush)
585{
Javier Gonzálezc2e9f5d2017-06-26 11:57:14 +0200586 int max = pblk->sec_per_write;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200587 int min = pblk->min_write_pgs;
588 int secs_to_sync = 0;
589
590 if (secs_avail >= max)
591 secs_to_sync = max;
592 else if (secs_avail >= min)
593 secs_to_sync = min * (secs_avail / min);
594 else if (secs_to_flush)
595 secs_to_sync = min;
596
597 return secs_to_sync;
598}
599
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200600void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
601{
602 u64 addr;
603 int i;
604
Rakesh Pandite57903f2017-10-13 14:45:56 +0200605 spin_lock(&line->lock);
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200606 addr = find_next_zero_bit(line->map_bitmap,
607 pblk->lm.sec_per_line, line->cur_sec);
608 line->cur_sec = addr - nr_secs;
609
610 for (i = 0; i < nr_secs; i++, line->cur_sec--)
611 WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
Rakesh Pandite57903f2017-10-13 14:45:56 +0200612 spin_unlock(&line->lock);
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200613}
614
615u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200616{
617 u64 addr;
618 int i;
619
Javier González476118c2017-06-26 11:57:26 +0200620 lockdep_assert_held(&line->lock);
621
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200622 /* logic error: ppa out-of-bounds. Prevent generating bad address */
623 if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
624 WARN(1, "pblk: page allocation out of bounds\n");
625 nr_secs = pblk->lm.sec_per_line - line->cur_sec;
626 }
627
628 line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
629 pblk->lm.sec_per_line, line->cur_sec);
630 for (i = 0; i < nr_secs; i++, line->cur_sec++)
631 WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));
632
633 return addr;
634}
635
636u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
637{
638 u64 addr;
639
640 /* Lock needed in case a write fails and a recovery needs to remap
641 * failed write buffer entries
642 */
643 spin_lock(&line->lock);
644 addr = __pblk_alloc_page(pblk, line, nr_secs);
645 line->left_msecs -= nr_secs;
646 WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
647 spin_unlock(&line->lock);
648
649 return addr;
650}
651
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200652u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
653{
654 u64 paddr;
655
656 spin_lock(&line->lock);
657 paddr = find_next_zero_bit(line->map_bitmap,
658 pblk->lm.sec_per_line, line->cur_sec);
659 spin_unlock(&line->lock);
660
661 return paddr;
662}
663
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200664/*
665 * Submit emeta to one LUN in the raid line at the time to avoid a deadlock when
666 * taking the per LUN semaphore.
667 */
668static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200669 void *emeta_buf, u64 paddr, int dir)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200670{
671 struct nvm_tgt_dev *dev = pblk->dev;
672 struct nvm_geo *geo = &dev->geo;
Javier Gonzálezde54e702017-06-30 17:56:39 +0200673 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200674 struct pblk_line_meta *lm = &pblk->lm;
Javier González63e38092017-06-26 11:57:24 +0200675 void *ppa_list, *meta_list;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200676 struct bio *bio;
677 struct nvm_rq rqd;
Javier González63e38092017-06-26 11:57:24 +0200678 dma_addr_t dma_ppa_list, dma_meta_list;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200679 int min = pblk->min_write_pgs;
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200680 int left_ppas = lm->emeta_sec[0];
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200681 int id = line->id;
682 int rq_ppas, rq_len;
683 int cmd_op, bio_op;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200684 int i, j;
685 int ret;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200686
Javier Gonzáleze2cddf22017-10-13 14:46:19 +0200687 if (dir == PBLK_WRITE) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200688 bio_op = REQ_OP_WRITE;
689 cmd_op = NVM_OP_PWRITE;
Javier Gonzáleze2cddf22017-10-13 14:46:19 +0200690 } else if (dir == PBLK_READ) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200691 bio_op = REQ_OP_READ;
692 cmd_op = NVM_OP_PREAD;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200693 } else
694 return -EINVAL;
695
Javier González63e38092017-06-26 11:57:24 +0200696 meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
697 &dma_meta_list);
698 if (!meta_list)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200699 return -ENOMEM;
700
Javier González63e38092017-06-26 11:57:24 +0200701 ppa_list = meta_list + pblk_dma_meta_size;
702 dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
703
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200704next_rq:
705 memset(&rqd, 0, sizeof(struct nvm_rq));
706
707 rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
Javier Gonzáleze46f4e42018-03-30 00:05:10 +0200708 rq_len = rq_ppas * geo->csecs;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200709
Javier Gonzálezde54e702017-06-30 17:56:39 +0200710 bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
711 l_mg->emeta_alloc_type, GFP_KERNEL);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200712 if (IS_ERR(bio)) {
713 ret = PTR_ERR(bio);
714 goto free_rqd_dma;
715 }
716
717 bio->bi_iter.bi_sector = 0; /* internal bio */
718 bio_set_op_attrs(bio, bio_op, 0);
719
720 rqd.bio = bio;
Javier González63e38092017-06-26 11:57:24 +0200721 rqd.meta_list = meta_list;
722 rqd.ppa_list = ppa_list;
723 rqd.dma_meta_list = dma_meta_list;
724 rqd.dma_ppa_list = dma_ppa_list;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200725 rqd.opcode = cmd_op;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200726 rqd.nr_ppas = rq_ppas;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200727
Javier Gonzáleze2cddf22017-10-13 14:46:19 +0200728 if (dir == PBLK_WRITE) {
Javier González63e38092017-06-26 11:57:24 +0200729 struct pblk_sec_meta *meta_list = rqd.meta_list;
730
Matias Bjørlingd7b68012018-10-09 13:11:32 +0200731 rqd.is_seq = 1;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200732 for (i = 0; i < rqd.nr_ppas; ) {
733 spin_lock(&line->lock);
734 paddr = __pblk_alloc_page(pblk, line, min);
735 spin_unlock(&line->lock);
Javier González63e38092017-06-26 11:57:24 +0200736 for (j = 0; j < min; j++, i++, paddr++) {
737 meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200738 rqd.ppa_list[i] =
739 addr_to_gen_ppa(pblk, paddr, id);
Javier González63e38092017-06-26 11:57:24 +0200740 }
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200741 }
742 } else {
743 for (i = 0; i < rqd.nr_ppas; ) {
744 struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
Javier Gonzálezb1bcfda2018-01-05 14:16:06 +0100745 int pos = pblk_ppa_to_pos(geo, ppa);
Javier Gonzálezf9c10152017-06-26 11:57:20 +0200746
747 if (pblk_io_aligned(pblk, rq_ppas))
Matias Bjørlingd7b68012018-10-09 13:11:32 +0200748 rqd.is_seq = 1;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200749
750 while (test_bit(pos, line->blk_bitmap)) {
751 paddr += min;
752 if (pblk_boundary_paddr_checks(pblk, paddr)) {
Matias Bjørling4e495a42018-07-13 10:48:42 +0200753 pblk_err(pblk, "corrupt emeta line:%d\n",
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200754 line->id);
755 bio_put(bio);
756 ret = -EINTR;
757 goto free_rqd_dma;
758 }
759
760 ppa = addr_to_gen_ppa(pblk, paddr, id);
Javier Gonzálezb1bcfda2018-01-05 14:16:06 +0100761 pos = pblk_ppa_to_pos(geo, ppa);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200762 }
763
764 if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
Matias Bjørling4e495a42018-07-13 10:48:42 +0200765 pblk_err(pblk, "corrupt emeta line:%d\n",
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200766 line->id);
767 bio_put(bio);
768 ret = -EINTR;
769 goto free_rqd_dma;
770 }
771
772 for (j = 0; j < min; j++, i++, paddr++)
773 rqd.ppa_list[i] =
774 addr_to_gen_ppa(pblk, paddr, line->id);
775 }
776 }
777
Javier González1a94b2d2017-10-13 14:46:47 +0200778 ret = pblk_submit_io_sync(pblk, &rqd);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200779 if (ret) {
Matias Bjørling4e495a42018-07-13 10:48:42 +0200780 pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200781 bio_put(bio);
782 goto free_rqd_dma;
783 }
784
Javier González588726d32017-06-26 11:57:29 +0200785 atomic_dec(&pblk->inflight_io);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200786
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200787 if (rqd.error) {
Javier Gonzáleze2cddf22017-10-13 14:46:19 +0200788 if (dir == PBLK_WRITE)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200789 pblk_log_write_err(pblk, &rqd);
790 else
791 pblk_log_read_err(pblk, &rqd);
792 }
793
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200794 emeta_buf += rq_len;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200795 left_ppas -= rq_ppas;
796 if (left_ppas)
797 goto next_rq;
798free_rqd_dma:
Javier González63e38092017-06-26 11:57:24 +0200799 nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200800 return ret;
801}
802
803u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
804{
805 struct nvm_tgt_dev *dev = pblk->dev;
806 struct nvm_geo *geo = &dev->geo;
807 struct pblk_line_meta *lm = &pblk->lm;
808 int bit;
809
810 /* This usually only happens on bad lines */
811 bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
812 if (bit >= lm->blk_per_line)
813 return -1;
814
Javier Gonzáleze46f4e42018-03-30 00:05:10 +0200815 return bit * geo->ws_opt;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200816}
817
/* Synchronously read or write a line's start-of-line metadata (smeta).
 *
 * @pblk:  pblk instance
 * @line:  line whose smeta region is accessed; line->smeta is the host buffer
 * @paddr: start paddr of the smeta region within the line
 * @dir:   PBLK_WRITE, PBLK_READ or PBLK_READ_RECOV; anything else -> -EINVAL
 *
 * Returns 0 on success, a negative errno on setup/submission failure, or 1
 * when a write completed with a device-side error.
 */
static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
				     u64 paddr, int dir)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_line_meta *lm = &pblk->lm;
	struct bio *bio;
	struct nvm_rq rqd;
	__le64 *lba_list = NULL;
	int i, ret;
	int cmd_op, bio_op;

	if (dir == PBLK_WRITE) {
		bio_op = REQ_OP_WRITE;
		cmd_op = NVM_OP_PWRITE;
		/* on write, the emeta lba list mirrors the smeta sectors */
		lba_list = emeta_to_lbas(pblk, line->emeta->buf);
	} else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
		bio_op = REQ_OP_READ;
		cmd_op = NVM_OP_PREAD;
	} else
		return -EINVAL;

	memset(&rqd, 0, sizeof(struct nvm_rq));

	/* One DMA allocation backs both the per-sector metadata and, at a
	 * fixed offset (pblk_dma_meta_size), the ppa list.
	 */
	rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd.dma_meta_list);
	if (!rqd.meta_list)
		return -ENOMEM;

	rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
	rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;

	bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto free_ppa_list;
	}

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, bio_op, 0);

	rqd.bio = bio;
	rqd.opcode = cmd_op;
	rqd.is_seq = 1;
	rqd.nr_ppas = lm->smeta_sec;

	/* Map every smeta sector to a device address; on write, stamp the
	 * per-sector metadata and the lba list with the "empty" marker.
	 */
	for (i = 0; i < lm->smeta_sec; i++, paddr++) {
		struct pblk_sec_meta *meta_list = rqd.meta_list;

		rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		if (dir == PBLK_WRITE) {
			__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

			meta_list[i].lba = lba_list[paddr] = addr_empty;
		}
	}

	/*
	 * This I/O is sent by the write thread when a line is replaced. Since
	 * the write thread is the only one sending write and erase commands,
	 * there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	if (ret) {
		pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
		bio_put(bio);
		goto free_ppa_list;
	}

	atomic_dec(&pblk->inflight_io);

	if (rqd.error) {
		if (dir == PBLK_WRITE) {
			pblk_log_write_err(pblk, &rqd);
			ret = 1;	/* caller retries the line on write error */
		} else if (dir == PBLK_READ)
			pblk_log_read_err(pblk, &rqd);
	}

free_ppa_list:
	nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);

	return ret;
}
902
903int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
904{
905 u64 bpaddr = pblk_line_smeta_start(pblk, line);
906
Javier González8f554592018-01-05 14:16:16 +0100907 return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200908}
909
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200910int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
911 void *emeta_buf)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200912{
Javier Gonzálezdd2a4342017-06-26 11:57:17 +0200913 return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
Javier Gonzáleze2cddf22017-10-13 14:46:19 +0200914 line->emeta_ssec, PBLK_READ);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200915}
916
917static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
918 struct ppa_addr ppa)
919{
920 rqd->opcode = NVM_OP_ERASE;
921 rqd->ppa_addr = ppa;
922 rqd->nr_ppas = 1;
Matias Bjørlingd7b68012018-10-09 13:11:32 +0200923 rqd->is_seq = 1;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +0200924 rqd->bio = NULL;
925}
926
/* Synchronously erase the single chunk addressed by @ppa.
 * Returns the submission result (0 on success, negative errno on failure).
 */
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
	struct nvm_rq rqd = {NULL};
	int ret;

	trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
				PBLK_CHUNK_RESET_START);

	pblk_setup_e_rq(pblk, &rqd, ppa);

	/* The write thread schedules erases so that it minimizes disturbances
	 * with writes. Thus, there is no need to take the LUN semaphore.
	 */
	ret = pblk_submit_io_sync(pblk, &rqd);
	/* NOTE(review): the completion handler runs even when submission
	 * failed — presumably so per-line erase accounting stays consistent;
	 * confirm against __pblk_end_io_erase.
	 */
	rqd.private = pblk;
	__pblk_end_io_erase(pblk, &rqd);

	return ret;
}
946
/* Erase every not-yet-erased good block of @line, one block at a time.
 * Returns 0 when all blocks are erased, or the first erase error.
 */
int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct ppa_addr ppa;
	int ret, bit = -1;

	/* Erase only good blocks, one at a time */
	do {
		/* line->lock protects erase_bitmap and left_eblks; it is
		 * dropped before the (slow) synchronous erase itself.
		 */
		spin_lock(&line->lock);
		bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
								bit + 1);
		if (bit >= lm->blk_per_line) {
			spin_unlock(&line->lock);
			break;	/* no blocks left to erase */
		}

		ppa = pblk->luns[bit].bppa; /* set ch and lun */
		ppa.a.blk = line->id;

		atomic_dec(&line->left_eblks);
		/* the bit must still be clear here; WARN flags a double erase */
		WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
		spin_unlock(&line->lock);

		ret = pblk_blk_erase_sync(pblk, ppa);
		if (ret) {
			pblk_err(pblk, "failed to erase line %d\n", line->id);
			return ret;
		}
	} while (1);

	return 0;
}
979
/* Attach a free smeta/emeta buffer pair (a "meta line" slot) to @line and
 * reset it. Called with l_mg->free_lock held; the lock is temporarily
 * dropped while waiting for a slot to become free.
 */
static void pblk_line_setup_metadata(struct pblk_line *line,
				     struct pblk_line_mgmt *l_mg,
				     struct pblk_line_meta *lm)
{
	int meta_line;

	lockdep_assert_held(&l_mg->free_lock);

retry_meta:
	meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
	if (meta_line == PBLK_DATA_LINES) {
		/* all slots busy: release the lock, yield, and retry */
		spin_unlock(&l_mg->free_lock);
		io_schedule();
		spin_lock(&l_mg->free_lock);
		goto retry_meta;
	}

	set_bit(meta_line, &l_mg->meta_bitmap);
	line->meta_line = meta_line;

	line->smeta = l_mg->sline_meta[meta_line];
	line->emeta = l_mg->eline_meta[meta_line];

	/* slots are recycled; clear any stale metadata */
	memset(line->smeta, 0, lm->smeta_len);
	memset(line->emeta->buf, 0, lm->emeta_len[0]);

	line->emeta->mem = 0;
	atomic_set(&line->emeta->sync, 0);
}
1009
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
/* Initialize the in-memory smeta/emeta buffers for @line, chaining it to the
 * previous data line @cur (or PBLK_LINE_EMPTY when none).
 * Returns 1 on success, 0 if the line turned out bad (too few good blocks),
 * in which case it is moved to the bad list.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
				  struct pblk_line *cur)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
	int nr_blk_line;

	/* After erasing the line, new bad blocks might appear and we risk
	 * having an invalid line
	 */
	nr_blk_line = lm->blk_per_line -
			bitmap_weight(line->blk_bitmap, lm->blk_per_line);
	if (nr_blk_line < lm->min_blk_line) {
		spin_lock(&l_mg->free_lock);
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		spin_unlock(&l_mg->free_lock);

		pblk_debug(pblk, "line %d is bad\n", line->id);

		return 0;
	}

	/* Run-time metadata: the lun bitmap lives right after the fixed
	 * smeta header inside the smeta buffer.
	 */
	line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

	/* Mark LUNs allocated in this line (all for now) */
	bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

	smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
	memcpy(smeta_buf->header.uuid, pblk->instance_uuid, 16);
	smeta_buf->header.id = cpu_to_le32(line->id);
	smeta_buf->header.type = cpu_to_le16(line->type);
	smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
	smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

	/* Start metadata */
	smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

	/* Fill metadata among lines: link prev/next ids across data lines */
	if (cur) {
		memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
		smeta_buf->prev_id = cpu_to_le32(cur->id);
		cur->emeta->buf->next_id = cpu_to_le32(line->id);
	} else {
		smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
	}

	/* All smeta must be set at this point (CRCs cover the final bytes) */
	smeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
	smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

	/* End metadata: emeta header starts as a copy of the smeta header,
	 * then gets its own version and CRC.
	 */
	memcpy(&emeta_buf->header, &smeta_buf->header,
						sizeof(struct line_header));

	emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
	emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
	emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

	emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
	emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
	emeta_buf->nr_valid_lbas = cpu_to_le64(0);
	emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
	emeta_buf->crc = cpu_to_le32(0);
	emeta_buf->prev_id = smeta_buf->prev_id;

	return 1;
}
1095
Javier González9cfd5a92018-06-01 16:41:14 +02001096static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
1097{
1098 struct pblk_line_meta *lm = &pblk->lm;
Hans Holmberg53d82db2018-10-09 13:11:47 +02001099 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
Javier González9cfd5a92018-06-01 16:41:14 +02001100
Hans Holmberg53d82db2018-10-09 13:11:47 +02001101 line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
Javier González9cfd5a92018-06-01 16:41:14 +02001102 if (!line->map_bitmap)
1103 return -ENOMEM;
1104
Hans Holmberg53d82db2018-10-09 13:11:47 +02001105 memset(line->map_bitmap, 0, lm->sec_bitmap_len);
1106
Javier González9cfd5a92018-06-01 16:41:14 +02001107 /* will be initialized using bb info from map_bitmap */
Hans Holmberg53d82db2018-10-09 13:11:47 +02001108 line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
Javier González9cfd5a92018-06-01 16:41:14 +02001109 if (!line->invalid_bitmap) {
Hans Holmberg53d82db2018-10-09 13:11:47 +02001110 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
Javier González9cfd5a92018-06-01 16:41:14 +02001111 line->map_bitmap = NULL;
1112 return -ENOMEM;
1113 }
1114
1115 return 0;
1116}
1117
/* For now lines are always assumed full lines. Thus, smeta former and current
 * lun bitmaps are omitted.
 */
/* Build the line's sector bitmaps from bad-block info, reserve the smeta and
 * emeta regions, and (when @init) write smeta to the device.
 * Returns 1 on success, 0 on smeta I/O failure or if the bitmap accounting
 * is inconsistent (line is then declared bad).
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
			     int init)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	u64 off;
	int bit = -1;
	int emeta_secs;

	line->sec_in_line = lm->sec_per_line;

	/* Capture bad block information on line mapping bitmaps: every bad
	 * block masks out a stripe of sectors across the line.
	 */
	while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
					bit + 1)) < lm->blk_per_line) {
		off = bit * geo->ws_opt;
		bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
							lm->sec_per_line);
		bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
							lm->sec_per_line);
		line->sec_in_line -= geo->clba;
	}

	/* Mark smeta metadata sectors as bad sectors: smeta occupies the
	 * start of the first good block.
	 */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	off = bit * geo->ws_opt;
	bitmap_set(line->map_bitmap, off, lm->smeta_sec);
	line->sec_in_line -= lm->smeta_sec;
	line->smeta_ssec = off;
	line->cur_sec = off + lm->smeta_sec;

	if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
		pblk_debug(pblk, "line smeta I/O failed. Retry\n");
		return 0;
	}

	bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

	/* Mark emeta metadata sectors as bad sectors. We need to consider bad
	 * blocks to make sure that there are enough sectors to store emeta
	 */
	emeta_secs = lm->emeta_sec[0];
	off = lm->sec_per_line;
	/* scan backwards from the end of the line, skipping bad stripes */
	while (emeta_secs) {
		off -= geo->ws_opt;
		if (!test_bit(off, line->invalid_bitmap)) {
			bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
			emeta_secs -= geo->ws_opt;
		}
	}

	line->emeta_ssec = off;
	line->sec_in_line -= lm->emeta_sec[0];
	line->nr_valid_lbas = 0;
	line->left_msecs = line->sec_in_line;
	*line->vsc = cpu_to_le32(line->sec_in_line);

	/* sanity check: reserved sectors must match the invalid bitmap */
	if (lm->sec_per_line - line->sec_in_line !=
		bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);
		pblk_err(pblk, "unexpected line %d is bad\n", line->id);

		return 0;
	}

	return 1;
}
1195
Javier González32ef9412018-03-30 00:05:20 +02001196static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
1197{
1198 struct pblk_line_meta *lm = &pblk->lm;
1199 struct nvm_tgt_dev *dev = pblk->dev;
1200 struct nvm_geo *geo = &dev->geo;
1201 int blk_to_erase = atomic_read(&line->blk_in_line);
1202 int i;
1203
1204 for (i = 0; i < lm->blk_per_line; i++) {
1205 struct pblk_lun *rlun = &pblk->luns[i];
1206 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1207 int state = line->chks[pos].state;
1208
1209 /* Free chunks should not be erased */
1210 if (state & NVM_CHK_ST_FREE) {
1211 set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
1212 line->erase_bitmap);
1213 blk_to_erase--;
1214 }
1215 }
1216
1217 return blk_to_erase;
1218}
1219
/* Transition @line from FREE (or NEW) to OPEN and set up erase accounting.
 * Returns 0 on success, -EAGAIN when the line has too few good blocks
 * (caller should pick another line), or -EINTR on a corrupted line state.
 */
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;
	int blk_in_line = atomic_read(&line->blk_in_line);
	int blk_to_erase;

	/* Bad blocks do not need to be erased */
	bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

	spin_lock(&line->lock);

	/* If we have not written to this line, we need to mark up free chunks
	 * as already erased
	 */
	if (line->state == PBLK_LINESTATE_NEW) {
		blk_to_erase = pblk_prepare_new_line(pblk, line);
		line->state = PBLK_LINESTATE_FREE;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
	} else {
		blk_to_erase = blk_in_line;
	}

	if (blk_in_line < lm->min_blk_line) {
		spin_unlock(&line->lock);
		return -EAGAIN;
	}

	if (line->state != PBLK_LINESTATE_FREE) {
		WARN(1, "pblk: corrupted line %d, state %d\n",
							line->id, line->state);
		spin_unlock(&line->lock);
		return -EINTR;
	}

	line->state = PBLK_LINESTATE_OPEN;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
				line->state);

	atomic_set(&line->left_eblks, blk_to_erase);
	atomic_set(&line->left_seblks, blk_to_erase);

	line->meta_distance = lm->meta_distance;
	spin_unlock(&line->lock);

	/* fresh reference; dropped when the line is fully closed */
	kref_init(&line->ref);

	return 0;
}
1269
/* Claim @line as the current data line during recovery: prepare it,
 * allocate its bitmaps and rebuild bad-block state (without writing smeta).
 * Returns 0 on success or a negative errno.
 */
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	l_mg->data_line = line;
	list_del(&line->list);

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		list_add(&line->list, &l_mg->free_list);
		spin_unlock(&l_mg->free_lock);
		return ret;
	}
	spin_unlock(&l_mg->free_lock);

	ret = pblk_line_alloc_bitmaps(pblk, line);
	if (ret)
		return ret;

	/* init=0: do not rewrite smeta during recovery */
	if (!pblk_line_init_bb(pblk, line, 0)) {
		/* NOTE(review): this list_add runs without free_lock, unlike
		 * the error path above — looks safe only because recovery is
		 * single-threaded; confirm.
		 */
		list_add(&line->list, &l_mg->free_list);
		return -EINTR;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);
	return 0;
}
1299
1300void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
1301{
Hans Holmberg53d82db2018-10-09 13:11:47 +02001302 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1303
1304 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001305 line->map_bitmap = NULL;
1306 line->smeta = NULL;
1307 line->emeta = NULL;
1308}
1309
Javier González9cfd5a92018-06-01 16:41:14 +02001310static void pblk_line_reinit(struct pblk_line *line)
1311{
1312 *line->vsc = cpu_to_le32(EMPTY_ENTRY);
1313
1314 line->map_bitmap = NULL;
1315 line->invalid_bitmap = NULL;
1316 line->smeta = NULL;
1317 line->emeta = NULL;
1318}
1319
1320void pblk_line_free(struct pblk_line *line)
1321{
Hans Holmberg53d82db2018-10-09 13:11:47 +02001322 struct pblk *pblk = line->pblk;
1323 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1324
1325 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1326 mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
Javier González9cfd5a92018-06-01 16:41:14 +02001327
1328 pblk_line_reinit(line);
1329}
1330
/* Take the next usable line off the free list and prepare it for use.
 * Called with l_mg->free_lock held. Lines that turn out bad or corrupt are
 * shunted to the appropriate list and the search retries; returns NULL when
 * no free line can be produced.
 */
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	int ret, bit;

	lockdep_assert_held(&l_mg->free_lock);

retry:
	if (list_empty(&l_mg->free_list)) {
		pblk_err(pblk, "no free lines\n");
		return NULL;
	}

	line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
	list_del(&line->list);
	l_mg->nr_free_lines--;

	/* a line with no good block at all is unusable */
	bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
	if (unlikely(bit >= lm->blk_per_line)) {
		spin_lock(&line->lock);
		line->state = PBLK_LINESTATE_BAD;
		trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
		spin_unlock(&line->lock);

		list_add_tail(&line->list, &l_mg->bad_list);

		pblk_debug(pblk, "line %d is bad\n", line->id);
		goto retry;
	}

	ret = pblk_line_prepare(pblk, line);
	if (ret) {
		switch (ret) {
		case -EAGAIN:
			/* too few good blocks: treat as bad and retry */
			list_add(&line->list, &l_mg->bad_list);
			goto retry;
		case -EINTR:
			list_add(&line->list, &l_mg->corrupt_list);
			goto retry;
		default:
			pblk_err(pblk, "failed to prepare line %d\n", line->id);
			/* unknown failure: put the line back and give up */
			list_add(&line->list, &l_mg->free_list);
			l_mg->nr_free_lines++;
			return NULL;
		}
	}

	return line;
}
1383
/* Replace a failed data line: grab a fresh line, move the failed line's
 * bitmaps and metadata buffers over to it, and erase it. Retries until a
 * line erases cleanly; returns NULL when no free line is available.
 */
static struct pblk_line *pblk_line_retry(struct pblk *pblk,
					 struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *retry_line;

retry:
	spin_lock(&l_mg->free_lock);
	retry_line = pblk_line_get(pblk);
	if (!retry_line) {
		l_mg->data_line = NULL;
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	/* transfer ownership of the old line's resources */
	retry_line->map_bitmap = line->map_bitmap;
	retry_line->invalid_bitmap = line->invalid_bitmap;
	retry_line->smeta = line->smeta;
	retry_line->emeta = line->emeta;
	retry_line->meta_line = line->meta_line;

	pblk_line_reinit(line);

	l_mg->data_line = retry_line;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_dec(&pblk->rl, line, false);

	/* if the replacement also fails to erase, pick yet another line */
	if (pblk_line_erase(pblk, retry_line))
		goto retry;

	return retry_line;
}
1417
Javier González588726d32017-06-26 11:57:29 +02001418static void pblk_set_space_limit(struct pblk *pblk)
1419{
1420 struct pblk_rl *rl = &pblk->rl;
1421
1422 atomic_set(&rl->rb_space, 0);
1423}
1424
/* Set up the very first data line of the instance: take a line, attach
 * metadata buffers, pre-allocate the next data line, then erase and
 * initialize the current one. Returns the ready line or NULL on failure.
 */
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;

	spin_lock(&l_mg->free_lock);
	line = pblk_line_get(pblk);
	if (!line) {
		spin_unlock(&l_mg->free_lock);
		return NULL;
	}

	line->seq_nr = l_mg->d_seq_nr++;
	line->type = PBLK_LINETYPE_DATA;
	l_mg->data_line = line;

	pblk_line_setup_metadata(line, l_mg, &pblk->lm);

	/* Allocate next line for preparation */
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_set_space_limit(pblk);

		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

	if (pblk_line_alloc_bitmaps(pblk, line))
		return NULL;

	/* pblk_line_retry substitutes a fresh line on erase/init failure */
	if (pblk_line_erase(pblk, line)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;
	}

retry_setup:
	if (!pblk_line_init_metadata(pblk, line, NULL)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, line, 1)) {
		line = pblk_line_retry(pblk, line);
		if (!line)
			return NULL;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, line, true);

	return line;
}
1489
Matias Bjørlingae14cc02018-10-09 13:11:40 +02001490void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
1491{
1492 struct pblk_line *line;
1493
Javier Gonzálezcb216652018-10-09 13:11:42 +02001494 line = pblk_ppa_to_line(pblk, ppa);
Matias Bjørlingae14cc02018-10-09 13:11:40 +02001495 kref_put(&line->ref, pblk_line_put_wq);
1496}
1497
1498void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
1499{
1500 struct ppa_addr *ppa_list;
1501 int i;
1502
1503 ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
1504
1505 for (i = 0; i < rqd->nr_ppas; i++)
1506 pblk_ppa_to_line_put(pblk, ppa_list[i]);
1507}
1508
/* Halt user writes: zero the write-buffer budget and move the instance into
 * the STOPPING state. Called with l_mg->free_lock held.
 */
static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
	lockdep_assert_held(&pblk->l_mg.free_lock);

	pblk_set_space_limit(pblk);
	pblk->state = PBLK_STATE_STOPPING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
}
1517
/*
 * Synchronously flush all lines waiting for emeta writes: for each line
 * queued on the emeta list, keep submitting metadata I/O until the full
 * emeta payload has been issued, then wait for outstanding metadata and
 * close-work to finish.
 */
static void pblk_line_close_meta_sync(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line, *tline;
	LIST_HEAD(list);

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return;
	}

	/* Detach the whole pending list so the lock can be dropped early */
	list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
	spin_unlock(&l_mg->close_lock);

	list_for_each_entry_safe(line, tline, &list, list) {
		struct pblk_emeta *emeta = line->emeta;

		/* emeta->mem tracks how much of the buffer has been issued */
		while (emeta->mem < lm->emeta_len[0]) {
			int ret;

			ret = pblk_submit_meta_io(pblk, line);
			if (ret) {
				pblk_err(pblk, "sync meta line %d failed (%d)\n",
							line->id, ret);
				return;
			}
		}
	}

	pblk_wait_for_meta(pblk);
	flush_workqueue(pblk->close_wq);
}
1552
/*
 * Drain the write pipeline before teardown: move to the RECOVERING state,
 * flush buffered user writes, wait for outstanding metadata, pad the
 * current line so it is recoverable, and sync all pending emeta.
 * No-op if a flush/stop is already in progress or complete.
 */
void __pblk_pipeline_flush(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int ret;

	spin_lock(&l_mg->free_lock);
	if (pblk->state == PBLK_STATE_RECOVERING ||
					pblk->state == PBLK_STATE_STOPPED) {
		spin_unlock(&l_mg->free_lock);
		return;
	}
	pblk->state = PBLK_STATE_RECOVERING;
	trace_pblk_state(pblk_disk_name(pblk), pblk->state);
	spin_unlock(&l_mg->free_lock);

	pblk_flush_writer(pblk);
	pblk_wait_for_meta(pblk);

	/* Pad the open line so its metadata can be closed consistently */
	ret = pblk_recov_pad(pblk);
	if (ret) {
		pblk_err(pblk, "could not close data on teardown(%d)\n", ret);
		return;
	}

	flush_workqueue(pblk->bb_wq);
	pblk_line_close_meta_sync(pblk);
}
1580
1581void __pblk_pipeline_stop(struct pblk *pblk)
1582{
1583 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
Javier González588726d32017-06-26 11:57:29 +02001584
1585 spin_lock(&l_mg->free_lock);
1586 pblk->state = PBLK_STATE_STOPPED;
Hans Holmberg1b0dd0b2018-10-09 13:11:54 +02001587 trace_pblk_state(pblk_disk_name(pblk), pblk->state);
Javier González588726d32017-06-26 11:57:29 +02001588 l_mg->data_line = NULL;
1589 l_mg->data_next = NULL;
1590 spin_unlock(&l_mg->free_lock);
1591}
1592
/* Full pipeline shutdown: flush all in-flight data, then mark stopped. */
void pblk_pipeline_stop(struct pblk *pblk)
{
	__pblk_pipeline_flush(pblk);
	__pblk_pipeline_stop(pblk);
}
1598
/*
 * Promote the prepared next data line to be the current data line, erase
 * and initialize it, and stage a new "next" line for later preparation.
 *
 * Returns the new current data line, or NULL when no next line had been
 * prepared or initialization could not be recovered.
 *
 * NOTE(review): the pblk_line_alloc_bitmaps() failure path returns NULL
 * directly while every other failure uses "goto out"; also the erase
 * failure path returns the (non-NULL) new line. l_mg->data_line has
 * already been switched by then — confirm callers tolerate this.
 */
struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *cur, *new = NULL;
	unsigned int left_seblks;

	cur = l_mg->data_line;
	new = l_mg->data_next;
	if (!new)
		goto out;
	l_mg->data_line = new;

	spin_lock(&l_mg->free_lock);
	pblk_line_setup_metadata(new, l_mg, &pblk->lm);
	spin_unlock(&l_mg->free_lock);

retry_erase:
	/* Busy-wait (yielding) until all sync-erase blocks have completed */
	left_seblks = atomic_read(&new->left_seblks);
	if (left_seblks) {
		/* If line is not fully erased, erase it */
		if (atomic_read(&new->left_eblks)) {
			if (pblk_line_erase(pblk, new))
				goto out;
		} else {
			io_schedule();
		}
		goto retry_erase;
	}

	if (pblk_line_alloc_bitmaps(pblk, new))
		return NULL;

retry_setup:
	/* On metadata/bitmap init failure, swap in a replacement line */
	if (!pblk_line_init_metadata(pblk, new, cur)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	if (!pblk_line_init_bb(pblk, new, 1)) {
		new = pblk_line_retry(pblk, new);
		if (!new)
			goto out;

		goto retry_setup;
	}

	pblk_rl_free_lines_dec(&pblk->rl, new, true);

	/* Allocate next line for preparation */
	spin_lock(&l_mg->free_lock);
	l_mg->data_next = pblk_line_get(pblk);
	if (!l_mg->data_next) {
		/* If we cannot get a new line, we need to stop the pipeline.
		 * Only allow as many writes in as we can store safely and then
		 * fail gracefully
		 */
		pblk_stop_writes(pblk, new);
		l_mg->data_next = NULL;
	} else {
		l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
		l_mg->data_next->type = PBLK_LINETYPE_DATA;
	}
	spin_unlock(&l_mg->free_lock);

out:
	return new;
}
1669
/*
 * Final release of a GC'd line once its last reference is dropped: mark
 * it FREE, release its resources, return it to the free list and credit
 * the rate limiter.
 */
static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_gc *gc = &pblk->gc;

	spin_lock(&line->lock);
	WARN_ON(line->state != PBLK_LINESTATE_GC);
	line->state = PBLK_LINESTATE_FREE;
	trace_pblk_line_state(pblk_disk_name(pblk), line->id,
					line->state);
	line->gc_group = PBLK_LINEGC_NONE;
	pblk_line_free(line);

	/* Write-error lines leave the rate limiter's werr accounting */
	if (line->w_err_gc->has_write_err) {
		pblk_rl_werr_line_out(&pblk->rl);
		line->w_err_gc->has_write_err = 0;
	}

	spin_unlock(&line->lock);
	atomic_dec(&gc->pipeline_gc);

	spin_lock(&l_mg->free_lock);
	list_add_tail(&line->list, &l_mg->free_list);
	l_mg->nr_free_lines++;
	spin_unlock(&l_mg->free_lock);

	pblk_rl_free_lines_inc(&pblk->rl, line);
}
1698
Javier González7bd4d372017-10-13 14:46:23 +02001699static void pblk_line_put_ws(struct work_struct *work)
1700{
1701 struct pblk_line_ws *line_put_ws = container_of(work,
1702 struct pblk_line_ws, ws);
1703 struct pblk *pblk = line_put_ws->pblk;
1704 struct pblk_line *line = line_put_ws->line;
1705
1706 __pblk_line_put(pblk, line);
Kent Overstreetb906bbb2018-05-20 18:25:50 -04001707 mempool_free(line_put_ws, &pblk->gen_ws_pool);
Javier González7bd4d372017-10-13 14:46:23 +02001708}
1709
1710void pblk_line_put(struct kref *ref)
1711{
1712 struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1713 struct pblk *pblk = line->pblk;
1714
1715 __pblk_line_put(pblk, line);
1716}
1717
1718void pblk_line_put_wq(struct kref *ref)
1719{
1720 struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1721 struct pblk *pblk = line->pblk;
1722 struct pblk_line_ws *line_put_ws;
1723
Kent Overstreetb906bbb2018-05-20 18:25:50 -04001724 line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
Javier González7bd4d372017-10-13 14:46:23 +02001725 if (!line_put_ws)
1726 return;
1727
1728 line_put_ws->pblk = pblk;
1729 line_put_ws->line = line;
1730 line_put_ws->priv = NULL;
1731
1732 INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1733 queue_work(pblk->r_end_wq, &line_put_ws->ws);
1734}
1735
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001736int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1737{
1738 struct nvm_rq *rqd;
1739 int err;
1740
Javier González67bf26a2017-10-13 14:46:20 +02001741 rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001742
1743 pblk_setup_e_rq(pblk, rqd, ppa);
1744
1745 rqd->end_io = pblk_end_io_erase;
1746 rqd->private = pblk;
1747
Hans Holmberg4209c312018-10-09 13:11:55 +02001748 trace_pblk_chunk_reset(pblk_disk_name(pblk),
1749 &ppa, PBLK_CHUNK_RESET_START);
1750
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001751 /* The write thread schedules erases so that it minimizes disturbances
1752 * with writes. Thus, there is no need to take the LUN semaphore.
1753 */
1754 err = pblk_submit_io(pblk, rqd);
1755 if (err) {
1756 struct nvm_tgt_dev *dev = pblk->dev;
1757 struct nvm_geo *geo = &dev->geo;
1758
Matias Bjørling4e495a42018-07-13 10:48:42 +02001759 pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
Javier Gonzálezcb216652018-10-09 13:11:42 +02001760 pblk_ppa_to_line_id(ppa),
Javier Gonzálezb1bcfda2018-01-05 14:16:06 +01001761 pblk_ppa_to_pos(geo, ppa));
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001762 }
1763
1764 return err;
1765}
1766
/* Return the line currently receiving user data writes. */
struct pblk_line *pblk_line_get_data(struct pblk *pblk)
{
	return pblk->l_mg.data_line;
}
1771
/* Return the line staged for erasure. For now, always erase next line. */
struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
{
	return pblk->l_mg.data_next;
}
1777
1778int pblk_line_is_full(struct pblk_line *line)
1779{
1780 return (line->left_msecs == 0);
1781}
1782
/* Under rate-limiter pressure, push pending emeta out synchronously. */
static void pblk_line_should_sync_meta(struct pblk *pblk)
{
	if (pblk_rl_is_limit(&pblk->rl))
		pblk_line_close_meta_sync(pblk);
}
1788
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001789void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1790{
Javier González32ef9412018-03-30 00:05:20 +02001791 struct nvm_tgt_dev *dev = pblk->dev;
1792 struct nvm_geo *geo = &dev->geo;
1793 struct pblk_line_meta *lm = &pblk->lm;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001794 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1795 struct list_head *move_list;
Javier González32ef9412018-03-30 00:05:20 +02001796 int i;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001797
Matias Bjørling880eda52018-07-13 10:48:37 +02001798#ifdef CONFIG_NVM_PBLK_DEBUG
Javier Gonzálezdd2a4342017-06-26 11:57:17 +02001799 WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001800 "pblk: corrupt closed line %d\n", line->id);
Javier Gonzáleza84ebb82017-06-30 17:56:43 +02001801#endif
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001802
1803 spin_lock(&l_mg->free_lock);
1804 WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1805 spin_unlock(&l_mg->free_lock);
1806
1807 spin_lock(&l_mg->gc_lock);
1808 spin_lock(&line->lock);
1809 WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1810 line->state = PBLK_LINESTATE_CLOSED;
1811 move_list = pblk_line_gc_list(pblk, line);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001812 list_add_tail(&line->list, move_list);
1813
Hans Holmberg53d82db2018-10-09 13:11:47 +02001814 mempool_free(line->map_bitmap, l_mg->bitmap_pool);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001815 line->map_bitmap = NULL;
1816 line->smeta = NULL;
1817 line->emeta = NULL;
1818
Javier González32ef9412018-03-30 00:05:20 +02001819 for (i = 0; i < lm->blk_per_line; i++) {
1820 struct pblk_lun *rlun = &pblk->luns[i];
1821 int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1822 int state = line->chks[pos].state;
1823
1824 if (!(state & NVM_CHK_ST_OFFLINE))
1825 state = NVM_CHK_ST_CLOSED;
1826 }
1827
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001828 spin_unlock(&line->lock);
1829 spin_unlock(&l_mg->gc_lock);
Hans Holmbergf2937232018-10-09 13:11:53 +02001830
1831 trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1832 line->state);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001833}
1834
/*
 * Stage a line's end-of-line metadata (emeta) for writing: snapshot the
 * valid-sector counts, bad-block bitmap and write-amplification counters
 * into the emeta buffer, seal it with CRCs, and queue the line on the
 * emeta list for the metadata write path to consume.
 */
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = line->emeta;
	struct line_emeta *emeta_buf = emeta->buf;
	struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);

	/* No need for exact vsc value; avoid a big line lock and take aprox. */
	memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
	memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);

	wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
	wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
	wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));

	/* Initialize the header only if it has not been filled in already */
	if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
		emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
		memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
		emeta_buf->header.id = cpu_to_le32(line->id);
		emeta_buf->header.type = cpu_to_le16(line->type);
		emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
		emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
		emeta_buf->header.crc = cpu_to_le32(
			pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
	}

	emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
	emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));

	spin_lock(&l_mg->close_lock);
	spin_lock(&line->lock);

	/* Update the in-memory start address for emeta, in case it has
	 * shifted due to write errors
	 */
	if (line->emeta_ssec != line->cur_sec)
		line->emeta_ssec = line->cur_sec;

	list_add_tail(&line->list, &l_mg->emeta_list);
	spin_unlock(&line->lock);
	spin_unlock(&l_mg->close_lock);

	pblk_line_should_sync_meta(pblk);
}
1880
1881static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1882{
1883 struct pblk_line_meta *lm = &pblk->lm;
1884 struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1885 unsigned int lba_list_size = lm->emeta_len[2];
1886 struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1887 struct pblk_emeta *emeta = line->emeta;
1888
1889 w_err_gc->lba_list = pblk_malloc(lba_list_size,
1890 l_mg->emeta_alloc_type, GFP_KERNEL);
1891 memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1892 lba_list_size);
Javier Gonzálezdd2a4342017-06-26 11:57:17 +02001893}
1894
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001895void pblk_line_close_ws(struct work_struct *work)
1896{
1897 struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1898 ws);
1899 struct pblk *pblk = line_ws->pblk;
1900 struct pblk_line *line = line_ws->line;
Hans Holmberg48b8d202018-06-01 16:41:06 +02001901 struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1902
1903 /* Write errors makes the emeta start address stored in smeta invalid,
1904 * so keep a copy of the lba list until we've gc'd the line
1905 */
1906 if (w_err_gc->has_write_err)
1907 pblk_save_lba_list(pblk, line);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001908
1909 pblk_line_close(pblk, line);
Kent Overstreetb906bbb2018-05-20 18:25:50 -04001910 mempool_free(line_ws, &pblk->gen_ws_pool);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001911}
1912
/*
 * Allocate a generic work item from the shared mempool and queue @work on
 * @wq with the given line/priv context.
 *
 * NOTE(review): the mempool_alloc() result is used unchecked. That is safe
 * for sleepable masks (mempool_alloc waits for an element), but would
 * dereference NULL under GFP_ATOMIC exhaustion — confirm all callers pass
 * sleepable gfp masks.
 */
void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		      void (*work)(struct work_struct *), gfp_t gfp_mask,
		      struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}
1928
Matias Bjørling43241cf2018-10-09 13:11:51 +02001929static void __pblk_down_chunk(struct pblk *pblk, int pos)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001930{
Javier González3eaa11e2017-07-07 21:08:52 +02001931 struct pblk_lun *rlun = &pblk->luns[pos];
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001932 int ret;
1933
1934 /*
1935 * Only send one inflight I/O per LUN. Since we map at a page
1936 * granurality, all ppas in the I/O will map to the same LUN
1937 */
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001938
Javier González3eaa11e2017-07-07 21:08:52 +02001939 ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
Rakesh Panditc5493842017-10-13 14:45:58 +02001940 if (ret == -ETIME || ret == -EINTR)
Matias Bjørling4e495a42018-07-13 10:48:42 +02001941 pblk_err(pblk, "taking lun semaphore timed out: err %d\n",
1942 -ret);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001943}
1944
Matias Bjørling43241cf2018-10-09 13:11:51 +02001945void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
Javier González3eaa11e2017-07-07 21:08:52 +02001946{
1947 struct nvm_tgt_dev *dev = pblk->dev;
1948 struct nvm_geo *geo = &dev->geo;
Matias Bjørling43241cf2018-10-09 13:11:51 +02001949 int pos = pblk_ppa_to_pos(geo, ppa);
Javier González3eaa11e2017-07-07 21:08:52 +02001950
Matias Bjørling43241cf2018-10-09 13:11:51 +02001951 __pblk_down_chunk(pblk, pos);
Javier González3eaa11e2017-07-07 21:08:52 +02001952}
1953
Matias Bjørling43241cf2018-10-09 13:11:51 +02001954void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
Javier González3eaa11e2017-07-07 21:08:52 +02001955 unsigned long *lun_bitmap)
1956{
1957 struct nvm_tgt_dev *dev = pblk->dev;
1958 struct nvm_geo *geo = &dev->geo;
Matias Bjørling43241cf2018-10-09 13:11:51 +02001959 int pos = pblk_ppa_to_pos(geo, ppa);
Javier González3eaa11e2017-07-07 21:08:52 +02001960
1961 /* If the LUN has been locked for this same request, do no attempt to
1962 * lock it again
1963 */
1964 if (test_and_set_bit(pos, lun_bitmap))
1965 return;
1966
Matias Bjørling43241cf2018-10-09 13:11:51 +02001967 __pblk_down_chunk(pblk, pos);
Javier González3eaa11e2017-07-07 21:08:52 +02001968}
1969
Matias Bjørling43241cf2018-10-09 13:11:51 +02001970void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
Javier González3eaa11e2017-07-07 21:08:52 +02001971{
1972 struct nvm_tgt_dev *dev = pblk->dev;
1973 struct nvm_geo *geo = &dev->geo;
1974 struct pblk_lun *rlun;
Matias Bjørling43241cf2018-10-09 13:11:51 +02001975 int pos = pblk_ppa_to_pos(geo, ppa);
Javier González3eaa11e2017-07-07 21:08:52 +02001976
1977 rlun = &pblk->luns[pos];
1978 up(&rlun->wr_sem);
1979}
1980
Hans Holmberge99e8022018-10-09 13:11:48 +02001981void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001982{
1983 struct nvm_tgt_dev *dev = pblk->dev;
1984 struct nvm_geo *geo = &dev->geo;
1985 struct pblk_lun *rlun;
Javier Gonzáleza40afad2018-03-30 00:05:14 +02001986 int num_lun = geo->all_luns;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001987 int bit = -1;
1988
Javier Gonzáleza40afad2018-03-30 00:05:14 +02001989 while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001990 rlun = &pblk->luns[bit];
1991 up(&rlun->wr_sem);
1992 }
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001993}
1994
1995void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1996{
Javier González9f6cb132017-10-13 14:46:12 +02001997 struct ppa_addr ppa_l2p;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02001998
1999 /* logic error: lba out-of-bounds. Ignore update */
2000 if (!(lba < pblk->rl.nr_secs)) {
2001 WARN(1, "pblk: corrupted L2P map request\n");
2002 return;
2003 }
2004
2005 spin_lock(&pblk->trans_lock);
Javier González9f6cb132017-10-13 14:46:12 +02002006 ppa_l2p = pblk_trans_map_get(pblk, lba);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02002007
Javier González9f6cb132017-10-13 14:46:12 +02002008 if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
2009 pblk_map_invalidate(pblk, ppa_l2p);
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02002010
2011 pblk_trans_map_set(pblk, lba, ppa);
2012 spin_unlock(&pblk->trans_lock);
2013}
2014
/* Cache variant of pblk_update_map(): @ppa must be a write-buffer address. */
void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
{

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
#endif

	pblk_update_map(pblk, lba, ppa);
}
2026
/*
 * GC-side L2P update: remap @lba to cache address @ppa_new only if its
 * current mapping still points at the sector GC read (@gc_line/@paddr_gc).
 *
 * Returns 1 if the map was updated, 0 if the entry changed under GC
 * (e.g. a newer user write) and the update was dropped.
 */
int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
		       struct pblk_line *gc_line, u64 paddr_gc)
{
	struct ppa_addr ppa_l2p, ppa_gc;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a cache address */
	BUG_ON(!pblk_addr_in_cache(ppa_new));
	BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
#endif

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return 0;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);
	ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);

	if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
		/* The mapping moved since GC read it; whoever remapped it
		 * must already have invalidated the old sector.
		 */
		spin_lock(&gc_line->lock);
		WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
						"pblk: corrupted GC update");
		spin_unlock(&gc_line->lock);

		ret = 0;
		goto out;
	}

	pblk_trans_map_set(pblk, lba, ppa_new);
out:
	spin_unlock(&pblk->trans_lock);
	return ret;
}
2064
/*
 * Write-path L2P update after @lba has been persisted at device address
 * @ppa_mapped: install the device mapping unless the cacheline
 * (@ppa_cache) was overwritten meanwhile, in which case the just-written
 * sector is invalidated instead. Padded entries (ADDR_EMPTY) only bump
 * the pad write-amplification counters.
 */
void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
			 struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
{
	struct ppa_addr ppa_l2p;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Callers must ensure that the ppa points to a device address */
	BUG_ON(pblk_addr_in_cache(ppa_mapped));
#endif
	/* Invalidate and discard padded entries */
	if (lba == ADDR_EMPTY) {
		atomic64_inc(&pblk->pad_wa);
#ifdef CONFIG_NVM_PBLK_DEBUG
		atomic_long_inc(&pblk->padded_wb);
#endif
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		return;
	}

	/* logic error: lba out-of-bounds. Ignore update */
	if (!(lba < pblk->rl.nr_secs)) {
		WARN(1, "pblk: corrupted L2P map request\n");
		return;
	}

	spin_lock(&pblk->trans_lock);
	ppa_l2p = pblk_trans_map_get(pblk, lba);

	/* Do not update L2P if the cacheline has been updated. In this case,
	 * the mapped ppa must be invalidated
	 */
	if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
		if (!pblk_ppa_empty(ppa_mapped))
			pblk_map_invalidate(pblk, ppa_mapped);
		goto out;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
#endif

	pblk_trans_map_set(pblk, lba, ppa_mapped);
out:
	spin_unlock(&pblk->trans_lock);
}
2111
2112void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2113 sector_t blba, int nr_secs)
2114{
2115 int i;
2116
2117 spin_lock(&pblk->trans_lock);
Javier González7bd4d372017-10-13 14:46:23 +02002118 for (i = 0; i < nr_secs; i++) {
2119 struct ppa_addr ppa;
2120
2121 ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2122
2123 /* If the L2P entry maps to a line, the reference is valid */
2124 if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
Javier Gonzálezcb216652018-10-09 13:11:42 +02002125 struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
Javier González7bd4d372017-10-13 14:46:23 +02002126
2127 kref_get(&line->ref);
2128 }
2129 }
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02002130 spin_unlock(&pblk->trans_lock);
2131}
2132
2133void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2134 u64 *lba_list, int nr_secs)
2135{
Javier Gonzálezd3401212017-10-13 14:46:14 +02002136 u64 lba;
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02002137 int i;
2138
2139 spin_lock(&pblk->trans_lock);
2140 for (i = 0; i < nr_secs; i++) {
2141 lba = lba_list[i];
Javier Gonzálezd3401212017-10-13 14:46:14 +02002142 if (lba != ADDR_EMPTY) {
Javier Gonzáleza4bd2172017-04-15 20:55:50 +02002143 /* logic error: lba out-of-bounds. Ignore update */
2144 if (!(lba < pblk->rl.nr_secs)) {
2145 WARN(1, "pblk: corrupted L2P map request\n");
2146 continue;
2147 }
2148 ppas[i] = pblk_trans_map_get(pblk, lba);
2149 }
2150 }
2151 spin_unlock(&pblk->trans_lock);
2152}