/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

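/* Complete the user bios attached to the synced write buffer entries, free
 * the request's DMA metadata region and advance the write buffer's sync
 * pointer. Returns the new sync position in the buffer.
 */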
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);
		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, WRITE);

	return ret;
}

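/* Complete a write request that was parked out of order on the completion
 * list: unlink its context and end it as a normal in-order completion.
 */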
static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

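/* Completions must advance the write buffer's sync pointer in submission
 * order. Requests that complete out of order are parked on pblk->compl_list
 * until the request at the current sync position has been ended.
 */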
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pr_err("pblk: could not allocate recovery context\n");
		return;
	}
	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		/* Logic error */
		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need to
		 * protect it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}

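/* End-of-I/O callback for user data writes: on a device error, hand the
 * failed sectors over to the recovery path; otherwise complete the request
 * in sync order.
 */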
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

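/* End-of-I/O callback for emeta (line metadata) writes: release the pages
 * taken at submission and account the synced sectors; once the whole emeta
 * buffer is on media, schedule the line close work. Note that metadata
 * requests come from (and return to) the READ rqd pool, as allocated in
 * pblk_submit_meta_io().
 */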
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int sync;

	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed. Line %d\n", line->id);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws,
								pblk->close_wq);

	bio_put(rqd->bio);
	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
	pblk_free_rqd(pblk, rqd, READ);

	atomic_dec(&pblk->inflight_io);
}

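/* Initialize the common fields of a write request and allocate the DMA
 * region that holds its out-of-band metadata and PPA list.
 */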
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
							&rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

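/* Set up a user data write: allocate the per-request LUN bitmap and map
 * write buffer entries to PPAs, interleaving an erase for the next data
 * line when one is still pending.
 */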
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret = 0;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return 0;
}

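/* Set up a write request on the recovery path; the number of sectors to
 * map is taken from the request itself.
 */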
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret)
		return ret;

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);

	return ret;
}

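/* Decide how many sectors to write in this pass, given the sectors
 * available in the buffer and any outstanding flush requirement; the
 * result is sanity-checked under CONFIG_NVM_DEBUG.
 */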
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

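/* Decide whether a metadata I/O to meta_line can be issued now without
 * contending for the same LUNs as the data I/O described by ppa_list.
 */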
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
				      struct pblk_line *meta_line,
				      struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *data_line;
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int i;

	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);

	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
		return 1;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regards to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in a LUN deadlock. In this case, modify
	 * the distance to not be optimal, but allow metadata I/Os to succeed.
	 */
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
		data_line->meta_distance--;
		return 0;
	}

	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
					ppa_list[i].g.lun == ppa_opt.g.lun)
			return 1;

	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
			if (ppa_list[i].g.ch == ppa.g.ch &&
						ppa_list[i].g.lun == ppa.g.lun)
				return 0;

		return 1;
	}

	return 0;
}

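/* Write out the next min_write_pgs-sized chunk of the line's emeta buffer.
 * Once the whole buffer has been mapped, the line is removed from the
 * emeta list and will be closed when the I/O completes.
 */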
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return PTR_ERR(rqd);
	}
	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt meta line %d\n", meta_line->id);
		spin_unlock(&l_mg->close_lock);
	}

	pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);

	nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list);
fail_free_bio:
	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

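/* Pick a line with outstanding emeta and schedule a metadata I/O for it,
 * provided its placement does not conflict with the given data I/O.
 */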
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
			      int prev_n)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return 0;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
		return 0;

	return pblk_submit_meta_io(pblk, meta_line);
}

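/* Submit a formed write request, scheduling a metadata write for the
 * previous data line or an erase for the next data line alongside it,
 * as needed.
 */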
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr erase_ppa;
	int err;

	ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	if (likely(ppa_empty(erase_ppa))) {
		/* Submit metadata write for previous data line */
		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n",
									err);
			return NVM_IO_ERR;
		}

		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}
	} else {
		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}

		/* Submit available erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	return NVM_IO_OK;
}

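/* Free the pages that were added to pad the write bio up to a full
 * write unit.
 */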
static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
}

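/* Drain the write buffer: commit up to max_write_pgs sectors, form a write
 * request and submit it to the media. Returns 0 if a request was submitted,
 * 1 if there is not enough work or an error occurred.
 */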
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be cleared by the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return 1;
	}

	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
	if (!bio) {
		pr_err("pblk: cannot allocate write bio\n");
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		goto fail_put_bio;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);

	return 1;
}

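/* Write thread: repeatedly drains the write buffer, sleeping whenever there
 * is not enough data to form a write request.
 */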
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}