/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"

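/* Complete persisted write buffer entries: end the user bios waiting on each
 * synced entry, advance the write buffer sync pointer and release the
 * request's metadata DMA region, bio and nvm_rq.
 */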
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct bio *original_bio;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		struct ppa_addr p;
		struct pblk_line *line;

		w_ctx = pblk_rb_w_ctx(&pblk->rwb, c_ctx->sentry + i);

		p = rqd->ppa_list[i];
		line = &pblk->lines[pblk_dev_ppa_to_line(p)];

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	if (rqd->meta_list)
		nvm_dev_dma_free(dev->parent, rqd->meta_list,
				 rqd->dma_meta_list);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

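/* Write completions must advance the sync pointer in submission order. A
 * request that completes out of order is parked on pblk->compl_list and
 * retired once all preceding entries have been synced.
 */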
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif

	pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/* When a write fails, we are not sure whether the block has grown bad or a page
 * range is more susceptible to write errors. If a high number of pages fail, we
 * assume that the block is bad and we mark it accordingly. In all cases, we
 * remap and resubmit the failed entries as fast as possible; if a flush is
 * waiting on a completion, the whole stack would stall otherwise.
 */
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	void *comp_bits = &rqd->ppa_status;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_rec_ctx *recovery;
	struct ppa_addr *ppa_list = rqd->ppa_list;
	int nr_ppas = rqd->nr_ppas;
	unsigned int c_entries;
	int bit, ret;

	if (unlikely(nr_ppas == 1))
		ppa_list = &rqd->ppa_addr;

	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pr_err("pblk: could not allocate recovery context\n");
		return;
	}
	INIT_LIST_HEAD(&recovery->failed);

	bit = -1;
	while ((bit = find_next_bit(comp_bits, nr_ppas, bit + 1)) < nr_ppas) {
		struct pblk_rb_entry *entry;
		struct ppa_addr ppa;

		/* Logic error */
		if (bit > c_ctx->nr_valid) {
			WARN_ONCE(1, "pblk: corrupted write request\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		ppa = ppa_list[bit];
		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
		if (!entry) {
			pr_err("pblk: could not scan entry on write failure\n");
			mempool_free(recovery, pblk->rec_pool);
			goto out;
		}

		/* The list is filled first and emptied afterwards. No need for
		 * protecting it with a lock
		 */
		list_add_tail(&entry->index, &recovery->failed);
	}

	c_entries = find_first_bit(comp_bits, nr_ppas);
	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
	if (ret) {
		pr_err("pblk: could not recover from write failure\n");
		mempool_free(recovery, pblk->rec_pool);
		goto out;
	}

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->kw_wq, &recovery->ws_rec);

out:
	pblk_complete_write(pblk, rqd, c_ctx);
}

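/* End I/O callback for user data writes */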
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		return pblk_end_w_fail(pblk, rqd);
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
}

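/* End I/O callback for emeta (line metadata) writes; schedules the line close
 * work once all emeta entries for the line have been persisted.
 */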
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]);
	struct pblk_lun *rlun = &pblk->luns[pos];
	int sync;

	up(&rlun->wr_sem);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pr_err("pblk: metadata I/O failed\n");
	}
#ifdef CONFIG_NVM_DEBUG
	else
		WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_line_run_ws(pblk, line, NULL, pblk_line_close_ws);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, READ);
}

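/* Initialize a write request: opcode, flags, end I/O callback and the DMA
 * region holding the out-of-band metadata and, for multi-sector requests,
 * the PPA list.
 */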
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs,
			   nvm_end_io_fn(*end_io))
{
	struct nvm_tgt_dev *dev = pblk->dev;

	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);
	rqd->private = pblk;
	rqd->end_io = end_io;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
					   &rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	if (unlikely(nr_secs == 1))
		return 0;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;

	return 0;
}

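/* Map write buffer entries to physical addresses and populate the request.
 * If the line scheduled for erase still has blocks left to erase, use the
 * erase-aware mapping, which hands back one PPA to erase asynchronously.
 */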
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct pblk_c_ctx *c_ctx, struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret = 0;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
	else
		pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
				  valid, erase_ppa);

	return 0;
}

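/* Set up a write request for entries that are resubmitted as part of
 * recovery from a failed write.
 */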
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
			struct pblk_c_ctx *c_ctx)
{
	struct pblk_line_meta *lm = &pblk->lm;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;

	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, rqd->nr_ppas, pblk_end_io_write);
	if (ret)
		return ret;

	pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, c_ctx->nr_valid, 0);

	rqd->ppa_status = (u64)0;
	rqd->flags = pblk_set_progr_mode(pblk, WRITE);

	return ret;
}

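/* Calculate how many sectors to submit. The result may exceed the sectors
 * available in the buffer only when an outstanding flush forces the request
 * to be padded up to the minimum write size.
 */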
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);

#ifdef CONFIG_NVM_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

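/* Decide whether an emeta write for @meta_line can be issued now without
 * colliding on a LUN with the data I/O described by @ppa_list. Returns 1 if
 * the metadata I/O can be scheduled, 0 otherwise.
 */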
static inline int pblk_valid_meta_ppa(struct pblk *pblk,
				      struct pblk_line *meta_line,
				      struct ppa_addr *ppa_list, int nr_ppas)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line *data_line;
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int i;

	data_line = &pblk->lines[pblk_dev_ppa_to_line(ppa_list[0])];
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);

	if (test_bit(pblk_ppa_to_pos(geo, ppa), data_line->blk_bitmap))
		return 1;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regard to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in a LUN deadlock. In this case, modify
	 * the distance to not be optimal, but allow metadata I/Os to succeed.
	 */
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	if (unlikely(ppa_opt.ppa == ppa.ppa)) {
		data_line->meta_distance--;
		return 0;
	}

	for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
		if (ppa_list[i].g.ch == ppa_opt.g.ch &&
					ppa_list[i].g.lun == ppa_opt.g.lun)
			return 1;

	if (test_bit(pblk_ppa_to_pos(geo, ppa_opt), data_line->blk_bitmap)) {
		for (i = 0; i < nr_ppas; i += pblk->min_write_pgs)
			if (ppa_list[i].g.ch == ppa.g.ch &&
						ppa_list[i].g.lun == ppa.g.lun)
				return 0;

		return 1;
	}

	return 0;
}

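/* Build and submit one emeta write request for @meta_line: map the next
 * metadata pages, take the destination LUN write semaphore and, once all of
 * the line's emeta has been submitted, remove the line from the emeta list.
 */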
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct pblk_g_ctx *m_ctx;
	struct pblk_lun *rlun;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, READ);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return PTR_ERR(rqd);
	}
	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->sec_size;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len, GFP_KERNEL);
	if (IS_ERR(bio)) {
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])];
	ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000));
	if (ret) {
		pr_err("pblk: lun semaphore timed out (%d)\n", ret);
		goto fail_free_bio;
	}

	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0]) {
		spin_lock(&l_mg->close_lock);
		list_del(&meta_line->list);
		WARN(!bitmap_full(meta_line->map_bitmap, lm->sec_per_line),
				"pblk: corrupt meta line %d\n", meta_line->id);
		spin_unlock(&l_mg->close_lock);
	}

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pr_err("pblk: emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	list_add(&meta_line->list, &meta_line->list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META))
		bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, READ);
	return ret;
}

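/* Try to schedule an emeta write for the oldest line waiting to be closed,
 * provided it does not conflict with the data I/O about to be submitted.
 */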
static int pblk_sched_meta_io(struct pblk *pblk, struct ppa_addr *prev_list,
			      int prev_n)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
retry:
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return 0;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (bitmap_full(meta_line->map_bitmap, lm->sec_per_line))
		goto retry;
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, prev_list, prev_n))
		return 0;

	return pblk_submit_meta_io(pblk, meta_line);
}

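/* Set up and submit a data write. Depending on whether the mapping returned
 * a block to erase, either interleave a metadata write for a previous line
 * or an erase for the next data line.
 */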
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr erase_ppa;
	int err;

	ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, c_ctx, &erase_ppa);
	if (err) {
		pr_err("pblk: could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	if (likely(ppa_empty(erase_ppa))) {
		/* Submit metadata write for previous data line */
		err = pblk_sched_meta_io(pblk, rqd->ppa_list, rqd->nr_ppas);
		if (err) {
			pr_err("pblk: metadata I/O submission failed: %d\n",
									err);
			return NVM_IO_ERR;
		}

		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}
	} else {
		/* Submit data write for current data line */
		err = pblk_submit_io(pblk, rqd);
		if (err) {
			pr_err("pblk: data I/O submission failed: %d\n", err);
			return NVM_IO_ERR;
		}

		/* Submit available erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
}

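/* Pull sectors from the write buffer, build a write request and submit it.
 * Returns 0 if a request was submitted and 1 if there was nothing to do or
 * submission failed.
 */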
static int pblk_submit_write(struct pblk *pblk)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush;
	unsigned long pos;

	/* If there are no sectors in the cache, flushes (bios without data)
	 * will be cleared on the cache threads
	 */
	secs_avail = pblk_rb_read_count(&pblk->rwb);
	if (!secs_avail)
		return 1;

	secs_to_flush = pblk_rb_sync_point_count(&pblk->rwb);
	if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
		return 1;

	rqd = pblk_alloc_rqd(pblk, WRITE);
	if (IS_ERR(rqd)) {
		pr_err("pblk: cannot allocate write req.\n");
		return 1;
	}

	bio = bio_alloc(GFP_KERNEL, pblk->max_write_pgs);
	if (!bio) {
		pr_err("pblk: cannot allocate write bio\n");
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush);
	if (secs_to_sync > pblk->max_write_pgs) {
		pr_err("pblk: bad buffer sync calculation\n");
		goto fail_put_bio;
	}

	secs_to_com = (secs_to_sync > secs_avail) ? secs_avail : secs_to_sync;
	pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, bio, pos, secs_to_sync,
								secs_avail)) {
		pr_err("pblk: corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, WRITE);

	return 1;
}

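/* Write thread: drain the write buffer, sleeping when there is no work */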
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;

	while (!kthread_should_stop()) {
		if (!pblk_submit_write(pblk))
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}