/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#include "rrpc.h"

static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
static DECLARE_RWSEM(rrpc_lock);

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags);

#define rrpc_for_each_lun(rrpc, rlun, i) \
		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])

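/*
 * Mark the physical page backing @a invalid: set its bit in the owning
 * block's invalid_pages bitmap and clear the reverse-map entry so GC will
 * not migrate it. As an example, with sec_per_blk == 256, physical
 * address 1029 maps to bit 5 (1029 mod 256) of its block's bitmap.
 */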
static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
{
	struct rrpc_block *rblk = a->rblk;
	unsigned int pg_offset;

	lockdep_assert_held(&rrpc->rev_lock);

	if (a->addr == ADDR_EMPTY || !rblk)
		return;

	spin_lock(&rblk->lock);

	div_u64_rem(a->addr, rrpc->dev->sec_per_blk, &pg_offset);
	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
	rblk->nr_invalid_pages++;

	spin_unlock(&rblk->lock);

	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
}

static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
								unsigned len)
{
	sector_t i;

	spin_lock(&rrpc->rev_lock);
	for (i = slba; i < slba + len; i++) {
		struct rrpc_addr *gp = &rrpc->trans_map[i];

		rrpc_page_invalidate(rrpc, gp);
		gp->rblk = NULL;
	}
	spin_unlock(&rrpc->rev_lock);
}

static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
					sector_t laddr, unsigned int pages)
{
	struct nvm_rq *rqd;
	struct rrpc_inflight_rq *inf;

	rqd = mempool_alloc(rrpc->rq_pool, GFP_ATOMIC);
	if (!rqd)
		return ERR_PTR(-ENOMEM);

	inf = rrpc_get_inflight_rq(rqd);
	if (rrpc_lock_laddr(rrpc, laddr, pages, inf)) {
		mempool_free(rqd, rrpc->rq_pool);
		return NULL;
	}

	return rqd;
}

static void rrpc_inflight_laddr_release(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *inf = rrpc_get_inflight_rq(rqd);

	rrpc_unlock_laddr(rrpc, inf);

	mempool_free(rqd, rrpc->rq_pool);
}

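/*
 * Discard: convert the bio into a range of exposed pages (bi_sector is
 * scaled by NR_PHY_IN_LOG, bi_size by RRPC_EXPOSED_PAGE_SIZE), then spin
 * with schedule() until the whole range can be taken as one inflight
 * request before invalidating the mappings.
 */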
static void rrpc_discard(struct rrpc *rrpc, struct bio *bio)
{
	sector_t slba = bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
	sector_t len = bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
	struct nvm_rq *rqd;

	while (1) {
		rqd = rrpc_inflight_laddr_acquire(rrpc, slba, len);
		if (rqd)
			break;

		schedule();
	}

	if (IS_ERR(rqd)) {
		pr_err("rrpc: unable to acquire inflight IO\n");
		bio_io_error(bio);
		return;
	}

	rrpc_invalidate_range(rrpc, slba, len);
	rrpc_inflight_laddr_release(rrpc, rqd);
}

static int block_is_full(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	return (rblk->next_page == rrpc->dev->sec_per_blk);
}

/* Calculate relative addr for the given block, considering instantiated LUNs */
static u64 block_to_rel_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;
	int lun_blk = blk->id % (rrpc->dev->blks_per_lun * rrpc->nr_luns);

	return lun_blk * rrpc->dev->sec_per_blk;
}

/* Calculate global addr for the given block */
static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_block *blk = rblk->parent;

	return blk->id * rrpc->dev->sec_per_blk;
}

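/*
 * Translate a linear sector address into the generic ppa_addr format by
 * peeling off one geometry dimension at a time (mixed-radix div/mod):
 * sector within page, page within block, block within LUN, LUN within
 * channel, then channel. For example, assuming sec_per_pg == 4 and
 * pgs_per_blk == 128, linear address 517 decodes to sec = 1, pg = 1,
 * blk = 1 (517 = ((1 * 128 + 1) * 4) + 1, with lun = ch = 0).
 */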
static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev,
							struct ppa_addr r)
{
	struct ppa_addr l;
	int secs, pgs, blks, luns;
	sector_t ppa = r.ppa;

	l.ppa = 0;

	div_u64_rem(ppa, dev->sec_per_pg, &secs);
	l.g.sec = secs;

	sector_div(ppa, dev->sec_per_pg);
	div_u64_rem(ppa, dev->pgs_per_blk, &pgs);
	l.g.pg = pgs;

	sector_div(ppa, dev->pgs_per_blk);
	div_u64_rem(ppa, dev->blks_per_lun, &blks);
	l.g.blk = blks;

	sector_div(ppa, dev->blks_per_lun);
	div_u64_rem(ppa, dev->luns_per_chnl, &luns);
	l.g.lun = luns;

	sector_div(ppa, dev->luns_per_chnl);
	l.g.ch = ppa;

	return l;
}

static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
{
	struct ppa_addr paddr;

	paddr.ppa = addr;
	return linear_to_generic_addr(dev, paddr);
}

/* requires lun->lock taken */
static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
{
	struct rrpc *rrpc = rlun->rrpc;

	BUG_ON(!rblk);

	if (rlun->cur) {
		spin_lock(&rlun->cur->lock);
		WARN_ON(!block_is_full(rrpc, rlun->cur));
		spin_unlock(&rlun->cur->lock);
	}
	rlun->cur = rblk;
}

static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
							unsigned long flags)
{
	struct nvm_lun *lun = rlun->parent;
	struct nvm_block *blk;
	struct rrpc_block *rblk;

	spin_lock(&lun->lock);
	blk = nvm_get_blk_unlocked(rrpc->dev, rlun->parent, flags);
	if (!blk) {
		pr_err("nvm: rrpc: cannot get new block from media manager\n");
		spin_unlock(&lun->lock);
		return NULL;
	}

	rblk = rrpc_get_rblk(rlun, blk->id);
	list_add_tail(&rblk->list, &rlun->open_list);
	spin_unlock(&lun->lock);

	blk->priv = rblk;
	bitmap_zero(rblk->invalid_pages, rrpc->dev->sec_per_blk);
	rblk->next_page = 0;
	rblk->nr_invalid_pages = 0;
	atomic_set(&rblk->data_cmnt_size, 0);

	return rblk;
}

static void rrpc_put_blk(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rlun->parent;

	spin_lock(&lun->lock);
	nvm_put_blk_unlocked(rrpc->dev, rblk->parent);
	list_del(&rblk->list);
	spin_unlock(&lun->lock);
}

static void rrpc_put_blks(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		if (rlun->cur)
			rrpc_put_blk(rrpc, rlun->cur);
		if (rlun->gc_cur)
			rrpc_put_blk(rrpc, rlun->gc_cur);
	}
}

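/* Strict round-robin LUN selection; next_lun starts at -1, so the first
 * caller gets LUN 0.
 */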
Matias Bjørlingae1519e2015-10-28 19:54:57 +0100245static struct rrpc_lun *get_next_lun(struct rrpc *rrpc)
246{
247 int next = atomic_inc_return(&rrpc->next_lun);
248
249 return &rrpc->luns[next % rrpc->nr_luns];
250}
251
252static void rrpc_gc_kick(struct rrpc *rrpc)
253{
254 struct rrpc_lun *rlun;
255 unsigned int i;
256
257 for (i = 0; i < rrpc->nr_luns; i++) {
258 rlun = &rrpc->luns[i];
259 queue_work(rrpc->krqd_wq, &rlun->ws_gc);
260 }
261}
262
/*
 * Timed GC: kick garbage collection for every LUN and re-arm the timer
 * to fire again in 10 ms.
 */
static void rrpc_gc_timer(unsigned long data)
{
	struct rrpc *rrpc = (struct rrpc *)data;

	rrpc_gc_kick(rrpc);
	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
}

static void rrpc_end_sync_bio(struct bio *bio)
{
	struct completion *waiting = bio->bi_private;

	if (bio->bi_error)
		pr_err("nvm: gc request failed (%u).\n", bio->bi_error);

	complete(waiting);
}

/*
 * rrpc_move_valid_pages -- migrate live data off the block
 * @rrpc: the 'rrpc' structure
 * @block: the block from which to migrate live pages
 *
 * Description:
 *   GC algorithms may call this function to migrate remaining live
 *   pages off the block prior to erasing it. This function blocks
 *   further execution until the operation is complete.
 */
static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct request_queue *q = rrpc->dev->q;
	struct rrpc_rev_addr *rev;
	struct nvm_rq *rqd;
	struct bio *bio;
	struct page *page;
	int slot;
	int nr_sec_per_blk = rrpc->dev->sec_per_blk;
	u64 phys_addr;
	DECLARE_COMPLETION_ONSTACK(wait);

	if (bitmap_full(rblk->invalid_pages, nr_sec_per_blk))
		return 0;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		pr_err("nvm: could not alloc bio to gc\n");
		return -ENOMEM;
	}

	page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
	if (!page) {
		bio_put(bio);
		return -ENOMEM;
	}

	while ((slot = find_first_zero_bit(rblk->invalid_pages,
					   nr_sec_per_blk)) < nr_sec_per_blk) {

		/* Lock laddr */
		phys_addr = rblk->parent->id * nr_sec_per_blk + slot;

try:
		spin_lock(&rrpc->rev_lock);
		/* Get logical address from physical to logical table */
		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
		/* already updated by previous regular write */
		if (rev->addr == ADDR_EMPTY) {
			spin_unlock(&rrpc->rev_lock);
			continue;
		}

		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
		if (IS_ERR_OR_NULL(rqd)) {
			spin_unlock(&rrpc->rev_lock);
			schedule();
			goto try;
		}

		spin_unlock(&rrpc->rev_lock);

		/* Perform read to do GC */
		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		/* TODO: may fail when EXP_PG_SIZE > PAGE_SIZE */
		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc read failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);
		if (bio->bi_error) {
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}

		bio_reset(bio);
		reinit_completion(&wait);

		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_private = &wait;
		bio->bi_end_io = rrpc_end_sync_bio;

		bio_add_pc_page(q, bio, page, RRPC_EXPOSED_PAGE_SIZE, 0);

		/* turn the command around and write the data back to a new
		 * address
		 */
		if (rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_GC)) {
			pr_err("rrpc: gc write failed.\n");
			rrpc_inflight_laddr_release(rrpc, rqd);
			goto finished;
		}
		wait_for_completion_io(&wait);

		rrpc_inflight_laddr_release(rrpc, rqd);
		if (bio->bi_error)
			goto finished;

		bio_reset(bio);
	}

finished:
	mempool_free(page, rrpc->page_pool);
	bio_put(bio);

	if (!bitmap_full(rblk->invalid_pages, nr_sec_per_blk)) {
		pr_err("nvm: failed to garbage collect block\n");
		return -EIO;
	}

	return 0;
}

static void rrpc_block_gc(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_dev *dev = rrpc->dev;

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);

	if (rrpc_move_valid_pages(rrpc, rblk))
		goto put_back;

	if (nvm_erase_blk(dev, rblk->parent))
		goto put_back;

	rrpc_put_blk(rrpc, rblk);

	return;

put_back:
	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);
}

/* The block with the highest number of invalid pages will be at the
 * beginning of the list.
 */
static struct rrpc_block *rblock_max_invalid(struct rrpc_block *ra,
							struct rrpc_block *rb)
{
	if (ra->nr_invalid_pages == rb->nr_invalid_pages)
		return ra;

	return (ra->nr_invalid_pages < rb->nr_invalid_pages) ? rb : ra;
}

/* linearly find the block with highest number of invalid pages
 * requires lun->lock
 */
static struct rrpc_block *block_prio_find_max(struct rrpc_lun *rlun)
{
	struct list_head *prio_list = &rlun->prio_list;
	struct rrpc_block *rblock, *max;

	BUG_ON(list_empty(prio_list));

	max = list_first_entry(prio_list, struct rrpc_block, prio);
	list_for_each_entry(rblock, prio_list, prio)
		max = rblock_max_invalid(max, rblock);

	return max;
}

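/*
 * Per-LUN GC worker. While the LUN is below its free-block target
 * (blks_per_lun / GC_LIMIT_INVERSE, but at least nr_luns), pick the block
 * with the most invalid pages off the prio list and queue it for reclaim
 * by rrpc_block_gc().
 */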
static void rrpc_lun_gc(struct work_struct *work)
{
	struct rrpc_lun *rlun = container_of(work, struct rrpc_lun, ws_gc);
	struct rrpc *rrpc = rlun->rrpc;
	struct nvm_lun *lun = rlun->parent;
	struct rrpc_block_gc *gcb;
	unsigned int nr_blocks_need;

	nr_blocks_need = rrpc->dev->blks_per_lun / GC_LIMIT_INVERSE;

	if (nr_blocks_need < rrpc->nr_luns)
		nr_blocks_need = rrpc->nr_luns;

	spin_lock(&rlun->lock);
	while (nr_blocks_need > lun->nr_free_blocks &&
					!list_empty(&rlun->prio_list)) {
		struct rrpc_block *rblock = block_prio_find_max(rlun);
		struct nvm_block *block = rblock->parent;

		if (!rblock->nr_invalid_pages)
			break;

		gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
		if (!gcb)
			break;

		list_del_init(&rblock->prio);

		BUG_ON(!block_is_full(rrpc, rblock));

		pr_debug("rrpc: selected block '%lu' for GC\n", block->id);

		gcb->rrpc = rrpc;
		gcb->rblk = rblock;
		INIT_WORK(&gcb->ws_gc, rrpc_block_gc);

		queue_work(rrpc->kgc_wq, &gcb->ws_gc);

		nr_blocks_need--;
	}
	spin_unlock(&rlun->lock);

	/* TODO: Hint that request queue can be started again */
}

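/*
 * Runs when a block has been completely written: make it a GC candidate
 * on the LUN's prio list and move it from the open to the closed list,
 * updating the media manager's accounting and block state.
 */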
static void rrpc_gc_queue(struct work_struct *work)
{
	struct rrpc_block_gc *gcb = container_of(work, struct rrpc_block_gc,
									ws_gc);
	struct rrpc *rrpc = gcb->rrpc;
	struct rrpc_block *rblk = gcb->rblk;
	struct rrpc_lun *rlun = rblk->rlun;
	struct nvm_lun *lun = rblk->parent->lun;
	struct nvm_block *blk = rblk->parent;

	spin_lock(&rlun->lock);
	list_add_tail(&rblk->prio, &rlun->prio_list);
	spin_unlock(&rlun->lock);

	spin_lock(&lun->lock);
	lun->nr_open_blocks--;
	lun->nr_closed_blocks++;
	blk->state &= ~NVM_BLK_ST_OPEN;
	blk->state |= NVM_BLK_ST_CLOSED;
	list_move_tail(&rblk->list, &rlun->closed_list);
	spin_unlock(&lun->lock);

	mempool_free(gcb, rrpc->gcb_pool);
	pr_debug("nvm: block '%lu' is full, allow GC (sched)\n",
							rblk->parent->id);
}

static const struct block_device_operations rrpc_fops = {
	.owner		= THIS_MODULE,
};

static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
{
	unsigned int i;
	struct rrpc_lun *rlun, *max_free;

	if (!is_gc)
		return get_next_lun(rrpc);

	/* during GC, we don't care about RR, instead we want to make
	 * sure that we maintain evenness between the block luns.
	 */
	max_free = &rrpc->luns[0];
	/* prevent GC-ing lun from devouring pages of a lun with
	 * few free blocks. We don't take the lock as we only need an
	 * estimate.
	 */
	rrpc_for_each_lun(rrpc, rlun, i) {
		if (rlun->parent->nr_free_blocks >
					max_free->parent->nr_free_blocks)
			max_free = rlun;
	}

	return max_free;
}

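/*
 * Install a new logical-to-physical mapping. Any previous physical page
 * for @laddr is invalidated first; both the forward (trans_map) and
 * reverse (rev_trans_map) tables are updated under rev_lock.
 */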
static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
					struct rrpc_block *rblk, u64 paddr)
{
	struct rrpc_addr *gp;
	struct rrpc_rev_addr *rev;

	BUG_ON(laddr >= rrpc->nr_sects);

	gp = &rrpc->trans_map[laddr];
	spin_lock(&rrpc->rev_lock);
	if (gp->rblk)
		rrpc_page_invalidate(rrpc, gp);

	gp->addr = paddr;
	gp->rblk = rblk;

	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
	rev->addr = laddr;
	spin_unlock(&rrpc->rev_lock);

	return gp;
}

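/*
 * Claim the next free sector in @rblk by bumping next_page under the
 * block lock; returns ADDR_EMPTY once the block is full.
 */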
static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	u64 addr = ADDR_EMPTY;

	spin_lock(&rblk->lock);
	if (block_is_full(rrpc, rblk))
		goto out;

	addr = block_to_addr(rrpc, rblk) + rblk->next_page;

	rblk->next_page++;
out:
	spin_unlock(&rblk->lock);
	return addr;
}

/* Simple round-robin Logical to physical address translation.
 *
 * Retrieve the mapping using the active append point. Then update the ap for
 * the next write to the disk.
 *
 * Returns rrpc_addr with the physical address and block. Remember to release
 * it when the request is finished.
 */
static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
								int is_gc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	u64 paddr;

	rlun = rrpc_get_lun_rr(rrpc, is_gc);
	lun = rlun->parent;

	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
		return NULL;

	spin_lock(&rlun->lock);

	rblk = rlun->cur;
retry:
	paddr = rrpc_alloc_addr(rrpc, rblk);

	if (paddr == ADDR_EMPTY) {
		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (rblk) {
			rrpc_set_lun_cur(rlun, rblk);
			goto retry;
		}

		if (is_gc) {
			/* retry from emergency gc block */
			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			if (paddr == ADDR_EMPTY) {
				rblk = rrpc_get_blk(rrpc, rlun, 1);
				if (!rblk) {
					pr_err("rrpc: no more blocks\n");
					goto err;
				}

				rlun->gc_cur = rblk;
				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
			}
			rblk = rlun->gc_cur;
		}
	}

	spin_unlock(&rlun->lock);
	return rrpc_update_map(rrpc, laddr, rblk, paddr);
err:
	spin_unlock(&rlun->lock);
	return NULL;
}

static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb) {
		pr_err("rrpc: unable to queue block for gc.\n");
		return;
	}

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;

	INIT_WORK(&gcb->ws_gc, rrpc_gc_queue);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}

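/*
 * Write completion accounting: each block counts its committed sectors,
 * and once the count reaches sec_per_blk the block is fully written and
 * is handed to rrpc_run_gc() to be queued as a GC candidate.
 */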
static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
						sector_t laddr, uint8_t npages)
{
	struct rrpc_addr *p;
	struct rrpc_block *rblk;
	struct nvm_lun *lun;
	int cmnt_size, i;

	for (i = 0; i < npages; i++) {
		p = &rrpc->trans_map[laddr + i];
		rblk = p->rblk;
		lun = rblk->parent->lun;

		cmnt_size = atomic_inc_return(&rblk->data_cmnt_size);
		if (unlikely(cmnt_size == rrpc->dev->sec_per_blk))
			rrpc_run_gc(rrpc, rblk);
	}
}

static void rrpc_end_io(struct nvm_rq *rqd)
{
	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	uint8_t npages = rqd->nr_ppas;
	sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;

	if (bio_data_dir(rqd->bio) == WRITE)
		rrpc_end_io_write(rrpc, rrqd, laddr, npages);

	bio_put(rqd->bio);

	if (rrqd->flags & NVM_IOTYPE_GC)
		return;

	rrpc_unlock_rq(rrpc, rqd);

	if (npages > 1)
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);

	mempool_free(rqd, rrpc->rq_pool);
}

static int rrpc_read_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *gp;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		BUG_ON(!(laddr + i >= 0 && laddr + i < rrpc->nr_sects));
		gp = &rrpc->trans_map[laddr + i];

		if (gp->rblk) {
			rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								gp->addr);
		} else {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			return NVM_IO_DONE;
		}
	}

	rqd->opcode = NVM_OP_HBREAD;

	return NVM_IO_OK;
}

static int rrpc_read_rq(struct rrpc *rrpc, struct bio *bio, struct nvm_rq *rqd,
							unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);
	struct rrpc_addr *gp;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	BUG_ON(!(laddr >= 0 && laddr < rrpc->nr_sects));
	gp = &rrpc->trans_map[laddr];

	if (gp->rblk) {
		rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, gp->addr);
	} else {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		return NVM_IO_DONE;
	}

	rqd->opcode = NVM_OP_HBREAD;
	rrqd->addr = gp;

	return NVM_IO_OK;
}

static int rrpc_write_ppalist_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, int npages)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	struct rrpc_addr *p;
	sector_t laddr = rrpc_get_laddr(bio);
	int is_gc = flags & NVM_IOTYPE_GC;
	int i;

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd)) {
		nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
		return NVM_IO_REQUEUE;
	}

	for (i = 0; i < npages; i++) {
		/* We assume that mapping occurs at 4KB granularity */
		p = rrpc_map_page(rrpc, laddr + i, is_gc);
		if (!p) {
			BUG_ON(is_gc);
			rrpc_unlock_laddr(rrpc, r);
			nvm_dev_dma_free(rrpc->dev, rqd->ppa_list,
							rqd->dma_ppa_list);
			rrpc_gc_kick(rrpc);
			return NVM_IO_REQUEUE;
		}

		rqd->ppa_list[i] = rrpc_ppa_to_gaddr(rrpc->dev,
								p->addr);
	}

	rqd->opcode = NVM_OP_HBWRITE;

	return NVM_IO_OK;
}

static int rrpc_write_rq(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
	struct rrpc_addr *p;
	int is_gc = flags & NVM_IOTYPE_GC;
	sector_t laddr = rrpc_get_laddr(bio);

	if (!is_gc && rrpc_lock_rq(rrpc, bio, rqd))
		return NVM_IO_REQUEUE;

	p = rrpc_map_page(rrpc, laddr, is_gc);
	if (!p) {
		BUG_ON(is_gc);
		rrpc_unlock_rq(rrpc, rqd);
		rrpc_gc_kick(rrpc);
		return NVM_IO_REQUEUE;
	}

	rqd->ppa_addr = rrpc_ppa_to_gaddr(rrpc->dev, p->addr);
	rqd->opcode = NVM_OP_HBWRITE;
	rrqd->addr = p;

	return NVM_IO_OK;
}

static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
			struct nvm_rq *rqd, unsigned long flags, uint8_t npages)
{
	if (npages > 1) {
		rqd->ppa_list = nvm_dev_dma_alloc(rrpc->dev, GFP_KERNEL,
							&rqd->dma_ppa_list);
		if (!rqd->ppa_list) {
			pr_err("rrpc: not able to allocate ppa list\n");
			return NVM_IO_ERR;
		}

		if (bio_rw(bio) == WRITE)
			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
									npages);

		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
	}

	if (bio_rw(bio) == WRITE)
		return rrpc_write_rq(rrpc, bio, rqd, flags);

	return rrpc_read_rq(rrpc, bio, rqd, flags);
}

static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
				struct nvm_rq *rqd, unsigned long flags)
{
	int err;
	struct rrpc_rq *rrq = nvm_rq_to_pdu(rqd);
	uint8_t nr_pages = rrpc_get_pages(bio);
	int bio_size = bio_sectors(bio) << 9;

	if (bio_size < rrpc->dev->sec_size)
		return NVM_IO_ERR;
	else if (bio_size > rrpc->dev->max_rq_size)
		return NVM_IO_ERR;

	err = rrpc_setup_rq(rrpc, bio, rqd, flags, nr_pages);
	if (err)
		return err;

	bio_get(bio);
	rqd->bio = bio;
	rqd->ins = &rrpc->instance;
	rqd->nr_ppas = nr_pages;
	rrq->flags = flags;

	err = nvm_submit_io(rrpc->dev, rqd);
	if (err) {
		pr_err("rrpc: I/O submission failed: %d\n", err);
		bio_put(bio);
		if (!(flags & NVM_IOTYPE_GC)) {
			rrpc_unlock_rq(rrpc, rqd);
			if (rqd->nr_ppas > 1)
				nvm_dev_dma_free(rrpc->dev,
					rqd->ppa_list, rqd->dma_ppa_list);
		}
		return NVM_IO_ERR;
	}

	return NVM_IO_OK;
}

static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
{
	struct rrpc *rrpc = q->queuedata;
	struct nvm_rq *rqd;
	int err;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		rrpc_discard(rrpc, bio);
		return BLK_QC_T_NONE;
	}

	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
	if (!rqd) {
		pr_err_ratelimited("rrpc: not able to queue bio.\n");
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	memset(rqd, 0, sizeof(struct nvm_rq));

	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
	switch (err) {
	case NVM_IO_OK:
		return BLK_QC_T_NONE;
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	case NVM_IO_REQUEUE:
		spin_lock(&rrpc->bio_lock);
		bio_list_add(&rrpc->requeue_bios, bio);
		spin_unlock(&rrpc->bio_lock);
		queue_work(rrpc->kgc_wq, &rrpc->ws_requeue);
		break;
	}

	mempool_free(rqd, rrpc->rq_pool);
	return BLK_QC_T_NONE;
}

static void rrpc_requeue(struct work_struct *work)
{
	struct rrpc *rrpc = container_of(work, struct rrpc, ws_requeue);
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock(&rrpc->bio_lock);
	bio_list_merge(&bios, &rrpc->requeue_bios);
	bio_list_init(&rrpc->requeue_bios);
	spin_unlock(&rrpc->bio_lock);

	while ((bio = bio_list_pop(&bios)))
		rrpc_make_rq(rrpc->disk->queue, bio);
}

static void rrpc_gc_free(struct rrpc *rrpc)
{
	if (rrpc->krqd_wq)
		destroy_workqueue(rrpc->krqd_wq);

	if (rrpc->kgc_wq)
		destroy_workqueue(rrpc->kgc_wq);
}

static int rrpc_gc_init(struct rrpc *rrpc)
{
	rrpc->krqd_wq = alloc_workqueue("rrpc-lun", WQ_MEM_RECLAIM|WQ_UNBOUND,
								rrpc->nr_luns);
	if (!rrpc->krqd_wq)
		return -ENOMEM;

	rrpc->kgc_wq = alloc_workqueue("rrpc-bg", WQ_MEM_RECLAIM, 1);
	if (!rrpc->kgc_wq)
		return -ENOMEM;

	setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);

	return 0;
}

static void rrpc_map_free(struct rrpc *rrpc)
{
	vfree(rrpc->rev_trans_map);
	vfree(rrpc->trans_map);
}

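/*
 * Callback for dev->ops->get_l2p_tbl, seeding the in-memory translation
 * tables from the device's stored L2P table. Entries arrive as
 * little-endian PBAs; out-of-range values are rejected, except U64_MAX,
 * which is let through here (presumably marking unmapped sectors).
 */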
static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct rrpc *rrpc = (struct rrpc *)private;
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_addr *addr = rrpc->trans_map + slba;
	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
	u64 elba = slba + nlb;
	u64 i;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("nvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);
		unsigned int mod;
		/* LNVM treats address-spaces as silos, LBA and PBA are
		 * equally large and zero-indexed.
		 */
		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("nvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected, as it often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		div_u64_rem(pba, rrpc->nr_sects, &mod);

		addr[i].addr = pba;
		raddr[mod].addr = slba + i;
	}

	return 0;
}

static int rrpc_map_init(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	sector_t i;
	int ret;

	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
	if (!rrpc->trans_map)
		return -ENOMEM;

	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
							* rrpc->nr_sects);
	if (!rrpc->rev_trans_map)
		return -ENOMEM;

	for (i = 0; i < rrpc->nr_sects; i++) {
		struct rrpc_addr *p = &rrpc->trans_map[i];
		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];

		p->addr = ADDR_EMPTY;
		r->addr = ADDR_EMPTY;
	}

	if (!dev->ops->get_l2p_tbl)
		return 0;

	/* Bring up the mapping table from device */
	ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
					rrpc_l2p_update, rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not read L2P table.\n");
		return -EINVAL;
	}

	return 0;
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64

static int rrpc_core_init(struct rrpc *rrpc)
{
	down_write(&rrpc_lock);
	if (!rrpc_gcb_cache) {
		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
				sizeof(struct rrpc_block_gc), 0, 0, NULL);
		if (!rrpc_gcb_cache) {
			up_write(&rrpc_lock);
			return -ENOMEM;
		}

		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
				0, 0, NULL);
		if (!rrpc_rq_cache) {
			kmem_cache_destroy(rrpc_gcb_cache);
			up_write(&rrpc_lock);
			return -ENOMEM;
		}
	}
	up_write(&rrpc_lock);

	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!rrpc->page_pool)
		return -ENOMEM;

	rrpc->gcb_pool = mempool_create_slab_pool(rrpc->dev->nr_luns,
								rrpc_gcb_cache);
	if (!rrpc->gcb_pool)
		return -ENOMEM;

	rrpc->rq_pool = mempool_create_slab_pool(64, rrpc_rq_cache);
	if (!rrpc->rq_pool)
		return -ENOMEM;

	spin_lock_init(&rrpc->inflights.lock);
	INIT_LIST_HEAD(&rrpc->inflights.reqs);

	return 0;
}

static void rrpc_core_free(struct rrpc *rrpc)
{
	mempool_destroy(rrpc->page_pool);
	mempool_destroy(rrpc->gcb_pool);
	mempool_destroy(rrpc->rq_pool);
}

static void rrpc_luns_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvm_lun *lun;
	struct rrpc_lun *rlun;
	int i;

	if (!rrpc->luns)
		return;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];
		lun = rlun->parent;
		if (!lun)
			break;
		dev->mt->release_lun(dev, lun->id);
		vfree(rlun->blocks);
	}

	kfree(rrpc->luns);
}

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
	struct nvm_dev *dev = rrpc->dev;
	struct rrpc_lun *rlun;
	int i, j, ret = -EINVAL;

	if (dev->sec_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
		pr_err("rrpc: number of pages per block too high.\n");
		return -EINVAL;
	}

	spin_lock_init(&rrpc->rev_lock);

	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
								GFP_KERNEL);
	if (!rrpc->luns)
		return -ENOMEM;

	/* 1:1 mapping */
	for (i = 0; i < rrpc->nr_luns; i++) {
		int lunid = lun_begin + i;
		struct nvm_lun *lun;

		if (dev->mt->reserve_lun(dev, lunid)) {
			pr_err("rrpc: lun %u is already allocated\n", lunid);
			goto err;
		}

		lun = dev->mt->get_lun(dev, lunid);
		if (!lun)
			goto err;

		rlun = &rrpc->luns[i];
		rlun->parent = lun;
		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
						rrpc->dev->blks_per_lun);
		if (!rlun->blocks) {
			ret = -ENOMEM;
			goto err;
		}

		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
			struct rrpc_block *rblk = &rlun->blocks[j];
			struct nvm_block *blk = &lun->blocks[j];

			rblk->parent = blk;
			rblk->rlun = rlun;
			INIT_LIST_HEAD(&rblk->prio);
			spin_lock_init(&rblk->lock);
		}

		rlun->rrpc = rrpc;
		INIT_LIST_HEAD(&rlun->prio_list);
		INIT_LIST_HEAD(&rlun->open_list);
		INIT_LIST_HEAD(&rlun->closed_list);

		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
		spin_lock_init(&rlun->lock);
	}

	return 0;
err:
	return ret;
}

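/*
 * The target claims a contiguous area of the device's address space.
 * Sizes move between target sectors and 512 B units via shifts; e.g.,
 * assuming sec_size == 4096, the shift is ilog2(sec_size) - 9 == 3.
 */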
/* returns 0 on success and stores the beginning address in *begin */
static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t size = rrpc->nr_sects * dev->sec_size;
	int ret;

	size >>= 9;

	ret = mt->get_area(dev, begin, size);
	if (!ret)
		*begin >>= (ilog2(dev->sec_size) - 9);

	return ret;
}

static void rrpc_area_free(struct rrpc *rrpc)
{
	struct nvm_dev *dev = rrpc->dev;
	struct nvmm_type *mt = dev->mt;
	sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);

	mt->put_area(dev, begin);
}

static void rrpc_free(struct rrpc *rrpc)
{
	rrpc_gc_free(rrpc);
	rrpc_map_free(rrpc);
	rrpc_core_free(rrpc);
	rrpc_luns_free(rrpc);
	rrpc_area_free(rrpc);

	kfree(rrpc);
}

static void rrpc_exit(void *private)
{
	struct rrpc *rrpc = private;

	del_timer(&rrpc->gc_timer);

	flush_workqueue(rrpc->krqd_wq);
	flush_workqueue(rrpc->kgc_wq);

	rrpc_free(rrpc);
}

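/*
 * Exposed capacity: reserve four blocks per LUN (cur, gc_cur and two
 * emergency GC blocks), then expose 90% of the remaining sectors and
 * keep 10% as overprovisioning. The result is in 512 B sectors,
 * NR_PHY_IN_LOG per exposed page.
 */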
static sector_t rrpc_capacity(void *private)
{
	struct rrpc *rrpc = private;
	struct nvm_dev *dev = rrpc->dev;
	sector_t reserved, provisioned;

	/* cur, gc, and two emergency blocks for each lun */
	reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
	provisioned = rrpc->nr_sects - reserved;

	if (reserved > rrpc->nr_sects) {
		pr_err("rrpc: not enough space available to expose storage.\n");
		return 0;
	}

	sector_div(provisioned, 10);
	return provisioned * 9 * NR_PHY_IN_LOG;
}

/*
 * Look up the logical address for each sector of the block in the reverse
 * translation map and compare it against the forward map. Sectors whose
 * forward entry still points at them are live and keep their block
 * reference; all other sectors are marked invalid.
 */
static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
{
	struct nvm_dev *dev = rrpc->dev;
	int offset;
	struct rrpc_addr *laddr;
	u64 bpaddr, paddr, pladdr;

	bpaddr = block_to_rel_addr(rrpc, rblk);
	for (offset = 0; offset < dev->sec_per_blk; offset++) {
		paddr = bpaddr + offset;

		pladdr = rrpc->rev_trans_map[paddr].addr;
		if (pladdr == ADDR_EMPTY)
			continue;

		laddr = &rrpc->trans_map[pladdr];

		if (paddr == laddr->addr) {
			laddr->rblk = rblk;
		} else {
			set_bit(offset, rblk->invalid_pages);
			rblk->nr_invalid_pages++;
		}
	}
}

static int rrpc_blocks_init(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int lun_iter, blk_iter;

	for (lun_iter = 0; lun_iter < rrpc->nr_luns; lun_iter++) {
		rlun = &rrpc->luns[lun_iter];

		for (blk_iter = 0; blk_iter < rrpc->dev->blks_per_lun;
								blk_iter++) {
			rblk = &rlun->blocks[blk_iter];
			rrpc_block_map_update(rrpc, rblk);
		}
	}

	return 0;
}

static int rrpc_luns_configure(struct rrpc *rrpc)
{
	struct rrpc_lun *rlun;
	struct rrpc_block *rblk;
	int i;

	for (i = 0; i < rrpc->nr_luns; i++) {
		rlun = &rrpc->luns[i];

		rblk = rrpc_get_blk(rrpc, rlun, 0);
		if (!rblk)
			goto err;

		rrpc_set_lun_cur(rlun, rblk);

		/* Emergency gc block */
		rblk = rrpc_get_blk(rrpc, rlun, 1);
		if (!rblk)
			goto err;
		rlun->gc_cur = rblk;
	}

	return 0;
err:
	rrpc_put_blks(rrpc);
	return -EINVAL;
}

static struct nvm_tgt_type tt_rrpc;

static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
						int lun_begin, int lun_end)
{
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct rrpc *rrpc;
	sector_t soffset;
	int ret;

	if (!(dev->identity.dom & NVM_RSP_L2P)) {
		pr_err("nvm: rrpc: device does not support l2p (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	rrpc = kzalloc(sizeof(struct rrpc), GFP_KERNEL);
	if (!rrpc)
		return ERR_PTR(-ENOMEM);

	rrpc->instance.tt = &tt_rrpc;
	rrpc->dev = dev;
	rrpc->disk = tdisk;

	bio_list_init(&rrpc->requeue_bios);
	spin_lock_init(&rrpc->bio_lock);
	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);

	rrpc->nr_luns = lun_end - lun_begin + 1;
	rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
	rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;

	/* simple round-robin strategy */
	atomic_set(&rrpc->next_lun, -1);

	ret = rrpc_area_init(rrpc, &soffset);
	if (ret < 0) {
		pr_err("nvm: rrpc: could not initialize area\n");
		return ERR_PTR(ret);
	}
	rrpc->soffset = soffset;

	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize luns\n");
		goto err;
	}

	rrpc->poffset = dev->sec_per_lun * lun_begin;
	rrpc->lun_offset = lun_begin;

	ret = rrpc_core_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize core\n");
		goto err;
	}

	ret = rrpc_map_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize maps\n");
		goto err;
	}

	ret = rrpc_blocks_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize state for blocks\n");
		goto err;
	}

	ret = rrpc_luns_configure(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: not enough blocks available in LUNs.\n");
		goto err;
	}

	ret = rrpc_gc_init(rrpc);
	if (ret) {
		pr_err("nvm: rrpc: could not initialize gc\n");
		goto err;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	pr_info("nvm: rrpc initialized with %u luns and %llu pages.\n",
			rrpc->nr_luns, (unsigned long long)rrpc->nr_sects);

	mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));

	return rrpc;
err:
	rrpc_free(rrpc);
	return ERR_PTR(ret);
}

/* round robin, page-based FTL, and cost-based GC */
static struct nvm_tgt_type tt_rrpc = {
	.name		= "rrpc",
	.version	= {1, 0, 0},

	.make_rq	= rrpc_make_rq,
	.capacity	= rrpc_capacity,
	.end_io		= rrpc_end_io,

	.init		= rrpc_init,
	.exit		= rrpc_exit,
};

static int __init rrpc_module_init(void)
{
	return nvm_register_tgt_type(&tt_rrpc);
}

static void rrpc_module_exit(void)
{
	nvm_unregister_tgt_type(&tt_rrpc);
}

module_init(rrpc_module_init);
module_exit(rrpc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Block-Device Target for Open-Channel SSDs");