/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

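/*
 * All media reads and writes go through these wrappers so that the
 * arena-relative offset is translated to a namespace offset, and so
 * that callers in atomic context can pass NVDIMM_IO_ATOMIC via flags.
 */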
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

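/*
 * A 32-bit map entry packs the postmap LBA together with two flag bits
 * (Z == zero/trim, E == error) in the top bits. Both flags set marks a
 * 'normal' entry (MAP_ENT_NORMAL); both clear marks an entry that has
 * never been written, i.e. an identity mapping.
 */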
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_read_pair(struct arena_info *arena, u32 lane,
			struct log_entry *ent)
{
	return arena_read_bytes(arena,
			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
			2 * LOG_ENT_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

/*
 * This function accepts two log entries, and uses the sequence number
 * to find the 'older' entry. Sequence numbers cycle through 1->2->3->1,
 * with 0 reserved for a lane that has never been written; for that case
 * it also initializes the sequence number of entry [0]. It returns the
 * index of the older entry, or a negative error for an invalid pair.
 *
 * TODO: The logic feels a bit kludgy; simplify it.
 */
static int btt_log_get_old(struct log_entry *ent)
{
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (ent[0].seq == 0) {
		ent[0].seq = cpu_to_le32(1);
		return 0;
	}

	if (ent[0].seq == ent[1].seq)
		return -EINVAL;
	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
		return -EINVAL;

	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_entry log[2];

	ret = btt_log_read_pair(arena, lane, log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane, log[0].seq, log[1].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	/*
	 * Ignore the padding in log_entry for calculating log_half.
	 * The entry is 'committed' when we write the sequence number,
	 * and we want to ensure that it is the last thing written.
	 * We don't bother writing the padding as that would be extra
	 * media wear and write amplification
	 */
	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
	void *src = ent;

	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

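/*
 * Commit a flog entry and then advance this lane's free-list state:
 * flip the sub-slot to be overwritten next, bump the cyclic sequence
 * number, and make the just-retired 'old_map' block the lane's new
 * free block, remembering whether it carries a media error.
 */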
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(ent->old_map))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = le32_to_cpu(ent_lba(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry log;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		log.lba = cpu_to_le32(i);
		log.old_map = cpu_to_le32(arena->external_nlba + i);
		log.new_map = cpu_to_le32(arena->external_nlba + i);
		log.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &log, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

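/*
 * If the lane's free block was retired with a media error, zero it in
 * page-sized chunks so that the error is cleared before the block is
 * handed out for a new write.
 */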
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

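/*
 * Rebuild the in-memory free list from the on-media flog. For each lane
 * this reads the 'new' (most recent) log entry, seeds the lane's free
 * block and sequence number from it, and replays an interrupted
 * transaction: if the flog was committed but the map update never
 * landed, the map entry is rewritten here.
 */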
static int btt_freelist_init(struct arena_info *arena)
{
	int old, new, ret;
	u32 i, map_entry;
	struct log_entry log_new, log_old;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
		if (old < 0)
			return old;

		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = le32_to_cpu(log_new.old_map);

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(log_new.old_map)) {
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

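/*
 * The RTT (read tracking table) has one slot per lane. A reader
 * publishes the postmap block it is reading so that writers can avoid
 * handing that block out for a new write while the read is in flight.
 */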
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

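/*
 * Allocate an arena_info and, for a non-zero size, derive its layout:
 * info block, data area, map, log, and backup info block are laid out
 * in that order, with the map/data split chosen so that every internal
 * block has a map entry.
 */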
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
				BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

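/*
 * Walk the raw namespace space, validating each arena's info block and
 * initializing per-arena state, until the chain terminates with
 * nextoff == 0. A bad info block in the very first arena means no BTT
 * exists yet; one later in the chain is treated as corruption.
 */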
static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
					"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		mutex_init(&arena->err_lock);
		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

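/*
 * Carve the raw space into arenas of at most ARENA_MAX_SIZE, dropping
 * any tail smaller than ARENA_MIN_SIZE. Each arena's nextoff records
 * the offset to its successor, or 0 for the last arena in the chain.
 */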
static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

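/*
 * Read path: look up the postmap block for each sector, publish it in
 * the RTT, then re-read the map entry to make sure a writer did not
 * retire the block between the lookup and the RTT store. Trimmed
 * entries are zero-filled; a media error on the data read marks the
 * map entry with the error flag.
 */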
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			int rc;

			/* Media error - set the e_flag */
			rc = btt_map_write(arena, premap, postmap, 0, 1,
				NVDIMM_IO_ATOMIC);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

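/*
 * Write path: take the lane's free block, wait via the RTT for any
 * in-flight reads of that block to drain, write the data, then commit
 * the flog entry and finally the new map entry. A free block with a
 * known media error is zeroed (after releasing the lane) before the
 * write is retried on a fresh lane.
 */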
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
			NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			bool is_write, sector_t sector)
{
	int ret;

	if (!is_write) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				op_is_write(bio_op(bio)), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d\n",
				(op_is_write(bio_op(bio))) ? "WRITE" :
				"READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, bool is_write)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;
	unsigned int len;

	len = hpage_nr_pages(page) * PAGE_SIZE;
	rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
	btt->btt_disk->queue->backing_dev_info->capabilities |=
			BDI_CAP_SYNCHRONOUS_IO;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	set_capacity(btt->btt_disk, 0);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region of the backing device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);