/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

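/*
 * The "reconfiguration" mutex serializes updates to bus-scoped state,
 * e.g. the shared-mapping and poison lists maintained below.  A caller
 * brackets such an update with the helpers that follow (sketch only;
 * the update step is whatever bus-scoped state the caller owns):
 *
 *      nvdimm_bus_lock(dev);
 *      ...modify bus-scoped state...
 *      nvdimm_bus_unlock(dev);
 */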
void nvdimm_bus_lock(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return;
        mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return;
        mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

struct nvdimm_map {
        struct nvdimm_bus *nvdimm_bus;
        struct list_head list;
        resource_size_t offset;
        unsigned long flags;
        size_t size;
        union {
                void *mem;
                void __iomem *iomem;
        };
        struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
                resource_size_t offset)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_map *nvdimm_map;

        list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
                if (nvdimm_map->offset == offset)
                        return nvdimm_map;
        return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
                resource_size_t offset, size_t size, unsigned long flags)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
        struct nvdimm_map *nvdimm_map;

        nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
        if (!nvdimm_map)
                return NULL;

        INIT_LIST_HEAD(&nvdimm_map->list);
        nvdimm_map->nvdimm_bus = nvdimm_bus;
        nvdimm_map->offset = offset;
        nvdimm_map->flags = flags;
        nvdimm_map->size = size;
        kref_init(&nvdimm_map->kref);

        if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
                goto err_request_region;

        if (flags)
                nvdimm_map->mem = memremap(offset, size, flags);
        else
                nvdimm_map->iomem = ioremap(offset, size);

        if (!nvdimm_map->mem)
                goto err_map;

        dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
                        __func__);
        list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

        return nvdimm_map;

 err_map:
        release_mem_region(offset, size);
 err_request_region:
        kfree(nvdimm_map);
        return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
        struct nvdimm_bus *nvdimm_bus;
        struct nvdimm_map *nvdimm_map;

        nvdimm_map = container_of(kref, struct nvdimm_map, kref);
        nvdimm_bus = nvdimm_map->nvdimm_bus;

        dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset);
        list_del(&nvdimm_map->list);
        if (nvdimm_map->flags)
                memunmap(nvdimm_map->mem);
        else
                iounmap(nvdimm_map->iomem);
        release_mem_region(nvdimm_map->offset, nvdimm_map->size);
        kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
        struct nvdimm_map *nvdimm_map = data;
        struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

        nvdimm_bus_lock(&nvdimm_bus->dev);
        kref_put(&nvdimm_map->kref, nvdimm_map_release);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
                size_t size, unsigned long flags)
{
        struct nvdimm_map *nvdimm_map;

        nvdimm_bus_lock(dev);
        nvdimm_map = find_nvdimm_map(dev, offset);
        if (!nvdimm_map)
                nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
        else
                kref_get(&nvdimm_map->kref);
        nvdimm_bus_unlock(dev);

        if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
                return NULL;

        return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);

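/*
 * A minimal usage sketch (hypothetical caller and resource): a region
 * driver mapping a range shared with a sibling region might do the
 * following, where MEMREMAP_WB selects a cached memremap() and a zero
 * flags argument would fall back to ioremap():
 *
 *      void *ptr = devm_nvdimm_memremap(region_dev, res->start,
 *                      resource_size(res), MEMREMAP_WB);
 *      if (!ptr)
 *              return -ENXIO;
 *
 * Repeat callers for the same @offset share one mapping, and the devm
 * action registered above drops the reference on driver detach.
 */
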
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
        u32 *buf = addr;
        u32 lo32 = 0;
        u64 hi32 = 0;
        int i;

        for (i = 0; i < len / sizeof(u32); i++) {
                lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
                hi32 += lo32;
        }

        return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

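/*
 * nd_fletcher64() is a Fletcher-style checksum over 32-bit words:
 * lo32 wraps naturally at 2^32 while hi32 accumulates the running
 * value of lo32.  An assumed-typical caller (modeled on the label
 * index-block checksum pattern) zeroes the checksum field before
 * summing the structure that contains it:
 *
 *      nsindex->checksum = 0;
 *      checksum = nd_fletcher64(nsindex, index_size, 1);
 *      nsindex->checksum = __cpu_to_le64(checksum);
 */
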
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
        /* struct nvdimm_bus definition is private to libnvdimm */
        return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

static bool is_uuid_sep(char sep)
{
        if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
                return true;
        return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
                size_t len)
{
        const char *str = buf;
        u8 uuid[16];
        int i;

        for (i = 0; i < 16; i++) {
                if (!isxdigit(str[0]) || !isxdigit(str[1])) {
                        dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
                                        __func__, i, str - buf, str[0],
                                        str + 1 - buf, str[1]);
                        return -EINVAL;
                }

                uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
                str += 2;
                if (is_uuid_sep(*str))
                        str++;
        }

        memcpy(uuid_out, uuid, sizeof(uuid));
        return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of @buf
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached).
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
                size_t len)
{
        u8 uuid[16];
        int rc;

        if (dev->driver)
                return -EBUSY;

        rc = nd_uuid_parse(dev, uuid, buf, len);
        if (rc)
                return rc;

        kfree(*uuid_out);
        *uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
        if (!(*uuid_out))
                return -ENOMEM;

        return 0;
}

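/*
 * nd_uuid_parse() accepts sixteen hex-encoded bytes with an optional
 * '-', ':', or newline after any byte pair, so both of the following
 * (illustrative sysfs path) store the same uuid:
 *
 *      echo "8a2f7bee-1f10-43f9-a442-3d2f6a5c9f3b" > .../namespace0.0/uuid
 *      echo "8a2f7bee1f1043f9a4423d2f6a5c9f3b" > .../namespace0.0/uuid
 */
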
ssize_t nd_sector_size_show(unsigned long current_lbasize,
                const unsigned long *supported, char *buf)
{
        ssize_t len = 0;
        int i;

        for (i = 0; supported[i]; i++)
                if (current_lbasize == supported[i])
                        len += sprintf(buf + len, "[%ld] ", supported[i]);
                else
                        len += sprintf(buf + len, "%ld ", supported[i]);
        len += sprintf(buf + len, "\n");
        return len;
}

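/*
 * For example, given a zero-terminated @supported list of { 512, 520,
 * 4096, 0 } and a current LBA size of 4096, the attribute above reads
 * back as "512 520 [4096]", with the active size bracketed.
 */
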
ssize_t nd_sector_size_store(struct device *dev, const char *buf,
                unsigned long *current_lbasize, const unsigned long *supported)
{
        unsigned long lbasize;
        int rc, i;

        if (dev->driver)
                return -EBUSY;

        rc = kstrtoul(buf, 0, &lbasize);
        if (rc)
                return rc;

        for (i = 0; supported[i]; i++)
                if (lbasize == supported[i])
                        break;

        if (supported[i]) {
                *current_lbasize = lbasize;
                return 0;
        } else {
                return -EINVAL;
        }
}

void __nd_iostat_start(struct bio *bio, unsigned long *start)
{
        struct gendisk *disk = bio->bi_bdev->bd_disk;
        const int rw = bio_data_dir(bio);
        int cpu = part_stat_lock();

        *start = jiffies;
        part_round_stats(cpu, &disk->part0);
        part_stat_inc(cpu, &disk->part0, ios[rw]);
        part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
        part_inc_in_flight(&disk->part0, rw);
        part_stat_unlock();
}
EXPORT_SYMBOL(__nd_iostat_start);

void nd_iostat_end(struct bio *bio, unsigned long start)
{
        struct gendisk *disk = bio->bi_bdev->bd_disk;
        unsigned long duration = jiffies - start;
        const int rw = bio_data_dir(bio);
        int cpu = part_stat_lock();

        part_stat_add(cpu, &disk->part0, ticks[rw], duration);
        part_round_stats(cpu, &disk->part0);
        part_dec_in_flight(&disk->part0, rw);
        part_stat_unlock();
}
EXPORT_SYMBOL(nd_iostat_end);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int cmd, len = 0;
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        struct device *parent = nvdimm_bus->dev.parent;

        if (nd_desc->provider_name)
                return nd_desc->provider_name;
        else if (parent)
                return dev_name(parent);
        else
                return "unknown";
}

static ssize_t provider_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

        return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
        device_lock(dev);
        device_unlock(dev);
        return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
        device_lock(dev);
        device_unlock(dev);
        device_for_each_child(dev, NULL, flush_namespaces);
        return 0;
}

static ssize_t wait_probe_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
        int rc;

        if (nd_desc->flush_probe) {
                rc = nd_desc->flush_probe(nd_desc);
                if (rc)
                        return rc;
        }
        nd_synchronize();
        device_for_each_child(dev, NULL, flush_regions_dimms);
        return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
        &dev_attr_commands.attr,
        &dev_attr_wait_probe.attr,
        &dev_attr_provider.attr,
        NULL,
};

struct attribute_group nvdimm_bus_attribute_group = {
        .attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
        dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
                        (u64) s * 512, (u64) num * 512);
        /* this isn't an error as the hardware will still throw an exception */
        if (badblocks_set(bb, s, num, 1))
                dev_info_once(bb->dev, "%s: failed for sector %llx\n",
                                __func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb: badblocks instance to populate
 * @ns_offset: namespace offset where the error range begins (in bytes)
 * @len: number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
        const unsigned int sector_size = 512;
        sector_t start_sector;
        u64 num_sectors;
        u32 rem;

        start_sector = div_u64(ns_offset, sector_size);
        num_sectors = div_u64_rem(len, sector_size, &rem);
        if (rem)
                num_sectors++;

        if (unlikely(num_sectors > (u64)INT_MAX)) {
                u64 remaining = num_sectors;
                sector_t s = start_sector;

                while (remaining) {
                        int done = min_t(u64, remaining, INT_MAX);

                        set_badblock(bb, s, done);
                        remaining -= done;
                        s += done;
                }
        } else
                set_badblock(bb, start_sector, num_sectors);
}

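/*
 * Worked example for the rounding above: ns_offset = 1024 and
 * len = 1025 touch all of sectors 2 and 3 plus one byte of sector 4,
 * so start_sector = 1024 / 512 = 2, and num_sectors = 2 with a
 * remainder of 1, rounded up to 3.
 */
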
static void badblocks_populate(struct list_head *poison_list,
                struct badblocks *bb, const struct resource *res)
{
        struct nd_poison *pl;

        if (list_empty(poison_list))
                return;

        list_for_each_entry(pl, poison_list, list) {
                u64 pl_end = pl->start + pl->length - 1;

                /* Discard intervals with no intersection */
                if (pl_end < res->start)
                        continue;
                if (pl->start > res->end)
                        continue;
                /* Deal with any overlap after start of the namespace */
                if (pl->start >= res->start) {
                        u64 start = pl->start;
                        u64 len;

                        if (pl_end <= res->end)
                                len = pl->length;
                        else
                                len = res->start + resource_size(res)
                                        - pl->start;
                        __add_badblock_range(bb, start - res->start, len);
                        continue;
                }
                /* Deal with overlap for poison starting before the namespace */
                if (pl->start < res->start) {
                        u64 len;

                        if (pl_end < res->end)
                                len = pl->start + pl->length - res->start;
                        else
                                len = resource_size(res);
                        __add_badblock_range(bb, 0, len);
                }
        }
}

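/*
 * Clamping example: a poison record covering [0x1000, 0x3000) checked
 * against a namespace resource of [0x2000, 0x4000) starts before the
 * namespace, so the second overlap case applies and the result is
 * __add_badblock_range(bb, 0, 0x1000).
 */
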
/**
 * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @res: resource range to consider
 *
 * The poison list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges. Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
                struct badblocks *bb, const struct resource *res)
{
        struct nvdimm_bus *nvdimm_bus;
        struct list_head *poison_list;

        if (!is_nd_pmem(&nd_region->dev)) {
                dev_WARN_ONCE(&nd_region->dev, 1,
                                "%s only valid for pmem regions\n", __func__);
                return;
        }
        nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        poison_list = &nvdimm_bus->poison_list;

        nvdimm_bus_lock(&nvdimm_bus->dev);
        badblocks_populate(poison_list, bb, res);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);

static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
        struct nd_poison *pl;

        pl = kzalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return -ENOMEM;

        pl->start = addr;
        pl->length = length;
        list_add_tail(&pl->list, &nvdimm_bus->poison_list);

        return 0;
}

static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
        struct nd_poison *pl;

        if (list_empty(&nvdimm_bus->poison_list))
                return add_poison(nvdimm_bus, addr, length);

        /*
         * There is a chance this is a duplicate, so check for those
         * first.  This will be the common case, as ARS_STATUS returns
         * all known errors in the SPA space and we can't query it per
         * region.
         */
        list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
                if (pl->start == addr) {
                        /* If length has changed, update this list entry */
                        if (pl->length != length)
                                pl->length = length;
                        return 0;
                }

        /*
         * If not a duplicate or a simple length update, add the entry as
         * is, as any overlapping ranges will get resolved when the list is
         * consumed and converted to badblocks.
         */
        return add_poison(nvdimm_bus, addr, length);
}

int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
        int rc;

        nvdimm_bus_lock(&nvdimm_bus->dev);
        rc = bus_add_poison(nvdimm_bus, addr, length);
        nvdimm_bus_unlock(&nvdimm_bus->dev);

        return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);

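/*
 * Sketch of a typical producer: a bus provider (the ACPI NFIT driver,
 * for instance) walks address-range-scrub results and records each
 * error span; the identifiers below stand in for the caller's own:
 *
 *      rc = nvdimm_bus_add_poison(nvdimm_bus, err_spa_addr, err_spa_len);
 */
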
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
        struct blk_integrity bi;

        if (meta_size == 0)
                return 0;

        memset(&bi, 0, sizeof(bi));

        bi.tuple_size = meta_size;
        bi.tag_size = meta_size;

        blk_integrity_register(disk, &bi);
        blk_queue_max_integrity_segments(disk->queue, 1);

        return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
        return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

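/*
 * Usage sketch: a consumer such as the BTT driver registers its
 * per-sector metadata size after allocating the disk, roughly:
 *
 *      rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
 *      if (rc)
 *              goto out_free_disk;
 *
 * (btt_meta_size() and the error label are that driver's details.)
 */
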
static __init int libnvdimm_init(void)
{
        int rc;

        rc = nvdimm_bus_init();
        if (rc)
                return rc;
        rc = nvdimm_init();
        if (rc)
                goto err_dimm;
        rc = nd_region_init();
        if (rc)
                goto err_region;
        return 0;
 err_region:
        nvdimm_exit();
 err_dimm:
        nvdimm_bus_exit();
        return rc;
}

static __exit void libnvdimm_exit(void)
{
        WARN_ON(!list_empty(&nvdimm_bus_list));
        nd_region_exit();
        nvdimm_exit();
        nvdimm_bus_exit();
        nd_region_devs_exit();
        nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);