/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);
static DEFINE_IDA(nd_ida);

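/*
 * Per-bus 'reconfig' locking: walk from an arbitrary libnvdimm device up
 * to its parent nvdimm_bus and take that bus's reconfig_mutex.  Devices
 * that are not (yet) connected to a bus are silently ignored.
 */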
void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

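/*
 * Fletcher-64 style checksum over a buffer treated as an array of 32-bit
 * words: 'lo32' accumulates the running sum of the words and 'hi32'
 * accumulates the running sum of 'lo32'.  Pass @le when the buffer holds
 * little-endian on-media data (e.g. when checksumming libnvdimm metadata
 * blocks).
 */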
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

static void nvdimm_bus_release(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	ida_simple_remove(&nd_ida, nvdimm_bus->id);
	kfree(nvdimm_bus);
}

struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus;

	nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
	WARN_ON(nvdimm_bus->dev.release != nvdimm_bus_release);
	return nvdimm_bus;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus);

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

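/*
 * Find the nvdimm_bus that an arbitrary libnvdimm device hangs off of by
 * walking the parent chain until the device whose release method is
 * nvdimm_bus_release is found.
 */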
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
{
	struct device *dev;

	for (dev = nd_dev; dev; dev = dev->parent)
		if (dev->release == nvdimm_bus_release)
			break;
	dev_WARN_ONCE(nd_dev, !dev, "invalid dev, not on nd bus\n");
	if (dev)
		return to_nvdimm_bus(dev);
	return NULL;
}

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

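/*
 * Parse a 16-byte uuid from its textual form: 32 hex digits, two per
 * byte, with an optional separator ('-', ':', '\n', or '\0') accepted
 * after any byte (see is_uuid_sep()).
 */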
static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					__func__, i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of the raw sysfs buffer
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}

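/*
 * Emit the supported logical block sizes as a space-separated list with
 * the currently selected size wrapped in brackets, e.g. "512 [4096]".
 */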
ssize_t nd_sector_size_show(unsigned long current_lbasize,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_lbasize == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_sector_size_store(struct device *dev, const char *buf,
		unsigned long *current_lbasize, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_lbasize = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}

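/*
 * Minimal block I/O accounting against the whole-disk statistics
 * (disk->part0), for bio-based nd drivers that do not go through the
 * request queue and therefore account I/O themselves.
 */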
void __nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	*start = jiffies;
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(__nd_iostat_start);

void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	unsigned long duration = jiffies - start;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(nd_iostat_end);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->dsm_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

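/*
 * Cycling the device_lock() of every child serializes against in-flight
 * probing, so a read of the 'wait_probe' attribute does not return until
 * outstanding async device registration and probing has settled.
 */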
static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);

struct nvdimm_bus *__nvdimm_bus_register(struct device *parent,
		struct nvdimm_bus_descriptor *nd_desc, struct module *module)
{
	struct nvdimm_bus *nvdimm_bus;
	int rc;

	nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
	if (!nvdimm_bus)
		return NULL;
	INIT_LIST_HEAD(&nvdimm_bus->list);
	INIT_LIST_HEAD(&nvdimm_bus->poison_list);
	init_waitqueue_head(&nvdimm_bus->probe_wait);
	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
	mutex_init(&nvdimm_bus->reconfig_mutex);
	if (nvdimm_bus->id < 0) {
		kfree(nvdimm_bus);
		return NULL;
	}
	nvdimm_bus->nd_desc = nd_desc;
	nvdimm_bus->module = module;
	nvdimm_bus->dev.parent = parent;
	nvdimm_bus->dev.release = nvdimm_bus_release;
	nvdimm_bus->dev.groups = nd_desc->attr_groups;
	dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
	rc = device_register(&nvdimm_bus->dev);
	if (rc) {
		dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
		goto err;
	}

	rc = nvdimm_bus_create_ndctl(nvdimm_bus);
	if (rc)
		goto err;

	mutex_lock(&nvdimm_bus_list_mutex);
	list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	return nvdimm_bus;
 err:
	put_device(&nvdimm_bus->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(__nvdimm_bus_register);

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/*
	 * Failure to record the badblock is not fatal; the hardware will
	 * still raise an exception when the poisoned range is accessed.
	 */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb: badblocks instance to populate
 * @ns_offset: namespace offset where the error range begins (in bytes)
 * @len: number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	num_sectors = div_u64_rem(len, sector_size, &rem);
	if (rem)
		num_sectors++;

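	/*
	 * badblocks_set() takes an 'int' sector count, so feed very large
	 * poison ranges to it in INT_MAX-sized chunks.
	 */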
	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}

/**
 * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks
 * @ndns: the namespace containing poison ranges
 * @bb: badblocks instance to populate
 * @offset: offset at the start of the namespace before 'sector 0'
 *
 * The poison list generated during NFIT initialization may contain multiple,
 * possibly overlapping ranges in the SPA (System Physical Address) space.
 * Compare each of these ranges to the namespace currently being initialized,
 * and add badblocks to the gendisk for all matching sub-ranges
 */
void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns,
		struct badblocks *bb, resource_size_t offset)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	struct nvdimm_bus *nvdimm_bus;
	struct list_head *poison_list;
	u64 ns_start, ns_end, ns_size;
	struct nd_poison *pl;

	ns_size = nvdimm_namespace_capacity(ndns) - offset;
	ns_start = nsio->res.start + offset;
	ns_end = nsio->res.end;

	nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent);
	poison_list = &nvdimm_bus->poison_list;
	if (list_empty(poison_list))
		return;

	list_for_each_entry(pl, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Discard intervals with no intersection */
		if (pl_end < ns_start)
			continue;
		if (pl->start > ns_end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (pl->start >= ns_start) {
			u64 start = pl->start;
			u64 len;

			if (pl_end <= ns_end)
				len = pl->length;
			else
				len = ns_start + ns_size - pl->start;
			__add_badblock_range(bb, start - ns_start, len);
			continue;
		}
		/* Deal with overlap for poison starting before the namespace */
		if (pl->start < ns_start) {
			u64 len;

			if (pl_end < ns_end)
				len = pl->start + pl->length - ns_start;
			else
				len = ns_size;
			__add_badblock_range(bb, 0, len);
		}
	}
}
EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison);

static int __add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl;

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	pl->start = addr;
	pl->length = length;
	list_add_tail(&pl->list, &nvdimm_bus->poison_list);

	return 0;
}

int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl;

	if (list_empty(&nvdimm_bus->poison_list))
		return __add_poison(nvdimm_bus, addr, length);

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region
	 */
	list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
		if (pl->start == addr) {
			/* If length has changed, update this list entry */
			if (pl->length != length)
				pl->length = length;
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is consumed
	 * and converted to badblocks
	 */
	return __add_poison(nvdimm_bus, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);

static void free_poison_list(struct list_head *poison_list)
{
	struct nd_poison *pl, *next;

	list_for_each_entry_safe(pl, next, poison_list, list) {
		list_del(&pl->list);
		kfree(pl);
	}
	list_del_init(poison_list);
}

static int child_unregister(struct device *dev, void *data)
{
	/*
	 * the singular ndctl class device per bus needs to be
	 * "device_destroy"ed, so skip it here
	 *
	 * i.e. remove classless children
	 */
	if (dev->class)
		/* pass */;
	else
		nd_device_unregister(dev, ND_SYNC);
	return 0;
}

void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
{
	if (!nvdimm_bus)
		return;

	mutex_lock(&nvdimm_bus_list_mutex);
	list_del_init(&nvdimm_bus->list);
	mutex_unlock(&nvdimm_bus_list_mutex);

	nd_synchronize();
	device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
	free_poison_list(&nvdimm_bus->poison_list);
	nvdimm_bus_destroy_ndctl(nvdimm_bus);

	device_unregister(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);

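/*
 * When CONFIG_BLK_DEV_INTEGRITY is enabled, register a nop blk_integrity
 * profile (no generate/verify callbacks) sized to @meta_size so the block
 * layer exposes per-sector metadata, e.g. for nd namespaces that carry a
 * metadata area; otherwise this is a no-op.
 */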
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	bi.profile = NULL;
	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;
	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);