/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

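/*
 * Usage sketch (illustrative, not part of this file): callers that
 * reconfigure bus-wide state bracket the update with these helpers;
 * internal paths can assert the lock via is_nvdimm_bus_locked():
 *
 *	nvdimm_bus_lock(dev);
 *	WARN_ON(!is_nvdimm_bus_locked(dev));
 *	... modify reconfig_mutex-protected state ...
 *	nvdimm_bus_unlock(dev);
 */
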
struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
		goto err_request_region;

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	/* bail out before registering a release action on a NULL map */
	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);

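/*
 * Usage sketch (illustrative, not part of this file): a region/DIMM
 * driver mapping a shared control area; "res" is a hypothetical
 * struct resource *.  Non-zero @flags yield a memremap()-style
 * mapping, zero @flags an ioremap(); repeated calls for the same
 * offset share one reference-counted mapping that is torn down
 * automatically on device removal:
 *
 *	void *base = devm_nvdimm_memremap(dev, res->start,
 *			resource_size(res), MEMREMAP_WB);
 *	if (!base)
 *		return -ENXIO;
 */
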
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

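/*
 * Usage sketch (illustrative, not part of this file): checksumming an
 * on-media structure with its checksum field zeroed first, as a label
 * validation path might do ("label" is a hypothetical object with a
 * little-endian checksum member):
 *
 *	label->checksum = 0;
 *	sum = nd_fletcher64(label, sizeof(*label), 1);
 *	label->checksum = __cpu_to_le64(sum);
 */
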
struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					__func__, i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

/**
 * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of the sysfs buffer
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached)
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}

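/*
 * Usage sketch (illustrative, not part of this file): a sysfs store
 * handler delegating to nd_uuid_store() under the device lock; the
 * "nd_foo" container names are hypothetical:
 *
 *	static ssize_t uuid_store(struct device *dev,
 *			struct device_attribute *attr, const char *buf,
 *			size_t len)
 *	{
 *		struct nd_foo *nd_foo = to_nd_foo(dev);
 *		ssize_t rc;
 *
 *		device_lock(dev);
 *		rc = nd_uuid_store(dev, &nd_foo->uuid, buf, len);
 *		device_unlock(dev);
 *		return rc ? rc : len;
 *	}
 */
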
ssize_t nd_sector_size_show(unsigned long current_lbasize,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_lbasize == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_sector_size_store(struct device *dev, const char *buf,
		unsigned long *current_lbasize, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_lbasize = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}

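/*
 * Usage sketch (illustrative, not part of this file): with a
 * zero-terminated table of supported sizes the show helper brackets
 * the current selection, e.g. "512 [4096]"; "ns" is hypothetical:
 *
 *	static const unsigned long ns_lbasize_supported[] = { 512, 4096, 0 };
 *
 *	return nd_sector_size_show(ns->lbasize, ns_lbasize_supported, buf);
 */
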
void __nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	*start = jiffies;
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(__nd_iostat_start);

void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	unsigned long duration = jiffies - start;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(nd_iostat_end);

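/*
 * Usage sketch (illustrative, not part of this file): a bio-based
 * driver bracketing its I/O path with these accounting helpers,
 * assuming an nd_iostat_start() wrapper (e.g. in a header) that checks
 * whether accounting is enabled before calling __nd_iostat_start():
 *
 *	unsigned long start;
 *	bool do_acct = nd_iostat_start(bio, &start);
 *
 *	... process bio ...
 *	if (do_acct)
 *		nd_iostat_end(bio, start);
 */
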
static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb:		badblocks instance to populate
 * @ns_offset:	namespace offset where the error range begins (in bytes)
 * @len:	number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	num_sectors = div_u64_rem(len, sector_size, &rem);
	if (rem)
		num_sectors++;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}

static void badblocks_populate(struct list_head *poison_list,
		struct badblocks *bb, const struct resource *res)
{
	struct nd_poison *pl;

	if (list_empty(poison_list))
		return;

	list_for_each_entry(pl, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Discard intervals with no intersection */
		if (pl_end < res->start)
			continue;
		if (pl->start > res->end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (pl->start >= res->start) {
			u64 start = pl->start;
			u64 len;

			if (pl_end <= res->end)
				len = pl->length;
			else
				len = res->start + resource_size(res)
					- pl->start;
			__add_badblock_range(bb, start - res->start, len);
			continue;
		}
		/* Deal with overlap for poison starting before the namespace */
		if (pl->start < res->start) {
			u64 len;

			if (pl_end < res->end)
				len = pl->start + pl->length - res->start;
			else
				len = resource_size(res);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb:		badblocks instance to populate
 * @res:	resource range to consider
 *
 * The poison list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges.  Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res)
{
	struct nvdimm_bus *nvdimm_bus;
	struct list_head *poison_list;

	if (!is_nd_pmem(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	poison_list = &nvdimm_bus->poison_list;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(poison_list, bb, res);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);

static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
		gfp_t flags)
{
	struct nd_poison *pl;

	pl = kzalloc(sizeof(*pl), flags);
	if (!pl)
		return -ENOMEM;

	pl->start = addr;
	pl->length = length;
	list_add_tail(&pl->list, &nvdimm_bus->poison_list);

	return 0;
}

static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl;

	if (list_empty(&nvdimm_bus->poison_list))
		return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region
	 */
	list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
		if (pl->start == addr) {
			/* If length has changed, update this list entry */
			if (pl->length != length)
				pl->length = length;
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is consumed
	 * and converted to badblocks
	 */
	return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
}

int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	int rc;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	rc = bus_add_poison(nvdimm_bus, addr, length);
	nvdimm_bus_unlock(&nvdimm_bus->dev);

	return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);

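/*
 * Usage sketch (illustrative, not part of this file): a bus provider
 * feeding media-error ranges, e.g. parsed from an ARS status payload,
 * into the bus-wide poison list ("spa" and "err_len" are hypothetical
 * byte values):
 *
 *	rc = nvdimm_bus_add_poison(nvdimm_bus, spa, err_len);
 *	if (rc)
 *		return rc;
 */
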
void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t start, unsigned int len)
{
	struct list_head *poison_list = &nvdimm_bus->poison_list;
	u64 clr_end = start + len - 1;
	struct nd_poison *pl, *next;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	WARN_ON_ONCE(list_empty(poison_list));

	/*
	 * [start, clr_end] is the poison interval being cleared.
	 * [pl->start, pl_end] is the poison_list entry we're comparing
	 * the above interval against. The poison list entry may need
	 * to be modified (update either start or length), deleted, or
	 * split into two based on the overlap characteristics
	 */

	list_for_each_entry_safe(pl, next, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Skip intervals with no intersection */
		if (pl_end < start)
			continue;
		if (pl->start > clr_end)
			continue;
		/* Delete completely overlapped poison entries */
		if ((pl->start >= start) && (pl_end <= clr_end)) {
			list_del(&pl->list);
			kfree(pl);
			continue;
		}
		/* Adjust start point of partially cleared entries */
		if ((start <= pl->start) && (clr_end > pl->start)) {
			pl->length -= clr_end - pl->start + 1;
			pl->start = clr_end + 1;
			continue;
		}
		/* Adjust pl->length for partial clearing at the tail end */
		if ((pl->start < start) && (pl_end <= clr_end)) {
			/* pl->start remains the same */
			pl->length = start - pl->start;
			continue;
		}
		/*
		 * If clearing in the middle of an entry, we split it into
		 * two by modifying the current entry to represent one half of
		 * the split, and adding a new entry for the second half.
		 */
		if ((pl->start < start) && (pl_end > clr_end)) {
			u64 new_start = clr_end + 1;
			u64 new_len = pl_end - new_start + 1;

			/* Add new entry covering the right half */
			add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
			/* Adjust this entry to cover the left half */
			pl->length = start - pl->start;
			continue;
		}
	}
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);

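/*
 * Usage sketch (illustrative, not part of this file): after a
 * successful clear-error operation the provider trims the cleared span
 * so stale entries do not repopulate badblocks on the next rescan
 * ("phys_addr" and "cleared_len" are hypothetical):
 *
 *	nvdimm_clear_from_poison_list(nvdimm_bus, phys_addr, cleared_len);
 */
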
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

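/*
 * Usage sketch (illustrative, not part of this file): a disk driver
 * with "meta_size" bytes of out-of-band metadata per sector registers
 * a nop integrity profile before adding the disk, so the block layer
 * allocates and attaches integrity buffers:
 *
 *	rc = nd_integrity_init(disk, meta_size);
 *	if (rc)
 *		return rc;
 *	add_disk(disk);
 */
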
static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;
	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nd_region_devs_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);