/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mount.h>
#include <linux/pfn_t.h>
#include <linux/hash.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"

static dev_t dax_devt;
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
module_param(nr_dax, int, S_IRUGO);
static struct vfsmount *dax_mnt;
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");
/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region id for a memory range
 * @ida: instance id allocator for child dax devices
 * @base: linear address corresponding to @res
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @res: physical address range of the region
 * @pfn_flags: identify whether the pfns are page-backed (struct page) or not
 */
struct dax_region {
	int id;
	struct ida ida;
	void *base;
	struct kref kref;
	struct device *dev;
	unsigned int align;
	struct resource res;
	unsigned long pfn_flags;
};

/**
 * struct dax_dev - subdivision of a dax region
 * @region - parent region
 * @inode - inode shared by all openers / mappings of this device
 * @dev - device backing the character device
 * @cdev - core chardev data
 * @alive - !alive + rcu grace period == no new mappings can be established
 * @id - child id in the region
 * @num_resources - number of physical address extents in this device
 * @res - array of physical address ranges
 */
struct dax_dev {
	struct dax_region *region;
	struct inode *inode;
	struct device dev;
	struct cdev cdev;
	bool alive;
	int id;
	int num_resources;
	struct resource res[0];
};

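/*
 * Each dax_dev is backed by one inode allocated from the minimal "dax"
 * pseudo-filesystem defined below. Every open of the character device
 * shares that inode's address_space (see dax_open()), so all mappings
 * of a given device land in one shared mapping tree.
 */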
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	return kmem_cache_alloc(dax_cache, GFP_KERNEL);
}

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(dax_cache, inode);
}

static void dax_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	return inode->i_cdev == data;
}

static int dax_set(struct inode *inode, void *data)
{
	inode->i_cdev = data;
	return 0;
}

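/*
 * Find or create the inode backing @cdev: iget5_locked() keys the lookup
 * by the cdev pointer (via dax_test()/dax_set()); the devt-derived hash
 * only picks the inode hash bucket. A freshly created inode is set up as
 * a DAX character-device inode before being unlocked.
 */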
static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
{
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, cdev);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		inode->i_rdev = devt;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}
	return inode;
}

static void init_once(void *inode)
{
	inode_init_once(inode);
}

static int dax_inode_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_inode_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_type);
	kmem_cache_destroy(dax_cache);
}

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long pfn_flags)
{
	struct dax_region *dax_region;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);

	if (!dax_region)
		return NULL;

	memcpy(&dax_region->res, res, sizeof(*res));
	dax_region->pfn_flags = pfn_flags;
	kref_init(&dax_region->kref);
	dax_region->id = region_id;
	ida_init(&dax_region->ida);
	dax_region->align = align;
	dax_region->dev = parent;
	dax_region->base = addr;

	return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static struct dax_dev *to_dax_dev(struct device *dev)
{
	return container_of(dev, struct dax_dev, dev);
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	unsigned long long size = 0;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++)
		size += resource_size(&dax_dev->res[i]);

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dax_device_attributes[] = {
	&dev_attr_size.attr,
	NULL,
};

static const struct attribute_group dax_device_attribute_group = {
	.attrs = dax_device_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
	&dax_device_attribute_group,
	NULL,
};

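/*
 * Validate a proposed mapping: the device must be alive, the vma must be
 * a shared DAX mapping (no private/writable COW mappings), both vma ends
 * must honor the region alignment, and a PFN_DEV-only region (no struct
 * page) additionally requires VM_DONTCOPY so forked children cannot
 * inherit the mapping.
 */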
static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dax_dev->region;
	struct device *dev = &dax_dev->dev;
	unsigned long mask;

	if (!dax_dev->alive)
		return -ENXIO;

	/* prevent private / writable mappings from being established */
	if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
		dev_info(dev, "%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

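/*
 * Translate a linear file page offset into a physical address by walking
 * the device's resource array as one contiguous pgoff space. Returns -1
 * if @pgoff is out of range or fewer than @size bytes remain in the
 * containing resource.
 */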
static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res;
	phys_addr_t phys;
	int i;

	for (i = 0; i < dax_dev->num_resources; i++) {
		res = &dax_dev->res[i];
		phys = pgoff * PAGE_SIZE + res->start;
		if (phys >= res->start && phys <= res->end)
			break;
		pgoff -= PHYS_PFN(resource_size(res));
	}

	if (i < dax_dev->num_resources) {
		res = &dax_dev->res[i];
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long) vmf->virtual_address;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
	pfn_t pfn;

	if (check_vma(dax_dev, vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	rc = vm_insert_mixed(vma, vaddr, pfn);

	if (rc == -ENOMEM)
		return VM_FAULT_OOM;
	if (rc < 0 && rc != -EBUSY)
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

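/*
 * Fault entry points run under rcu_read_lock() so the synchronize_rcu()
 * in unregister_dax_dev() can wait out any handler that sampled
 * dax_dev->alive == true before the device was torn down.
 */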
static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int rc;
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);
	rcu_read_lock();
	rc = __dax_dev_fault(dax_dev, vma, vmf);
	rcu_read_unlock();

	return rc;
}

static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
		struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
		unsigned int flags)
{
	unsigned long pmd_addr = addr & PMD_MASK;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	pfn_t pfn;

	if (check_vma(dax_dev, vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dax_dev->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "%s: alignment > fault size\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	/* a pmd mapping must have PMD_SIZE bytes available at this offset */
	phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
				pgoff);
		return VM_FAULT_SIGBUS;
	}

	pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
			flags & FAULT_FLAG_WRITE);
}

static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags)
{
	int rc;
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);

	rcu_read_lock();
	rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
	rcu_read_unlock();

	return rc;
}

static const struct vm_operations_struct dax_dev_vm_ops = {
	.fault = dax_dev_fault,
	.pmd_fault = dax_dev_pmd_fault,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dax_dev *dax_dev = filp->private_data;
	int rc;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);

	rc = check_vma(dax_dev, vma, __func__);
	if (rc)
		return rc;

	vma->vm_ops = &dax_dev_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
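
/*
 * Illustrative userspace usage (not part of this file; device names
 * follow the "dax%d.%d" pattern set in devm_create_dax_dev() below):
 *
 *	fd = open("/dev/dax0.0", O_RDWR);
 *	buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * MAP_SHARED is mandatory and the vma must honor the region alignment,
 * per check_vma() above.
 */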
/*
 * Return an unmapped area aligned to the dax region's alignment: over-ask
 * the mm for len + align bytes, then shift the returned address up so the
 * file offset and virtual address are congruent modulo the alignment.
 * Fall back to an unaligned search when the request cannot benefit or the
 * length arithmetic would overflow.
 */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dax_dev || addr)
		goto out;

	dax_region = dax_dev->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev;

	dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	inode->i_mapping = dax_dev->inode->i_mapping;
	inode->i_mapping->host = dax_dev->inode;
	filp->f_mapping = inode->i_mapping;
	filp->private_data = dax_dev;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
};

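/*
 * Final teardown, invoked via put_device() once the last reference is
 * dropped: return the instance and minor ids, drop the region reference
 * taken in devm_create_dax_dev(), and release the backing inode.
 */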
static void dax_dev_release(struct device *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct dax_region *dax_region = dax_dev->region;

	ida_simple_remove(&dax_region->ida, dax_dev->id);
	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
	dax_region_put(dax_region);
	iput(dax_dev->inode);
	kfree(dax_dev);
}

static void unregister_dax_dev(void *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct cdev *cdev = &dax_dev->cdev;

	dev_dbg(dev, "%s\n", __func__);

	/*
	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
	 * ensuring that any fault handlers that might have seen
	 * dax_dev->alive == true, have completed. Any fault handlers
	 * that start after synchronize_rcu() has started will abort
	 * upon seeing dax_dev->alive == false.
	 */
	dax_dev->alive = false;
	synchronize_rcu();
	cdev_del(cdev);
	device_unregister(dev);
}

int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
		int count)
{
	struct device *parent = dax_region->dev;
	struct dax_dev *dax_dev;
	struct device *dev;
	struct cdev *cdev;
	int rc, minor;
	dev_t dev_t;

	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
	if (!dax_dev)
		return -ENOMEM;

	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
	if (dax_dev->id < 0) {
		rc = dax_dev->id;
		goto err_id;
	}

	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_minor;
	}

	/* compute devt first; dax_inode_get() records it in i_rdev */
	dev_t = MKDEV(MAJOR(dax_devt), minor);
	dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
	if (!dax_dev->inode) {
		rc = -ENOMEM;
		goto err_inode;
	}

	/* device_initialize() so cdev can reference kobj parent */
	dev = &dax_dev->dev;
	device_initialize(dev);

	cdev = &dax_dev->cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;
	cdev->kobj.parent = &dev->kobj;
	rc = cdev_add(&dax_dev->cdev, dev_t, 1);
	if (rc)
		goto err_cdev;

	/* from here on we're committed to teardown via dax_dev_release() */
	memcpy(dax_dev->res, res, sizeof(*res) * count);
	dax_dev->num_resources = count;
	dax_dev->alive = true;
	dax_dev->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = dev_t;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dax_dev_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return rc;
	}

	return devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);

 err_cdev:
	iput(dax_dev->inode);
 err_inode:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
	kfree(dax_dev);

	return rc;
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);
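
/*
 * Sketch of how a bus driver is expected to consume the exports above
 * (illustrative values; drivers/dax/pmem.c is the in-tree example):
 *
 *	dax_region = alloc_dax_region(dev, region_id, &res, align,
 *			addr, PFN_DEV | PFN_MAP);
 *	if (!dax_region)
 *		return -ENOMEM;
 *	rc = devm_create_dax_dev(dax_region, &res, 1);
 *	dax_region_put(dax_region);
 *	return rc;
 */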

static int __init dax_init(void)
{
	int rc;

	rc = dax_inode_init();
	if (rc)
		return rc;

	nr_dax = max(nr_dax, 256);
	rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
	if (rc)
		goto err_chrdev;

	dax_class = class_create(THIS_MODULE, "dax");
	if (IS_ERR(dax_class)) {
		rc = PTR_ERR(dax_class);
		goto err_class;
	}

	return 0;

 err_class:
	unregister_chrdev_region(dax_devt, nr_dax);
 err_chrdev:
	dax_inode_exit();
	return rc;
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
	unregister_chrdev_region(dax_devt, nr_dax);
	ida_destroy(&dax_minor_ida);
	dax_inode_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);