/* drivers/android/pmem.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/debugfs.h>
#include <linux/android_pmem.h>
#include <linux/mempolicy.h>
#include <linux/kobject.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <linux/pm_runtime.h>
#include <linux/memory_alloc.h>

#define PMEM_MAX_DEVICES (10)

#define PMEM_MAX_ORDER (128)
#define PMEM_MIN_ALLOC PAGE_SIZE

#define PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS (64)

#define PMEM_32BIT_WORD_ORDER (5)
#define PMEM_BITS_PER_WORD_MASK (BITS_PER_LONG - 1)

#ifdef CONFIG_ANDROID_PMEM_DEBUG
#define PMEM_DEBUG 1
#else
#define PMEM_DEBUG 0
#endif

#define SYSTEM_ALLOC_RETRY 10

/* indicates that a reference to this file has been taken via get_pmem_file,
 * the file should not be released until put_pmem_file is called */
#define PMEM_FLAGS_BUSY 0x1
/* indicates that this is a suballocation of a larger master range */
#define PMEM_FLAGS_CONNECTED 0x1 << 1
/* indicates this is a master and not a sub allocation and that it is mmaped */
#define PMEM_FLAGS_MASTERMAP 0x1 << 2
/* submap and unsubmap flags indicate:
 * 00: subregion has never been mmaped
 * 10: subregion has been mmaped, reference to the mm was taken
 * 11: subregion has been released, reference to the mm still held
 * 01: subregion has been released, reference to the mm has been released
 */
#define PMEM_FLAGS_SUBMAP 0x1 << 3
#define PMEM_FLAGS_UNSUBMAP 0x1 << 4

struct pmem_data {
	/* in alloc mode: an index into the bitmap
	 * in no_alloc mode: the size of the allocation */
	int index;
	/* see flags above for descriptions */
	unsigned int flags;
	/* protects this data field; if the mm_mmap sem will be held at the
	 * same time as this sem, the mm sem must be taken first (as this is
	 * the order for vma_open and vma_close ops) */
	struct rw_semaphore sem;
	/* info about the mmapping process */
	struct vm_area_struct *vma;
	/* task struct of the mapping process */
	struct task_struct *task;
	/* process id of the mapping process */
	pid_t pid;
	/* file descriptor of the master */
	int master_fd;
	/* file struct of the master */
	struct file *master_file;
	/* a list of currently available regions if this is a suballocation */
	struct list_head region_list;
	/* a linked list of data so we can access them for debugging */
	struct list_head list;
#if PMEM_DEBUG
	int ref;
#endif
};

struct pmem_bits {
	unsigned allocated:1;		/* 1 if allocated, 0 if free */
	unsigned order:7;		/* size of the region in pmem space */
};

struct pmem_region_node {
	struct pmem_region region;
	struct list_head list;
};

#define PMEM_DEBUG_MSGS 0
#if PMEM_DEBUG_MSGS
#define DLOG(fmt, args...) \
	do { pr_debug("[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
		##args); } \
	while (0)
#else
#define DLOG(x...) do {} while (0)
#endif

enum pmem_align {
	PMEM_ALIGN_4K,
	PMEM_ALIGN_1M,
};

#define PMEM_NAME_SIZE 16

struct alloc_list {
	void *addr;			/* physical addr of allocation */
	void *aaddr;			/* aligned physical addr */
	unsigned int size;		/* total size of allocation */
	unsigned char __iomem *vaddr;	/* Virtual addr */
	struct list_head allocs;
};

struct pmem_info {
	struct miscdevice dev;
	/* physical start address of the remapped pmem space */
	unsigned long base;
	/* virtual start address of the remapped pmem space */
	unsigned char __iomem *vbase;
	/* total size of the pmem space */
	unsigned long size;
	/* number of entries in the pmem space */
	unsigned long num_entries;
	/* pfn of the garbage page in memory */
	unsigned long garbage_pfn;
	/* which memory type (i.e. SMI, EBI1) this PMEM device is backed by */
	unsigned memory_type;

	char name[PMEM_NAME_SIZE];

	/* index of the garbage page in the pmem space */
	int garbage_index;

	enum pmem_allocator_type allocator_type;

	int (*allocate)(const int,
			const unsigned long,
			const unsigned int);
	int (*free)(int, int);
	int (*free_space)(int, struct pmem_freespace *);
	unsigned long (*len)(int, struct pmem_data *);
	unsigned long (*start_addr)(int, struct pmem_data *);

	/* actual size of memory element, e.g.: (4 << 10) is 4K */
	unsigned int quantum;

	/* indicates maps of this region should be cached, if a mix of
	 * cached and uncached is desired, set this and open the device with
	 * O_SYNC to get an uncached region */
	unsigned cached;
	unsigned buffered;
	union {
		struct {
			/* in all_or_nothing allocator mode the first mapper
			 * gets the whole space and sets this flag */
			unsigned allocated;
		} all_or_nothing;

		struct {
			/* the buddy allocator bitmap for the region
			 * indicating which entries are allocated and which
			 * are free.
			 */

			struct pmem_bits *buddy_bitmap;
		} buddy_bestfit;

		struct {
			unsigned int bitmap_free; /* # of zero bits/quanta */
			uint32_t *bitmap;
			int32_t bitmap_allocs;
			struct {
				short bit;
				unsigned short quanta;
			} *bitm_alloc;
		} bitmap;

		struct {
			unsigned long used;	/* Bytes currently allocated */
			struct list_head alist;	/* List of allocations */
		} system_mem;
	} allocator;

	int id;
	struct kobject kobj;

	/* for debugging, creates a list of pmem file structs, the
	 * data_list_mutex should be taken before pmem_data->sem if both are
	 * needed */
	struct mutex data_list_mutex;
	struct list_head data_list;
	/* arena_mutex protects the global allocation arena
	 *
	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
	 * down(pmem_data->sem) => mutex_lock(arena_mutex)
	 */
	struct mutex arena_mutex;

	long (*ioctl)(struct file *, unsigned int, unsigned long);
	int (*release)(struct inode *, struct file *);
	/* reference count of allocations */
	atomic_t allocation_cnt;
	/*
	 * request function for a region when the allocation count goes
	 * from 0 -> 1
	 */
	void (*mem_request)(void *);
	/*
	 * release function for a region when the allocation count goes
	 * from 1 -> 0
	 */
	void (*mem_release)(void *);
	/*
	 * private data for the request/release callback
	 */
	void *region_data;
	/*
	 * map and unmap as needed
	 */
	int map_on_demand;
};
#define to_pmem_info_id(a) (container_of(a, struct pmem_info, kobj)->id)

static void ioremap_pmem(int id);
static void pmem_put_region(int id);
static int pmem_get_region(int id);

static struct pmem_info pmem[PMEM_MAX_DEVICES];
static int id_count;

#define PMEM_SYSFS_DIR_NAME "pmem_regions" /* under /sys/kernel/ */
static struct kset *pmem_kset;

#define PMEM_IS_FREE_BUDDY(id, index) \
	(!(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].allocated))
#define PMEM_BUDDY_ORDER(id, index) \
	(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].order)
#define PMEM_BUDDY_INDEX(id, index) \
	(index ^ (1 << PMEM_BUDDY_ORDER(id, index)))
#define PMEM_BUDDY_NEXT_INDEX(id, index) \
	(index + (1 << PMEM_BUDDY_ORDER(id, index)))
#define PMEM_OFFSET(index) (index * pmem[id].quantum)
#define PMEM_START_ADDR(id, index) \
	(PMEM_OFFSET(index) + pmem[id].base)
#define PMEM_BUDDY_LEN(id, index) \
	((1 << PMEM_BUDDY_ORDER(id, index)) * pmem[id].quantum)
#define PMEM_END_ADDR(id, index) \
	(PMEM_START_ADDR(id, index) + PMEM_LEN(id, index))
#define PMEM_START_VADDR(id, index) \
	(PMEM_OFFSET(index) + pmem[id].vbase)
#define PMEM_END_VADDR(id, index) \
	(PMEM_START_VADDR(id, index) + PMEM_LEN(id, index))
#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
#define PMEM_IS_SUBMAP(data) \
	((data->flags & PMEM_FLAGS_SUBMAP) && \
	(!(data->flags & PMEM_FLAGS_UNSUBMAP)))
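
/*
 * Worked example (illustrative commentary, not from the original source):
 * with a 4K quantum, entry index 8 at order 2 covers
 * PMEM_BUDDY_LEN = (1 << 2) * 4K = 16K starting at PMEM_OFFSET(8) = 32K
 * into the region, and its buddy is PMEM_BUDDY_INDEX = 8 ^ (1 << 2) = 12.
 */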

static int pmem_release(struct inode *, struct file *);
static int pmem_mmap(struct file *, struct vm_area_struct *);
static int pmem_open(struct inode *, struct file *);
static long pmem_ioctl(struct file *, unsigned int, unsigned long);

struct file_operations pmem_fops = {
	.release = pmem_release,
	.mmap = pmem_mmap,
	.open = pmem_open,
	.unlocked_ioctl = pmem_ioctl,
};

#define PMEM_ATTR(_name, _mode, _show, _store) { \
	.attr = {.name = __stringify(_name), .mode = _mode }, \
	.show = _show, \
	.store = _store, \
}

struct pmem_attr {
	struct attribute attr;
	ssize_t (*show)(const int id, char * const);
	ssize_t (*store)(const int id, const char * const, const size_t count);
};
#define to_pmem_attr(a) container_of(a, struct pmem_attr, attr)

#define RW_PMEM_ATTR(name) \
static struct pmem_attr pmem_attr_## name = \
	PMEM_ATTR(name, S_IRUGO | S_IWUSR, show_pmem_## name, store_pmem_## name)

#define RO_PMEM_ATTR(name) \
static struct pmem_attr pmem_attr_## name = \
	PMEM_ATTR(name, S_IRUGO, show_pmem_## name, NULL)

#define WO_PMEM_ATTR(name) \
static struct pmem_attr pmem_attr_## name = \
	PMEM_ATTR(name, S_IWUSR, NULL, store_pmem_## name)

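/*
 * Expansion sketch (added commentary): RO_PMEM_ATTR(base), used below,
 * declares a read-only attribute pmem_attr_base wired to show_pmem_base(),
 * which each region's kobject exposes under the pmem_regions kset
 * (PMEM_SYSFS_DIR_NAME above).
 */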
static ssize_t show_pmem(struct kobject *kobj,
			struct attribute *attr,
			char *buf)
{
	struct pmem_attr *a = to_pmem_attr(attr);
	return a->show ? a->show(to_pmem_info_id(kobj), buf) : -EIO;
}

static ssize_t store_pmem(struct kobject *kobj, struct attribute *attr,
			const char *buf, size_t count)
{
	struct pmem_attr *a = to_pmem_attr(attr);
	return a->store ? a->store(to_pmem_info_id(kobj), buf, count) : -EIO;
}

static struct sysfs_ops pmem_ops = {
	.show = show_pmem,
	.store = store_pmem,
};

static ssize_t show_pmem_base(int id, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
		pmem[id].base, pmem[id].base);
}
RO_PMEM_ATTR(base);

static ssize_t show_pmem_size(int id, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
		pmem[id].size, pmem[id].size);
}
RO_PMEM_ATTR(size);

static ssize_t show_pmem_allocator_type(int id, char *buf)
{
	switch (pmem[id].allocator_type) {
	case PMEM_ALLOCATORTYPE_ALLORNOTHING:
		return scnprintf(buf, PAGE_SIZE, "%s\n", "All or Nothing");
	case PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
		return scnprintf(buf, PAGE_SIZE, "%s\n", "Buddy Bestfit");
	case PMEM_ALLOCATORTYPE_BITMAP:
		return scnprintf(buf, PAGE_SIZE, "%s\n", "Bitmap");
	case PMEM_ALLOCATORTYPE_SYSTEM:
		return scnprintf(buf, PAGE_SIZE, "%s\n", "System heap");
	default:
		return scnprintf(buf, PAGE_SIZE,
			"??? Invalid allocator type (%d) for this region! "
			"Something isn't right.\n",
			pmem[id].allocator_type);
	}
}
RO_PMEM_ATTR(allocator_type);

static ssize_t show_pmem_mapped_regions(int id, char *buf)
{
	struct list_head *elt;
	int ret;

	ret = scnprintf(buf, PAGE_SIZE,
		"pid #: mapped regions (offset, len) (offset,len)...\n");

	mutex_lock(&pmem[id].data_list_mutex);
	list_for_each(elt, &pmem[id].data_list) {
		struct pmem_data *data =
			list_entry(elt, struct pmem_data, list);
		struct list_head *elt2;

		down_read(&data->sem);
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "pid %u:",
			data->pid);
		list_for_each(elt2, &data->region_list) {
			struct pmem_region_node *region_node = list_entry(elt2,
				struct pmem_region_node,
				list);
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				"(%lx,%lx) ",
				region_node->region.offset,
				region_node->region.len);
		}
		up_read(&data->sem);
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	}
	mutex_unlock(&pmem[id].data_list_mutex);
	return ret;
}
RO_PMEM_ATTR(mapped_regions);

#define PMEM_COMMON_SYSFS_ATTRS \
	&pmem_attr_base.attr, \
	&pmem_attr_size.attr, \
	&pmem_attr_allocator_type.attr, \
	&pmem_attr_mapped_regions.attr


static ssize_t show_pmem_allocated(int id, char *buf)
{
	ssize_t ret;

	mutex_lock(&pmem[id].arena_mutex);
	ret = scnprintf(buf, PAGE_SIZE, "%s\n",
		pmem[id].allocator.all_or_nothing.allocated ?
		"is allocated" : "is NOT allocated");
	mutex_unlock(&pmem[id].arena_mutex);
	return ret;
}
RO_PMEM_ATTR(allocated);

static struct attribute *pmem_allornothing_attrs[] = {
	PMEM_COMMON_SYSFS_ATTRS,

	&pmem_attr_allocated.attr,

	NULL
};

static struct kobj_type pmem_allornothing_ktype = {
	.sysfs_ops = &pmem_ops,
	.default_attrs = pmem_allornothing_attrs,
};

static ssize_t show_pmem_total_entries(int id, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%lu\n", pmem[id].num_entries);
}
RO_PMEM_ATTR(total_entries);

static ssize_t show_pmem_quantum_size(int id, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u (%#x)\n",
		pmem[id].quantum, pmem[id].quantum);
}
RO_PMEM_ATTR(quantum_size);

static ssize_t show_pmem_buddy_bitmap_dump(int id, char *buf)
{
	int ret, i;

	mutex_lock(&pmem[id].data_list_mutex);
	ret = scnprintf(buf, PAGE_SIZE, "index\torder\tlength\tallocated\n");

	for (i = 0; i < pmem[id].num_entries && (PAGE_SIZE - ret);
			i = PMEM_BUDDY_NEXT_INDEX(id, i))
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d\t%d\t%d\t%d\n",
			i, PMEM_BUDDY_ORDER(id, i),
			PMEM_BUDDY_LEN(id, i),
			!PMEM_IS_FREE_BUDDY(id, i));

	mutex_unlock(&pmem[id].data_list_mutex);
	return ret;
}
RO_PMEM_ATTR(buddy_bitmap_dump);

#define PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS \
	&pmem_attr_quantum_size.attr, \
	&pmem_attr_total_entries.attr

static struct attribute *pmem_buddy_bestfit_attrs[] = {
	PMEM_COMMON_SYSFS_ATTRS,

	PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,

	&pmem_attr_buddy_bitmap_dump.attr,

	NULL
};

static struct kobj_type pmem_buddy_bestfit_ktype = {
	.sysfs_ops = &pmem_ops,
	.default_attrs = pmem_buddy_bestfit_attrs,
};

static ssize_t show_pmem_free_quanta(int id, char *buf)
{
	ssize_t ret;

	mutex_lock(&pmem[id].arena_mutex);
	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
		pmem[id].allocator.bitmap.bitmap_free);
	mutex_unlock(&pmem[id].arena_mutex);
	return ret;
}
RO_PMEM_ATTR(free_quanta);

static ssize_t show_pmem_bits_allocated(int id, char *buf)
{
	ssize_t ret;
	unsigned int i;

	mutex_lock(&pmem[id].arena_mutex);

	ret = scnprintf(buf, PAGE_SIZE,
		"id: %d\nbitnum\tindex\tquanta allocated\n", id);

	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
		if (pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				"%u\t%u\t%u\n",
				i,
				pmem[id].allocator.bitmap.bitm_alloc[i].bit,
				pmem[id].allocator.bitmap.bitm_alloc[i].quanta
				);

	mutex_unlock(&pmem[id].arena_mutex);
	return ret;
}
RO_PMEM_ATTR(bits_allocated);

static struct attribute *pmem_bitmap_attrs[] = {
	PMEM_COMMON_SYSFS_ATTRS,

	PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,

	&pmem_attr_free_quanta.attr,
	&pmem_attr_bits_allocated.attr,

	NULL
};

static struct attribute *pmem_system_attrs[] = {
	PMEM_COMMON_SYSFS_ATTRS,

	NULL
};

static struct kobj_type pmem_bitmap_ktype = {
	.sysfs_ops = &pmem_ops,
	.default_attrs = pmem_bitmap_attrs,
};

static struct kobj_type pmem_system_ktype = {
	.sysfs_ops = &pmem_ops,
	.default_attrs = pmem_system_attrs,
};

static int pmem_allocate_from_id(const int id, const unsigned long size,
				const unsigned int align)
{
	int ret;
	ret = pmem_get_region(id);

	if (ret)
		return -1;

	ret = pmem[id].allocate(id, size, align);

	if (ret < 0)
		pmem_put_region(id);

	return ret;
}

static int pmem_free_from_id(const int id, const int index)
{
	pmem_put_region(id);
	return pmem[id].free(id, index);
}

static int pmem_get_region(int id)
{
	/* Must be called with arena mutex locked */
	atomic_inc(&pmem[id].allocation_cnt);
	if (!pmem[id].vbase) {
		DLOG("PMEMDEBUG: mapping for %s", pmem[id].name);
		if (pmem[id].mem_request)
			pmem[id].mem_request(pmem[id].region_data);
		ioremap_pmem(id);
	}

	if (pmem[id].vbase) {
		return 0;
	} else {
		if (pmem[id].mem_release)
			pmem[id].mem_release(pmem[id].region_data);
		atomic_dec(&pmem[id].allocation_cnt);
		return 1;
	}
}

static void pmem_put_region(int id)
{
	/* Must be called with arena mutex locked */
	if (atomic_dec_and_test(&pmem[id].allocation_cnt)) {
		DLOG("PMEMDEBUG: unmapping for %s", pmem[id].name);
		BUG_ON(!pmem[id].vbase);
		if (pmem[id].map_on_demand) {
			iounmap(pmem[id].vbase);
			pmem[id].vbase = NULL;
			if (pmem[id].mem_release)
				pmem[id].mem_release(pmem[id].region_data);

		}
	}
}
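
/*
 * Illustrative lifecycle (commentary added for clarity, assuming a
 * map_on_demand region): the first pmem_allocate_from_id() call moves
 * allocation_cnt from 0 to 1, invoking mem_request() and mapping the
 * region via ioremap_pmem(); when the last pmem_free_from_id() drops the
 * count back to 0, pmem_put_region() unmaps vbase and calls mem_release().
 */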

static int get_id(struct file *file)
{
	return MINOR(file->f_dentry->d_inode->i_rdev);
}

static char *get_name(struct file *file)
{
	int id = get_id(file);
	return pmem[id].name;
}

static int is_pmem_file(struct file *file)
{
	int id;

	if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
		return 0;

	id = get_id(file);
	return (unlikely(id >= PMEM_MAX_DEVICES ||
		file->f_dentry->d_inode->i_rdev !=
		MKDEV(MISC_MAJOR, pmem[id].dev.minor))) ? 0 : 1;
}

static int has_allocation(struct file *file)
{
	/* must be called with at least read lock held on
	 * ((struct pmem_data *)(file->private_data))->sem which
	 * means that file is guaranteed not to be NULL upon entry!!
	 * check is_pmem_file first if not accessed via pmem_file_ops */
	struct pmem_data *pdata = file->private_data;
	return pdata && pdata->index != -1;
}

static int is_master_owner(struct file *file)
{
	struct file *master_file;
	struct pmem_data *data = file->private_data;
	int put_needed, ret = 0;

	if (!has_allocation(file))
		return 0;
	if (PMEM_FLAGS_MASTERMAP & data->flags)
		return 1;
	master_file = fget_light(data->master_fd, &put_needed);
	if (master_file && data->master_file == master_file)
		ret = 1;
	if (master_file)
		fput_light(master_file, put_needed);
	return ret;
}

static int pmem_free_all_or_nothing(int id, int index)
{
	/* caller should hold the lock on arena_mutex! */
	DLOG("index %d\n", index);

	pmem[id].allocator.all_or_nothing.allocated = 0;
	return 0;
}

static int pmem_free_space_all_or_nothing(int id,
		struct pmem_freespace *fs)
{
	/* caller should hold the lock on arena_mutex! */
	fs->total = (unsigned long)
		pmem[id].allocator.all_or_nothing.allocated == 0 ?
		pmem[id].size : 0;

	fs->largest = fs->total;
	return 0;
}


static int pmem_free_buddy_bestfit(int id, int index)
{
	/* caller should hold the lock on arena_mutex! */
	int curr = index;
	DLOG("index %d\n", index);


	/* clean up the bitmap, merging any buddies */
	pmem[id].allocator.buddy_bestfit.buddy_bitmap[curr].allocated = 0;
	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
	 * if the buddy is also free merge them
	 * repeat until the buddy is not free or end of the bitmap is reached
	 */
	do {
		int buddy = PMEM_BUDDY_INDEX(id, curr);
		if (buddy < pmem[id].num_entries &&
				PMEM_IS_FREE_BUDDY(id, buddy) &&
				PMEM_BUDDY_ORDER(id, buddy) ==
				PMEM_BUDDY_ORDER(id, curr)) {
			PMEM_BUDDY_ORDER(id, buddy)++;
			PMEM_BUDDY_ORDER(id, curr)++;
			curr = min(buddy, curr);
		} else {
			break;
		}
	} while (curr < pmem[id].num_entries);

	return 0;
}
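
/*
 * Merge trace (illustrative example, not from the original source):
 * freeing index 12 at order 2 finds buddy 12 ^ 4 = 8; if 8 is free at
 * order 2, both become order 3 and the scan continues from index 8, whose
 * order-3 buddy is 8 ^ 8 = 0, and so on until a buddy is allocated or of
 * a different order.
 */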


static int pmem_free_space_buddy_bestfit(int id,
		struct pmem_freespace *fs)
{
	/* caller should hold the lock on arena_mutex! */
	int curr;
	unsigned long size;
	fs->total = 0;
	fs->largest = 0;

	for (curr = 0; curr < pmem[id].num_entries;
			curr = PMEM_BUDDY_NEXT_INDEX(id, curr)) {
		if (PMEM_IS_FREE_BUDDY(id, curr)) {
			size = PMEM_BUDDY_LEN(id, curr);
			if (size > fs->largest)
				fs->largest = size;
			fs->total += size;
		}
	}
	return 0;
}


static inline uint32_t start_mask(int bit_start)
{
	return (uint32_t)(~0) << (bit_start & PMEM_BITS_PER_WORD_MASK);
}

static inline uint32_t end_mask(int bit_end)
{
	return (uint32_t)(~0) >>
		((BITS_PER_LONG - bit_end) & PMEM_BITS_PER_WORD_MASK);
}

static inline int compute_total_words(int bit_end, int word_index)
{
	return ((bit_end + BITS_PER_LONG - 1) >>
			PMEM_32BIT_WORD_ORDER) - word_index;
}
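
/*
 * Worked example (illustrative, assuming 32-bit words): for a run starting
 * at bit 5 with exclusive end bit 37, start_mask(5) = 0xffffffe0 covers
 * bits 5..31 of the first word, end_mask(37) = 0x0000001f covers bits 0..4
 * (global bits 32..36) of the last word, and compute_total_words(37, 0)
 * returns 2.
 */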

static void bitmap_bits_clear_all(uint32_t *bitp, int bit_start, int bit_end)
{
	int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;

	total_words = compute_total_words(bit_end, word_index);
	if (total_words > 0) {
		if (total_words == 1) {
			bitp[word_index] &=
				~(start_mask(bit_start) & end_mask(bit_end));
		} else {
			bitp[word_index++] &= ~start_mask(bit_start);
			if (total_words > 2) {
				int total_bytes;

				total_words -= 2;
				total_bytes = total_words << 2;

				memset(&bitp[word_index], 0, total_bytes);
				word_index += total_words;
			}
			bitp[word_index] &= ~end_mask(bit_end);
		}
	}
}

static int pmem_free_bitmap(int id, int bitnum)
{
	/* caller should hold the lock on arena_mutex! */
	int i;
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];

	DLOG("bitnum %d\n", bitnum);

	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) {
		const int curr_bit =
			pmem[id].allocator.bitmap.bitm_alloc[i].bit;

		if (curr_bit == bitnum) {
			const int curr_quanta =
				pmem[id].allocator.bitmap.bitm_alloc[i].quanta;

			bitmap_bits_clear_all(pmem[id].allocator.bitmap.bitmap,
				curr_bit, curr_bit + curr_quanta);
			pmem[id].allocator.bitmap.bitmap_free += curr_quanta;
			pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
			pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
			return 0;
		}
	}
	printk(KERN_ALERT "pmem: %s: Attempt to free unallocated index %d, id"
		" %d, pid %d(%s)\n", __func__, bitnum, id, current->pid,
		get_task_comm(currtask_name, current));

	return -1;
}

static int pmem_free_system(int id, int index)
{
	/* caller should hold the lock on arena_mutex! */
	struct alloc_list *item;

	DLOG("index %d\n", index);
	if (index != 0)
		item = (struct alloc_list *)index;
	else
		return 0;

	if (item->vaddr != NULL) {
		iounmap(item->vaddr);
		kfree(__va(item->addr));
		list_del(&item->allocs);
		kfree(item);
	}

	return 0;
}

static int pmem_free_space_bitmap(int id, struct pmem_freespace *fs)
{
	int i, j;
	int max_allocs = pmem[id].allocator.bitmap.bitmap_allocs;
	int alloc_start = 0;
	int next_alloc;
	unsigned long size = 0;

	fs->total = 0;
	fs->largest = 0;

	for (i = 0; i < max_allocs; i++) {

		int alloc_quanta = 0;
		int alloc_idx = 0;
		next_alloc = pmem[id].num_entries;

		/* Look for the lowest bit where next allocation starts */
		for (j = 0; j < max_allocs; j++) {
			const int curr_alloc = pmem[id].allocator.
						bitmap.bitm_alloc[j].bit;
			if (curr_alloc != -1) {
				if (alloc_start == curr_alloc)
					alloc_idx = j;
				if (alloc_start >= curr_alloc)
					continue;
				if (curr_alloc < next_alloc)
					next_alloc = curr_alloc;
			}
		}
		alloc_quanta = pmem[id].allocator.bitmap.
				bitm_alloc[alloc_idx].quanta;
		size = (next_alloc - (alloc_start + alloc_quanta)) *
				pmem[id].quantum;

		if (size > fs->largest)
			fs->largest = size;
		fs->total += size;

		if (next_alloc == pmem[id].num_entries)
			break;
		else
			alloc_start = next_alloc;
	}

	return 0;
}
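
/*
 * Illustrative trace (added commentary): with allocations at quanta
 * {0 (4 quanta), 10 (2 quanta)} in a 16-entry region, the first pass finds
 * the gap 4..9 (6 quanta) and the second the tail gap 12..15 (4 quanta),
 * so fs->total = 10 quanta and fs->largest = 6 quanta.
 */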

static int pmem_free_space_system(int id, struct pmem_freespace *fs)
{
	fs->total = pmem[id].size;
	fs->largest = pmem[id].size;

	return 0;
}

static void pmem_revoke(struct file *file, struct pmem_data *data);

static int pmem_release(struct inode *inode, struct file *file)
{
	struct pmem_data *data = file->private_data;
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	int id = get_id(file), ret = 0;

#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("releasing memory pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
		current->pid, get_task_comm(currtask_name, current),
		file, file_count(file), get_name(file), id);
	mutex_lock(&pmem[id].data_list_mutex);
	/* if this file is a master, revoke all the memory in the connected
	 * files */
	if (PMEM_FLAGS_MASTERMAP & data->flags) {
		list_for_each(elt, &pmem[id].data_list) {
			struct pmem_data *sub_data =
				list_entry(elt, struct pmem_data, list);
			int is_master;

			down_read(&sub_data->sem);
			is_master = (PMEM_IS_SUBMAP(sub_data) &&
				file == sub_data->master_file);
			up_read(&sub_data->sem);

			if (is_master)
				pmem_revoke(file, sub_data);
		}
	}
	list_del(&data->list);
	mutex_unlock(&pmem[id].data_list_mutex);

	down_write(&data->sem);

	/* if it is not a connected file and it has an allocation, free it */
	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
		mutex_lock(&pmem[id].arena_mutex);
		ret = pmem_free_from_id(id, data->index);
		mutex_unlock(&pmem[id].arena_mutex);
	}

	/* if this file is a submap (mapped, connected file), downref the
	 * task struct */
	if (PMEM_FLAGS_SUBMAP & data->flags)
		if (data->task) {
			put_task_struct(data->task);
			data->task = NULL;
		}

	file->private_data = NULL;

	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		list_del(elt);
		kfree(region_node);
	}
	BUG_ON(!list_empty(&data->region_list));

	up_write(&data->sem);
	kfree(data);
	if (pmem[id].release)
		ret = pmem[id].release(inode, file);

	return ret;
}

static int pmem_open(struct inode *inode, struct file *file)
{
	struct pmem_data *data;
	int id = get_id(file);
	int ret = 0;
#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif

	DLOG("pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
		current->pid, get_task_comm(currtask_name, current),
		file, file_count(file), get_name(file), id);
	data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
	if (!data) {
		printk(KERN_ALERT "pmem: %s: unable to allocate memory for "
			"pmem metadata.", __func__);
		return -1;
	}
	data->flags = 0;
	data->index = -1;
	data->task = NULL;
	data->vma = NULL;
	data->pid = 0;
	data->master_file = NULL;
#if PMEM_DEBUG
	data->ref = 0;
#endif
	INIT_LIST_HEAD(&data->region_list);
	init_rwsem(&data->sem);

	file->private_data = data;
	INIT_LIST_HEAD(&data->list);

	mutex_lock(&pmem[id].data_list_mutex);
	list_add(&data->list, &pmem[id].data_list);
	mutex_unlock(&pmem[id].data_list_mutex);
	return ret;
}

static unsigned long pmem_order(unsigned long len, int id)
{
	int i;

	len = (len + pmem[id].quantum - 1)/pmem[id].quantum;
	len--;
	for (i = 0; i < sizeof(len)*8; i++)
		if (len >> i == 0)
			break;
	return i;
}
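
/*
 * Worked example (illustrative): with a 4K quantum, a 12K request rounds
 * up to 3 quanta; 3 - 1 = 2, and the smallest i with (2 >> i) == 0 is 2,
 * so pmem_order() returns order 2 (i.e. 4 quanta, 16K).
 */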

static int pmem_allocator_all_or_nothing(const int id,
		const unsigned long len,
		const unsigned int align)
{
	/* caller should hold the lock on arena_mutex! */
	DLOG("all or nothing\n");
	if ((len > pmem[id].size) ||
		pmem[id].allocator.all_or_nothing.allocated)
		return -1;
	pmem[id].allocator.all_or_nothing.allocated = 1;
	return len;
}

static int pmem_allocator_buddy_bestfit(const int id,
		const unsigned long len,
		unsigned int align)
{
	/* caller should hold the lock on arena_mutex! */
	int curr;
	int best_fit = -1;
	unsigned long order;

	DLOG("buddy bestfit\n");
	order = pmem_order(len, id);
	if (order > PMEM_MAX_ORDER)
		goto out;

	DLOG("order %lx\n", order);

	/* Look through the bitmap.
	 * If a free slot of the correct order is found, use it.
	 * Otherwise, use the best fit (smallest with size > order) slot.
	 */
	for (curr = 0;
	     curr < pmem[id].num_entries;
	     curr = PMEM_BUDDY_NEXT_INDEX(id, curr))
		if (PMEM_IS_FREE_BUDDY(id, curr)) {
			if (PMEM_BUDDY_ORDER(id, curr) ==
					(unsigned char)order) {
				/* set the not free bit and clear others */
				best_fit = curr;
				break;
			}
			if (PMEM_BUDDY_ORDER(id, curr) >
					(unsigned char)order &&
			    (best_fit < 0 ||
			     PMEM_BUDDY_ORDER(id, curr) <
			     PMEM_BUDDY_ORDER(id, best_fit)))
				best_fit = curr;
		}

	/* if best_fit < 0, there are no suitable slots; return an error */
	if (best_fit < 0) {
#if PMEM_DEBUG
		printk(KERN_ALERT "pmem: %s: no space left to allocate!\n",
			__func__);
#endif
		goto out;
	}

	/* now partition the best fit:
	 * split the slot into 2 buddies of order - 1
	 * repeat until the slot is of the correct order
	 */
	while (PMEM_BUDDY_ORDER(id, best_fit) > (unsigned char)order) {
		int buddy;
		PMEM_BUDDY_ORDER(id, best_fit) -= 1;
		buddy = PMEM_BUDDY_INDEX(id, best_fit);
		PMEM_BUDDY_ORDER(id, buddy) = PMEM_BUDDY_ORDER(id, best_fit);
	}
	pmem[id].allocator.buddy_bestfit.buddy_bitmap[best_fit].allocated = 1;
out:
	return best_fit;
}


static inline unsigned long paddr_from_bit(const int id, const int bitnum)
{
	return pmem[id].base + pmem[id].quantum * bitnum;
}

static inline unsigned long bit_from_paddr(const int id,
		const unsigned long paddr)
{
	return (paddr - pmem[id].base) / pmem[id].quantum;
}

static void bitmap_bits_set_all(uint32_t *bitp, int bit_start, int bit_end)
{
	int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;

	total_words = compute_total_words(bit_end, word_index);
	if (total_words > 0) {
		if (total_words == 1) {
			bitp[word_index] |=
				(start_mask(bit_start) & end_mask(bit_end));
		} else {
			bitp[word_index++] |= start_mask(bit_start);
			if (total_words > 2) {
				int total_bytes;

				total_words -= 2;
				total_bytes = total_words << 2;

				memset(&bitp[word_index], ~0, total_bytes);
				word_index += total_words;
			}
			bitp[word_index] |= end_mask(bit_end);
		}
	}
}

static int
bitmap_allocate_contiguous(uint32_t *bitp, int num_bits_to_alloc,
		int total_bits, int spacing)
{
	int bit_start, last_bit, word_index;

	if (num_bits_to_alloc <= 0)
		return -1;

	for (bit_start = 0; ;
		bit_start = (last_bit +
			(word_index << PMEM_32BIT_WORD_ORDER) + spacing - 1)
			& ~(spacing - 1)) {
		int bit_end = bit_start + num_bits_to_alloc, total_words;

		if (bit_end > total_bits)
			return -1; /* out of contiguous memory */

		word_index = bit_start >> PMEM_32BIT_WORD_ORDER;
		total_words = compute_total_words(bit_end, word_index);

		if (total_words <= 0)
			return -1;

		if (total_words == 1) {
			last_bit = fls(bitp[word_index] &
					(start_mask(bit_start) &
					end_mask(bit_end)));
			if (last_bit)
				continue;
		} else {
			int end_word = word_index + (total_words - 1);
			last_bit =
				fls(bitp[word_index] & start_mask(bit_start));
			if (last_bit)
				continue;

			for (word_index++;
			     word_index < end_word;
			     word_index++) {
				last_bit = fls(bitp[word_index]);
				if (last_bit)
					break;
			}
			if (last_bit)
				continue;

			last_bit = fls(bitp[word_index] & end_mask(bit_end));
			if (last_bit)
				continue;
		}
		bitmap_bits_set_all(bitp, bit_start, bit_end);
		return bit_start;
	}
	return -1;
}
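
/*
 * Scan sketch (added commentary): the loop probes candidate runs aligned
 * to `spacing` quanta; fls() on the masked words returns the highest set
 * (already allocated) bit in the probed range, and the next candidate
 * restarts just past that collision, rounded up to the alignment. A fully
 * zero probe falls through to bitmap_bits_set_all() and returns bit_start.
 */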

static int reserve_quanta(const unsigned int quanta_needed,
		const int id,
		unsigned int align)
{
	/* alignment should be a valid power of 2 */
	int ret = -1, start_bit = 0, spacing = 1;

	/* Sanity check */
	if (quanta_needed > pmem[id].allocator.bitmap.bitmap_free) {
#if PMEM_DEBUG
		printk(KERN_ALERT "pmem: %s: request (%d) too big for"
			" available free (%d)\n", __func__, quanta_needed,
			pmem[id].allocator.bitmap.bitmap_free);
#endif
		return -1;
	}

	start_bit = bit_from_paddr(id,
		(pmem[id].base + align - 1) & ~(align - 1));
	if (start_bit <= -1) {
#if PMEM_DEBUG
		printk(KERN_ALERT
			"pmem: %s: bit_from_paddr fails for"
			" %u alignment.\n", __func__, align);
#endif
		return -1;
	}
	spacing = align / pmem[id].quantum;
	spacing = spacing > 1 ? spacing : 1;

	ret = bitmap_allocate_contiguous(pmem[id].allocator.bitmap.bitmap,
		quanta_needed,
		(pmem[id].size + pmem[id].quantum - 1) / pmem[id].quantum,
		spacing);

#if PMEM_DEBUG
	if (ret < 0)
		printk(KERN_ALERT "pmem: %s: not enough contiguous bits free "
			"in bitmap! Region memory is either too fragmented or"
			" request is too large for available memory.\n",
			__func__);
#endif

	return ret;
}

static int pmem_allocator_bitmap(const int id,
		const unsigned long len,
		const unsigned int align)
{
	/* caller should hold the lock on arena_mutex! */
	int bitnum, i;
	unsigned int quanta_needed;

	DLOG("bitmap id %d, len %ld, align %u\n", id, len, align);
	if (!pmem[id].allocator.bitmap.bitm_alloc) {
#if PMEM_DEBUG
		printk(KERN_ALERT "pmem: bitm_alloc not present! id: %d\n",
			id);
#endif
		return -1;
	}

	quanta_needed = (len + pmem[id].quantum - 1) / pmem[id].quantum;
	DLOG("quantum size %u quanta needed %u free %u id %d\n",
		pmem[id].quantum, quanta_needed,
		pmem[id].allocator.bitmap.bitmap_free, id);

	if (pmem[id].allocator.bitmap.bitmap_free < quanta_needed) {
#if PMEM_DEBUG
		printk(KERN_ALERT "pmem: memory allocation failure. "
			"PMEM memory region exhausted, id %d."
			" Unable to comply with allocation request.\n", id);
#endif
		return -1;
	}

	bitnum = reserve_quanta(quanta_needed, id, align);
	if (bitnum == -1)
		goto leave;

	for (i = 0;
	     i < pmem[id].allocator.bitmap.bitmap_allocs &&
		pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1;
	     i++)
		;

	if (i >= pmem[id].allocator.bitmap.bitmap_allocs) {
		void *temp;
		int32_t new_bitmap_allocs =
			pmem[id].allocator.bitmap.bitmap_allocs << 1;
		int j;

		if (!new_bitmap_allocs) { /* failed sanity check!! */
#if PMEM_DEBUG
			pr_alert("pmem: bitmap_allocs number"
				" wrapped around to zero! Something "
				"is VERY wrong.\n");
#endif
			return -1;
		}

		if (new_bitmap_allocs > pmem[id].num_entries) {
			/* failed sanity check!! */
#if PMEM_DEBUG
			pr_alert("pmem: required bitmap_allocs"
				" number exceeds maximum entries possible"
				" for current quanta\n");
#endif
			return -1;
		}

		temp = krealloc(pmem[id].allocator.bitmap.bitm_alloc,
			new_bitmap_allocs *
			sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
			GFP_KERNEL);
		if (!temp) {
#if PMEM_DEBUG
			pr_alert("pmem: can't realloc bitmap_allocs, "
				"id %d, current num bitmap allocs %d\n",
				id, pmem[id].allocator.bitmap.bitmap_allocs);
#endif
			return -1;
		}
		pmem[id].allocator.bitmap.bitmap_allocs = new_bitmap_allocs;
		pmem[id].allocator.bitmap.bitm_alloc = temp;

		for (j = i; j < new_bitmap_allocs; j++) {
			pmem[id].allocator.bitmap.bitm_alloc[j].bit = -1;
			pmem[id].allocator.bitmap.bitm_alloc[j].quanta = 0;
		}

		DLOG("increased # of allocated regions to %d for id %d\n",
			pmem[id].allocator.bitmap.bitmap_allocs, id);
	}

	DLOG("bitnum %d, bitm_alloc index %d\n", bitnum, i);

	pmem[id].allocator.bitmap.bitmap_free -= quanta_needed;
	pmem[id].allocator.bitmap.bitm_alloc[i].bit = bitnum;
	pmem[id].allocator.bitmap.bitm_alloc[i].quanta = quanta_needed;
leave:
	return bitnum;
}

static int pmem_allocator_system(const int id,
		const unsigned long len,
		const unsigned int align)
{
	/* caller should hold the lock on arena_mutex! */
	struct alloc_list *list;
	unsigned long aligned_len;
	int count = SYSTEM_ALLOC_RETRY;
	void *buf;

	DLOG("system id %d, len %ld, align %u\n", id, len, align);

	if ((pmem[id].allocator.system_mem.used + len) > pmem[id].size) {
		DLOG("requested size would be larger than quota\n");
		return -1;
	}

	/* Handle alignment */
	aligned_len = len + align;

	/* Attempt allocation */
	list = kmalloc(sizeof(struct alloc_list), GFP_KERNEL);
	if (list == NULL) {
		printk(KERN_ERR "pmem: failed to allocate system metadata\n");
		return -1;
	}
	list->vaddr = NULL;

	buf = NULL;
	while ((buf == NULL) && count--) {
		buf = kmalloc((aligned_len), GFP_KERNEL);
		if (buf == NULL) {
			DLOG("pmem: kmalloc %d temporarily failed len= %ld\n",
				count, aligned_len);
		}
	}
	if (!buf) {
		printk(KERN_CRIT "pmem: kmalloc failed for id= %d len= %ld\n",
			id, aligned_len);
		kfree(list);
		return -1;
	}
	list->size = aligned_len;
	list->addr = (void *)__pa(buf);
	list->aaddr = (void *)(((unsigned int)(list->addr) + (align - 1)) &
			~(align - 1));

	if (!pmem[id].cached)
		list->vaddr = ioremap(__pa(buf), aligned_len);
	else
		list->vaddr = ioremap_cached(__pa(buf), aligned_len);

	INIT_LIST_HEAD(&list->allocs);
	list_add(&list->allocs, &pmem[id].allocator.system_mem.alist);

	return (int)list;
}
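
/*
 * Alignment example (illustrative): for len = 10K and align = 4K the code
 * over-allocates aligned_len = 14K, then rounds the physical address up;
 * e.g. addr 0x2001800 becomes aaddr (0x2001800 + 0xfff) & ~0xfff =
 * 0x2002000, and the extra slack guarantees an aligned block of the full
 * requested length.
 */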

static pgprot_t pmem_phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
{
	int id = get_id(file);
#ifdef pgprot_writecombine
	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
		/* on ARMv6 and ARMv7 this expands to Normal Noncached */
		return pgprot_writecombine(vma_prot);
#endif
#ifdef pgprot_ext_buffered
	else if (pmem[id].buffered)
		return pgprot_ext_buffered(vma_prot);
#endif
	return vma_prot;
}

static unsigned long pmem_start_addr_all_or_nothing(int id,
		struct pmem_data *data)
{
	return PMEM_START_ADDR(id, 0);
}

static unsigned long pmem_start_addr_buddy_bestfit(int id,
		struct pmem_data *data)
{
	return PMEM_START_ADDR(id, data->index);
}

static unsigned long pmem_start_addr_bitmap(int id, struct pmem_data *data)
{
	return data->index * pmem[id].quantum + pmem[id].base;
}

static unsigned long pmem_start_addr_system(int id, struct pmem_data *data)
{
	return (unsigned long)(((struct alloc_list *)(data->index))->aaddr);
}

static void *pmem_start_vaddr(int id, struct pmem_data *data)
{
	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM)
		return ((struct alloc_list *)(data->index))->vaddr;
	else
		return pmem[id].start_addr(id, data) - pmem[id].base +
			pmem[id].vbase;
}

static unsigned long pmem_len_all_or_nothing(int id, struct pmem_data *data)
{
	return data->index;
}

static unsigned long pmem_len_buddy_bestfit(int id, struct pmem_data *data)
{
	return PMEM_BUDDY_LEN(id, data->index);
}

static unsigned long pmem_len_bitmap(int id, struct pmem_data *data)
{
	int i;
	unsigned long ret = 0;

	mutex_lock(&pmem[id].arena_mutex);

	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
		if (pmem[id].allocator.bitmap.bitm_alloc[i].bit ==
				data->index) {
			ret = pmem[id].allocator.bitmap.bitm_alloc[i].quanta *
				pmem[id].quantum;
			break;
		}

	mutex_unlock(&pmem[id].arena_mutex);
#if PMEM_DEBUG
	if (i >= pmem[id].allocator.bitmap.bitmap_allocs)
		pr_alert("pmem: %s: can't find bitnum %d in "
			"alloc'd array!\n", __func__, data->index);
#endif
	return ret;
}

static unsigned long pmem_len_system(int id, struct pmem_data *data)
{
	unsigned long ret = 0;

	mutex_lock(&pmem[id].arena_mutex);

	ret = ((struct alloc_list *)data->index)->size;
	mutex_unlock(&pmem[id].arena_mutex);

	return ret;
}

static int pmem_map_garbage(int id, struct vm_area_struct *vma,
			    struct pmem_data *data, unsigned long offset,
			    unsigned long len)
{
	int i, garbage_pages = len >> PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
	for (i = 0; i < garbage_pages; i++) {
		if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
		    pmem[id].garbage_pfn))
			return -EAGAIN;
	}
	return 0;
}
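
/*
 * Added note: mapping every revoked page to the single shared garbage_pfn
 * keeps the client's mapping valid (no faults) while cutting access to the
 * real buffer; e.g. after a master closes or a fork, the client reads
 * whatever junk is in the garbage page instead of pmem data.
 */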

static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
				unsigned long len)
{
	int garbage_pages;
	DLOG("unmap offset %lx len %lx\n", offset, len);

	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));

	garbage_pages = len >> PAGE_SHIFT;
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	pmem_map_garbage(id, vma, data, offset, len);
	return 0;
}

static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
			      struct pmem_data *data, unsigned long offset,
			      unsigned long len)
{
	int ret;
	DLOG("map offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));

	ret = io_remap_pfn_range(vma, vma->vm_start + offset,
		(pmem[id].start_addr(id, data) + offset) >> PAGE_SHIFT,
		len, vma->vm_page_prot);
	if (ret) {
#if PMEM_DEBUG
		pr_alert("pmem: %s: io_remap_pfn_range fails with "
			"return value: %d!\n", __func__, ret);
#endif

		ret = -EAGAIN;
	}
	return ret;
}

static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
				unsigned long len)
{
	/* hold the mm sem for the vma you are modifying when you call this */
	BUG_ON(!vma);
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	return pmem_map_pfn_range(id, vma, data, offset, len);
}

static void pmem_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;
	int id = get_id(file);

#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
		get_name(file), id, current->pid,
		get_task_comm(currtask_name, current),
		current->parent->pid, file, file_count(file));
	/* this should never be called as we don't support copying pmem
	 * ranges via fork */
	down_read(&data->sem);
	BUG_ON(!has_allocation(file));
	/* remap the garbage pages, forkers don't get access to the data */
	pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
	up_read(&data->sem);
}

static void pmem_vma_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;

#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
		get_name(file), get_id(file), current->pid,
		get_task_comm(currtask_name, current),
		current->parent->pid, file, file_count(file));

	if (unlikely(!is_pmem_file(file))) {
		pr_warning("pmem: something is very wrong, you are "
			"closing a vm backing an allocation that doesn't "
			"exist!\n");
		return;
	}

	down_write(&data->sem);
	if (unlikely(!has_allocation(file))) {
		up_write(&data->sem);
		pr_warning("pmem: something is very wrong, you are "
			"closing a vm backing an allocation that doesn't "
			"exist!\n");
		return;
	}
	if (data->vma == vma) {
		data->vma = NULL;
		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
		    (data->flags & PMEM_FLAGS_SUBMAP))
			data->flags |= PMEM_FLAGS_UNSUBMAP;
	}
	/* the kernel is going to free this vma now anyway */
	up_write(&data->sem);
}

static struct vm_operations_struct vm_ops = {
	.open = pmem_vma_open,
	.close = pmem_vma_close,
};
1599
1600static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
1601{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001602 struct pmem_data *data = file->private_data;
Laura Abbott1e36a022011-06-22 17:08:13 -07001603 int index = -1;
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001604 unsigned long vma_size = vma->vm_end - vma->vm_start;
1605 int ret = 0, id = get_id(file);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001606#if PMEM_DEBUG_MSGS
1607 char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
1608#endif
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001609
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001610 if (!data) {
1611 pr_err("pmem: Invalid file descriptor, no private data\n");
1612 return -EINVAL;
1613 }
1614 DLOG("pid %u(%s) mmap vma_size %lu on dev %s(id: %d)\n", current->pid,
1615 get_task_comm(currtask_name, current), vma_size,
1616 get_name(file), id);
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001617 if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
1618#if PMEM_DEBUG
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001619 pr_err("pmem: mmaps must be at offset zero, aligned"
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001620 " and a multiple of pages_size.\n");
1621#endif
1622 return -EINVAL;
1623 }
1624
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001625 down_write(&data->sem);
1626 /* check this file isn't already mmaped, for submaps check this file
1627 * has never been mmaped */
1628 if ((data->flags & PMEM_FLAGS_SUBMAP) ||
1629 (data->flags & PMEM_FLAGS_UNSUBMAP)) {
1630#if PMEM_DEBUG
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001631 pr_err("pmem: you can only mmap a pmem file once, "
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001632 "this file is already mmaped. %x\n", data->flags);
1633#endif
1634 ret = -EINVAL;
1635 goto error;
1636 }
1637 /* if file->private_data == unalloced, alloc*/
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001638 if (data->index == -1) {
1639 mutex_lock(&pmem[id].arena_mutex);
Laura Abbott1e36a022011-06-22 17:08:13 -07001640 index = pmem_allocate_from_id(id,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001641 vma->vm_end - vma->vm_start,
1642 SZ_4K);
1643 mutex_unlock(&pmem[id].arena_mutex);
1644 /* either no space was available or an error occured */
1645 if (index == -1) {
1646 pr_err("pmem: mmap unable to allocate memory"
1647 "on %s\n", get_name(file));
1648 ret = -ENOMEM;
1649 goto error;
1650 }
1651 /* store the index of a successful allocation */
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001652 data->index = index;
1653 }
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001654
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001655 if (pmem[id].len(id, data) < vma_size) {
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001656#if PMEM_DEBUG
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001657 pr_err("pmem: mmap size [%lu] does not match"
1658 " size of backing region [%lu].\n", vma_size,
1659 pmem[id].len(id, data));
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001660#endif
1661 ret = -EINVAL;
1662 goto error;
1663 }
1664
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001665 vma->vm_pgoff = pmem[id].start_addr(id, data) >> PAGE_SHIFT;
1666
1667 vma->vm_page_prot = pmem_phys_mem_access_prot(file, vma->vm_page_prot);
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001668
1669 if (data->flags & PMEM_FLAGS_CONNECTED) {
1670 struct pmem_region_node *region_node;
1671 struct list_head *elt;
1672 if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001673 pr_alert("pmem: mmap failed in kernel!\n");
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001674 ret = -EAGAIN;
1675 goto error;
1676 }
1677 list_for_each(elt, &data->region_list) {
1678 region_node = list_entry(elt, struct pmem_region_node,
1679 list);
1680 DLOG("remapping file: %p %lx %lx\n", file,
1681 region_node->region.offset,
1682 region_node->region.len);
1683 if (pmem_remap_pfn_range(id, vma, data,
1684 region_node->region.offset,
1685 region_node->region.len)) {
1686 ret = -EAGAIN;
1687 goto error;
1688 }
1689 }
1690 data->flags |= PMEM_FLAGS_SUBMAP;
1691 get_task_struct(current->group_leader);
1692 data->task = current->group_leader;
1693 data->vma = vma;
1694#if PMEM_DEBUG
1695 data->pid = current->pid;
1696#endif
1697 DLOG("submmapped file %p vma %p pid %u\n", file, vma,
1698 current->pid);
1699 } else {
1700 if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001701 pr_err("pmem: mmap failed in kernel!\n");
Rebecca Schultza4ff0e82008-07-24 11:22:53 -07001702 ret = -EAGAIN;
1703 goto error;
1704 }
1705 data->flags |= PMEM_FLAGS_MASTERMAP;
1706 data->pid = current->pid;
1707 }
1708 vma->vm_ops = &vm_ops;
1709error:
1710 up_write(&data->sem);
1711 return ret;
1712}
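
/*
 * Illustrative sketch, not part of the driver: the user-space sequence the
 * mmap path above expects. The device node name and buffer size are
 * assumptions for the example; error handling is abbreviated.
 *
 *	int fd = open("/dev/pmem", O_RDWR);
 *	size_t len = 16 * 4096;			// must be page aligned
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);	// offset must be zero
 *	if (buf == MAP_FAILED)
 *		...;		// region full, or fd already mmaped once
 *	munmap(buf, len);	// pmem_vma_close() runs here
 *	close(fd);
 */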

/* the following are the API for accessing pmem regions by other drivers
 * from inside the kernel */
int get_pmem_user_addr(struct file *file, unsigned long *start,
		   unsigned long *len)
{
	int ret = -1;

	if (is_pmem_file(file)) {
		struct pmem_data *data = file->private_data;

		down_read(&data->sem);
		if (has_allocation(file)) {
			if (data->vma) {
				*start = data->vma->vm_start;
				*len = data->vma->vm_end - data->vma->vm_start;
			} else {
				*start = *len = 0;
#if PMEM_DEBUG
				pr_err("pmem: %s: no vma present.\n",
					__func__);
#endif
			}
			ret = 0;
		}
		up_read(&data->sem);
	}

#if PMEM_DEBUG
	if (ret)
		pr_err("pmem: %s: requested pmem data from invalid "
			"file.\n", __func__);
#endif
	return ret;
}

int get_pmem_addr(struct file *file, unsigned long *start,
		  unsigned long *vstart, unsigned long *len)
{
	int ret = -1;

	if (is_pmem_file(file)) {
		struct pmem_data *data = file->private_data;

		down_read(&data->sem);
		if (has_allocation(file)) {
			int id = get_id(file);

			*start = pmem[id].start_addr(id, data);
			*len = pmem[id].len(id, data);
			*vstart = (unsigned long)
				pmem_start_vaddr(id, data);
			up_read(&data->sem);
#if PMEM_DEBUG
			down_write(&data->sem);
			data->ref++;
			up_write(&data->sem);
#endif
			DLOG("returning start %#lx len %lu "
				"vstart %#lx\n",
				*start, *len, *vstart);
			ret = 0;
		} else {
			up_read(&data->sem);
		}
	}
	return ret;
}

int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart,
		  unsigned long *len, struct file **filp)
{
	int ret = -1;
	struct file *file = fget(fd);

	if (unlikely(file == NULL)) {
		pr_err("pmem: %s: requested data from file "
			"descriptor that doesn't exist.\n", __func__);
	} else {
#if PMEM_DEBUG_MSGS
		char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
		DLOG("filp %p rdev %d pid %u(%s) file %p(%ld)"
			" dev %s(id: %d)\n", filp,
			file->f_dentry->d_inode->i_rdev,
			current->pid, get_task_comm(currtask_name, current),
			file, file_count(file), get_name(file), get_id(file));

		if (!get_pmem_addr(file, start, vstart, len)) {
			if (filp)
				*filp = file;
			ret = 0;
		} else {
			fput(file);
		}
	}
	return ret;
}
EXPORT_SYMBOL(get_pmem_file);

int get_pmem_fd(int fd, unsigned long *start, unsigned long *len)
{
	unsigned long vstart;
	return get_pmem_file(fd, start, &vstart, len, NULL);
}
EXPORT_SYMBOL(get_pmem_fd);

void put_pmem_file(struct file *file)
{
#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("rdev %d pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
		file->f_dentry->d_inode->i_rdev, current->pid,
		get_task_comm(currtask_name, current), file,
		file_count(file), get_name(file), get_id(file));
	if (is_pmem_file(file)) {
#if PMEM_DEBUG
		struct pmem_data *data = file->private_data;

		down_write(&data->sem);
		if (!data->ref--) {
			data->ref++;
			pr_alert("pmem: pmem_put > pmem_get %s "
				"(pid %d)\n",
				pmem[get_id(file)].dev.name, data->pid);
			BUG();
		}
		up_write(&data->sem);
#endif
		fput(file);
	}
}
EXPORT_SYMBOL(put_pmem_file);

void put_pmem_fd(int fd)
{
	int put_needed;
	struct file *file = fget_light(fd, &put_needed);

	if (file) {
		put_pmem_file(file);
		fput_light(file, put_needed);
	}
}

void flush_pmem_fd(int fd, unsigned long offset, unsigned long len)
{
	int fput_needed;
	struct file *file = fget_light(fd, &fput_needed);

	if (file) {
		flush_pmem_file(file, offset, len);
		fput_light(file, fput_needed);
	}
}

void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
{
	struct pmem_data *data;
	int id;
	void *vaddr;
	struct pmem_region_node *region_node;
	struct list_head *elt;
	void *flush_start, *flush_end;
#ifdef CONFIG_OUTER_CACHE
	unsigned long phy_start, phy_end;
#endif
	if (!is_pmem_file(file))
		return;

	id = get_id(file);
	if (!pmem[id].cached)
		return;

	/* is_pmem_file fails if !file */
	data = file->private_data;

	down_read(&data->sem);
	if (!has_allocation(file))
		goto end;

	vaddr = pmem_start_vaddr(id, data);

	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM) {
		dmac_flush_range(vaddr,
			(void *)((unsigned long)vaddr +
				((struct alloc_list *)(data->index))->size));
#ifdef CONFIG_OUTER_CACHE
		phy_start = pmem_start_addr_system(id, data);

		phy_end = phy_start +
			((struct alloc_list *)(data->index))->size;

		outer_flush_range(phy_start, phy_end);
#endif
		goto end;
	}
	/* if this isn't a submapped file, flush the whole thing */
	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
		dmac_flush_range(vaddr, vaddr + pmem[id].len(id, data));
#ifdef CONFIG_OUTER_CACHE
		phy_start = (unsigned long)vaddr -
			(unsigned long)pmem[id].vbase + pmem[id].base;

		phy_end = phy_start + pmem[id].len(id, data);

		outer_flush_range(phy_start, phy_end);
#endif
		goto end;
	}
	/* otherwise, flush the region of the file we are drawing */
	list_for_each(elt, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		if ((offset >= region_node->region.offset) &&
		    ((offset + len) <= (region_node->region.offset +
			region_node->region.len))) {
			flush_start = vaddr + region_node->region.offset;
			flush_end = flush_start + region_node->region.len;
			dmac_flush_range(flush_start, flush_end);
#ifdef CONFIG_OUTER_CACHE

			phy_start = (unsigned long)flush_start -
				(unsigned long)pmem[id].vbase + pmem[id].base;

			phy_end = phy_start + region_node->region.len;

			outer_flush_range(phy_start, phy_end);
#endif
			break;
		}
	}
end:
	up_read(&data->sem);
}
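
/*
 * Illustrative sketch, not part of the driver: typical use of the kernel
 * API above by another driver that received a pmem fd from user space via
 * its own ioctl. Variable names are assumptions for the example.
 *
 *	unsigned long paddr, vstart, len;
 *	struct file *pmem_filp;
 *
 *	if (get_pmem_file(fd, &paddr, &vstart, &len, &pmem_filp))
 *		return -EINVAL;		// not a pmem fd, or no allocation
 *	flush_pmem_file(pmem_filp, 0, len);	// write back CPU caches
 *	// ... hand paddr/len to the hardware ...
 *	put_pmem_file(pmem_filp);	// drop the reference when done
 */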

int pmem_cache_maint(struct file *file, unsigned int cmd,
		struct pmem_addr *pmem_addr)
{
	struct pmem_data *data;
	int id;
	unsigned long vaddr, paddr, length, offset,
		      pmem_len, pmem_start_addr;

	/* Called from kernel-space so file may be NULL */
	if (!file)
		return -EBADF;

	data = file->private_data;
	id = get_id(file);

	if (!pmem[id].cached)
		return 0;

	offset = pmem_addr->offset;
	length = pmem_addr->length;

	down_read(&data->sem);
	if (!has_allocation(file)) {
		up_read(&data->sem);
		return -EINVAL;
	}
	pmem_len = pmem[id].len(id, data);
	pmem_start_addr = pmem[id].start_addr(id, data);
	up_read(&data->sem);

	if (offset + length > pmem_len)
		return -EINVAL;

	vaddr = pmem_addr->vaddr;
	paddr = pmem_start_addr + offset;

	DLOG("pmem cache maint on dev %s(id: %d) "
		"(vaddr %lx paddr %lx len %lu bytes)\n",
		get_name(file), id, vaddr, paddr, length);
	if (cmd == PMEM_CLEAN_INV_CACHES)
		clean_and_invalidate_caches(vaddr,
				length, paddr);
	else if (cmd == PMEM_CLEAN_CACHES)
		clean_caches(vaddr, length, paddr);
	else if (cmd == PMEM_INV_CACHES)
		invalidate_caches(vaddr, length, paddr);

	return 0;
}
EXPORT_SYMBOL(pmem_cache_maint);
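
/*
 * Illustrative sketch, not part of the driver: a kernel client cleaning
 * part of a cached pmem buffer ahead of a device read. The variables are
 * assumptions for the example.
 *
 *	struct pmem_addr addr = {
 *		.vaddr  = mapped_vaddr,	// virtual address of the buffer
 *		.offset = 0,		// byte offset into the allocation
 *		.length = xfer_len,	// bytes to clean
 *	};
 *
 *	ret = pmem_cache_maint(pmem_filp, PMEM_CLEAN_CACHES, &addr);
 */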

static int pmem_connect(unsigned long connect, struct file *file)
{
	int ret = 0, put_needed;
	struct file *src_file;

	if (!file) {
		pr_err("pmem: %s: NULL file pointer passed in, "
			"bailing out!\n", __func__);
		ret = -EINVAL;
		goto leave;
	}

	src_file = fget_light(connect, &put_needed);

	if (!src_file) {
		pr_err("pmem: %s: src file not found!\n", __func__);
		ret = -EBADF;
		goto leave;
	}

	if (src_file == file) { /* degenerate case, operator error */
		pr_err("pmem: %s: src_file and passed in file are "
			"the same; refusing to connect to self!\n", __func__);
		ret = -EINVAL;
		goto put_src_file;
	}

	if (unlikely(!is_pmem_file(src_file))) {
		pr_err("pmem: %s: src file is not a pmem file!\n",
			__func__);
		ret = -EINVAL;
		goto put_src_file;
	} else {
		struct pmem_data *src_data = src_file->private_data;

		if (!src_data) {
			pr_err("pmem: %s: src file pointer has no "
				"private data, bailing out!\n", __func__);
			ret = -EINVAL;
			goto put_src_file;
		}

		down_read(&src_data->sem);

		if (unlikely(!has_allocation(src_file))) {
			up_read(&src_data->sem);
			pr_err("pmem: %s: src file has no allocation!\n",
				__func__);
			ret = -EINVAL;
		} else {
			struct pmem_data *data;
			int src_index = src_data->index;

			up_read(&src_data->sem);

			data = file->private_data;
			if (!data) {
				pr_err("pmem: %s: passed in file "
					"pointer has no private data, bailing"
					" out!\n", __func__);
				ret = -EINVAL;
				goto put_src_file;
			}

			down_write(&data->sem);
			if (has_allocation(file) &&
					(data->index != src_index)) {
				up_write(&data->sem);

				pr_err("pmem: %s: file is already "
					"mapped but doesn't match this "
					"src_file!\n", __func__);
				ret = -EINVAL;
			} else {
				data->index = src_index;
				data->flags |= PMEM_FLAGS_CONNECTED;
				data->master_fd = connect;
				data->master_file = src_file;

				up_write(&data->sem);

				DLOG("connect %p to %p\n", file, src_file);
			}
		}
	}
put_src_file:
	fput_light(src_file, put_needed);
leave:
	return ret;
}
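
/*
 * Illustrative sketch, not part of the driver: the user-space connect
 * sequence this function supports. The master fd owns the allocation; a
 * client fd shares it and maps a page-aligned sub-region. Device path,
 * ordering, and sizes are assumptions for the example.
 *
 *	int master = open("/dev/pmem", O_RDWR);
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, master, 0);	// master allocation
 *
 *	int client = open("/dev/pmem", O_RDWR);
 *	ioctl(client, PMEM_CONNECT, master);	// share master's allocation
 *
 *	struct pmem_region region = { .offset = 0, .len = 4096 };
 *	ioctl(client, PMEM_MAP, &region);	// caller must own the master
 *	void *sub = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, client, 0);
 */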

static void pmem_unlock_data_and_mm(struct pmem_data *data,
				    struct mm_struct *mm)
{
	up_write(&data->sem);
	if (mm != NULL) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
}

static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
				 struct mm_struct **locked_mm)
{
	int ret = 0;
	struct mm_struct *mm = NULL;
#if PMEM_DEBUG_MSGS
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif
	DLOG("pid %u(%s) file %p(%ld)\n",
		current->pid, get_task_comm(currtask_name, current),
		file, file_count(file));

	*locked_mm = NULL;
lock_mm:
	down_read(&data->sem);
	if (PMEM_IS_SUBMAP(data)) {
		mm = get_task_mm(data->task);
		if (!mm) {
			up_read(&data->sem);
#if PMEM_DEBUG
			pr_alert("pmem: can't remap - task is gone!\n");
#endif
			return -1;
		}
	}
	up_read(&data->sem);

	if (mm)
		down_write(&mm->mmap_sem);

	down_write(&data->sem);
	/* check that the file didn't get mmaped before we could take the
	 * data sem, this should be safe b/c you can only submap each file
	 * once */
	if (PMEM_IS_SUBMAP(data) && !mm) {
		pmem_unlock_data_and_mm(data, mm);
		DLOG("mapping contention, repeating mmap op\n");
		goto lock_mm;
	}
	/* now check that vma.mm is still there, it could have been
	 * deleted by vma_close before we could get the data->sem */
	if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
		/* might as well release this */
		if (data->flags & PMEM_FLAGS_SUBMAP) {
			put_task_struct(data->task);
			data->task = NULL;
			/* lower the submap flag to show the mm is gone */
			data->flags &= ~(PMEM_FLAGS_SUBMAP);
		}
		pmem_unlock_data_and_mm(data, mm);
#if PMEM_DEBUG
		pr_alert("pmem: vma.mm went away!\n");
#endif
		return -1;
	}
	*locked_mm = mm;
	return ret;
}

int pmem_remap(struct pmem_region *region, struct file *file,
		unsigned operation)
{
	int ret;
	struct pmem_region_node *region_node;
	struct mm_struct *mm = NULL;
	struct list_head *elt, *elt2;
	int id = get_id(file);
	struct pmem_data *data;

	DLOG("operation %#x, region offset %ld, region len %ld\n",
		operation, region->offset, region->len);

	if (!is_pmem_file(file)) {
#if PMEM_DEBUG
		pr_err("pmem: remap request for non-pmem file descriptor\n");
#endif
		return -EINVAL;
	}

	/* is_pmem_file fails if !file */
	data = file->private_data;

	/* pmem region must be aligned on a page boundary */
	if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
		 !PMEM_IS_PAGE_ALIGNED(region->len))) {
#if PMEM_DEBUG
		pr_err("pmem: request for unaligned pmem "
			"suballocation %lx %lx\n",
			region->offset, region->len);
#endif
		return -EINVAL;
	}

	/* if userspace requests a region of len 0, there's nothing to do */
	if (region->len == 0)
		return 0;

	/* lock the mm and data */
	ret = pmem_lock_data_and_mm(file, data, &mm);
	if (ret)
		return 0;

	/* only the owner of the master file can remap the client fds
	 * that back in it */
	if (!is_master_owner(file)) {
#if PMEM_DEBUG
		pr_err("pmem: remap requested from non-master process\n");
#endif
		ret = -EINVAL;
		goto err;
	}

	/* check that the requested range is within the src allocation */
	if (unlikely((region->offset > pmem[id].len(id, data)) ||
		     (region->len > pmem[id].len(id, data)) ||
		     (region->offset + region->len > pmem[id].len(id, data)))) {
#if PMEM_DEBUG
		pr_err("pmem: suballoc doesn't fit in src_file!\n");
#endif
		ret = -EINVAL;
		goto err;
	}

	if (operation == PMEM_MAP) {
		region_node = kmalloc(sizeof(struct pmem_region_node),
			      GFP_KERNEL);
		if (!region_node) {
			ret = -ENOMEM;
#if PMEM_DEBUG
			pr_alert("pmem: No space to allocate remap metadata!");
#endif
			goto err;
		}
		region_node->region = *region;
		list_add(&region_node->list, &data->region_list);
	} else if (operation == PMEM_UNMAP) {
		int found = 0;
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
				      list);
			if (region->len == 0 ||
			    (region_node->region.offset == region->offset &&
			     region_node->region.len == region->len)) {
				list_del(elt);
				kfree(region_node);
				found = 1;
			}
		}
		if (!found) {
#if PMEM_DEBUG
			pr_err("pmem: Unmap region does not map any"
				" mapped region!");
#endif
			ret = -EINVAL;
			goto err;
		}
	}

	if (data->vma && PMEM_IS_SUBMAP(data)) {
		if (operation == PMEM_MAP)
			ret = pmem_remap_pfn_range(id, data->vma, data,
				   region->offset, region->len);
		else if (operation == PMEM_UNMAP)
			ret = pmem_unmap_pfn_range(id, data->vma, data,
				   region->offset, region->len);
	}

err:
	pmem_unlock_data_and_mm(data, mm);
	return ret;
}

static void pmem_revoke(struct file *file, struct pmem_data *data)
{
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	struct mm_struct *mm = NULL;
	int id = get_id(file);
	int ret = 0;

	data->master_file = NULL;
	ret = pmem_lock_data_and_mm(file, data, &mm);
	/* if lock_data_and_mm fails either the task that mapped the fd, or
	 * the vma that mapped it have already gone away, nothing more
	 * needs to be done */
	if (ret)
		return;
	/* unmap everything */
	/* delete the regions and region list, nothing is mapped any more */
	if (data->vma)
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			pmem_unmap_pfn_range(id, data->vma, data,
					     region_node->region.offset,
					     region_node->region.len);
			list_del(elt);
			kfree(region_node);
		}
	/* delete the master file */
	pmem_unlock_data_and_mm(data, mm);
}

static void pmem_get_size(struct pmem_region *region, struct file *file)
{
	/* called via ioctl file op, so file guaranteed to be not NULL */
	struct pmem_data *data = file->private_data;
	int id = get_id(file);

	down_read(&data->sem);
	if (!has_allocation(file)) {
		region->offset = 0;
		region->len = 0;
	} else {
		region->offset = pmem[id].start_addr(id, data);
		region->len = pmem[id].len(id, data);
	}
	up_read(&data->sem);
	DLOG("offset 0x%lx len 0x%lx\n", region->offset, region->len);
}


static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	/* called from user space as file op, so file guaranteed to be not
	 * NULL
	 */
	struct pmem_data *data = file->private_data;
	int id = get_id(file);
#if PMEM_DEBUG_MSGS
	char currtask_name[
		FIELD_SIZEOF(struct task_struct, comm) + 1];
#endif

	DLOG("pid %u(%s) file %p(%ld) cmd %#x, dev %s(id: %d)\n",
		current->pid, get_task_comm(currtask_name, current),
		file, file_count(file), cmd, get_name(file), id);

	switch (cmd) {
	case PMEM_GET_PHYS:
	{
		struct pmem_region region;

		DLOG("get_phys\n");
		down_read(&data->sem);
		if (!has_allocation(file)) {
			region.offset = 0;
			region.len = 0;
		} else {
			region.offset = pmem[id].start_addr(id, data);
			region.len = pmem[id].len(id, data);
		}
		up_read(&data->sem);

		if (copy_to_user((void __user *)arg, &region,
				sizeof(struct pmem_region)))
			return -EFAULT;

		DLOG("pmem: successful request for "
			"physical address of pmem region id %d, "
			"offset 0x%lx, len 0x%lx\n",
			id, region.offset, region.len);

		break;
	}
	case PMEM_MAP:
	{
		struct pmem_region region;
		DLOG("map\n");
		if (copy_from_user(&region, (void __user *)arg,
				sizeof(struct pmem_region)))
			return -EFAULT;
		return pmem_remap(&region, file, PMEM_MAP);
	}
	case PMEM_UNMAP:
	{
		struct pmem_region region;
		DLOG("unmap\n");
		if (copy_from_user(&region, (void __user *)arg,
				sizeof(struct pmem_region)))
			return -EFAULT;
		return pmem_remap(&region, file, PMEM_UNMAP);
	}
	case PMEM_GET_SIZE:
	{
		struct pmem_region region;
		DLOG("get_size\n");
		pmem_get_size(&region, file);
		if (copy_to_user((void __user *)arg, &region,
				sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_GET_TOTAL_SIZE:
	{
		struct pmem_region region;
		DLOG("get total size\n");
		region.offset = 0;
		region.len = pmem[id].size;
		if (copy_to_user((void __user *)arg, &region,
				sizeof(struct pmem_region)))
			return -EFAULT;
		break;
	}
	case PMEM_GET_FREE_SPACE:
	{
		struct pmem_freespace fs;
		DLOG("get freespace on %s(id: %d)\n",
			get_name(file), id);

		mutex_lock(&pmem[id].arena_mutex);
		pmem[id].free_space(id, &fs);
		mutex_unlock(&pmem[id].arena_mutex);

		DLOG("%s(id: %d) total free %lu, largest %lu\n",
			get_name(file), id, fs.total, fs.largest);

		if (copy_to_user((void __user *)arg, &fs,
			sizeof(struct pmem_freespace)))
			return -EFAULT;
		break;
	}

	case PMEM_ALLOCATE:
	{
		int ret = 0;
		DLOG("allocate, id %d\n", id);
		down_write(&data->sem);
		if (has_allocation(file)) {
			pr_err("pmem: Existing allocation found on "
				"this file descriptor\n");
			up_write(&data->sem);
			return -EINVAL;
		}

		mutex_lock(&pmem[id].arena_mutex);
		data->index = pmem_allocate_from_id(id,
				arg,
				SZ_4K);
		mutex_unlock(&pmem[id].arena_mutex);
		ret = data->index == -1 ? -ENOMEM :
			data->index;
		up_write(&data->sem);
		return ret;
	}
	case PMEM_ALLOCATE_ALIGNED:
	{
		struct pmem_allocation alloc;
		int ret = 0;

		if (copy_from_user(&alloc, (void __user *)arg,
					sizeof(struct pmem_allocation)))
			return -EFAULT;
		DLOG("allocate id align %d %u\n", id, alloc.align);
		down_write(&data->sem);
		if (has_allocation(file)) {
			pr_err("pmem: Existing allocation found on "
				"this file descriptor\n");
			up_write(&data->sem);
			return -EINVAL;
		}

		if (alloc.align & (alloc.align - 1)) {
			pr_err("pmem: Alignment is not a power of 2\n");
			up_write(&data->sem);
			return -EINVAL;
		}

		if (alloc.align != SZ_4K &&
				(pmem[id].allocator_type !=
					PMEM_ALLOCATORTYPE_BITMAP)) {
			pr_err("pmem: Non 4k alignment requires bitmap"
				" allocator on %s\n", pmem[id].name);
			up_write(&data->sem);
			return -EINVAL;
		}

		if (alloc.align > SZ_1M ||
			alloc.align < SZ_4K) {
			pr_err("pmem: Invalid Alignment (%u) "
				"specified\n", alloc.align);
			up_write(&data->sem);
			return -EINVAL;
		}

		mutex_lock(&pmem[id].arena_mutex);
		data->index = pmem_allocate_from_id(id,
			alloc.size,
			alloc.align);
		mutex_unlock(&pmem[id].arena_mutex);
		ret = data->index == -1 ? -ENOMEM :
			data->index;
		up_write(&data->sem);
		return ret;
	}
	case PMEM_CONNECT:
		DLOG("connect\n");
		return pmem_connect(arg, file);
	case PMEM_CLEAN_INV_CACHES:
	case PMEM_CLEAN_CACHES:
	case PMEM_INV_CACHES:
	{
		struct pmem_addr pmem_addr;

		if (copy_from_user(&pmem_addr, (void __user *)arg,
					sizeof(struct pmem_addr)))
			return -EFAULT;

		return pmem_cache_maint(file, cmd, &pmem_addr);
	}
	default:
		if (pmem[id].ioctl)
			return pmem[id].ioctl(file, cmd, arg);

		DLOG("ioctl invalid (%#x)\n", cmd);
		return -EINVAL;
	}
	return 0;
}
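
/*
 * Illustrative sketch, not part of the driver: explicit allocation through
 * the ioctl interface rather than at mmap time, followed by a physical
 * address query. Size and alignment are assumptions for the example.
 *
 *	int fd = open("/dev/pmem", O_RDWR);
 *
 *	struct pmem_allocation alloc = {
 *		.size  = 0x100000,	// 1 MB
 *		.align = 0x10000,	// 64 KB; needs the bitmap allocator
 *	};
 *	ioctl(fd, PMEM_ALLOCATE_ALIGNED, &alloc);
 *
 *	struct pmem_region region;
 *	ioctl(fd, PMEM_GET_PHYS, &region);	// region.offset = phys addr
 */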

static void ioremap_pmem(int id)
{
	DLOG("PMEMDEBUG: ioremapping for %s\n", pmem[id].name);

	if (pmem[id].cached)
		pmem[id].vbase = ioremap_cached(pmem[id].base, pmem[id].size);
#ifdef ioremap_ext_buffered
	else if (pmem[id].buffered)
		pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
					pmem[id].size);
#endif
	else
		pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
}

int pmem_setup(struct android_pmem_platform_data *pdata,
	       long (*ioctl)(struct file *, unsigned int, unsigned long),
	       int (*release)(struct inode *, struct file *))
{
	int i, index = 0, id;

	if (id_count >= PMEM_MAX_DEVICES) {
		pr_alert("pmem: %s: unable to register driver(%s) - no more "
			"devices available!\n", __func__, pdata->name);
		goto err_no_mem;
	}

	if (!pdata->size) {
		pr_alert("pmem: %s: unable to register pmem driver(%s) - zero "
			"size passed in!\n", __func__, pdata->name);
		goto err_no_mem;
	}

	id = id_count++;

	pmem[id].id = id;

	if (pmem[id].allocate) {
		pr_alert("pmem: %s: unable to register pmem driver - "
			"duplicate registration of %s!\n",
			__func__, pdata->name);
		goto err_no_mem;
	}

	pmem[id].allocator_type = pdata->allocator_type;

	/* 'quantum' is a "hidden" variable that defaults to 0 in the board
	 * files */
	pmem[id].quantum = pdata->quantum ?: PMEM_MIN_ALLOC;
	if (pmem[id].quantum < PMEM_MIN_ALLOC ||
		!is_power_of_2(pmem[id].quantum)) {
		pr_alert("pmem: %s: unable to register pmem driver %s - "
			"invalid quantum value (%#x)!\n",
			__func__, pdata->name, pmem[id].quantum);
		goto err_reset_pmem_info;
	}

	if (pdata->size % pmem[id].quantum) {
		/* bad alignment for size! */
		pr_alert("pmem: %s: Unable to register driver %s - "
			"memory region size (%#lx) is not a multiple of "
			"quantum size(%#x)!\n", __func__, pdata->name,
			pdata->size, pmem[id].quantum);
		goto err_reset_pmem_info;
	}

	pmem[id].cached = pdata->cached;
	pmem[id].buffered = pdata->buffered;
	pmem[id].size = pdata->size;
	pmem[id].memory_type = pdata->memory_type;
	strlcpy(pmem[id].name, pdata->name, PMEM_NAME_SIZE);

	pmem[id].num_entries = pmem[id].size / pmem[id].quantum;

	memset(&pmem[id].kobj, 0, sizeof(pmem[0].kobj));
	pmem[id].kobj.kset = pmem_kset;

	switch (pmem[id].allocator_type) {
	case PMEM_ALLOCATORTYPE_ALLORNOTHING:
		pmem[id].allocate = pmem_allocator_all_or_nothing;
		pmem[id].free = pmem_free_all_or_nothing;
		pmem[id].free_space = pmem_free_space_all_or_nothing;
		pmem[id].len = pmem_len_all_or_nothing;
		pmem[id].start_addr = pmem_start_addr_all_or_nothing;
		pmem[id].num_entries = 1;
		pmem[id].quantum = pmem[id].size;
		pmem[id].allocator.all_or_nothing.allocated = 0;

		if (kobject_init_and_add(&pmem[id].kobj,
				&pmem_allornothing_ktype, NULL,
				"%s", pdata->name))
			goto out_put_kobj;

		break;

	case PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
		pmem[id].allocator.buddy_bestfit.buddy_bitmap = kmalloc(
			pmem[id].num_entries * sizeof(struct pmem_bits),
			GFP_KERNEL);
		if (!pmem[id].allocator.buddy_bestfit.buddy_bitmap)
			goto err_reset_pmem_info;

		memset(pmem[id].allocator.buddy_bestfit.buddy_bitmap, 0,
			sizeof(struct pmem_bits) * pmem[id].num_entries);

		for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--)
			if ((pmem[id].num_entries) & 1<<i) {
				PMEM_BUDDY_ORDER(id, index) = i;
				index = PMEM_BUDDY_NEXT_INDEX(id, index);
			}
		pmem[id].allocate = pmem_allocator_buddy_bestfit;
		pmem[id].free = pmem_free_buddy_bestfit;
		pmem[id].free_space = pmem_free_space_buddy_bestfit;
		pmem[id].len = pmem_len_buddy_bestfit;
		pmem[id].start_addr = pmem_start_addr_buddy_bestfit;
		if (kobject_init_and_add(&pmem[id].kobj,
				&pmem_buddy_bestfit_ktype, NULL,
				"%s", pdata->name))
			goto out_put_kobj;

		break;

	case PMEM_ALLOCATORTYPE_BITMAP: /* 0, default if not explicit */
		pmem[id].allocator.bitmap.bitm_alloc = kmalloc(
			PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS *
				sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
			GFP_KERNEL);
		if (!pmem[id].allocator.bitmap.bitm_alloc) {
			pr_alert("pmem: %s: Unable to register pmem "
				"driver %s - can't allocate "
				"bitm_alloc!\n",
				__func__, pdata->name);
			goto err_reset_pmem_info;
		}

		if (kobject_init_and_add(&pmem[id].kobj,
				&pmem_bitmap_ktype, NULL,
				"%s", pdata->name))
			goto out_put_kobj;

		for (i = 0; i < PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS; i++) {
			pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
			pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
		}

		pmem[id].allocator.bitmap.bitmap_allocs =
			PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS;

		pmem[id].allocator.bitmap.bitmap =
			kcalloc((pmem[id].num_entries + 31) / 32,
				sizeof(unsigned int), GFP_KERNEL);
		if (!pmem[id].allocator.bitmap.bitmap) {
			pr_alert("pmem: %s: Unable to register pmem "
				"driver - can't allocate bitmap!\n",
				__func__);
			goto err_cant_register_device;
		}
		pmem[id].allocator.bitmap.bitmap_free = pmem[id].num_entries;

		pmem[id].allocate = pmem_allocator_bitmap;
		pmem[id].free = pmem_free_bitmap;
		pmem[id].free_space = pmem_free_space_bitmap;
		pmem[id].len = pmem_len_bitmap;
		pmem[id].start_addr = pmem_start_addr_bitmap;

		DLOG("bitmap allocator id %d (%s), num_entries %u, raw size "
			"%lu, quanta size %u\n",
			id, pdata->name, pmem[id].allocator.bitmap.bitmap_free,
			pmem[id].size, pmem[id].quantum);
		break;

	case PMEM_ALLOCATORTYPE_SYSTEM:

		INIT_LIST_HEAD(&pmem[id].allocator.system_mem.alist);

		pmem[id].allocator.system_mem.used = 0;
		pmem[id].vbase = NULL;

		if (kobject_init_and_add(&pmem[id].kobj,
				&pmem_system_ktype, NULL,
				"%s", pdata->name))
			goto out_put_kobj;

		pmem[id].allocate = pmem_allocator_system;
		pmem[id].free = pmem_free_system;
		pmem[id].free_space = pmem_free_space_system;
		pmem[id].len = pmem_len_system;
		pmem[id].start_addr = pmem_start_addr_system;
		pmem[id].num_entries = 0;
		pmem[id].quantum = PAGE_SIZE;

		DLOG("system allocator id %d (%s), raw size %lu\n",
			id, pdata->name, pmem[id].size);
		break;

	default:
		pr_alert("Invalid allocator type (%d) for pmem driver\n",
			pdata->allocator_type);
		goto err_reset_pmem_info;
	}

	pmem[id].ioctl = ioctl;
	pmem[id].release = release;
	mutex_init(&pmem[id].arena_mutex);
	mutex_init(&pmem[id].data_list_mutex);
	INIT_LIST_HEAD(&pmem[id].data_list);

	pmem[id].dev.name = pdata->name;
	pmem[id].dev.minor = id;
	pmem[id].dev.fops = &pmem_fops;
	pr_info("pmem: Initializing %s (user-space) as %s\n",
		pdata->name, pdata->cached ? "cached" : "non-cached");

	if (misc_register(&pmem[id].dev)) {
		pr_alert("Unable to register pmem driver!\n");
		goto err_cant_register_device;
	}

	pmem[id].base = allocate_contiguous_memory_nomap(pmem[id].size,
		pmem[id].memory_type, PAGE_SIZE);

	pr_info("allocating %lu bytes at %p (%lx physical) for %s\n",
		pmem[id].size, pmem[id].vbase, pmem[id].base, pmem[id].name);

	pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
	atomic_set(&pmem[id].allocation_cnt, 0);
	pmem[id].map_on_demand = pdata->map_on_demand;

	if (pdata->setup_region)
		pmem[id].region_data = pdata->setup_region();

	if (pdata->request_region)
		pmem[id].mem_request = pdata->request_region;

	if (pdata->release_region)
		pmem[id].mem_release = pdata->release_region;

	return 0;

err_cant_register_device:
out_put_kobj:
	kobject_put(&pmem[id].kobj);
	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BUDDYBESTFIT)
		kfree(pmem[id].allocator.buddy_bestfit.buddy_bitmap);
	else if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BITMAP) {
		kfree(pmem[id].allocator.bitmap.bitmap);
		kfree(pmem[id].allocator.bitmap.bitm_alloc);
	}
err_reset_pmem_info:
	pmem[id].allocate = 0;
	pmem[id].dev.minor = -1;
err_no_mem:
	return -1;
}
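
/*
 * Illustrative sketch, not part of the driver: how a board file typically
 * reaches pmem_setup() through the platform bus (see pmem_probe() below).
 * Name, size, and allocator choice are assumptions for the example.
 *
 *	static struct android_pmem_platform_data android_pmem_pdata = {
 *		.name = "pmem",
 *		.size = 0x800000,	// 8 MB, a multiple of the quantum
 *		.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
 *		.cached = 1,
 *	};
 *
 *	static struct platform_device android_pmem_device = {
 *		.name = "android_pmem",
 *		.id = 0,
 *		.dev = { .platform_data = &android_pmem_pdata },
 *	};
 *
 *	platform_device_register(&android_pmem_device);
 */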

static int pmem_probe(struct platform_device *pdev)
{
	struct android_pmem_platform_data *pdata;

	if (!pdev || !pdev->dev.platform_data) {
		pr_alert("Unable to probe pmem!\n");
		return -1;
	}
	pdata = pdev->dev.platform_data;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return pmem_setup(pdata, NULL, NULL);
}

static int pmem_remove(struct platform_device *pdev)
{
	int id = pdev->id;
	__free_page(pfn_to_page(pmem[id].garbage_pfn));
	pm_runtime_disable(&pdev->dev);
	misc_deregister(&pmem[id].dev);
	return 0;
}

static int pmem_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int pmem_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static const struct dev_pm_ops pmem_dev_pm_ops = {
	.runtime_suspend = pmem_runtime_suspend,
	.runtime_resume = pmem_runtime_resume,
};

static struct platform_driver pmem_driver = {
	.probe = pmem_probe,
	.remove = pmem_remove,
	.driver = { .name = "android_pmem",
		    .pm = &pmem_dev_pm_ops,
	}
};


static int __init pmem_init(void)
{
	/* create /sys/kernel/<PMEM_SYSFS_DIR_NAME> directory */
	pmem_kset = kset_create_and_add(PMEM_SYSFS_DIR_NAME,
		NULL, kernel_kobj);
	if (!pmem_kset) {
		pr_err("pmem(%s):kset_create_and_add fail\n", __func__);
		return -ENOMEM;
	}

	return platform_driver_register(&pmem_driver);
}

static void __exit pmem_exit(void)
{
	platform_driver_unregister(&pmem_driver);
}

module_init(pmem_init);
module_exit(pmem_exit);