/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */

#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"

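/*
 * Each CMA area registered in cma_areas[] gets its own debugfs directory,
 * <debugfs>/cma/cma-<idx>, populated by cma_debugfs_add_one() below:
 *
 *   alloc          (write-only) allocate this many pages from the area
 *   free           (write-only) release this many previously allocated pages
 *   base_pfn       (read-only)  first page frame number of the area
 *   count          (read-only)  size of the area in pages
 *   order_per_bit  (read-only)  one bitmap bit covers 2^order_per_bit pages
 *   used           (read-only)  pages currently allocated from the area
 *   maxchunk       (read-only)  largest free contiguous chunk, in pages
 *   bitmap         (read-only)  raw allocation bitmap, exported as u32 words
 *
 * Example, assuming debugfs is mounted at /sys/kernel/debug and at least one
 * CMA area is configured:
 *
 *   echo 64 > /sys/kernel/debug/cma/cma-0/alloc    # allocate 64 pages
 *   cat /sys/kernel/debug/cma/cma-0/used           # see them accounted for
 *   echo 64 > /sys/kernel/debug/cma/cma-0/free     # give them back
 */

/*
 * One allocation made through the "alloc" file.  Entries are kept on the
 * per-area mem_head list so that a later write to "free" can find pages to
 * hand back to cma_release().
 */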
struct cma_mem {
	struct hlist_node node;
	struct page *p;
	unsigned long n;
};

static struct dentry *cma_debugfs_root;

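/*
 * Generic read callback for the simple numeric files: the private data
 * pointer set up in cma_debugfs_add_one() points straight at the struct cma
 * counter to report.
 */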
static int cma_debugfs_get(void *data, u64 *val)
{
	unsigned long *p = data;

	*val = *p;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "%llu\n");

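/*
 * "used": number of pages currently allocated from the area, i.e. the number
 * of set bits in the allocation bitmap scaled to pages via order_per_bit.
 */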
static int cma_used_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long used;

	mutex_lock(&cma->lock);
	/* the bitmap length always fits in an int */
	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
	mutex_unlock(&cma->lock);
	*val = (u64)used << cma->order_per_bit;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");

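/*
 * "maxchunk": size, in pages, of the largest run of free (clear) bits in the
 * allocation bitmap, i.e. the largest allocation the area could currently
 * satisfy in one contiguous piece.
 */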
static int cma_maxchunk_get(void *data, u64 *val)
{
	struct cma *cma = data;
	unsigned long maxchunk = 0;
	unsigned long start, end = 0;
	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	for (;;) {
		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
		if (start >= bitmap_maxno)
			break;
		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
		maxchunk = max(end - start, maxchunk);
	}
	mutex_unlock(&cma->lock);
	*val = (u64)maxchunk << cma->order_per_bit;

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");

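/*
 * Allocations made through the "alloc" file are tracked on cma->mem_head,
 * protected by cma->mem_head_lock; these two helpers push and pop cma_mem
 * entries on that list.
 */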
static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
{
	spin_lock(&cma->mem_head_lock);
	hlist_add_head(&mem->node, &cma->mem_head);
	spin_unlock(&cma->mem_head_lock);
}

static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
{
	struct cma_mem *mem = NULL;

	spin_lock(&cma->mem_head_lock);
	if (!hlist_empty(&cma->mem_head)) {
		mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
		hlist_del_init(&mem->node);
	}
	spin_unlock(&cma->mem_head_lock);

	return mem;
}

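/*
 * Release up to @count pages back to the area.  Whole recorded allocations
 * are freed first; a larger entry is split only when order_per_bit == 0,
 * since otherwise cma_release() could be handed a range that does not line
 * up with the bitmap granularity.
 */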
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;
}

static int cma_free_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_free_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");

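/*
 * Allocate @count pages from the area and remember them in a cma_mem entry
 * so that a later write to "free" can release them again.
 */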
static int cma_alloc_mem(struct cma *cma, int count)
{
	struct cma_mem *mem;
	struct page *p;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	p = cma_alloc(cma, count, 0);
	if (!p) {
		kfree(mem);
		return -ENOMEM;
	}

	mem->p = p;
	mem->n = count;

	cma_add_to_cma_mem_list(cma, mem);

	return 0;
}

static int cma_alloc_write(void *data, u64 val)
{
	int pages = val;
	struct cma *cma = data;

	return cma_alloc_mem(cma, pages);
}
DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");

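/*
 * Create the per-area directory "cma-<idx>" and populate it with the files
 * described at the top of this file.  "alloc" and "free" are write-only
 * (S_IWUSR); the remaining files are read-only (S_IRUGO).
 */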
static void cma_debugfs_add_one(struct cma *cma, int idx)
{
	struct dentry *tmp;
	char name[16];
	int u32s;

	sprintf(name, "cma-%d", idx);

	tmp = debugfs_create_dir(name, cma_debugfs_root);

	debugfs_create_file("alloc", S_IWUSR, tmp, cma, &cma_alloc_fops);

	debugfs_create_file("free", S_IWUSR, tmp, cma, &cma_free_fops);

	debugfs_create_file("base_pfn", S_IRUGO, tmp,
				&cma->base_pfn, &cma_debugfs_fops);
	debugfs_create_file("count", S_IRUGO, tmp,
				&cma->count, &cma_debugfs_fops);
	debugfs_create_file("order_per_bit", S_IRUGO, tmp,
				&cma->order_per_bit, &cma_debugfs_fops);
	debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops);
	debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops);

	u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
	debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32 *)cma->bitmap, u32s);
}

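/*
 * Create the top-level "cma" debugfs directory and add one subdirectory per
 * registered CMA area.
 */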
static int __init cma_debugfs_init(void)
{
	int i;

	cma_debugfs_root = debugfs_create_dir("cma", NULL);
	if (!cma_debugfs_root)
		return -ENOMEM;

	for (i = 0; i < cma_area_count; i++)
		cma_debugfs_add_one(&cma_areas[i], i);

	return 0;
}
late_initcall(cma_debugfs_init);