blob: 57b9205f6563dcc95a26769e9dea3399666e389f [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Sasha Levin28b24c12015-04-14 15:44:57 -07002/*
3 * CMA DebugFS Interface
4 *
5 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
6 */
7
8
9#include <linux/debugfs.h>
10#include <linux/cma.h>
Sasha Levin26b02a12015-04-14 15:44:59 -070011#include <linux/list.h>
12#include <linux/kernel.h>
13#include <linux/slab.h>
Sasha Levin83253302015-04-14 15:45:02 -070014#include <linux/mm_types.h>
Sasha Levin28b24c12015-04-14 15:44:57 -070015
16#include "cma.h"
17
/*
 * Bookkeeping for one allocation made through the debugfs "alloc" file.
 * Entries are kept on a per-area hlist (cma->mem_head) so the pages can
 * later be handed back through the "free" file.
 */
struct cma_mem {
	struct hlist_node node;	/* link in cma->mem_head */
	struct page *p;		/* first page of the allocation */
	unsigned long n;	/* size of the allocation in pages */
};
23
Sasha Levin28b24c12015-04-14 15:44:57 -070024static struct dentry *cma_debugfs_root;
25
26static int cma_debugfs_get(void *data, u64 *val)
27{
28 unsigned long *p = data;
Prakash Gupta71d6cbd2019-08-06 12:30:09 +053029 int ret = -EPERM;
Sasha Levin28b24c12015-04-14 15:44:57 -070030
Prakash Gupta71d6cbd2019-08-06 12:30:09 +053031 if (kptr_restrict == 0) {
32 *val = *p;
33 ret = 0;
34 } else {
35 *val = 0;
36 }
Sasha Levin28b24c12015-04-14 15:44:57 -070037
Prakash Gupta71d6cbd2019-08-06 12:30:09 +053038 return ret;
Sasha Levin28b24c12015-04-14 15:44:57 -070039}
Prakash Gupta71d6cbd2019-08-06 12:30:09 +053040DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops, cma_debugfs_get, NULL, "0x%lx\n");
Sasha Levin28b24c12015-04-14 15:44:57 -070041
Dmitry Safonov2e32b942015-04-15 16:14:59 -070042static int cma_used_get(void *data, u64 *val)
43{
44 struct cma *cma = data;
45 unsigned long used;
46
47 mutex_lock(&cma->lock);
48 /* pages counter is smaller than sizeof(int) */
Joonsoo Kimd56e84b2015-07-17 16:24:23 -070049 used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
Dmitry Safonov2e32b942015-04-15 16:14:59 -070050 mutex_unlock(&cma->lock);
51 *val = (u64)used << cma->order_per_bit;
52
53 return 0;
54}
Dmitry Safonov2e32b942015-04-15 16:14:59 -070055DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops, cma_used_get, NULL, "%llu\n");
56
57static int cma_maxchunk_get(void *data, u64 *val)
58{
59 struct cma *cma = data;
60 unsigned long maxchunk = 0;
61 unsigned long start, end = 0;
Joonsoo Kimd56e84b2015-07-17 16:24:23 -070062 unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
Dmitry Safonov2e32b942015-04-15 16:14:59 -070063
64 mutex_lock(&cma->lock);
65 for (;;) {
Joonsoo Kimd56e84b2015-07-17 16:24:23 -070066 start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
Yue Hu13e1ea02019-05-13 17:16:37 -070067 if (start >= bitmap_maxno)
Dmitry Safonov2e32b942015-04-15 16:14:59 -070068 break;
Joonsoo Kimd56e84b2015-07-17 16:24:23 -070069 end = find_next_bit(cma->bitmap, bitmap_maxno, start);
Dmitry Safonov2e32b942015-04-15 16:14:59 -070070 maxchunk = max(end - start, maxchunk);
71 }
72 mutex_unlock(&cma->lock);
73 *val = (u64)maxchunk << cma->order_per_bit;
74
75 return 0;
76}
Dmitry Safonov2e32b942015-04-15 16:14:59 -070077DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops, cma_maxchunk_get, NULL, "%llu\n");
78
Prakash Gupta76adf142019-07-23 16:46:41 +053079#ifdef CONFIG_CMA_ALLOW_WRITE_DEBUGFS
Sasha Levin26b02a12015-04-14 15:44:59 -070080static void cma_add_to_cma_mem_list(struct cma *cma, struct cma_mem *mem)
81{
82 spin_lock(&cma->mem_head_lock);
83 hlist_add_head(&mem->node, &cma->mem_head);
84 spin_unlock(&cma->mem_head_lock);
85}
86
Sasha Levin83253302015-04-14 15:45:02 -070087static struct cma_mem *cma_get_entry_from_list(struct cma *cma)
88{
89 struct cma_mem *mem = NULL;
90
91 spin_lock(&cma->mem_head_lock);
92 if (!hlist_empty(&cma->mem_head)) {
93 mem = hlist_entry(cma->mem_head.first, struct cma_mem, node);
94 hlist_del_init(&mem->node);
95 }
96 spin_unlock(&cma->mem_head_lock);
97
98 return mem;
99}
100
/*
 * Release up to @count pages back to @cma from the allocations recorded
 * on its debugfs list.  Whole recorded blocks are freed when they fit in
 * the remaining budget; a larger block is split only when
 * order_per_bit == 0, because a multi-page bitmap granule cannot be
 * released partially.  Always returns 0.
 *
 * NOTE(review): mem->n (unsigned long) is compared against count (int);
 * callers must pass a non-negative count or the promotion to unsigned
 * makes the comparison misbehave — confirm at the call sites.
 */
static int cma_free_mem(struct cma *cma, int count)
{
	struct cma_mem *mem = NULL;

	while (count) {
		mem = cma_get_entry_from_list(cma);
		if (mem == NULL)
			return 0;

		if (mem->n <= count) {
			/* whole block fits in the remaining budget */
			cma_release(cma, mem->p, mem->n);
			count -= mem->n;
			kfree(mem);
		} else if (cma->order_per_bit == 0) {
			/* free the first @count pages, keep the remainder listed */
			cma_release(cma, mem->p, count);
			mem->p += count;
			mem->n -= count;
			count = 0;
			cma_add_to_cma_mem_list(cma, mem);
		} else {
			/* cannot split a granule: put the block back and stop */
			pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
			cma_add_to_cma_mem_list(cma, mem);
			break;
		}
	}

	return 0;

}
130
131static int cma_free_write(void *data, u64 val)
132{
133 int pages = val;
134 struct cma *cma = data;
135
136 return cma_free_mem(cma, pages);
137}
Prakash Gupta76adf142019-07-23 16:46:41 +0530138#else
139#define cma_free_write NULL
140#endif
141
Sasha Levin83253302015-04-14 15:45:02 -0700142DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops, NULL, cma_free_write, "%llu\n");
143
Prakash Gupta76adf142019-07-23 16:46:41 +0530144#ifdef CONFIG_CMA_ALLOW_WRITE_DEBUGFS
Sasha Levin26b02a12015-04-14 15:44:59 -0700145static int cma_alloc_mem(struct cma *cma, int count)
146{
147 struct cma_mem *mem;
148 struct page *p;
149
150 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
151 if (!mem)
152 return -ENOMEM;
153
Marek Szyprowski65182022018-08-17 15:48:57 -0700154 p = cma_alloc(cma, count, 0, false);
Sasha Levin26b02a12015-04-14 15:44:59 -0700155 if (!p) {
156 kfree(mem);
157 return -ENOMEM;
158 }
159
160 mem->p = p;
161 mem->n = count;
162
163 cma_add_to_cma_mem_list(cma, mem);
164
165 return 0;
166}
167
168static int cma_alloc_write(void *data, u64 val)
169{
170 int pages = val;
171 struct cma *cma = data;
172
173 return cma_alloc_mem(cma, pages);
174}
Prakash Gupta76adf142019-07-23 16:46:41 +0530175#else
176#define cma_alloc_write NULL
177#endif
178
Sasha Levin26b02a12015-04-14 15:44:59 -0700179DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
180
Sasha Levin28b24c12015-04-14 15:44:57 -0700181static void cma_debugfs_add_one(struct cma *cma, int idx)
182{
183 struct dentry *tmp;
184 char name[16];
185 int u32s;
186
Prakash Guptada094e42017-08-18 15:16:21 -0700187 scnprintf(name, sizeof(name), "cma-%s", cma->name);
Sasha Levin28b24c12015-04-14 15:44:57 -0700188
189 tmp = debugfs_create_dir(name, cma_debugfs_root);
190
Joe Perches0825a6f2018-06-14 15:27:58 -0700191 debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
192 debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
193 debugfs_create_file("base_pfn", 0444, tmp,
194 &cma->base_pfn, &cma_debugfs_fops);
Prakash Gupta71d6cbd2019-08-06 12:30:09 +0530195
196 debugfs_create_ulong("count", 0444, tmp, &cma->count);
197 debugfs_create_u32("order_per_bit", 0444, tmp,
198 (u32 *)&cma->order_per_bit);
Joe Perches0825a6f2018-06-14 15:27:58 -0700199 debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
200 debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);
Sasha Levin28b24c12015-04-14 15:44:57 -0700201
202 u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
Joe Perches0825a6f2018-06-14 15:27:58 -0700203 debugfs_create_u32_array("bitmap", 0444, tmp, (u32 *)cma->bitmap, u32s);
Sasha Levin28b24c12015-04-14 15:44:57 -0700204}
205
206static int __init cma_debugfs_init(void)
207{
208 int i;
209
210 cma_debugfs_root = debugfs_create_dir("cma", NULL);
211 if (!cma_debugfs_root)
212 return -ENOMEM;
213
214 for (i = 0; i < cma_area_count; i++)
215 cma_debugfs_add_one(&cma_areas[i], i);
216
217 return 0;
218}
219late_initcall(cma_debugfs_init);