/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

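/*
 * ttm_memory.c -- global TTM memory accounting.
 *
 * Allocations are tracked per "zone" (kernel, plus either highmem or
 * dma32 depending on the kernel configuration). Each zone carries a
 * soft limit, an emergency limit and a swap threshold; when usage
 * crosses the threshold, buffer objects are swapped out from a
 * workqueue. The zone limits are tunable through sysfs.
 */
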
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

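/*
 * struct ttm_mem_zone - accounting state for one memory zone.
 * @kobj:       sysfs anchor for the per-zone attributes below.
 * @glob:       backpointer to the owning ttm_mem_global.
 * @name:       zone name ("kernel", "highmem" or "dma32").
 * @zone_mem:   total memory in this zone, in bytes.
 * @emer_mem:   hard limit; only CAP_SYS_ADMIN may allocate beyond
 *              max_mem, up to this value.
 * @max_mem:    soft limit for ordinary allocations.
 * @swap_limit: usage level at which the swap worker is kicked.
 * @used_mem:   memory currently accounted against this zone.
 */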
struct ttm_mem_zone {
        struct kobject kobj;
        struct ttm_mem_global *glob;
        const char *name;
        uint64_t zone_mem;
        uint64_t emer_mem;
        uint64_t max_mem;
        uint64_t swap_limit;
        uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
        .name = "zone_memory",
        .mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
        .name = "emergency_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
        .name = "available_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
        .name = "swap_limit",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
        .name = "used_memory",
        .mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);

        pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
                zone->name, (unsigned long long)zone->used_mem >> 10);
        kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
                                 struct attribute *attr,
                                 char *buffer)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        uint64_t val = 0;

        spin_lock(&zone->glob->lock);
        if (attr == &ttm_mem_sys)
                val = zone->zone_mem;
        else if (attr == &ttm_mem_emer)
                val = zone->emer_mem;
        else if (attr == &ttm_mem_max)
                val = zone->max_mem;
        else if (attr == &ttm_mem_swap)
                val = zone->swap_limit;
        else if (attr == &ttm_mem_used)
                val = zone->used_mem;
        spin_unlock(&zone->glob->lock);

        return snprintf(buffer, PAGE_SIZE, "%llu\n",
                        (unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
                                  struct attribute *attr,
                                  const char *buffer,
                                  size_t size)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        int chars;
        unsigned long val;
        uint64_t val64;

        chars = sscanf(buffer, "%lu", &val);
        if (chars == 0)
                return size;

        val64 = val;
        val64 <<= 10;

        spin_lock(&zone->glob->lock);
        if (val64 > zone->zone_mem)
                val64 = zone->zone_mem;
        if (attr == &ttm_mem_emer) {
                zone->emer_mem = val64;
                if (zone->max_mem > val64)
                        zone->max_mem = val64;
        } else if (attr == &ttm_mem_max) {
                zone->max_mem = val64;
                if (zone->emer_mem < val64)
                        zone->emer_mem = val64;
        } else if (attr == &ttm_mem_swap)
                zone->swap_limit = val64;
        spin_unlock(&zone->glob->lock);

        ttm_check_swapping(zone->glob);

        return size;
}
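
/*
 * Example interaction with the attributes above (the sysfs path is
 * illustrative; the parent directory comes from ttm_get_kobj() and the
 * "memory_accounting" kobject registered in ttm_mem_global_init()):
 *
 *   # cat .../memory_accounting/kernel/available_memory
 *   # echo 2097152 > .../memory_accounting/kernel/swap_limit
 *
 * All values are in KiB. Writes are clamped to the zone size, and the
 * store path keeps max_mem <= emer_mem consistent.
 */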

static struct attribute *ttm_mem_zone_attrs[] = {
        &ttm_mem_sys,
        &ttm_mem_emer,
        &ttm_mem_max,
        &ttm_mem_swap,
        &ttm_mem_used,
        NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
        .show = &ttm_mem_zone_show,
        .store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
        .release = &ttm_mem_zone_kobj_release,
        .sysfs_ops = &ttm_mem_zone_ops,
        .default_attrs = ttm_mem_zone_attrs,
};

static struct attribute ttm_mem_global_lower_mem_limit = {
        .name = "lower_mem_limit",
        .mode = S_IRUGO | S_IWUSR
};

static ssize_t ttm_mem_global_show(struct kobject *kobj,
                                   struct attribute *attr,
                                   char *buffer)
{
        struct ttm_mem_global *glob =
                container_of(kobj, struct ttm_mem_global, kobj);
        uint64_t val = 0;

        spin_lock(&glob->lock);
        val = glob->lower_mem_limit;
        spin_unlock(&glob->lock);
        /* convert from number of pages to KB */
        val <<= (PAGE_SHIFT - 10);
        return snprintf(buffer, PAGE_SIZE, "%llu\n",
                        (unsigned long long) val);
}

static ssize_t ttm_mem_global_store(struct kobject *kobj,
                                    struct attribute *attr,
                                    const char *buffer,
                                    size_t size)
{
        int chars;
        uint64_t val64;
        unsigned long val;
        struct ttm_mem_global *glob =
                container_of(kobj, struct ttm_mem_global, kobj);

        chars = sscanf(buffer, "%lu", &val);
        if (chars == 0)
                return size;

        val64 = val;
        /* convert from KB to number of pages */
        val64 >>= (PAGE_SHIFT - 10);

        spin_lock(&glob->lock);
        glob->lower_mem_limit = val64;
        spin_unlock(&glob->lock);

        return size;
}

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_global *glob =
                container_of(kobj, struct ttm_mem_global, kobj);

        kfree(glob);
}

static struct attribute *ttm_mem_global_attrs[] = {
        &ttm_mem_global_lower_mem_limit,
        NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
        .show = &ttm_mem_global_show,
        .store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
        .release = &ttm_mem_global_kobj_release,
        .sysfs_ops = &ttm_mem_global_ops,
        .default_attrs = ttm_mem_global_attrs,
};

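/*
 * The swap target depends on who is asking: the background worker swaps
 * until usage drops below swap_limit, a CAP_SYS_ADMIN caller on the
 * allocation path is allowed up to emer_mem, and everyone else is held
 * to max_mem. "extra" is the amount about to be allocated on top of the
 * current usage.
 */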
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
                                        bool from_wq, uint64_t extra)
{
        unsigned int i;
        struct ttm_mem_zone *zone;
        uint64_t target;

        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];

                if (from_wq)
                        target = zone->swap_limit;
                else if (capable(CAP_SYS_ADMIN))
                        target = zone->emer_mem;
                else
                        target = zone->max_mem;

                target = (extra > target) ? 0ULL : target;

                if (zone->used_mem > target)
                        return true;
        }
        return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
                       uint64_t extra, struct ttm_operation_ctx *ctx)
{
        int ret;

        spin_lock(&glob->lock);

        while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                spin_unlock(&glob->lock);
                ret = ttm_bo_swapout(glob->bo_glob, ctx);
                spin_lock(&glob->lock);
                if (unlikely(ret != 0))
                        break;
        }

        spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_mem_global *glob =
            container_of(work, struct ttm_mem_global, work);

        ttm_shrink(glob, true, 0ULL, &ctx);
}

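/*
 * Zone initialization. The defaults below put the soft limit at 50% of
 * zone memory, the emergency limit at 75%, and the swap threshold at
 * 37.5% (max_mem minus one eighth of the zone).
 */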
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
                                    const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
        int ret;

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram - si->totalhigh;
        mem *= si->mem_unit;

        zone->name = "kernel";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_kernel = zone;
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
                                     const struct sysinfo *si)
{
        struct ttm_mem_zone *zone;
        uint64_t mem;
        int ret;

        if (si->totalhigh == 0)
                return 0;

        zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        zone->name = "highmem";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_highmem = zone;
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
                zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
                                   const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
        int ret;

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        /**
         * No special dma32 zone needed.
         */

        if (mem <= ((uint64_t) 1ULL << 32)) {
                kfree(zone);
                return 0;
        }

        /*
         * Limit max dma32 memory to 4GB for now
         * until we can figure out how big this
         * zone really is.
         */

        mem = ((uint64_t) 1ULL << 32);
        zone->name = "dma32";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_dma32 = zone;
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#endif

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
        struct sysinfo si;
        int ret;
        int i;
        struct ttm_mem_zone *zone;

        spin_lock_init(&glob->lock);
        glob->swap_queue = create_singlethread_workqueue("ttm_swap");
        INIT_WORK(&glob->work, ttm_shrink_work);
        ret = kobject_init_and_add(
                &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(),
                "memory_accounting");
        if (unlikely(ret != 0)) {
                kobject_put(&glob->kobj);
                return ret;
        }

        si_meminfo(&si);

        /* set it as 0 by default to keep original behavior of OOM */
        glob->lower_mem_limit = 0;

        ret = ttm_mem_init_kernel_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#ifdef CONFIG_HIGHMEM
        ret = ttm_mem_init_highmem_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#else
        ret = ttm_mem_init_dma32_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#endif
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
                        zone->name, (unsigned long long)zone->max_mem >> 10);
        }
        ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        return 0;
out_no_zone:
        ttm_mem_global_release(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
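
/*
 * Drivers of this era typically reach ttm_mem_global_init() through the
 * drm_global machinery rather than calling it directly. A minimal sketch,
 * assuming the pre-5.0 drm_global_reference API (the driver_* wrapper
 * names are hypothetical):
 *
 *	static int driver_ttm_mem_global_init(struct drm_global_reference *ref)
 *	{
 *		return ttm_mem_global_init(ref->object);
 *	}
 *
 *	struct drm_global_reference ref = {
 *		.global_type = DRM_GLOBAL_TTM_MEM,
 *		.size = sizeof(struct ttm_mem_global),
 *		.init = &driver_ttm_mem_global_init,
 *		.release = &driver_ttm_mem_global_release,
 *	};
 *	ret = drm_global_item_ref(&ref);
 */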

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        /* let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
        ttm_dma_page_alloc_fini();

        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
        glob->swap_queue = NULL;
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
        }
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
        bool needs_swapping = false;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (zone->used_mem > zone->swap_limit) {
                        needs_swapping = true;
                        break;
                }
        }

        spin_unlock(&glob->lock);

        if (unlikely(needs_swapping))
                (void)queue_work(glob->swap_queue, &glob->work);

}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t amount)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;
                zone->used_mem -= amount;
        }
        spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
                         uint64_t amount)
{
        return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * Check whether allocating num_pages would pull the system below the
 * configured lower memory limit. The limit is checked against the sum
 * of free swap space and available system memory, so:
 *
 * a. with no swap disk at all, or with free swap space below the limit,
 *    the allocation is still allowed as long as available system memory
 *    makes up the difference;
 *
 * b. with available system memory below the limit, the allocation is
 *    still allowed as long as free swap space makes up the difference.
 *
 * Returns true if the allocation should be denied.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
                           uint64_t num_pages,
                           struct ttm_operation_ctx *ctx)
{
        int64_t available;

        if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
                return false;

        available = get_nr_swap_pages() + si_mem_available();
        available -= num_pages;
        if (available < glob->lower_mem_limit)
                return true;

        return false;
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);

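/*
 * Try to reserve @amount against every zone (or just @single_zone if
 * non-NULL). If any zone is already over its limit the reservation
 * fails; otherwise, with @reserve set, the amount is committed to each
 * zone's usage. Check and commit happen under one hold of the global
 * spinlock, so the reservation is all-or-nothing. CAP_SYS_ADMIN callers
 * are checked against the emergency limit rather than the soft limit.
 */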
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
                                  struct ttm_mem_zone *single_zone,
                                  uint64_t amount, bool reserve)
{
        uint64_t limit;
        int ret = -ENOMEM;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;

                limit = (capable(CAP_SYS_ADMIN)) ?
                        zone->emer_mem : zone->max_mem;

                if (zone->used_mem > limit)
                        goto out_unlock;
        }

        if (reserve) {
                for (i = 0; i < glob->num_zones; ++i) {
                        zone = glob->zones[i];
                        if (single_zone && zone != single_zone)
                                continue;
                        zone->used_mem += amount;
                }
        }

        ret = 0;
out_unlock:
        spin_unlock(&glob->lock);
        ttm_check_swapping(glob);

        return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t memory,
                                     struct ttm_operation_ctx *ctx)
{
        int count = TTM_MEMORY_ALLOC_RETRIES;

        while (unlikely(ttm_mem_global_reserve(glob,
                                               single_zone,
                                               memory, true)
                        != 0)) {
                if (ctx->no_wait_gpu)
                        return -ENOMEM;
                if (unlikely(count-- == 0))
                        return -ENOMEM;
                ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
        }

        return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
                         struct ttm_operation_ctx *ctx)
{
        /**
         * Normal allocations of kernel memory are registered in
         * all zones.
         */

        return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
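
/*
 * Callers pair ttm_mem_global_alloc() with a matching
 * ttm_mem_global_free() of the same size. A minimal sketch (the glob
 * pointer and size are placeholders):
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false
 *	};
 *	int ret = ttm_mem_global_alloc(glob, size, &ctx);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_mem_global_free(glob, size);
 */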

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
                              struct page *page, uint64_t size,
                              struct ttm_operation_ctx *ctx)
{
        struct ttm_mem_zone *zone = NULL;

        /**
         * Page allocations may be registered in a single zone
         * only if highmem or !dma32: highmem pages are accounted in
         * the highmem zone alone, and pages above the 4 GiB pfn
         * boundary (0x00100000 with 4 KiB pages) cannot belong to the
         * dma32 zone, so they are accounted in the kernel zone only.
         */

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
                              uint64_t size)
{
        struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        ttm_mem_global_free_zone(glob, zone, size);
}

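/*
 * Round a size for accounting purposes: a power of two is returned
 * unchanged, anything larger than PAGE_SIZE is rounded up to whole
 * pages, and the rest is rounded up to the next power of two (at
 * least 4). E.g. with 4 KiB pages: ttm_round_pot(100) == 128 and
 * ttm_round_pot(5000) == 8192.
 */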
size_t ttm_round_pot(size_t size)
{
        if ((size & (size - 1)) == 0)
                return size;
        else if (size > PAGE_SIZE)
                return PAGE_ALIGN(size);
        else {
                size_t tmp_size = 4;

                while (tmp_size < size)
                        tmp_size <<= 1;

                return tmp_size;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_round_pot);

uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
        return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);