/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

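/*
 * struct ttm_mem_zone - accounting state for one memory zone (an
 * editor's summary of how the fields are used below).
 *
 * @kobj:       sysfs object exposing the zone under memory_accounting/.
 * @glob:       backpointer to the owning ttm_mem_global.
 * @name:       zone name ("kernel", "highmem" or "dma32").
 * @zone_mem:   total size of the zone in bytes.
 * @emer_mem:   hard allocation limit, available only to CAP_SYS_ADMIN.
 * @max_mem:    soft allocation limit for ordinary callers.
 * @swap_limit: usage level above which the swap worker is queued.
 * @used_mem:   bytes currently accounted to this zone.
 */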
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

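/*
 * All five attributes read and write kiB, not bytes: show() below
 * shifts the byte counters right by 10 and store() shifts user input
 * left by 10. A usage sketch (the exact sysfs path depends on where
 * ttm_get_kobj() anchors the "memory_accounting" directory):
 *
 *	$ cat  .../memory_accounting/kernel/used_memory
 *	$ echo 524288 > .../memory_accounting/kernel/swap_limit   # 512 MiB
 */
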
static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

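/*
 * Writes are parsed as kiB, clamped to the zone size, and kept
 * consistent: lowering emergency_memory pulls available_memory down
 * with it, and raising available_memory pushes emergency_memory up, so
 * max_mem <= emer_mem always holds. A write that fails to parse is
 * silently accepted.
 */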
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
};

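/*
 * Pick the per-zone threshold to compare usage against: the swap
 * worker uses swap_limit, CAP_SYS_ADMIN callers get the emergency
 * limit, everyone else the ordinary limit. Returns true as soon as any
 * zone is above its threshold.
 */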
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		/*
		 * If the extra amount to be allocated already exceeds
		 * the target, clamp the target to zero so that any
		 * nonzero usage counts as being above it.
		 */
		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/*
 * Swap out buffer objects until no zone is above its target. Swapping
 * is delegated to ttm_bo_swapout(); extend this if multiple shrink
 * paths are ever needed. Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		spin_unlock(&glob->lock);
		ret = ttm_bo_swapout(glob->bo_glob, ctx);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			break;
	}

	spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
		container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

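/*
 * Each zone initializer below carves the same default limits out of
 * the zone size: max_mem is 50%, emer_mem 75%, and swap_limit 37.5%
 * (50% minus one eighth). For an 8 GiB kernel zone, for example, that
 * is 4 GiB / 6 GiB / 3 GiB respectively.
 */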
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/*
	 * No special dma32 zone is needed if all of memory is
	 * addressable with 32 bits.
	 */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(),
		"memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

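/*
 * Reserve @memory bytes, swapping buffers out to make room when the
 * reservation would go over a limit. Each failed attempt asks
 * ttm_shrink() to free the request plus 25% and 16 bytes of headroom,
 * and the loop gives up after TTM_MEMORY_ALLOC_RETRIES attempts (or
 * immediately when the context forbids waiting on the GPU).
 */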
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */
	return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

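/*
 * A minimal caller sketch (error handling trimmed; the accounted size
 * is normally rounded with ttm_round_pot() first):
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true,
 *					 .no_wait_gpu = false };
 *	uint64_t acc_size = ttm_round_pot(size);
 *
 *	ret = ttm_mem_global_alloc(glob, acc_size, &ctx);
 *	if (unlikely(ret))
 *		return ret;
 *	...
 *	ttm_mem_global_free(glob, acc_size);
 */
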
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	/* pfn 0x00100000 is the 4 GiB boundary with 4 KiB pages. */
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}

size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_round_pot);

uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
	return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);