/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

/**
 * Allocates a ttm structure for the given BO, deriving the page flags
 * from the device settings and the BO type.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        uint32_t page_flags = 0;

        reservation_object_assert_held(bo->resv);

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        if (bdev->no_retry)
                page_flags |= TTM_PAGE_FLAG_NO_RETRY;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                break;
        case ttm_bo_type_kernel:
                break;
        case ttm_bo_type_sg:
                page_flags |= TTM_PAGE_FLAG_SG;
                break;
        default:
                bo->ttm = NULL;
                pr_err("Illegal buffer object type\n");
                return -EINVAL;
        }

        bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                              page_flags);
        if (unlikely(bo->ttm == NULL))
                return -ENOMEM;

        if (bo->type == ttm_bo_type_sg)
                bo->ttm->sg = bo->sg;

        return 0;
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
        ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!ttm->pages)
                return -ENOMEM;
        return 0;
}

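/**
 * Allocates storage for both the page pointers and the DMA addresses of a
 * DMA-capable ttm in a single allocation, with the DMA address array
 * placed directly behind the page pointer array.
 */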
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
                                        sizeof(*ttm->ttm.pages) +
                                        sizeof(*ttm->dma_address),
                                        GFP_KERNEL | __GFP_ZERO);
        if (!ttm->ttm.pages)
                return -ENOMEM;
        ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
        return 0;
}

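/**
 * Allocates storage for the DMA addresses only; a ttm backed by a
 * scatter-gather table is expected to take its pages from the attached
 * SG table rather than from a separately allocated page array.
 */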
static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
        ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
                                          sizeof(*ttm->dma_address),
                                          GFP_KERNEL | __GFP_ZERO);
        if (!ttm->dma_address)
                return -ENOMEM;
        return 0;
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        int ret = 0;

        if (PageHighMem(p))
                return 0;

        if (c_old != tt_cached) {
                /* p isn't in the default caching state, set it to
                 * writeback first to free its current memtype. */

                ret = set_pages_wb(p, 1);
                if (ret)
                        return ret;
        }

        if (c_new == tt_wc)
                ret = set_memory_wc((unsigned long) page_address(p), 1);
        else if (c_new == tt_uncached)
                ret = set_pages_uc(p, 1);

        return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
                                          enum ttm_caching_state c_old,
                                          enum ttm_caching_state c_new)
{
        return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change the caching policy of the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        int i, j;
        struct page *cur_page;
        int ret;

        if (ttm->caching_state == c_state)
                return 0;

        if (ttm->state == tt_unpopulated) {
                /* Change caching but don't populate */
                ttm->caching_state = c_state;
                return 0;
        }

        if (ttm->caching_state == tt_cached)
                drm_clflush_pages(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages[i];
                if (likely(cur_page != NULL)) {
                        ret = ttm_tt_set_page_caching(cur_page,
                                                      ttm->caching_state,
                                                      c_state);
                        if (unlikely(ret != 0))
                                goto out_err;
                }
        }

        ttm->caching_state = c_state;

        return 0;

out_err:
        for (j = 0; j < i; ++j) {
                cur_page = ttm->pages[j];
                if (likely(cur_page != NULL)) {
                        (void)ttm_tt_set_page_caching(cur_page, c_state,
                                                      ttm->caching_state);
                }
        }

        return ret;
}

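/**
 * Derives the caching state from the given placement flags and applies it
 * to the ttm.
 */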
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
        enum ttm_caching_state state;

        if (placement & TTM_PL_FLAG_WC)
                state = tt_wc;
        else if (placement & TTM_PL_FLAG_UNCACHED)
                state = tt_uncached;
        else
                state = tt_cached;

        return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

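/**
 * Unbinds and unpopulates the ttm, drops non-persistent swap storage and
 * finally destroys the ttm through the backend destroy callback.
 */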
void ttm_tt_destroy(struct ttm_tt *ttm)
{
        if (ttm == NULL)
                return;

        ttm_tt_unbind(ttm);

        if (ttm->state == tt_unbound)
                ttm_tt_unpopulate(ttm);

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
            ttm->swap_storage)
                fput(ttm->swap_storage);

        ttm->swap_storage = NULL;
        ttm->func->destroy(ttm);
}

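/**
 * Initializes the fields common to all ttm variants. Does not allocate
 * the page directory.
 */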
void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                        unsigned long size, uint32_t page_flags)
{
        ttm->bdev = bdev;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->caching_state = tt_cached;
        ttm->page_flags = page_flags;
        ttm->state = tt_unpopulated;
        ttm->swap_storage = NULL;
}

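/**
 * Initializes a ttm and allocates its page directory. Note that the
 * error path calls ttm_tt_destroy(), so ttm->func must already be set
 * up by the caller.
 */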
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
                unsigned long size, uint32_t page_flags)
{
        ttm_tt_init_fields(ttm, bdev, size, page_flags);

        if (ttm_tt_alloc_page_directory(ttm)) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
        kvfree(ttm->pages);
        ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

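/**
 * Initializes a DMA-capable ttm, allocating page pointer and DMA address
 * storage in a single allocation.
 */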
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                    unsigned long size, uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        ttm_tt_init_fields(ttm, bdev, size, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

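/**
 * Initializes a ttm that may be backed by a scatter-gather table. With
 * TTM_PAGE_FLAG_SG set only the DMA address array is allocated; otherwise
 * this behaves like ttm_dma_tt_init().
 */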
int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
                   unsigned long size, uint32_t page_flags)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;
        int ret;

        ttm_tt_init_fields(ttm, bdev, size, page_flags);

        INIT_LIST_HEAD(&ttm_dma->pages_list);
        if (page_flags & TTM_PAGE_FLAG_SG)
                ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
        else
                ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
        if (ret) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_sg_tt_init);

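/**
 * Frees the page directory of a DMA-capable ttm. When ttm->pages is set,
 * the DMA address array shares its allocation (see
 * ttm_dma_tt_alloc_page_directory()), so freeing the page array releases
 * both; otherwise only the separately allocated dma_address array is freed.
 */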
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
        struct ttm_tt *ttm = &ttm_dma->ttm;

        if (ttm->pages)
                kvfree(ttm->pages);
        else
                kvfree(ttm_dma->dma_address);
        ttm->pages = NULL;
        ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

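/**
 * Unbinds the ttm through the backend unbind callback if it is currently
 * bound.
 */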
void ttm_tt_unbind(struct ttm_tt *ttm)
{
        int ret;

        if (ttm->state == tt_bound) {
                ret = ttm->func->unbind(ttm);
                BUG_ON(ret);
                ttm->state = tt_unbound;
        }
}

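/**
 * Populates the ttm if needed and binds it to the given memory region
 * through the backend bind callback.
 */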
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
                struct ttm_operation_ctx *ctx)
{
        int ret = 0;

        if (!ttm)
                return -EINVAL;

        if (ttm->state == tt_bound)
                return 0;

        ret = ttm_tt_populate(ttm, ctx);
        if (ret)
                return ret;

        ret = ttm->func->bind(ttm, bo_mem);
        if (unlikely(ret != 0))
                return ret;

        ttm->state = tt_bound;

        return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

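/**
 * Copies the ttm's pages back in from its shmem swap storage, then drops
 * the storage unless it is persistent.
 */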
int ttm_tt_swapin(struct ttm_tt *ttm)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        swap_storage = ttm->swap_storage;
        BUG_ON(swap_storage == NULL);

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
                from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

                if (IS_ERR(from_page)) {
                        ret = PTR_ERR(from_page);
                        goto out_err;
                }
                to_page = ttm->pages[i];
                if (unlikely(to_page == NULL))
                        goto out_err;

                copy_highpage(to_page, from_page);
                put_page(from_page);
        }

        if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
                fput(swap_storage);
        ttm->swap_storage = NULL;
        ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

        return 0;

out_err:
        return ret;
}

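/**
 * Copies the ttm's pages out to shmem swap storage, allocating that
 * storage first unless a persistent file was supplied, and unpopulates
 * the ttm. Expects an unbound ttm in the default (cached) state.
 */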
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
        struct address_space *swap_space;
        struct file *swap_storage;
        struct page *from_page;
        struct page *to_page;
        int i;
        int ret = -ENOMEM;

        BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
        BUG_ON(ttm->caching_state != tt_cached);

        if (!persistent_swap_storage) {
                swap_storage = shmem_file_setup("ttm swap",
                                                ttm->num_pages << PAGE_SHIFT,
                                                0);
                if (IS_ERR(swap_storage)) {
                        pr_err("Failed allocating swap storage\n");
                        return PTR_ERR(swap_storage);
                }
        } else {
                swap_storage = persistent_swap_storage;
        }

        swap_space = swap_storage->f_mapping;

        for (i = 0; i < ttm->num_pages; ++i) {
                gfp_t gfp_mask = mapping_gfp_mask(swap_space);

                gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

                from_page = ttm->pages[i];
                if (unlikely(from_page == NULL))
                        continue;

                to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
                if (IS_ERR(to_page)) {
                        ret = PTR_ERR(to_page);
                        goto out_err;
                }
                copy_highpage(to_page, from_page);
                set_page_dirty(to_page);
                mark_page_accessed(to_page);
                put_page(to_page);
        }

        ttm_tt_unpopulate(ttm);
        ttm->swap_storage = swap_storage;
        ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
        if (persistent_swap_storage)
                ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

        return 0;

out_err:
        if (!persistent_swap_storage)
                fput(swap_storage);

        return ret;
}

static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
        pgoff_t i;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i)
                ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}

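/**
 * Populates the ttm's pages, using the driver's populate callback when one
 * is provided and the generic page pool otherwise, and then points the
 * pages' mapping at the device address space.
 */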
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
        int ret;

        if (ttm->state != tt_unpopulated)
                return 0;

        if (ttm->bdev->driver->ttm_tt_populate)
                ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
        else
                ret = ttm_pool_populate(ttm, ctx);
        if (!ret)
                ttm_tt_add_mapping(ttm);
        return ret;
}

static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
        pgoff_t i;
        struct page **page = ttm->pages;

        if (ttm->page_flags & TTM_PAGE_FLAG_SG)
                return;

        for (i = 0; i < ttm->num_pages; ++i) {
                (*page)->mapping = NULL;
                (*page++)->index = 0;
        }
}

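/**
 * Releases the ttm's pages, clearing their mapping first, through the
 * driver's unpopulate callback when one is provided and the generic page
 * pool otherwise.
 */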
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        if (ttm->state == tt_unpopulated)
                return;

        ttm_tt_clear_mapping(ttm);
        if (ttm->bdev->driver->ttm_tt_unpopulate)
                ttm->bdev->driver->ttm_tt_unpopulate(ttm);
        else
                ttm_pool_unpopulate(ttm);
}