/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = kvmalloc_array(ttm->num_pages, sizeof(void*),
				    GFP_KERNEL | __GFP_ZERO);
	if (!ttm->pages)
		return -ENOMEM;
	return 0;
}

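/*
 * Allocates storage for both the page pointers and the DMA addresses
 * backing a DMA-capable ttm; both arrays live in a single allocation,
 * with the DMA addresses placed directly after the page pointers.
 */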
static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
					sizeof(*ttm->ttm.pages) +
					sizeof(*ttm->dma_address),
					GFP_KERNEL | __GFP_ZERO);
	if (!ttm->ttm.pages)
		return -ENOMEM;
	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
	return 0;
}

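/*
 * Change the caching attribute of a single page in the kernel linear
 * map. Only implemented for x86; a no-op on other architectures.
 */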
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

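/*
 * Translate the caching flags of a placement into a ttm_caching_state
 * and apply it to the ttm.
 */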
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

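/*
 * Unbind and unpopulate the ttm, release any non-persistent swap
 * storage, and finally call the backend destroy callback.
 */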
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return;

	ttm_tt_unbind(ttm);

	if (ttm->state == tt_unbound)
		ttm_tt_unpopulate(ttm);

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	ttm->swap_storage = NULL;
	ttm->func->destroy(ttm);
}

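/*
 * Initialize a ttm_tt and allocate its page directory. On allocation
 * failure the ttm is destroyed and -ENOMEM is returned.
 */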
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
		unsigned long size, uint32_t page_flags)
{
	ttm->bdev = bdev;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	if (ttm_tt_alloc_page_directory(ttm)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_tt_init);

void ttm_tt_fini(struct ttm_tt *ttm)
{
	kvfree(ttm->pages);
	ttm->pages = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);

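/*
 * Initialize a ttm_dma_tt, allocating its combined page pointer and
 * DMA address directory. On allocation failure the ttm is destroyed
 * and -ENOMEM is returned.
 */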
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
		    unsigned long size, uint32_t page_flags)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	ttm->bdev = bdev;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;
	ttm->state = tt_unpopulated;
	ttm->swap_storage = NULL;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
		ttm_tt_destroy(ttm);
		pr_err("Failed allocating page table\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_dma_tt_init);

void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;

	kvfree(ttm->pages);
	ttm->pages = NULL;
	ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);

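/* Unbind the ttm through the backend unbind callback if it is currently bound. */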
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_bound) {
		ret = ttm->func->unbind(ttm);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

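/*
 * Populate the ttm if necessary and bind it to the given memory region
 * through the backend bind callback.
 */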
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
		struct ttm_operation_ctx *ctx)
{
	int ret = 0;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	ret = ttm_tt_populate(ttm, ctx);
	if (ret)
		return ret;

	ret = ttm->func->bind(ttm, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

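/*
 * Copy the contents of the shmem swap storage back into the ttm pages,
 * then drop the swap storage unless it is persistent.
 */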
int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);

		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL))
			goto out_err;

		copy_highpage(to_page, from_page);
		put_page(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	return ret;
}

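/*
 * Copy the ttm pages out to shmem-backed swap storage, allocating a new
 * shmem file unless persistent storage is supplied, then unpopulate the
 * ttm and mark it as swapped out.
 */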
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (IS_ERR(swap_storage)) {
			pr_err("Failed allocating swap storage\n");
			return PTR_ERR(swap_storage);
		}
	} else {
		swap_storage = persistent_swap_storage;
	}

	swap_space = swap_storage->f_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		gfp_t gfp_mask = mapping_gfp_mask(swap_space);

		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);

		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;

		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
		if (IS_ERR(to_page)) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		copy_highpage(to_page, from_page);
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		put_page(to_page);
	}

	ttm_tt_unpopulate(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}

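/* Point the pages of a non-SG ttm at the device's address space. */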
static void ttm_tt_add_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = ttm->bdev->dev_mapping;
}

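/*
 * Allocate the backing pages, preferring the driver's ttm_tt_populate
 * callback and falling back to the generic page pool, then attach the
 * pages to the device's address space.
 */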
int ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm->bdev->driver->ttm_tt_populate)
		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
	else
		ret = ttm_pool_populate(ttm, ctx);
	if (!ret)
		ttm_tt_add_mapping(ttm);
	return ret;
}

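/* Clear the mapping and index of each page of a non-SG ttm. */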
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
{
	pgoff_t i;
	struct page **page = ttm->pages;

	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
		return;

	for (i = 0; i < ttm->num_pages; ++i) {
		(*page)->mapping = NULL;
		(*page++)->index = 0;
	}
}

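/*
 * Free the backing pages through the driver's ttm_tt_unpopulate
 * callback or the generic page pool.
 */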
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm_tt_clear_mapping(ttm);
	if (ttm->bdev->driver->ttm_tt_unpopulate)
		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
	else
		ttm_pool_unpopulate(ttm);
}