/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
	ttm->dma_address = drm_calloc_large(ttm->num_pages,
					    sizeof(*ttm->dma_address));
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
	drm_free_large(ttm->dma_address);
	ttm->dma_address = NULL;
}

static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct list_head h;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {

		INIT_LIST_HEAD(&h);

		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
				    &ttm->dma_address[index]);

		if (ret != 0)
			return NULL;

		p = list_first_entry(&h, struct page, lru);

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

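/*
 * Public wrapper around __ttm_tt_get_page() that first swaps the ttm back
 * in if its pages currently live in swap storage.
 */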
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

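/*
 * Populate the whole ttm: swap in if necessary, allocate every backing
 * page and hand the page array to the backend, leaving the ttm in the
 * tt_unbound state.
 */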
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page, ttm->dma_address);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

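/*
 * On x86 the linear kernel mapping of a page must pass through writeback
 * before it can be given a new memtype (write-combined or uncached);
 * highmem pages have no permanent linear mapping and are skipped. On
 * other architectures this is a no-op.
 */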
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (ttm->state == tt_unpopulated) {
		/* Change caching but don't populate */
		ttm->caching_state = c_state;
		return 0;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

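/*
 * Translate TTM placement caching flags into a ttm_caching_state and
 * apply it to the ttm.
 */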
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

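/*
 * Give all allocated pages back to the page pool: clear the backend,
 * release the global memory accounting for each page, return the pages
 * through ttm_put_pages() and reset the himem/lomem bookkeeping. A page
 * that still has extra references is reported as leaked.
 */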
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	unsigned count = 0;
	struct list_head h;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	INIT_LIST_HEAD(&h);

	if (be)
		be->func->clear(be);
	for (i = 0; i < ttm->num_pages; ++i) {

		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			list_add(&cur_page->lru, &h);
			count++;
		}
	}
	ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
		      ttm->dma_address);
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

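/*
 * Full teardown of a ttm: destroy the backend, release the pages and the
 * page directory, and drop any non-persistent swap storage.
 */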
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

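/*
 * Allocate and initialize a struct ttm_tt large enough to back @size
 * bytes, including its page directory and driver backend. The backing
 * pages themselves are allocated lazily, so the ttm starts out
 * tt_unpopulated.
 */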
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

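/*
 * Unbind the ttm from its backend if it is currently bound.
 */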
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

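/*
 * Populate the ttm if necessary and bind it to the given memory region
 * through the backend.
 */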
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (unlikely(ret != 0))
		return ret;

	ttm->state = tt_bound;

	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

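/*
 * Copy the ttm's pages back in from its shmem swap storage. On success
 * the storage is dropped (unless it is persistent) and the swapped flag
 * is cleared; on failure the partially allocated pages are freed again.
 */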
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = shmem_read_mapping_page(swap_space, i);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

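/*
 * Copy the ttm's pages out to shmem-backed swap storage, either a freshly
 * created shmem file or the supplied persistent one, then free the
 * backing pages and mark the ttm as swapped.
 */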
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	if (!persistent_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistent_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = shmem_read_mapping_page(swap_space, i);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistent_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;

	return 0;
out_err:
	if (!persistent_swap_storage)
		fput(swap_storage);

	return ret;
}