// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/ceph/pagelist.h>

/* Unmap the kmap()ed tail page, if there is one. */
static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
{
        if (pl->mapped_tail) {
                struct page *page = list_entry(pl->head.prev, struct page, lru);
                kunmap(page);
                pl->mapped_tail = NULL;
        }
}

/* Drop a reference; on the last put, free every page and the pagelist itself. */
void ceph_pagelist_release(struct ceph_pagelist *pl)
{
        if (!refcount_dec_and_test(&pl->refcnt))
                return;
        ceph_pagelist_unmap_tail(pl);
        while (!list_empty(&pl->head)) {
                struct page *page = list_first_entry(&pl->head, struct page,
                                                     lru);
                list_del(&page->lru);
                __free_page(page);
        }
        ceph_pagelist_free_reserve(pl);
        kfree(pl);
}
EXPORT_SYMBOL(ceph_pagelist_release);

/* Add one page to the tail of the pagelist: take a page from the reserve
 * if one is available, otherwise allocate, and kmap it as the new mapped tail.
 */
static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
{
        struct page *page;

        if (!pl->num_pages_free) {
                page = __page_cache_alloc(GFP_NOFS);
        } else {
                page = list_first_entry(&pl->free_list, struct page, lru);
                list_del(&page->lru);
                --pl->num_pages_free;
        }
        if (!page)
                return -ENOMEM;
        pl->room += PAGE_SIZE;
        ceph_pagelist_unmap_tail(pl);
        list_add_tail(&page->lru, &pl->head);
        pl->mapped_tail = kmap(page);
        return 0;
}

/* Append len bytes from buf to the tail of the pagelist, adding pages
 * as needed.
 */
int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
{
        while (pl->room < len) {
                size_t bit = pl->room;
                int ret;

                memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK),
                       buf, bit);
                pl->length += bit;
                pl->room -= bit;
                buf += bit;
                len -= bit;
                ret = ceph_pagelist_addpage(pl);
                if (ret)
                        return ret;
        }

        memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len);
        pl->length += len;
        pl->room -= len;
        return 0;
}
EXPORT_SYMBOL(ceph_pagelist_append);

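/* Minimal usage sketch: build a pagelist and append a caller buffer.
 * Assumes ceph_pagelist_init() as declared in <linux/ceph/pagelist.h>;
 * the example function itself is illustrative only, not part of this API.
 */
static int __maybe_unused pagelist_append_example(const void *buf, size_t len)
{
        struct ceph_pagelist *pl;
        int ret;

        pl = kmalloc(sizeof(*pl), GFP_NOFS);
        if (!pl)
                return -ENOMEM;
        ceph_pagelist_init(pl);         /* takes the initial reference */

        ret = ceph_pagelist_append(pl, buf, len);

        ceph_pagelist_release(pl);      /* frees the pages and the pagelist */
        return ret;
}
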
/* Allocate enough pages for a pagelist to append the given amount
 * of data without further allocation.
 * Returns: 0 on success, -ENOMEM on error.
 */
int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
{
        if (space <= pl->room)
                return 0;
        space -= pl->room;
        space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT;  /* conv to num pages */

        while (space > pl->num_pages_free) {
                struct page *page = __page_cache_alloc(GFP_NOFS);
                if (!page)
                        return -ENOMEM;
                list_add_tail(&page->lru, &pl->free_list);
                ++pl->num_pages_free;
        }
        return 0;
}
EXPORT_SYMBOL(ceph_pagelist_reserve);

/* Free any pages that have been preallocated. */
int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
{
        while (!list_empty(&pl->free_list)) {
                struct page *page = list_first_entry(&pl->free_list,
                                                     struct page, lru);
                list_del(&page->lru);
                __free_page(page);
                --pl->num_pages_free;
        }
        BUG_ON(pl->num_pages_free);
        return 0;
}
EXPORT_SYMBOL(ceph_pagelist_free_reserve);

/* Create a truncation point. */
void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
                              struct ceph_pagelist_cursor *c)
{
        c->pl = pl;
        c->page_lru = pl->head.prev;
        c->room = pl->room;
}
EXPORT_SYMBOL(ceph_pagelist_set_cursor);

/* Truncate a pagelist to the given point.  Move extra pages to reserve.
 * This won't sleep.
 * Returns: 0 on success,
 *          -EINVAL if the pagelist doesn't match the trunc point pagelist
 */
int ceph_pagelist_truncate(struct ceph_pagelist *pl,
                           struct ceph_pagelist_cursor *c)
{
        struct page *page;

        if (pl != c->pl)
                return -EINVAL;
        ceph_pagelist_unmap_tail(pl);
        while (pl->head.prev != c->page_lru) {
                page = list_entry(pl->head.prev, struct page, lru);
                /* move from pagelist to reserve */
                list_move_tail(&page->lru, &pl->free_list);
                ++pl->num_pages_free;
        }
        pl->room = c->room;
        if (!list_empty(&pl->head)) {
                page = list_entry(pl->head.prev, struct page, lru);
                pl->mapped_tail = kmap(page);
        }
        return 0;
}
EXPORT_SYMBOL(ceph_pagelist_truncate);
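
/* Minimal usage sketch of the cursor API: reserve room so the append cannot
 * fail part-way, remember the current tail, append, and roll the pagelist
 * back on error.  The example function itself is illustrative only.
 */
static int __maybe_unused pagelist_truncate_example(struct ceph_pagelist *pl,
                                                    const void *buf, size_t len)
{
        struct ceph_pagelist_cursor trunc;
        int ret;

        ret = ceph_pagelist_reserve(pl, len);   /* prealloc into free_list */
        if (ret)
                return ret;

        ceph_pagelist_set_cursor(pl, &trunc);   /* remember current tail */

        ret = ceph_pagelist_append(pl, buf, len);
        if (ret)
                ceph_pagelist_truncate(pl, &trunc);     /* drop partial append */

        return ret;
}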