Yehuda Sadeh | 3d14c5d | 2010-04-06 15:14:15 -0700 | [diff] [blame] | 1 | #include <linux/module.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 2 | #include <linux/gfp.h> |
Yan, Zheng | e4339d28 | 2014-09-16 17:50:45 +0800 | [diff] [blame] | 3 | #include <linux/slab.h> |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 4 | #include <linux/pagemap.h> |
| 5 | #include <linux/highmem.h> |
Yehuda Sadeh | 3d14c5d | 2010-04-06 15:14:15 -0700 | [diff] [blame] | 6 | #include <linux/ceph/pagelist.h> |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 7 | |
Yehuda Sadeh | 3d4401d | 2010-09-03 12:57:11 -0700 | [diff] [blame] | 8 | static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl) |
| 9 | { |
Greg Farnum | ac0b74d | 2010-09-17 10:10:55 -0700 | [diff] [blame] | 10 | if (pl->mapped_tail) { |
| 11 | struct page *page = list_entry(pl->head.prev, struct page, lru); |
| 12 | kunmap(page); |
| 13 | pl->mapped_tail = NULL; |
| 14 | } |
Yehuda Sadeh | 3d4401d | 2010-09-03 12:57:11 -0700 | [diff] [blame] | 15 | } |
| 16 | |
Yan, Zheng | e4339d28 | 2014-09-16 17:50:45 +0800 | [diff] [blame] | 17 | void ceph_pagelist_release(struct ceph_pagelist *pl) |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 18 | { |
Yan, Zheng | e4339d28 | 2014-09-16 17:50:45 +0800 | [diff] [blame] | 19 | if (!atomic_dec_and_test(&pl->refcnt)) |
| 20 | return; |
Greg Farnum | ac0b74d | 2010-09-17 10:10:55 -0700 | [diff] [blame] | 21 | ceph_pagelist_unmap_tail(pl); |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 22 | while (!list_empty(&pl->head)) { |
| 23 | struct page *page = list_first_entry(&pl->head, struct page, |
| 24 | lru); |
| 25 | list_del(&page->lru); |
| 26 | __free_page(page); |
| 27 | } |
Greg Farnum | ac0b74d | 2010-09-17 10:10:55 -0700 | [diff] [blame] | 28 | ceph_pagelist_free_reserve(pl); |
Yan, Zheng | e4339d28 | 2014-09-16 17:50:45 +0800 | [diff] [blame] | 29 | kfree(pl); |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 30 | } |
Yehuda Sadeh | 3d14c5d | 2010-04-06 15:14:15 -0700 | [diff] [blame] | 31 | EXPORT_SYMBOL(ceph_pagelist_release); |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 32 | |
| 33 | static int ceph_pagelist_addpage(struct ceph_pagelist *pl) |
| 34 | { |
Greg Farnum | ac0b74d | 2010-09-17 10:10:55 -0700 | [diff] [blame] | 35 | struct page *page; |
| 36 | |
| 37 | if (!pl->num_pages_free) { |
| 38 | page = __page_cache_alloc(GFP_NOFS); |
| 39 | } else { |
| 40 | page = list_first_entry(&pl->free_list, struct page, lru); |
| 41 | list_del(&page->lru); |
Sage Weil | 240634e | 2010-10-05 12:03:23 -0700 | [diff] [blame] | 42 | --pl->num_pages_free; |
Greg Farnum | ac0b74d | 2010-09-17 10:10:55 -0700 | [diff] [blame] | 43 | } |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 44 | if (!page) |
| 45 | return -ENOMEM; |
| 46 | pl->room += PAGE_SIZE; |
Greg Farnum | ac0b74d | 2010-09-17 10:10:55 -0700 | [diff] [blame] | 47 | ceph_pagelist_unmap_tail(pl); |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 48 | list_add_tail(&page->lru, &pl->head); |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 49 | pl->mapped_tail = kmap(page); |
| 50 | return 0; |
| 51 | } |
| 52 | |
Yehuda Sadeh | 68b4476 | 2010-04-06 15:01:27 -0700 | [diff] [blame] | 53 | int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len) |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 54 | { |
| 55 | while (pl->room < len) { |
| 56 | size_t bit = pl->room; |
| 57 | int ret; |
| 58 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 59 | memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 60 | buf, bit); |
| 61 | pl->length += bit; |
| 62 | pl->room -= bit; |
| 63 | buf += bit; |
| 64 | len -= bit; |
| 65 | ret = ceph_pagelist_addpage(pl); |
| 66 | if (ret) |
| 67 | return ret; |
| 68 | } |
| 69 | |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 70 | memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len); |
Sage Weil | 58bb3b3 | 2009-12-23 12:12:31 -0800 | [diff] [blame] | 71 | pl->length += len; |
| 72 | pl->room -= len; |
| 73 | return 0; |
| 74 | } |
Yehuda Sadeh | 3d14c5d | 2010-04-06 15:14:15 -0700 | [diff] [blame] | 75 | EXPORT_SYMBOL(ceph_pagelist_append); |
Greg Farnum | ac0b74d | 2010-09-17 10:10:55 -0700 | [diff] [blame] | 76 | |
/* Preallocate enough pages for a pagelist to append the given amount
 * of data without allocating.
 * Returns: 0 on success, -ENOMEM on error.
 */
| 81 | int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space) |
| 82 | { |
| 83 | if (space <= pl->room) |
| 84 | return 0; |
| 85 | space -= pl->room; |
| 86 | space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT; /* conv to num pages */ |
| 87 | |
| 88 | while (space > pl->num_pages_free) { |
| 89 | struct page *page = __page_cache_alloc(GFP_NOFS); |
| 90 | if (!page) |
| 91 | return -ENOMEM; |
| 92 | list_add_tail(&page->lru, &pl->free_list); |
| 93 | ++pl->num_pages_free; |
| 94 | } |
| 95 | return 0; |
| 96 | } |
| 97 | EXPORT_SYMBOL(ceph_pagelist_reserve); |
| 98 | |
Ben Hutchings | ae86b9e | 2012-07-10 10:55:35 +0000 | [diff] [blame] | 99 | /* Free any pages that have been preallocated. */ |
Greg Farnum | ac0b74d | 2010-09-17 10:10:55 -0700 | [diff] [blame] | 100 | int ceph_pagelist_free_reserve(struct ceph_pagelist *pl) |
| 101 | { |
| 102 | while (!list_empty(&pl->free_list)) { |
| 103 | struct page *page = list_first_entry(&pl->free_list, |
| 104 | struct page, lru); |
| 105 | list_del(&page->lru); |
| 106 | __free_page(page); |
| 107 | --pl->num_pages_free; |
| 108 | } |
| 109 | BUG_ON(pl->num_pages_free); |
| 110 | return 0; |
| 111 | } |
| 112 | EXPORT_SYMBOL(ceph_pagelist_free_reserve); |
| 113 | |
Ben Hutchings | ae86b9e | 2012-07-10 10:55:35 +0000 | [diff] [blame] | 114 | /* Create a truncation point. */ |
Greg Farnum | ac0b74d | 2010-09-17 10:10:55 -0700 | [diff] [blame] | 115 | void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, |
| 116 | struct ceph_pagelist_cursor *c) |
| 117 | { |
| 118 | c->pl = pl; |
| 119 | c->page_lru = pl->head.prev; |
| 120 | c->room = pl->room; |
| 121 | } |
| 122 | EXPORT_SYMBOL(ceph_pagelist_set_cursor); |
| 123 | |
/* Truncate a pagelist to the given point. Move extra pages to reserve.
 * This won't sleep.
 * Returns: 0 on success,
 *          -EINVAL if the pagelist doesn't match the trunc point pagelist
 *
 * The cursor must have been taken on this same pagelist with
 * ceph_pagelist_set_cursor(); the cursor records the tail page and the
 * room remaining at that point.
 *
 * NOTE(review): pl->length is not rolled back here even though pages and
 * pl->room are — the cursor does not record it.  Confirm callers either
 * never read length after a truncate or truncate only at page-aligned
 * points where the stale length is harmless.
 */
int ceph_pagelist_truncate(struct ceph_pagelist *pl,
			   struct ceph_pagelist_cursor *c)
{
	struct page *page;

	if (pl != c->pl)
		return -EINVAL;
	/* Unmap before list surgery; the tail page may be about to move. */
	ceph_pagelist_unmap_tail(pl);
	/* Walk the tail back to the cursor's page, one page at a time. */
	while (pl->head.prev != c->page_lru) {
		page = list_entry(pl->head.prev, struct page, lru);
		/* move from pagelist to reserve */
		list_move_tail(&page->lru, &pl->free_list);
		++pl->num_pages_free;
	}
	pl->room = c->room;
	/* Re-map the new tail page, if the pagelist is not now empty. */
	if (!list_empty(&pl->head)) {
		page = list_entry(pl->head.prev, struct page, lru);
		pl->mapped_tail = kmap(page);
	}
	return 0;
}
EXPORT_SYMBOL(ceph_pagelist_truncate);