/*
 * Copyright (c) 2013
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/mm_inline.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"

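/*
 * Used as the page actor's release callback (and called directly on error
 * paths below): each non-NULL page is flushed and marked uptodate on
 * success, or marked errored and zeroed on failure, then unlocked and
 * released.  The page array itself is also freed here.
 */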
static void release_actor_pages(struct page **page, int pages, int error)
{
	int i;

	for (i = 0; i < pages; i++) {
		if (!page[i])
			continue;
		flush_dcache_page(page[i]);
		if (!error) {
			SetPageUptodate(page[i]);
		} else {
			SetPageError(page[i]);
			zero_user_segment(page[i], 0, PAGE_SIZE);
		}
		unlock_page(page[i]);
		put_page(page[i]);
	}
	kfree(page);
}

/*
 * Create a "page actor" which will kmap and kunmap the
 * page cache pages appropriately within the decompressor.
 *
 * Pages that are already up to date are skipped; pages missing from the
 * page cache are grabbed (locked) so the whole block can be filled in one
 * pass.  Returns NULL on allocation failure.  (See the illustrative note
 * at the end of this file for how the actor's pages are consumed.)
 */
static struct squashfs_page_actor *actor_from_page_cache(
	unsigned int actor_pages, struct page *target_page,
	struct list_head *rpages, unsigned int *nr_pages, int start_index,
	struct address_space *mapping)
{
	struct page **page;
	struct squashfs_page_actor *actor;
	int i, n;
	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

	page = kmalloc_array(actor_pages, sizeof(void *), GFP_KERNEL);
	if (!page)
		return NULL;

	for (i = 0, n = start_index; i < actor_pages; i++, n++) {
		if (target_page == NULL && rpages && !list_empty(rpages)) {
			struct page *cur_page = lru_to_page(rpages);

			if (cur_page->index < start_index + actor_pages) {
				list_del(&cur_page->lru);
				--(*nr_pages);
				if (add_to_page_cache_lru(cur_page, mapping,
							cur_page->index, gfp))
					put_page(cur_page);
				else
					target_page = cur_page;
			} else {
				rpages = NULL;
			}
		}

		if (target_page && target_page->index == n) {
			page[i] = target_page;
			target_page = NULL;
		} else {
			page[i] = grab_cache_page_nowait(mapping, n);
			if (page[i] == NULL)
				continue;
		}

		if (PageUptodate(page[i])) {
			unlock_page(page[i]);
			put_page(page[i]);
			page[i] = NULL;
		}
	}

	actor = squashfs_page_actor_init(page, actor_pages, 0,
					 release_actor_pages);
	if (!actor) {
		/* release_actor_pages() frees the page array */
		release_actor_pages(page, actor_pages, -ENOMEM);
		return NULL;
	}
	return actor;
}

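/*
 * Read the data block described by @block/@bsize into the page cache,
 * covering @target_page and any pages on @readahead_pages that fall
 * within the same block.  @target_page may be NULL on the ->readpages()
 * path, in which case the pages come from @readahead_pages; *@nr_pages
 * is decremented for every readahead page consumed.  The read is
 * submitted asynchronously through a page actor, so the pages are
 * completed later by release_actor_pages().  Returns 0 once the read has
 * been submitted, or a negative errno on failure.
 */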
int squashfs_readpages_block(struct page *target_page,
			     struct list_head *readahead_pages,
			     unsigned int *nr_pages,
			     struct address_space *mapping,
			     int page_index, u64 block, int bsize)
{
	struct squashfs_page_actor *actor;
	struct inode *inode = mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
	int start_index, end_index, file_end, actor_pages, res;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;

	/*
	 * If readpage() is called on an uncompressed datablock, we can just
	 * read the pages instead of fetching the whole block.
	 * This greatly improves performance when a process keeps doing
	 * random reads because we only fetch the necessary data.
	 * The readahead algorithm will take care of doing speculative reads
	 * if necessary.
	 * We can't read more than 1 block even if readahead provides us with
	 * more pages, because we don't know yet if the next block is
	 * compressed or not.
	 */
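	/*
	 * For example (assuming a 128K block size and 4K pages, so mask is
	 * 31): a read of page_index 37, i.e. page 5 of its block, advances
	 * @block by 5 * PAGE_SIZE into the on-disk data and leaves
	 * actor_pages = 32 - 5 = 27 (further capped by *nr_pages).  bsize is
	 * then clamped so that at most that many pages' worth of data is
	 * read, while SQUASHFS_COMPRESSED_BIT_BLOCK stays set so the block
	 * is still treated as stored (uncompressed).
	 */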
	if (bsize && !SQUASHFS_COMPRESSED_BLOCK(bsize)) {
		u64 block_end = block + msblk->block_size;

		block += (page_index & mask) * PAGE_SIZE;
		actor_pages = (block_end - block) / PAGE_SIZE;
		if (*nr_pages < actor_pages)
			actor_pages = *nr_pages;
		start_index = page_index;
		bsize = min_t(int, bsize, (PAGE_SIZE * actor_pages)
					  | SQUASHFS_COMPRESSED_BIT_BLOCK);
	} else {
		file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
		start_index = page_index & ~mask;
		end_index = start_index | mask;
		if (end_index > file_end)
			end_index = file_end;
		actor_pages = end_index - start_index + 1;
	}

	actor = actor_from_page_cache(actor_pages, target_page,
				      readahead_pages, nr_pages, start_index,
				      mapping);
	if (!actor)
		return -ENOMEM;

	res = squashfs_read_data_async(inode->i_sb, block, bsize, NULL,
				       actor);
	return res < 0 ? res : 0;
}
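
/*
 * Illustrative only (not part of this file): a decompressor typically
 * walks the actor's pages through the usual page actor interface, along
 * the lines of
 *
 *	void *dst = squashfs_first_page(actor);
 *
 *	for (...; ...; dst = squashfs_next_page(actor))
 *		memcpy(dst, src, PAGE_SIZE);
 *	squashfs_finish_page(actor);
 *
 * When squashfs_read_data_async() completes, the actor's release callback,
 * release_actor_pages() above, marks each page uptodate (or errored and
 * zeroed), unlocks and releases it, and frees the page array.
 */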