/*
 * Copyright (c) 2013
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
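
/*
 * Helpers for reading a separately compressed datablock straight into
 * the page cache.  If all of the pages covered by the block cannot be
 * grabbed, the code falls back to decompressing into an intermediate
 * buffer and copying the result out (squashfs_read_cache() below).
 */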

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
#include "page_actor.h"

static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page);

/* Read separately compressed datablock directly into page cache */
int squashfs_readpage_block(struct page *target_page, u64 block, int bsize)
{
	struct inode *inode = target_page->mapping->host;
	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;

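	/*
	 * Work out which page cache pages are covered by the Squashfs
	 * block containing target_page: mask is the number of pages per
	 * block minus one, start_index/end_index are the first and last
	 * page indexes within that block, and file_end is the index of
	 * the last page in the file.
	 */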
	int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT;
	int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1;
	int start_index = target_page->index & ~mask;
	int end_index = start_index | mask;
	int i, n, pages, missing_pages, bytes, res = -ENOMEM;
	struct page **page;
	struct squashfs_page_actor *actor;
	void *pageaddr;

	if (end_index > file_end)
		end_index = file_end;

	pages = end_index - start_index + 1;

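	/* Allocate an array holding a page pointer for each page in the block */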
	page = kmalloc_array(pages, sizeof(void *), GFP_KERNEL);
	if (page == NULL)
		return res;

	/*
	 * Create a "page actor" which will kmap and kunmap the
	 * page cache pages appropriately within the decompressor
	 */
	actor = squashfs_page_actor_init_special(page, pages, 0);
	if (actor == NULL)
		goto out;

	/* Try to grab all the pages covered by the Squashfs block */
	for (missing_pages = 0, i = 0, n = start_index; i < pages; i++, n++) {
		page[i] = (n == target_page->index) ? target_page :
			grab_cache_page_nowait(target_page->mapping, n);

		if (page[i] == NULL) {
			missing_pages++;
			continue;
		}

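		/*
		 * A page that is already uptodate must not be overwritten
		 * by the decompressor.  Release it and count it as missing
		 * so that we fall back to the intermediate buffer below.
		 */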
		if (PageUptodate(page[i])) {
			unlock_page(page[i]);
			put_page(page[i]);
			page[i] = NULL;
			missing_pages++;
		}
	}

	if (missing_pages) {
		/*
		 * Couldn't get one or more pages: either they have been
		 * VM reclaimed while other pages in the block are still
		 * in the page cache and uptodate, or we're racing with
		 * another thread in squashfs_readpage also trying to
		 * grab them.  Fall back to using an intermediate buffer.
		 */
		res = squashfs_read_cache(target_page, block, bsize, pages,
							page);
		if (res < 0)
			goto mark_errored;

		goto out;
	}

	/* Decompress directly into the page cache buffers */
	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
	if (res < 0)
		goto mark_errored;

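	/*
	 * On success, res is the number of bytes decompressed into the
	 * page cache pages.
	 */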
	/* Last page may have trailing bytes not filled */
	bytes = res % PAGE_SIZE;
	if (bytes) {
		pageaddr = kmap_atomic(page[pages - 1]);
		memset(pageaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_atomic(pageaddr);
	}

	/* Mark pages as uptodate, unlock and release */
	for (i = 0; i < pages; i++) {
		flush_dcache_page(page[i]);
		SetPageUptodate(page[i]);
		unlock_page(page[i]);
		if (page[i] != target_page)
			put_page(page[i]);
	}

	kfree(actor);
	kfree(page);

	return 0;

mark_errored:
	/*
	 * Decompression failed, mark pages as errored.  target_page is
	 * dealt with by the caller.
	 */
	for (i = 0; i < pages; i++) {
		if (page[i] == NULL || page[i] == target_page)
			continue;
		flush_dcache_page(page[i]);
		SetPageError(page[i]);
		unlock_page(page[i]);
		put_page(page[i]);
	}

out:
	kfree(actor);
	kfree(page);
	return res;
}

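/*
 * Fallback path: read the block through an intermediate buffer (the
 * Squashfs cache) and copy it into those page cache pages that the
 * caller managed to grab.
 */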
static int squashfs_read_cache(struct page *target_page, u64 block, int bsize,
	int pages, struct page **page)
{
	struct inode *i = target_page->mapping->host;
	struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb,
						block, bsize);
	int bytes = buffer->length, res = buffer->error, n, offset = 0;
	void *pageaddr;

	if (res) {
		ERROR("Unable to read page, block %llx, size %x\n", block,
			bsize);
		goto out;
	}

	for (n = 0; n < pages && bytes > 0; n++,
			bytes -= PAGE_SIZE, offset += PAGE_SIZE) {
		int avail = min_t(int, bytes, PAGE_SIZE);

		if (page[n] == NULL)
			continue;

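		/*
		 * Copy this page's worth of data out of the buffer and
		 * zero any remainder of the page.
		 */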
		pageaddr = kmap_atomic(page[n]);
		squashfs_copy_data(pageaddr, buffer, offset, avail);
		memset(pageaddr + avail, 0, PAGE_SIZE - avail);
		kunmap_atomic(pageaddr);
		flush_dcache_page(page[n]);
		SetPageUptodate(page[n]);
		unlock_page(page[n]);
		if (page[n] != target_page)
			put_page(page[n]);
	}

out:
	squashfs_cache_put(buffer);
	return res;
}