// SPDX-License-Identifier: GPL-2.0+
/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

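/*
 * __nilfs_get_page_block() - get the buffer head covering @block in @page,
 * creating empty buffers on the page first if it has none.  The returned
 * buffer is marked accessed and any I/O in flight on it is waited for.
 */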
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

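/**
 * nilfs_grab_buffer - return the buffer head for a block, creating it if needed
 * @inode: inode owning the block
 * @mapping: page cache holding the block
 * @blkoff: block offset within @mapping
 * @b_state: initial buffer state bits used when buffers are created
 *
 * This grabs and locks the page that covers @blkoff and returns the
 * buffer head mapping the block.  The page remains locked on success.
 * Returns NULL if the page could not be allocated.
 */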
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return bh;
}

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

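/**
 * nilfs_page_bug - dump information on a broken page
 * @page: page to be reported (may be NULL)
 *
 * This dumps the reference count, index, flags, and owner inode number of
 * @page, followed by the state of each of its buffer heads, to the kernel
 * log at KERN_CRIT level.
 */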
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be handled by the caller.  The pages must not be under I/O.
 * Both src and dst pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

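/**
 * nilfs_copy_dirty_pages - copy dirty pages from one page cache to another
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * This copies every page tagged dirty in @smap, along with its buffers,
 * into @dmap, and marks the copy dirty there.  Returns 0 on success, or
 * -ENOMEM if a destination page could not be allocated.
 */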
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	pagevec_init(&pvec);
repeat:
	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY))
		return 0;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;

		lock_page(page);
		if (unlikely(!PageDirty(page)))
			NILFS_PAGE_BUG(page, "inconsistent dirty state");

		dpage = grab_cache_page(dmap, page->index);
		if (unlikely(!dpage)) {
			/* No empty page is added to the page cache */
			err = -ENOMEM;
			unlock_page(page);
			break;
		}
		if (unlikely(!page_has_buffers(page)))
			NILFS_PAGE_BUG(page,
				       "found empty page in dat page cache");

		nilfs_copy_page(dpage, page, 1);
		__set_page_dirty_nobuffers(dpage);

		unlock_page(dpage);
		put_page(dpage);
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec);
repeat:
	n = pagevec_lookup(&pvec, smap, &index);
	if (!n)
		return;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* override existing page on the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			put_page(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			xa_lock_irq(&smap->i_pages);
			page2 = radix_tree_delete(&smap->i_pages, offset);
			WARN_ON(page2 != page);

			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			err = radix_tree_insert(&dmap->i_pages, offset, page);
			if (unlikely(err < 0)) {
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				put_page(page); /* for cache */
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->i_pages,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec);

	while (pagevec_lookup_tag(&pvec, mapping, &index,
				  PAGECACHE_TAG_DIRTY)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			nilfs_clear_dirty_page(page, silent);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_msg(sb, KERN_WARNING,
			  "discard dirty page: offset=%lld, ino=%lu",
			  page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_msg(sb, KERN_WARNING,
					  "discard dirty block: blocknr=%llu, size=%zu",
					  (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}

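/**
 * nilfs_page_count_clean_buffers - count clean buffers in a page range
 * @page: page holding the buffers
 * @from: start offset in the page, in bytes
 * @to: end offset in the page, in bytes
 *
 * Returns the number of buffers on @page that overlap the byte range
 * [@from, @to) and are not dirty.
 */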
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

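/**
 * nilfs_mapping_init - initialize an address space
 * @mapping: address space to initialize
 * @inode: host inode of @mapping
 *
 * This sets up @mapping with @inode as its host, a GFP_NOFS allocation
 * mask, no private data, and empty address space operations.
 */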
void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
{
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	mapping->private_data = NULL;
	mapping->a_ops = &empty_aops;
}

/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (test_bit(PG_dirty, &page->flags)) {
			radix_tree_tag_clear(&mapping->i_pages,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			return clear_page_dirty_for_io(page);
		}
		xa_unlock_irq(&mapping->i_pages);
		return 0;
	}
	return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches an extent of buffers marked "delayed" which
 * starts from a block offset equal to or larger than @start_blk.  If
 * such an extent was found, this will store the start offset in
 * @blkoff and return its length in blocks.  Otherwise, zero is
 * returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i;
	pgoff_t index;
	unsigned int nblocks_in_page;
	unsigned long length = 0;
	sector_t b;
	struct pagevec pvec;
	struct page *page;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
	nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec);

repeat:
	pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
					pvec.pages);
	if (pvec.nr == 0)
		return length;

	if (length > 0 && pvec.pages[0]->index > index)
		goto out;

	b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
	i = 0;
	do {
		page = pvec.pages[i];

		lock_page(page);
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;

			b += nblocks_in_page;
		}
		unlock_page(page);

	} while (++i < pagevec_count(&pvec));

	index = page->index + 1;
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;

out_locked:
	unlock_page(page);
out:
	pagevec_release(&pvec);
	return length;
}