/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Implementation of cl_page for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

#include "../include/lustre_lite.h"

#include "llite_internal.h"
#include "vvp_internal.h"

/*****************************************************************************
 *
 * Page operations.
 *
 */

static void vvp_page_fini_common(struct vvp_page *vpg)
{
	struct page *vmpage = vpg->vpg_page;

	LASSERT(vmpage);
	put_page(vmpage);
}

static void vvp_page_fini(const struct lu_env *env,
			  struct cl_page_slice *slice)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct page *vmpage = vpg->vpg_page;

	/*
	 * vmpage->private was already cleared when the page was moved into
	 * the VPG_FREEING state.
	 */
	LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
	vvp_page_fini_common(vpg);
}

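/*
 * Implements cl_page_operations::cpo_own().
 *
 * Owning a cacheable page means holding the VM page lock with no
 * writeback in flight: the blocking path locks the page and waits for
 * writeback to finish, while the non-blocking path returns -EAGAIN if
 * either the trylock fails or the page is under writeback.
 */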
static int vvp_page_own(const struct lu_env *env,
			const struct cl_page_slice *slice, struct cl_io *io,
			int nonblock)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct page *vmpage = vpg->vpg_page;

	LASSERT(vmpage);
	if (nonblock) {
		if (!trylock_page(vmpage))
			return -EAGAIN;

		if (unlikely(PageWriteback(vmpage))) {
			unlock_page(vmpage);
			return -EAGAIN;
		}

		return 0;
	}

	lock_page(vmpage);
	wait_on_page_writeback(vmpage);

	return 0;
}

static void vvp_page_assume(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));
	wait_on_page_writeback(vmpage);
}

static void vvp_page_unassume(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));
}

static void vvp_page_disown(const struct lu_env *env,
			    const struct cl_page_slice *slice, struct cl_io *io)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));

	unlock_page(vmpage);
}

static void vvp_page_discard(const struct lu_env *env,
			     const struct cl_page_slice *slice,
			     struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	struct vvp_page *vpg = cl2vvp_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));

	if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
		ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);

	ll_invalidate_page(vmpage);
}

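/*
 * Implements cl_page_operations::cpo_delete().
 *
 * Severs the vmpage->private link back to the cl_page and drops the
 * reference that vvp_page_init() took on behalf of that link.
 */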
static void vvp_page_delete(const struct lu_env *env,
			    const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct inode *inode = vmpage->mapping->host;
	struct cl_object *obj = slice->cpl_obj;
	struct cl_page *page = slice->cpl_page;
	int refc;

	LASSERT(PageLocked(vmpage));
	LASSERT((struct cl_page *)vmpage->private == page);
	LASSERT(inode == vvp_object_inode(obj));

	vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));

	/* Drop the reference count held in vvp_page_init */
	refc = atomic_dec_return(&page->cp_ref);
	LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);

	ClearPageUptodate(vmpage);
	ClearPagePrivate(vmpage);
	vmpage->private = 0;
	/*
	 * The reference from the vmpage to the cl_page is removed here, but
	 * the reference in the other direction is still held; it is dropped
	 * later, in vvp_page_fini().
	 */
}

static void vvp_page_export(const struct lu_env *env,
			    const struct cl_page_slice *slice,
			    int uptodate)
{
	struct page *vmpage = cl2vm_page(slice);

	LASSERT(vmpage);
	LASSERT(PageLocked(vmpage));
	if (uptodate)
		SetPageUptodate(vmpage);
	else
		ClearPageUptodate(vmpage);
}

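/*
 * Implements cl_page_operations::cpo_is_vmlocked(): -EBUSY reports that
 * the VM page is locked, -ENODATA that it is not. cl_page_is_vmlocked()
 * turns this pair back into a boolean answer.
 */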
static int vvp_page_is_vmlocked(const struct lu_env *env,
				const struct cl_page_slice *slice)
{
	return PageLocked(cl2vm_page(slice)) ? -EBUSY : -ENODATA;
}

static int vvp_page_prep_read(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      struct cl_io *unused)
{
	/* Skip the page already marked as PG_uptodate. */
	return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
}

static int vvp_page_prep_write(const struct lu_env *env,
			       const struct cl_page_slice *slice,
			       struct cl_io *unused)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageDirty(vmpage));

	/*
	 * The ll_writepage() path is not a sync write, so the page writeback
	 * flag needs to be set.
	 */
	if (!pg->cp_sync_io)
		set_page_writeback(vmpage);

	vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));

	return 0;
}

/**
 * Handles page transfer errors at the VM level.
 *
 * This takes the inode as a separate argument, because the inode on which
 * the error is to be set can be different from the \a vmpage inode in the
 * direct-io case.
 */
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
	struct vvp_object *obj = cl_inode2vvp(inode);

	if (ioret == 0) {
		ClearPageError(vmpage);
		obj->vob_discard_page_warned = 0;
	} else {
		SetPageError(vmpage);
		if (ioret == -ENOSPC)
			set_bit(AS_ENOSPC, &inode->i_mapping->flags);
		else
			set_bit(AS_EIO, &inode->i_mapping->flags);

		if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
		    obj->vob_discard_page_warned == 0) {
			obj->vob_discard_page_warned = 1;
			ll_dirty_page_discard_warn(vmpage, ioret);
		}
	}
}

static void vvp_page_completion_read(const struct lu_env *env,
				     const struct cl_page_slice *slice,
				     int ioret)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct page *vmpage = vpg->vpg_page;
	struct cl_page *page = slice->cpl_page;
	struct inode *inode = vvp_object_inode(page->cp_obj);

	LASSERT(PageLocked(vmpage));
	CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);

	if (vpg->vpg_defer_uptodate)
		ll_ra_count_put(ll_i2sbi(inode), 1);

	if (ioret == 0) {
		if (!vpg->vpg_defer_uptodate)
			cl_page_export(env, page, 1);
	} else {
		vpg->vpg_defer_uptodate = 0;
	}

	if (!page->cp_sync_io)
		unlock_page(vmpage);
}

static void vvp_page_completion_write(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      int ioret)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct cl_page *pg = slice->cpl_page;
	struct page *vmpage = vpg->vpg_page;

	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);

	/*
	 * TODO: It would actually make sense to add the page back into the
	 * oap pending list, so that it does not have to be taken off the
	 * SoM write pending list when we merely hit a recoverable error
	 * such as -ENOMEM. To implement this, we would just need to return
	 * a non-zero value from the ->cpo_completion() method; the
	 * underlying transfer would be notified and would then re-add the
	 * page to the pending transfer queue. -jay
	 */

	vpg->vpg_write_queued = 0;
	vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);

	if (pg->cp_sync_io) {
		LASSERT(PageLocked(vmpage));
		LASSERT(!PageWriteback(vmpage));
	} else {
		LASSERT(PageWriteback(vmpage));
		/*
		 * Only mark the page in error for async writes, because
		 * applications won't wait for the IO to finish.
		 */
		vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);

		end_page_writeback(vmpage);
	}
}

/**
 * Implements cl_page_operations::cpo_make_ready() method.
 *
 * This is called to yank a page from the transfer cache and to send it out
 * as a part of a transfer. This function locks the page; if the page turns
 * out to be owned by some concurrent IO or to have been truncated, it is
 * skipped (this is bad, but hopefully a rare situation, as it usually
 * results in the transfer being shorter than possible).
 *
 * \retval 0	     success, page can be placed into transfer
 *
 * \retval -EALREADY page is either used by concurrent IO or has been
 *		     truncated. Skip it.
 */
static int vvp_page_make_ready(const struct lu_env *env,
			       const struct cl_page_slice *slice)
{
	struct page *vmpage = cl2vm_page(slice);
	struct cl_page *pg = slice->cpl_page;
	int result = 0;

	lock_page(vmpage);
	if (clear_page_dirty_for_io(vmpage)) {
		LASSERT(pg->cp_state == CPS_CACHED);
		/* This actually clears the dirty bit in the radix tree. */
		set_page_writeback(vmpage);
		vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
		CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
	} else if (pg->cp_state == CPS_PAGEOUT) {
		/*
		 * Is it possible for osc_flush_async_page() to already
		 * have made it ready?
		 */
		result = -EALREADY;
	} else {
		CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpected page state %d.\n",
			      pg->cp_state);
		LBUG();
	}
	unlock_page(vmpage);
	return result;
}

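/*
 * Implements cl_page_operations::cpo_is_under_lock().
 *
 * For read, write, and fault IO on a group-locked file, the whole file
 * range is covered, so \a max_index is extended to CL_PAGE_EOF.
 */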
static int vvp_page_is_under_lock(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *io, pgoff_t *max_index)
{
	if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
	    io->ci_type == CIT_FAULT) {
		struct vvp_io *vio = vvp_env_io(env);

		if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
			*max_index = CL_PAGE_EOF;
	}
	return 0;
}

static int vvp_page_print(const struct lu_env *env,
			  const struct cl_page_slice *slice,
			  void *cookie, lu_printer_t printer)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct page *vmpage = vpg->vpg_page;

	(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
		   vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
		   vpg->vpg_write_queued, vmpage);
	if (vmpage) {
		(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
			   (long)vmpage->flags, page_count(vmpage),
			   page_mapcount(vmpage), vmpage->private,
			   vmpage->index,
			   list_empty(&vmpage->lru) ? "not-" : "");
	}

	(*printer)(env, cookie, "\n");

	return 0;
}

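/*
 * Stub wired in as ->io[CRT_READ].cpo_make_ready: a read page is
 * apparently never "made ready", so reaching this function is a bug.
 */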
static int vvp_page_fail(const struct lu_env *env,
			 const struct cl_page_slice *slice)
{
	/*
	 * Cached read?
	 */
	LBUG();

	return 0;
}

static const struct cl_page_operations vvp_page_ops = {
	.cpo_own           = vvp_page_own,
	.cpo_assume        = vvp_page_assume,
	.cpo_unassume      = vvp_page_unassume,
	.cpo_disown        = vvp_page_disown,
	.cpo_discard       = vvp_page_discard,
	.cpo_delete        = vvp_page_delete,
	.cpo_export        = vvp_page_export,
	.cpo_is_vmlocked   = vvp_page_is_vmlocked,
	.cpo_fini          = vvp_page_fini,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = vvp_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = vvp_page_prep_read,
			.cpo_completion = vvp_page_completion_read,
			.cpo_make_ready = vvp_page_fail,
		},
		[CRT_WRITE] = {
			.cpo_prep       = vvp_page_prep_write,
			.cpo_completion = vvp_page_completion_write,
			.cpo_make_ready = vvp_page_make_ready,
		},
	},
};

static int vvp_transient_page_prep(const struct lu_env *env,
				   const struct cl_page_slice *slice,
				   struct cl_io *unused)
{
	/* Transient pages should always be sent. */
	return 0;
}

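/*
 * Transient pages are created and used with the inode lock already held
 * (see vvp_page_init()), so inode_trylock() is expected to fail here;
 * that failure is what proves the lock is taken.
 */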
static void vvp_transient_page_verify(const struct cl_page *page)
{
	struct inode *inode = vvp_object_inode(page->cp_obj);

	LASSERT(!inode_trylock(inode));
}

static int vvp_transient_page_own(const struct lu_env *env,
				  const struct cl_page_slice *slice,
				  struct cl_io *unused, int nonblock)
{
	vvp_transient_page_verify(slice->cpl_page);
	return 0;
}

static void vvp_transient_page_assume(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_unassume(const struct lu_env *env,
					const struct cl_page_slice *slice,
					struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_disown(const struct lu_env *env,
				      const struct cl_page_slice *slice,
				      struct cl_io *unused)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_discard(const struct lu_env *env,
				       const struct cl_page_slice *slice,
				       struct cl_io *unused)
{
	struct cl_page *page = slice->cpl_page;

	vvp_transient_page_verify(slice->cpl_page);

	/*
	 * For transient pages, remove it from the radix tree.
	 */
	cl_page_delete(env, page);
}

static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
					  const struct cl_page_slice *slice)
{
	struct inode *inode = vvp_object_inode(slice->cpl_obj);
	int locked;

	locked = !inode_trylock(inode);
	if (!locked)
		inode_unlock(inode);
	return locked ? -EBUSY : -ENODATA;
}

static void
vvp_transient_page_completion(const struct lu_env *env,
			      const struct cl_page_slice *slice,
			      int ioret)
{
	vvp_transient_page_verify(slice->cpl_page);
}

static void vvp_transient_page_fini(const struct lu_env *env,
				    struct cl_page_slice *slice)
{
	struct vvp_page *vpg = cl2vvp_page(slice);
	struct cl_page *clp = slice->cpl_page;
	struct vvp_object *clobj = cl2vvp(clp->cp_obj);

	vvp_page_fini_common(vpg);
	LASSERT(!inode_trylock(clobj->vob_inode));
	clobj->vob_transient_pages--;
}

static const struct cl_page_operations vvp_transient_page_ops = {
	.cpo_own           = vvp_transient_page_own,
	.cpo_assume        = vvp_transient_page_assume,
	.cpo_unassume      = vvp_transient_page_unassume,
	.cpo_disown        = vvp_transient_page_disown,
	.cpo_discard       = vvp_transient_page_discard,
	.cpo_fini          = vvp_transient_page_fini,
	.cpo_is_vmlocked   = vvp_transient_page_is_vmlocked,
	.cpo_print         = vvp_page_print,
	.cpo_is_under_lock = vvp_page_is_under_lock,
	.io = {
		[CRT_READ] = {
			.cpo_prep       = vvp_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		},
		[CRT_WRITE] = {
			.cpo_prep       = vvp_transient_page_prep,
			.cpo_completion = vvp_transient_page_completion,
		}
	}
};

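/*
 * Initializes the VVP slice of a new cl_page. Cacheable pages get a
 * back-pointer from vmpage->private and an extra cl_page reference that
 * vvp_page_delete() drops; transient pages are merely counted and wired
 * to the transient operations vector.
 */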
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
		  struct cl_page *page, pgoff_t index)
{
	struct vvp_page *vpg = cl_object_page_slice(obj, page);
	struct page *vmpage = page->cp_vmpage;

	CLOBINVRNT(env, obj, vvp_object_invariant(obj));

	vpg->vpg_page = vmpage;
	get_page(vmpage);

	INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
	if (page->cp_type == CPT_CACHEABLE) {
		/* in cache, decref in vvp_page_delete */
		atomic_inc(&page->cp_ref);
		SetPagePrivate(vmpage);
		vmpage->private = (unsigned long)page;
		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
				  &vvp_page_ops);
	} else {
		struct vvp_object *clobj = cl2vvp(obj);

		LASSERT(!inode_trylock(clobj->vob_inode));
		cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
				  &vvp_transient_page_ops);
		clobj->vob_transient_pages++;
	}
	return 0;
}