/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2013, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Internal definitions for VVP layer.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */

#ifndef VVP_INTERNAL_H
#define VVP_INTERNAL_H

#include "../include/lustre/lustre_idl.h"
#include "../include/cl_object.h"

enum obd_notify_event;
struct inode;
struct lov_stripe_md;
struct lustre_md;
struct obd_capa;
struct obd_device;
struct obd_export;
struct page;

/* specific architecture can implement only part of this list */
enum vvp_io_subtype {
        /** normal IO */
        IO_NORMAL,
        /** io started from splice_{read|write} */
        IO_SPLICE
};

/**
 * IO state private to VVP layer.
 */
struct vvp_io {
        /** super class */
        struct cl_io_slice vui_cl;
        struct cl_io_lock_link vui_link;
        /**
         * I/O vector information to or from which read/write is going.
         */
        struct iov_iter *vui_iter;
        /**
         * Total size of the remaining IO.
         */
        size_t vui_tot_count;

        union {
                struct vvp_fault_io {
                        /**
                         * Inode modification time that is checked across DLM
                         * lock request.
                         */
                        time64_t ft_mtime;
                        struct vm_area_struct *ft_vma;
                        /**
                         * locked page returned from vvp_io
                         */
                        struct page *ft_vmpage;
                        /**
                         * kernel fault info
                         */
                        struct vm_fault *ft_vmf;
                        /**
                         * bit flags used by the fault API for its return code
                         */
                        unsigned int ft_flags;
                        /**
                         * set when ft_flags holds the flags returned by
                         * filemap_fault()
                         */
                        bool ft_flags_valid;
                } fault;
                struct {
                        struct pipe_inode_info *vui_pipe;
                        unsigned int vui_flags;
                } splice;
                struct {
                        struct cl_page_list vui_queue;
                        unsigned long vui_written;
                        int vui_from;
                        int vui_to;
                } write;
        } u;

        enum vvp_io_subtype vui_io_subtype;

        /**
         * Layout version when this IO was initialized.
         */
        __u32 vui_layout_gen;
        /**
         * File descriptor against which IO is done.
         */
        struct ll_file_data *vui_fd;
        struct kiocb *vui_iocb;

        /* Readahead state. */
        pgoff_t vui_ra_start;
        pgoff_t vui_ra_count;
        /* Set when vui_ra_{start,count} have been initialized. */
        bool vui_ra_valid;
};

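/*
 * Illustrative sketch (not a real call site): the union above is
 * interpreted according to the kind of IO in flight.  A page-fault
 * handler uses the "fault" member, splice IO the "splice" member and
 * buffered writes the "write" member.  Only the field names below come
 * from this header; the local variables are assumptions:
 *
 *      if (io->ci_type == CIT_FAULT)
 *              vmpage = vio->u.fault.ft_vmpage;
 *      else if (vio->vui_io_subtype == IO_SPLICE)
 *              pipe = vio->u.splice.vui_pipe;
 */
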
extern struct lu_device_type vvp_device_type;

extern struct lu_context_key vvp_session_key;
extern struct lu_context_key vvp_thread_key;

extern struct kmem_cache *vvp_lock_kmem;
extern struct kmem_cache *vvp_object_kmem;
extern struct kmem_cache *vvp_req_kmem;

struct vvp_thread_info {
        struct cl_lock vti_lock;
        struct cl_lock_descr vti_descr;
        struct cl_io vti_io;
        struct cl_attr vti_attr;
};

static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
{
        struct vvp_thread_info *vti;

        vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
        LASSERT(vti);

        return vti;
}

static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
{
        struct cl_lock *lock = &vvp_env_info(env)->vti_lock;

        memset(lock, 0, sizeof(*lock));
        return lock;
}

static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
{
        struct cl_attr *attr = &vvp_env_info(env)->vti_attr;

        memset(attr, 0, sizeof(*attr));

        return attr;
}

static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
{
        struct cl_io *io = &vvp_env_info(env)->vti_io;

        memset(io, 0, sizeof(*io));

        return io;
}

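/*
 * Sketch of how the per-thread helpers above are meant to be used (an
 * assumption about a typical call site, not copied from real code): the
 * returned objects are zeroed scratch storage in the thread's lu_env, so
 * each use must be finished before the same thread starts another one.
 *
 *      struct cl_io *io = vvp_env_thread_io(env);
 *
 *      io->ci_obj = clob;
 *      if (cl_io_init(env, io, CIT_MISC, clob) == 0) {
 *              ... issue the IO ...
 *      }
 *      cl_io_fini(env, io);
 */
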
struct vvp_session {
        struct vvp_io cs_ios;
};

static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
{
        struct vvp_session *ses;

        ses = lu_context_key_get(env->le_ses, &vvp_session_key);
        LASSERT(ses);

        return ses;
}

static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
{
        return &vvp_env_session(env)->cs_ios;
}

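/*
 * Example (assumed usage): because the vvp_io lives in the session part
 * of the lu_env, every layer running under the same environment during a
 * single system call sees the same IO state:
 *
 *      struct vvp_io *vio = vvp_env_io(env);
 *
 *      vio->vui_io_subtype = IO_NORMAL;
 *      vio->vui_fd = fd;     (the ll_file_data of the open file, assumed)
 */
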
/**
 * VVP-private object state.
 */
struct vvp_object {
        struct cl_object_header vob_header;
        struct cl_object vob_cl;
        struct inode *vob_inode;

        /**
         * A list of dirty pages pending IO in the cache. Used by
         * SOM. Protected by ll_inode_info::lli_lock.
         *
         * \see vvp_page::vpg_pending_linkage
         */
        struct list_head vob_pending_list;

        /**
         * Number of transient pages. This is no longer protected by i_sem,
         * and needs to be atomic. This is not actually used for anything,
         * and can probably be removed.
         */
        atomic_t vob_transient_pages;

        /**
         * Number of outstanding mmaps on this file.
         *
         * \see ll_vm_open(), ll_vm_close().
         */
        atomic_t vob_mmap_cnt;

        /**
         * Various flags:
         * vob_discard_page_warned
         *      If pages belonging to this object are discarded when a client
         *      is evicted, some debug info is printed. This flag is set while
         *      processing the first discarded page, so that the debug message
         *      is not repeated for every subsequently discarded page.
         *
         * \see ll_dirty_page_discard_warn.
         */
        unsigned int vob_discard_page_warned:1;
};

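/*
 * Hypothetical sketch of the vob_mmap_cnt bookkeeping described above;
 * the real accounting lives in ll_vm_open()/ll_vm_close():
 *
 *      struct inode *inode = file_inode(vma->vm_file);
 *      struct vvp_object *vob = cl_inode2vvp(inode);
 *
 *      atomic_inc(&vob->vob_mmap_cnt);     (when a mapping is opened)
 *      atomic_dec(&vob->vob_mmap_cnt);     (when a mapping is closed)
 */
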
/**
 * VVP-private page state.
 */
struct vvp_page {
        struct cl_page_slice vpg_cl;
        unsigned int vpg_defer_uptodate:1,
                     vpg_ra_used:1,
                     vpg_write_queued:1;
        /**
         * Non-empty iff this page is already counted in
         * vvp_object::vob_pending_list. This list is only used as a flag,
         * that is, never iterated through, only checked for list_empty(), but
         * having a list is useful for debugging.
         */
        struct list_head vpg_pending_linkage;
        /** VM page */
        struct page *vpg_page;
};

static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
{
        return container_of(slice, struct vvp_page, vpg_cl);
}

static inline pgoff_t vvp_index(struct vvp_page *vvp)
{
        return vvp->vpg_cl.cpl_index;
}

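/*
 * Example conversions between a CLIO page slice, its vvp_page and the
 * kernel page (sketch; "slice" is assumed to come from a cl_page_slice
 * operation):
 *
 *      struct vvp_page *vpg = cl2vvp_page(slice);
 *      struct page *vmpage = vpg->vpg_page;
 *      pgoff_t index = vvp_index(vpg);
 */
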
struct vvp_device {
        struct cl_device vdv_cl;
        struct super_block *vdv_sb;
        struct cl_device *vdv_next;
};

struct vvp_lock {
        struct cl_lock_slice vlk_cl;
};

struct vvp_req {
        struct cl_req_slice vrq_cl;
};

void *ccc_key_init(const struct lu_context *ctx,
                   struct lu_context_key *key);
void ccc_key_fini(const struct lu_context *ctx,
                  struct lu_context_key *key, void *data);

void ccc_umount(const struct lu_env *env, struct cl_device *dev);

static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
{
        return &vdv->vdv_cl.cd_lu_dev;
}

static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
{
        return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev);
}

static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
{
        return container_of0(d, struct vvp_device, vdv_cl);
}

static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
{
        return container_of0(obj, struct vvp_object, vob_cl);
}

static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
{
        return container_of0(obj, struct vvp_object, vob_cl.co_lu);
}

static inline struct inode *vvp_object_inode(const struct cl_object *obj)
{
        return cl2vvp(obj)->vob_inode;
}

int vvp_object_invariant(const struct cl_object *obj);
struct vvp_object *cl_inode2vvp(struct inode *inode);

static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
{
        return cl2vvp_page(slice)->vpg_page;
}

static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
{
        return container_of(slice, struct vvp_lock, vlk_cl);
}

# define CLOBINVRNT(env, clob, expr) \
        ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))

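/*
 * Note that, as defined above, CLOBINVRNT() only type-checks its
 * arguments through sizeof and expands to a no-op, so invariant
 * expressions cost nothing at run time while still failing to compile
 * if they go stale, e.g.:
 *
 *      CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 */
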
/**
 * New interfaces to get and put lov_stripe_md from the lov layer. This
 * violates layering, because lov_stripe_md is supposed to be private data
 * of the lov layer.
 *
 * NB: If you find you have to use these interfaces in new code, please
 * think about it again. These interfaces may be removed in the future for
 * better layering.
 */
struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
int lov_read_and_clear_async_rc(struct cl_object *clob);

struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);

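/*
 * Sketch of the intended get/put pairing (an assumption based on the
 * comment above; NULL is expected when the file has no striping yet):
 *
 *      struct lov_stripe_md *lsm = ccc_inode_lsm_get(inode);
 *
 *      if (lsm) {
 *              ... inspect the striping ...
 *      }
 *      ccc_inode_lsm_put(inode, lsm);
 */
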
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
                struct cl_io *io);
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
                  struct cl_page *page, pgoff_t index);
int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
                 struct cl_req *req);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
                                   const struct lu_object_header *hdr,
                                   struct lu_device *dev);

int vvp_global_init(void);
void vvp_global_fini(void);

extern const struct file_operations vvp_dump_pgcache_file_ops;

#endif /* VVP_INTERNAL_H */