Peng Tao | d7e09d0 | 2013-05-02 16:46:55 +0800 | [diff] [blame] | 1 | /* |
| 2 | * GPL HEADER START |
| 3 | * |
| 4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 only, |
| 8 | * as published by the Free Software Foundation. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, but |
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | * General Public License version 2 for more details (a copy is included |
| 14 | * in the LICENSE file that accompanied this code). |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License |
| 17 | * version 2 along with this program; If not, see |
Oleg Drokin | 6a5b99a | 2016-06-14 23:33:40 -0400 | [diff] [blame] | 18 | * http://www.gnu.org/licenses/gpl-2.0.html |
Peng Tao | d7e09d0 | 2013-05-02 16:46:55 +0800 | [diff] [blame] | 19 | * |
Peng Tao | d7e09d0 | 2013-05-02 16:46:55 +0800 | [diff] [blame] | 20 | * GPL HEADER END |
| 21 | */ |
| 22 | /* |
| 23 | * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. |
| 24 | * Use is subject to license terms. |
Andreas Dilger | 1dc563a | 2015-11-08 18:09:37 -0500 | [diff] [blame] | 25 | * |
| 26 | * Copyright (c) 2013, 2015, Intel Corporation. |
Peng Tao | d7e09d0 | 2013-05-02 16:46:55 +0800 | [diff] [blame] | 27 | */ |
| 28 | /* |
| 29 | * This file is part of Lustre, http://www.lustre.org/ |
| 30 | * Lustre is a trademark of Sun Microsystems, Inc. |
| 31 | * |
| 32 | * Internal definitions for VVP layer. |
| 33 | * |
| 34 | * Author: Nikita Danilov <nikita.danilov@sun.com> |
| 35 | */ |
| 36 | |
| 37 | #ifndef VVP_INTERNAL_H |
| 38 | #define VVP_INTERNAL_H |
| 39 | |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 40 | #include "../include/lustre/lustre_idl.h" |
Greg Kroah-Hartman | 67a235f | 2014-07-11 21:51:41 -0700 | [diff] [blame] | 41 | #include "../include/cl_object.h" |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 42 | |
| 43 | enum obd_notify_event; |
| 44 | struct inode; |
| 45 | struct lov_stripe_md; |
| 46 | struct lustre_md; |
| 47 | struct obd_capa; |
| 48 | struct obd_device; |
| 49 | struct obd_export; |
| 50 | struct page; |
| 51 | |
/*
 * Origin of an I/O request; a specific architecture can implement only
 * part of this list.
 */
enum vvp_io_subtype {
	/** normal IO (read/write system calls and similar paths) */
	IO_NORMAL,
	/** io started from splice_{read|write} */
	IO_SPLICE
};
| 59 | |
/**
 * IO state private to the VVP layer.
 */
struct vvp_io {
	/** super class */
	struct cl_io_slice vui_cl;
	struct cl_io_lock_link vui_link;
	/**
	 * I/O vector information to or from which read/write is going.
	 */
	struct iov_iter *vui_iter;
	/**
	 * Total size for the left IO.
	 */
	size_t vui_tot_count;

	/* Per-subtype state; the active member is selected by vui_io_subtype
	 * (fault state lives alongside, for page-fault driven IO).
	 */
	union {
		struct vvp_fault_io {
			/**
			 * Inode modification time that is checked across DLM
			 * lock request.
			 */
			time64_t ft_mtime;
			struct vm_area_struct *ft_vma;
			/**
			 * locked page returned from vvp_io
			 */
			struct page *ft_vmpage;
			/**
			 * kernel fault info
			 */
			struct vm_fault *ft_vmf;
			/**
			 * fault API used bitflags for return code.
			 */
			unsigned int ft_flags;
			/**
			 * check that flags are from filemap_fault
			 */
			bool ft_flags_valid;
		} fault;
		struct {
			/* destination pipe for IO_SPLICE requests */
			struct pipe_inode_info *vui_pipe;
			/* splice flags — presumably SPLICE_F_*; confirm in vvp_io.c */
			unsigned int vui_flags;
		} splice;
		struct {
			/* pages queued for a batched write commit */
			struct cl_page_list vui_queue;
			/* bytes written so far for this request */
			unsigned long vui_written;
			/* start offset within the first/last page */
			int vui_from;
			/* end offset within the first/last page */
			int vui_to;
		} write;
	} u;

	/* selects the active member of the union above */
	enum vvp_io_subtype vui_io_subtype;

	/**
	 * Layout version when this IO is initialized
	 */
	__u32 vui_layout_gen;
	/**
	 * File descriptor against which IO is done.
	 */
	struct ll_file_data *vui_fd;
	struct kiocb *vui_iocb;

	/* Readahead state. */
	pgoff_t vui_ra_start;
	pgoff_t vui_ra_count;
	/* Set when vui_ra_{start,count} have been initialized. */
	bool vui_ra_valid;
};
| 131 | |
John Hammond | a37bec7 | 2016-03-30 19:48:58 -0400 | [diff] [blame] | 132 | extern struct lu_device_type vvp_device_type; |
| 133 | |
John L. Hammond | 10cdef7 | 2016-03-30 19:48:51 -0400 | [diff] [blame] | 134 | extern struct lu_context_key vvp_session_key; |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 135 | extern struct lu_context_key vvp_thread_key; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 136 | |
John L. Hammond | 4a4eee0 | 2016-03-30 19:48:49 -0400 | [diff] [blame] | 137 | extern struct kmem_cache *vvp_lock_kmem; |
John L. Hammond | 8c7b0e1 | 2016-03-30 19:48:47 -0400 | [diff] [blame] | 138 | extern struct kmem_cache *vvp_object_kmem; |
John L. Hammond | 103b8bd | 2016-03-30 19:48:54 -0400 | [diff] [blame] | 139 | extern struct kmem_cache *vvp_req_kmem; |
John L. Hammond | 8c7b0e1 | 2016-03-30 19:48:47 -0400 | [diff] [blame] | 140 | |
/**
 * Per-thread scratch state for the VVP layer, stored in the lu_env
 * thread context under vvp_thread_key (see vvp_env_info()).  Members
 * are reused across calls; the vvp_env_lock()/vvp_env_thread_attr()/
 * vvp_env_thread_io() accessors zero their member before returning it.
 */
struct vvp_thread_info {
	/* scratch cl_lock, handed out by vvp_env_lock() */
	struct cl_lock vti_lock;
	/* scratch lock descriptor */
	struct cl_lock_descr vti_descr;
	/* scratch cl_io, handed out by vvp_env_thread_io() */
	struct cl_io vti_io;
	/* scratch cl_attr, handed out by vvp_env_thread_attr() */
	struct cl_attr vti_attr;
};
| 147 | |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 148 | static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env) |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 149 | { |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 150 | struct vvp_thread_info *vti; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 151 | |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 152 | vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key); |
| 153 | LASSERT(vti); |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 154 | |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 155 | return vti; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 156 | } |
| 157 | |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 158 | static inline struct cl_lock *vvp_env_lock(const struct lu_env *env) |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 159 | { |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 160 | struct cl_lock *lock = &vvp_env_info(env)->vti_lock; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 161 | |
| 162 | memset(lock, 0, sizeof(*lock)); |
| 163 | return lock; |
| 164 | } |
| 165 | |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 166 | static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env) |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 167 | { |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 168 | struct cl_attr *attr = &vvp_env_info(env)->vti_attr; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 169 | |
| 170 | memset(attr, 0, sizeof(*attr)); |
| 171 | |
| 172 | return attr; |
| 173 | } |
| 174 | |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 175 | static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env) |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 176 | { |
John Hammond | 9acc450 | 2016-03-30 19:48:57 -0400 | [diff] [blame] | 177 | struct cl_io *io = &vvp_env_info(env)->vti_io; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 178 | |
| 179 | memset(io, 0, sizeof(*io)); |
| 180 | |
| 181 | return io; |
| 182 | } |
| 183 | |
/**
 * VVP state kept in the lu_env session context, registered under
 * vvp_session_key (see vvp_env_session()).
 */
struct vvp_session {
	/* IO state for the IO associated with this session */
	struct vvp_io cs_ios;
};
| 187 | |
John L. Hammond | 10cdef7 | 2016-03-30 19:48:51 -0400 | [diff] [blame] | 188 | static inline struct vvp_session *vvp_env_session(const struct lu_env *env) |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 189 | { |
John L. Hammond | 10cdef7 | 2016-03-30 19:48:51 -0400 | [diff] [blame] | 190 | struct vvp_session *ses; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 191 | |
John L. Hammond | 10cdef7 | 2016-03-30 19:48:51 -0400 | [diff] [blame] | 192 | ses = lu_context_key_get(env->le_ses, &vvp_session_key); |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 193 | LASSERT(ses); |
| 194 | |
| 195 | return ses; |
| 196 | } |
| 197 | |
John L. Hammond | 10cdef7 | 2016-03-30 19:48:51 -0400 | [diff] [blame] | 198 | static inline struct vvp_io *vvp_env_io(const struct lu_env *env) |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 199 | { |
John L. Hammond | 10cdef7 | 2016-03-30 19:48:51 -0400 | [diff] [blame] | 200 | return &vvp_env_session(env)->cs_ios; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 201 | } |
| 202 | |
| 203 | /** |
| 204 | * ccc-private object state. |
| 205 | */ |
John L. Hammond | 8c7b0e1 | 2016-03-30 19:48:47 -0400 | [diff] [blame] | 206 | struct vvp_object { |
| 207 | struct cl_object_header vob_header; |
| 208 | struct cl_object vob_cl; |
| 209 | struct inode *vob_inode; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 210 | |
| 211 | /** |
| 212 | * A list of dirty pages pending IO in the cache. Used by |
| 213 | * SOM. Protected by ll_inode_info::lli_lock. |
| 214 | * |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 215 | * \see vvp_page::vpg_pending_linkage |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 216 | */ |
John L. Hammond | 8c7b0e1 | 2016-03-30 19:48:47 -0400 | [diff] [blame] | 217 | struct list_head vob_pending_list; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 218 | |
| 219 | /** |
Stephen Champion | b2e7bbb | 2016-09-18 16:37:45 -0400 | [diff] [blame] | 220 | * Number of transient pages. This is no longer protected by i_sem, |
| 221 | * and needs to be atomic. This is not actually used for anything, |
| 222 | * and can probably be removed. |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 223 | */ |
Stephen Champion | b2e7bbb | 2016-09-18 16:37:45 -0400 | [diff] [blame] | 224 | atomic_t vob_transient_pages; |
| 225 | |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 226 | /** |
| 227 | * Number of outstanding mmaps on this file. |
| 228 | * |
| 229 | * \see ll_vm_open(), ll_vm_close(). |
| 230 | */ |
John L. Hammond | 8c7b0e1 | 2016-03-30 19:48:47 -0400 | [diff] [blame] | 231 | atomic_t vob_mmap_cnt; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 232 | |
| 233 | /** |
| 234 | * various flags |
John L. Hammond | 8c7b0e1 | 2016-03-30 19:48:47 -0400 | [diff] [blame] | 235 | * vob_discard_page_warned |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 236 | * if pages belonging to this object are discarded when a client |
| 237 | * is evicted, some debug info will be printed, this flag will be set |
| 238 | * during processing the first discarded page, then avoid flooding |
| 239 | * debug message for lots of discarded pages. |
| 240 | * |
| 241 | * \see ll_dirty_page_discard_warn. |
| 242 | */ |
John L. Hammond | 8c7b0e1 | 2016-03-30 19:48:47 -0400 | [diff] [blame] | 243 | unsigned int vob_discard_page_warned:1; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 244 | }; |
| 245 | |
| 246 | /** |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 247 | * VVP-private page state. |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 248 | */ |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 249 | struct vvp_page { |
| 250 | struct cl_page_slice vpg_cl; |
Jinshan Xiong | 96c5336 | 2016-08-16 16:19:09 -0400 | [diff] [blame] | 251 | unsigned int vpg_defer_uptodate:1, |
| 252 | vpg_ra_used:1, |
| 253 | vpg_write_queued:1; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 254 | /** |
| 255 | * Non-empty iff this page is already counted in |
John L. Hammond | 8c7b0e1 | 2016-03-30 19:48:47 -0400 | [diff] [blame] | 256 | * vvp_object::vob_pending_list. This list is only used as a flag, |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 257 | * that is, never iterated through, only checked for list_empty(), but |
| 258 | * having a list is useful for debugging. |
| 259 | */ |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 260 | struct list_head vpg_pending_linkage; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 261 | /** VM page */ |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 262 | struct page *vpg_page; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 263 | }; |
| 264 | |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 265 | static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice) |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 266 | { |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 267 | return container_of(slice, struct vvp_page, vpg_cl); |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 268 | } |
| 269 | |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 270 | static inline pgoff_t vvp_index(struct vvp_page *vvp) |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 271 | { |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 272 | return vvp->vpg_cl.cpl_index; |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 273 | } |
| 274 | |
/**
 * VVP device: wraps a cl_device and ties it to the VFS super block.
 */
struct vvp_device {
	/** super class: embedded cl_device */
	struct cl_device    vdv_cl;
	/** VFS super block this device serves */
	struct super_block *vdv_sb;
	/* next cl_device in the stack — presumably the lower layer; confirm */
	struct cl_device   *vdv_next;
};
| 280 | |
/** VVP layer's slice of a cl_lock. */
struct vvp_lock {
	struct cl_lock_slice vlk_cl;
};
| 284 | |
/** VVP layer's slice of a cl_req. */
struct vvp_req {
	struct cl_req_slice vrq_cl;
};
| 288 | |
| 289 | void *ccc_key_init(const struct lu_context *ctx, |
| 290 | struct lu_context_key *key); |
| 291 | void ccc_key_fini(const struct lu_context *ctx, |
| 292 | struct lu_context_key *key, void *data); |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 293 | |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 294 | void ccc_umount(const struct lu_env *env, struct cl_device *dev); |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 295 | |
John L. Hammond | 3c95b83 | 2016-03-30 19:48:46 -0400 | [diff] [blame] | 296 | static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv) |
| 297 | { |
| 298 | return &vdv->vdv_cl.cd_lu_dev; |
| 299 | } |
| 300 | |
| 301 | static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d) |
| 302 | { |
| 303 | return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev); |
| 304 | } |
| 305 | |
| 306 | static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d) |
| 307 | { |
| 308 | return container_of0(d, struct vvp_device, vdv_cl); |
| 309 | } |
| 310 | |
John L. Hammond | 8c7b0e1 | 2016-03-30 19:48:47 -0400 | [diff] [blame] | 311 | static inline struct vvp_object *cl2vvp(const struct cl_object *obj) |
| 312 | { |
| 313 | return container_of0(obj, struct vvp_object, vob_cl); |
| 314 | } |
| 315 | |
| 316 | static inline struct vvp_object *lu2vvp(const struct lu_object *obj) |
| 317 | { |
| 318 | return container_of0(obj, struct vvp_object, vob_cl.co_lu); |
| 319 | } |
| 320 | |
| 321 | static inline struct inode *vvp_object_inode(const struct cl_object *obj) |
| 322 | { |
| 323 | return cl2vvp(obj)->vob_inode; |
| 324 | } |
| 325 | |
| 326 | int vvp_object_invariant(const struct cl_object *obj); |
| 327 | struct vvp_object *cl_inode2vvp(struct inode *inode); |
| 328 | |
John L. Hammond | 3a52f80 | 2016-03-30 19:48:48 -0400 | [diff] [blame] | 329 | static inline struct page *cl2vm_page(const struct cl_page_slice *slice) |
| 330 | { |
| 331 | return cl2vvp_page(slice)->vpg_page; |
| 332 | } |
| 333 | |
John L. Hammond | 4a4eee0 | 2016-03-30 19:48:49 -0400 | [diff] [blame] | 334 | static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice) |
| 335 | { |
| 336 | return container_of(slice, struct vvp_lock, vlk_cl); |
| 337 | } |
| 338 | |
/*
 * Invariant-check stub: nothing is evaluated at run time; the sizeof
 * casts merely force the arguments to be syntactically valid
 * expressions, so invariant call sites stay compile-checked.
 */
# define CLOBINVRNT(env, clob, expr) \
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
| 341 | |
John L. Hammond | 0d34565 | 2016-03-30 19:48:45 -0400 | [diff] [blame] | 342 | /** |
| 343 | * New interfaces to get and put lov_stripe_md from lov layer. This violates |
| 344 | * layering because lov_stripe_md is supposed to be a private data in lov. |
| 345 | * |
| 346 | * NB: If you find you have to use these interfaces for your new code, please |
| 347 | * think about it again. These interfaces may be removed in the future for |
| 348 | * better layering. |
| 349 | */ |
| 350 | struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj); |
| 351 | void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm); |
| 352 | int lov_read_and_clear_async_rc(struct cl_object *clob); |
| 353 | |
| 354 | struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode); |
| 355 | void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm); |
Peng Tao | d7e09d0 | 2013-05-02 16:46:55 +0800 | [diff] [blame] | 356 | |
Jinshan Xiong | 77605e4 | 2016-03-30 19:48:30 -0400 | [diff] [blame] | 357 | int vvp_io_init(const struct lu_env *env, struct cl_object *obj, |
| 358 | struct cl_io *io); |
John L. Hammond | fee6eb5 | 2016-03-30 19:48:53 -0400 | [diff] [blame] | 359 | int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io); |
Jinshan Xiong | 77605e4 | 2016-03-30 19:48:30 -0400 | [diff] [blame] | 360 | int vvp_lock_init(const struct lu_env *env, struct cl_object *obj, |
| 361 | struct cl_lock *lock, const struct cl_io *io); |
Oleg Drokin | e15ba45 | 2016-02-26 01:49:49 -0500 | [diff] [blame] | 362 | int vvp_page_init(const struct lu_env *env, struct cl_object *obj, |
Jinshan Xiong | 7addf40 | 2016-03-30 19:48:32 -0400 | [diff] [blame] | 363 | struct cl_page *page, pgoff_t index); |
John L. Hammond | 103b8bd | 2016-03-30 19:48:54 -0400 | [diff] [blame] | 364 | int vvp_req_init(const struct lu_env *env, struct cl_device *dev, |
| 365 | struct cl_req *req); |
Peng Tao | d7e09d0 | 2013-05-02 16:46:55 +0800 | [diff] [blame] | 366 | struct lu_object *vvp_object_alloc(const struct lu_env *env, |
| 367 | const struct lu_object_header *hdr, |
| 368 | struct lu_device *dev); |
Peng Tao | d7e09d0 | 2013-05-02 16:46:55 +0800 | [diff] [blame] | 369 | |
John Hammond | 5c5af0f | 2016-03-30 19:49:00 -0400 | [diff] [blame] | 370 | int vvp_global_init(void); |
| 371 | void vvp_global_fini(void); |
| 372 | |
John L. Hammond | 2d95f10 | 2014-04-27 13:07:05 -0400 | [diff] [blame] | 373 | extern const struct file_operations vvp_dump_pgcache_file_ops; |
Peng Tao | d7e09d0 | 2013-05-02 16:46:55 +0800 | [diff] [blame] | 374 | |
| 375 | #endif /* VVP_INTERNAL_H */ |