blob: df3ee0b6264fe208f860ba362d86353ef15d40ed [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
/*
 * cifs_posix_open - open/create a file via the CIFS POSIX extensions.
 * @full_path:	path of the file relative to the share root
 * @pinode:	in/out inode pointer; may be NULL when the caller does not
 *		need inode info. If *@pinode is NULL a new inode is
 *		instantiated from the returned metadata.
 * @sb:		superblock of the mount
 * @mode:	create mode (masked by the caller's umask below)
 * @f_flags:	POSIX open flags, converted to SMB_O_* for the wire
 * @poplock:	out: oplock granted by the server
 * @pnetfid:	out: network file handle
 * @xid:	transaction id for this operation
 *
 * Returns 0 on success or a negative errno. On success with a non-NULL
 * @pinode, the inode is created or refreshed from the FILE_UNIX_BASIC_INFO
 * the server returned with the open.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	/* response buffer for the server's returned unix metadata */
	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the process umask here since the server cannot */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type == -1 means the server sent no metadata with the open */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the existing inode from the returned attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
173
/*
 * cifs_nt_open - open a file the non-POSIX (NT) way via the protocol's
 * ->open server operation.
 * @full_path:	path of the file relative to the share root
 * @inode:	inode of the file being opened (metadata refreshed on success)
 * @cifs_sb:	per-superblock cifs data
 * @tcon:	tree connection to use
 * @f_flags:	POSIX open flags, mapped to access/disposition/options below
 * @oplock:	in/out oplock state
 * @fid:	out: server file id filled in by the open
 * @xid:	transaction id for this operation
 *
 * Returns 0 on success or a negative errno (-ENOSYS when the dialect has
 * no open op, -ENOMEM on allocation failure, otherwise the server error).
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* response buffer for the file metadata returned with the open */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* refresh inode metadata from the data returned with the open */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
259
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400260static bool
261cifs_has_mand_locks(struct cifsInodeInfo *cinode)
262{
263 struct cifs_fid_locks *cur;
264 bool has_locks = false;
265
266 down_read(&cinode->lock_sem);
267 list_for_each_entry(cur, &cinode->llist, llist) {
268 if (!list_empty(&cur->locks)) {
269 has_locks = true;
270 break;
271 }
272 }
273 up_read(&cinode->lock_sem);
274 return has_locks;
275}
276
/*
 * cifs_new_fileinfo - allocate and initialize the per-open-file private
 * data (cifsFileInfo) after a successful open, link it onto the tcon and
 * inode open-file lists, and consume the pending open for @fid.
 * @fid:	server file id returned by the open; its pending_open entry
 *		is removed here
 * @file:	the VFS file being opened; private_data is set on success
 * @tlink:	tcon link the open was performed on (a reference is taken)
 * @oplock:	oplock state granted by the server
 *
 * Returns the new cifsFileInfo with a single reference, or NULL on
 * allocation failure.
 *
 * Lock ordering as used here: cinode->lock_sem is taken and released
 * before tcon->open_file_lock; file_info_lock is only initialized.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-fid byte-range lock list, hung off the inode's llist */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;	/* initial reference, dropped by cifsFileInfo_put */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/* pin the superblock while this open exists */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	/* a lease break may have updated the pending open's oplock - honor it */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	/* set_fid may set purge_cache, checked after the lock is dropped */
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
351
/*
 * Take an additional reference on an open file's private data.
 * file_info_lock serializes this against the refcount decrement in
 * _cifsFileInfo_put(). Returns @cifs_file for caller convenience.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
360
/**
 * cifsFileInfo_put - release a reference of file priv data
 * @cifs_file: the file instance whose reference is dropped
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true);
}
370
371/**
372 * _cifsFileInfo_put - release a reference of file priv data
373 *
374 * This may involve closing the filehandle @cifs_file out on the
375 * server. Must be called without holding tcon->open_file_lock and
376 * cifs_file->file_info_lock.
377 *
378 * If @wait_for_oplock_handler is true and we are releasing the last
379 * reference, wait for any running oplock break handler of the file
380 * and cancel any pending one. If calling this function from the
381 * oplock break handler, you need to pass false.
382 *
383 */
384void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
385{
David Howells2b0143b2015-03-17 22:25:59 +0000386 struct inode *inode = d_inode(cifs_file->dentry);
Steve French96daf2b2011-05-27 04:34:02 +0000387 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700388 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300389 struct cifsInodeInfo *cifsi = CIFS_I(inode);
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100390 struct super_block *sb = inode->i_sb;
391 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000392 struct cifsLockInfo *li, *tmp;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700393 struct cifs_fid fid;
394 struct cifs_pending_open open;
Sachin Prabhuca7df8e2015-01-15 12:22:04 +0000395 bool oplock_break_cancelled;
Steve Frenchcdff08e2010-10-21 22:46:14 +0000396
Steve French3afca262016-09-22 18:58:16 -0500397 spin_lock(&tcon->open_file_lock);
398
399 spin_lock(&cifs_file->file_info_lock);
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400400 if (--cifs_file->count > 0) {
Steve French3afca262016-09-22 18:58:16 -0500401 spin_unlock(&cifs_file->file_info_lock);
402 spin_unlock(&tcon->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000403 return;
Jeff Laytonb33879a2010-10-15 15:34:04 -0400404 }
Steve French3afca262016-09-22 18:58:16 -0500405 spin_unlock(&cifs_file->file_info_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000406
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700407 if (server->ops->get_lease_key)
408 server->ops->get_lease_key(inode, &fid);
409
410 /* store open in pending opens to make sure we don't miss lease break */
411 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
412
Steve Frenchcdff08e2010-10-21 22:46:14 +0000413 /* remove it from the lists */
414 list_del(&cifs_file->flist);
415 list_del(&cifs_file->tlist);
416
417 if (list_empty(&cifsi->openFileList)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500418 cifs_dbg(FYI, "closing last open instance for inode %p\n",
David Howells2b0143b2015-03-17 22:25:59 +0000419 d_inode(cifs_file->dentry));
Pavel Shilovsky25364132012-09-18 16:20:27 -0700420 /*
421 * In strict cache mode we need invalidate mapping on the last
422 * close because it may cause a error when we open this file
423 * again and get at least level II oplock.
424 */
Pavel Shilovsky4f8ba8a2010-11-21 22:36:12 +0300425 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
Jeff Laytonaff8d5c2014-04-30 09:31:45 -0400426 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
Pavel Shilovskyc6723622010-11-03 10:58:57 +0300427 cifs_set_oplock_level(cifsi, 0);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000428 }
Steve French3afca262016-09-22 18:58:16 -0500429
430 spin_unlock(&tcon->open_file_lock);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000431
Aurelien Aptel1ee4f2d2019-03-29 10:49:12 +0100432 oplock_break_cancelled = wait_oplock_handler ?
433 cancel_work_sync(&cifs_file->oplock_break) : false;
Jeff Laytonad635942011-07-26 12:20:17 -0400434
Steve Frenchcdff08e2010-10-21 22:46:14 +0000435 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700436 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400437 unsigned int xid;
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700438
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400439 xid = get_xid();
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700440 if (server->ops->close)
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +0400441 server->ops->close(xid, tcon, &cifs_file->fid);
442 _free_xid(xid);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000443 }
444
Sachin Prabhuca7df8e2015-01-15 12:22:04 +0000445 if (oplock_break_cancelled)
446 cifs_done_oplock_break(cifsi);
447
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700448 cifs_del_pending_open(&open);
449
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700450 /*
451 * Delete any outstanding lock records. We'll lose them when the file
Steve Frenchcdff08e2010-10-21 22:46:14 +0000452 * is closed anyway.
453 */
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700454 down_write(&cifsi->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700455 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
Steve Frenchcdff08e2010-10-21 22:46:14 +0000456 list_del(&li->llist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400457 cifs_del_lock_waiters(li);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000458 kfree(li);
459 }
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700460 list_del(&cifs_file->llist->llist);
461 kfree(cifs_file->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700462 up_write(&cifsi->lock_sem);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000463
464 cifs_put_tlink(cifs_file->tlink);
465 dput(cifs_file->dentry);
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100466 cifs_sb_deactive(sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000467 kfree(cifs_file);
Jeff Laytonb33879a2010-10-15 15:34:04 -0400468}
469
/*
 * cifs_open - VFS ->open for files on a cifs mount.
 *
 * Tries a POSIX-extensions open first when the tcon supports it, and
 * falls back to the NT-style open otherwise. On success the per-open
 * private data is allocated (cifs_new_fileinfo) and attached to @file.
 * A pending-open record brackets the NT open so a lease break arriving
 * during the open is not missed.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* in strict cache mode, O_DIRECT opens switch to the direct file ops */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* try the POSIX-extensions open first when the tcon advertises it */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims support but rejects it: disable for good */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* record the open so a concurrent lease break is not lost */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server-side open and the pending-open record */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
596
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 *
 * Chooses the POSIX lock push when the tcon supports the unix FCNTL
 * capability and posix brlocks are not disabled by mount option;
 * otherwise pushes mandatory locks via the dialect op. Returns 0 on
 * success or the error from the push.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* nested annotation: lock_sem may legitimately be held one level up */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
628
/*
 * Reopen @cfile's server handle after it was invalidated (e.g. on
 * reconnect).  If @can_flush is true, write back dirty pages and refresh
 * the inode metadata from the server; cached byte-range locks are re-sent
 * whenever the handle was not transparently reconnected by the server.
 * Returns 0 on success (including the handle already being valid) or a
 * negative error code.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	/* fh_mutex serializes handle (in)validation for this open file */
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* another task already reopened the handle - nothing to do */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
786
787int cifs_close(struct inode *inode, struct file *file)
788{
Jeff Layton77970692011-04-05 16:23:47 -0700789 if (file->private_data != NULL) {
790 cifsFileInfo_put(file->private_data);
791 file->private_data = NULL;
792 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793
Steve Frenchcdff08e2010-10-21 22:46:14 +0000794 /* return code from the ->release op is always ignored */
795 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796}
797
Steve French52ace1e2016-09-22 19:23:56 -0500798void
799cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
800{
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700801 struct cifsFileInfo *open_file;
Steve French52ace1e2016-09-22 19:23:56 -0500802 struct list_head *tmp;
803 struct list_head *tmp1;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700804 struct list_head tmp_list;
805
Pavel Shilovsky46890ff2016-11-29 11:31:23 -0800806 if (!tcon->use_persistent || !tcon->need_reopen_files)
807 return;
808
809 tcon->need_reopen_files = false;
810
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700811 cifs_dbg(FYI, "Reopen persistent handles");
812 INIT_LIST_HEAD(&tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500813
814 /* list all files open on tree connection, reopen resilient handles */
815 spin_lock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700816 list_for_each(tmp, &tcon->openFileList) {
Steve French52ace1e2016-09-22 19:23:56 -0500817 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700818 if (!open_file->invalidHandle)
819 continue;
820 cifsFileInfo_get(open_file);
821 list_add_tail(&open_file->rlist, &tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500822 }
823 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700824
825 list_for_each_safe(tmp, tmp1, &tmp_list) {
826 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
Pavel Shilovsky46890ff2016-11-29 11:31:23 -0800827 if (cifs_reopen_file(open_file, false /* do not flush */))
828 tcon->need_reopen_files = true;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700829 list_del_init(&open_file->rlist);
830 cifsFileInfo_put(open_file);
831 }
Steve French52ace1e2016-09-22 19:23:56 -0500832}
833
/*
 * ->release handler for directories: close the on-the-wire search handle
 * if the readdir was left uncompleted, free any buffered search results,
 * and drop the tlink reference and private data.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		/* drop the spinlock before issuing the network close */
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* free the SMB buffer holding cached search results, if any */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
884
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400885static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300886cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000887{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400888 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000889 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400890 if (!lock)
891 return lock;
892 lock->offset = offset;
893 lock->length = length;
894 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400895 lock->pid = current->tgid;
896 INIT_LIST_HEAD(&lock->blist);
897 init_waitqueue_head(&lock->block_q);
898 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400899}
900
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700901void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400902cifs_del_lock_waiters(struct cifsLockInfo *lock)
903{
904 struct cifsLockInfo *li, *tmp;
905 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
906 list_del_init(&li->blist);
907 wake_up(&li->block_q);
908 }
909}
910
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400911#define CIFS_LOCK_OP 0
912#define CIFS_READ_OP 1
913#define CIFS_WRITE_OP 2
914
/* @rw_check : 0 - lock op, 1 - read op, 2 - write op */
/*
 * Scan one fid's cached lock list for a lock that overlaps the range
 * [@offset, @offset + @length) and conflicts with a request of @type
 * issued through @cfile.  On conflict, store the offending lock in
 * *@conf_lock (if non-NULL) and return true; otherwise return false.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip locks that do not overlap the requested range */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/*
		 * A shared request does not conflict with the caller's own
		 * lock (same fid and tgid) or with a lock of identical type.
		 */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
946
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700947bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300948cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700949 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400950 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400951{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300952 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700953 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +0000954 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300955
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700956 list_for_each_entry(cur, &cinode->llist, llist) {
957 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700958 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300959 if (rc)
960 break;
961 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300962
963 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400964}
965
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300966/*
967 * Check if there is another lock that prevents us to set the lock (mandatory
968 * style). If such a lock exists, update the flock structure with its
969 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
970 * or leave it the same if we can't. Returns 0 if we don't need to request to
971 * the server or 1 otherwise.
972 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400973static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300974cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
975 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400976{
977 int rc = 0;
978 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +0000979 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300980 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400981 bool exist;
982
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700983 down_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400984
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300985 exist = cifs_find_lock_conflict(cfile, offset, length, type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400986 &conf_lock, CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400987 if (exist) {
988 flock->fl_start = conf_lock->offset;
989 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
990 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300991 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400992 flock->fl_type = F_RDLCK;
993 else
994 flock->fl_type = F_WRLCK;
995 } else if (!cinode->can_cache_brlcks)
996 rc = 1;
997 else
998 flock->fl_type = F_UNLCK;
999
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001000 up_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001001 return rc;
1002}
1003
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001004static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001005cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001006{
David Howells2b0143b2015-03-17 22:25:59 +00001007 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001008 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001009 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001010 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001011}
1012
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed - record the lock locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blocked list and
		 * sleep until cifs_del_lock_waiters() detaches us (the wait
		 * condition checks that our blist entry is self-linked again),
		 * then retry from scratch.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - unhook ourselves from the blocked list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1059
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001060/*
1061 * Check if there is another lock that prevents us to set the lock (posix
1062 * style). If such a lock exists, update the flock structure with its
1063 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1064 * or leave it the same if we can't. Returns 0 if we don't need to request to
1065 * the server or 1 otherwise.
1066 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001067static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001068cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1069{
1070 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001071 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001072 unsigned char saved_type = flock->fl_type;
1073
Pavel Shilovsky50792762011-10-29 17:17:57 +04001074 if ((flock->fl_flags & FL_POSIX) == 0)
1075 return 1;
1076
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001077 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001078 posix_test_lock(file, flock);
1079
1080 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1081 flock->fl_type = saved_type;
1082 rc = 1;
1083 }
1084
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001085 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001086 return rc;
1087}
1088
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* caching disabled - caller must send the lock to the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * A blocking request hit a conflict: wait (interruptibly)
		 * for the blocker to go away, then retake lock_sem and retry;
		 * on interruption drop off the blocked-lock list.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}
1121
/*
 * Send all cached mandatory byte-range locks for @cfile to the server with
 * LOCKING_ANDX requests, batching up to max_num ranges per call.  Two
 * passes are made: exclusive locks first, then shared locks.  Returns 0,
 * or the last non-zero status from cifs_lockv().
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	/* pass 0: exclusive locks; pass 1: shared locks */
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			/* marshal this range in little-endian wire format */
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* range buffer full - flush to the server */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			/* send the final partial batch for this lock type */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1196
Jeff Layton3d224622016-05-24 06:27:44 -04001197static __u32
1198hash_lockowner(fl_owner_t owner)
1199{
1200 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1201}
1202
/*
 * Snapshot of one cached POSIX byte-range lock, collected so that the
 * pushing code can issue network requests without holding flc_lock
 * (see cifs_push_posix_locks()).
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the locks_to_send list */
	__u64 offset;		/* start of the locked range */
	__u64 length;		/* number of bytes locked */
	__u32 pid;		/* hashed lock owner, sent as the pid */
	__u16 netfid;		/* SMB file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1211
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001212static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001213cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001214{
David Howells2b0143b2015-03-17 22:25:59 +00001215 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001216 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001217 struct file_lock *flock;
1218 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001219 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001220 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001221 struct list_head locks_to_send, *el;
1222 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001223 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001224
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001225 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001226
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001227 if (!flctx)
1228 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001229
Jeff Laytone084c1b2015-02-16 14:32:03 -05001230 spin_lock(&flctx->flc_lock);
1231 list_for_each(el, &flctx->flc_posix) {
1232 count++;
1233 }
1234 spin_unlock(&flctx->flc_lock);
1235
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001236 INIT_LIST_HEAD(&locks_to_send);
1237
1238 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001239 * Allocating count locks is enough because no FL_POSIX locks can be
1240 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001241 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001242 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001243 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001244 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1245 if (!lck) {
1246 rc = -ENOMEM;
1247 goto err_out;
1248 }
1249 list_add_tail(&lck->llist, &locks_to_send);
1250 }
1251
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001252 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001253 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001254 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001255 if (el == &locks_to_send) {
1256 /*
1257 * The list ended. We don't have enough allocated
1258 * structures - something is really wrong.
1259 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001260 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001261 break;
1262 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001263 length = 1 + flock->fl_end - flock->fl_start;
1264 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1265 type = CIFS_RDLCK;
1266 else
1267 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001268 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001269 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001270 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001271 lck->length = length;
1272 lck->type = type;
1273 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001274 }
Jeff Layton6109c852015-01-16 15:05:57 -05001275 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001276
1277 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001278 int stored_rc;
1279
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001280 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001281 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001282 lck->type, 0);
1283 if (stored_rc)
1284 rc = stored_rc;
1285 list_del(&lck->llist);
1286 kfree(lck);
1287 }
1288
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001289out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001290 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001291 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001292err_out:
1293 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1294 list_del(&lck->llist);
1295 kfree(lck);
1296 }
1297 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001298}
1299
1300static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001301cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001302{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001303 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001304 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001305 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001306 int rc = 0;
1307
1308 /* we are going to update can_cache_brlcks here - need a write access */
1309 down_write(&cinode->lock_sem);
1310 if (!cinode->can_cache_brlcks) {
1311 up_write(&cinode->lock_sem);
1312 return rc;
1313 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001314
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001315 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001316 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1317 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001318 rc = cifs_push_posix_locks(cfile);
1319 else
1320 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001321
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001322 cinode->can_cache_brlcks = false;
1323 up_write(&cinode->lock_sem);
1324 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001325}
1326
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001327static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001328cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001329 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001331 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001332 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001333 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001334 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001335 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001336 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001337 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001339 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001340 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001341 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001342 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001343 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001344 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1345 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001346 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001348 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001349 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001350 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001351 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001352 *lock = 1;
1353 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001354 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001355 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001356 *unlock = 1;
1357 /* Check if unlock includes more than one lock range */
1358 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001359 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001360 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001361 *lock = 1;
1362 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001363 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001364 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001365 *lock = 1;
1366 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001367 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001368 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001369 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001371 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001372}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373
/*
 * Handle an F_GETLK-style conflict test for @flock.
 *
 * @file:      open file the request is made against
 * @flock:     VFS lock request; fl_type is rewritten in place to report the
 *             result (F_UNLCK if the range could be locked, otherwise the
 *             type of the conflicting lock)
 * @type:      server lock-type bits built by cifs_read_flock()
 * @wait_flag: whether the caller allows blocking
 * @posix_lck: use POSIX (unix extensions) locking instead of mandatory
 * @xid:       transaction id for this operation
 *
 * On the mandatory path the test is done by actually attempting the lock
 * on the server and immediately unlocking on success; for an exclusive
 * request that fails, a second attempt with a shared type distinguishes a
 * write conflict from a read conflict.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/*
		 * rc == 0: answered without contacting the server —
		 * NOTE(review): confirm against cifs_posix_lock_test.
		 */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		/* server fills *flock with any conflicting lock it finds */
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* rc == 0: resolved from the locally cached lock list */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* try to take the lock on the server with the requested type */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* it worked - no conflict; undo the probe lock right away */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	/* a shared request that failed can only conflict with a writer */
	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed - retry shared to classify the conflict */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		/* shared probe succeeded: a reader holds the range */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1442
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001443void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001444cifs_move_llist(struct list_head *source, struct list_head *dest)
1445{
1446 struct list_head *li, *tmp;
1447 list_for_each_safe(li, tmp, source)
1448 list_move(li, dest);
1449}
1450
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001451void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001452cifs_free_llist(struct list_head *llist)
1453{
1454 struct cifsLockInfo *li, *tmp;
1455 list_for_each_entry_safe(li, tmp, llist, llist) {
1456 cifs_del_lock_waiters(li);
1457 list_del(&li->llist);
1458 kfree(li);
1459 }
1460}
1461
/*
 * Send LOCKING_ANDX unlock requests for every cached mandatory lock of
 * @cfile that lies entirely within the range described by @flock.
 *
 * Matching locks (same tgid, fully contained in the unlock range) are
 * batched into one page-sized buffer of up to max_num ranges per request,
 * in two passes - one per lock type in types[].  While a request is in
 * flight the locks are parked on tmp_llist so they can be restored to the
 * file's list if the server rejects the unlock.
 *
 * Returns 0 on success or the last error returned by cifs_lockv().
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	/* the lock buffer below is sized as one page - that must suffice */
	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* how many ranges fit in a single LOCKING_ANDX request */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* skip locks not fully inside the unlock range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			/* only unlock ranges owned by this thread group */
			if (current->tgid != li->pid)
				continue;
			/* each pass handles exactly one lock type */
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* buffer full - flush this batch now */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* flush the final partial batch for this lock type */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1572
/*
 * Apply a lock or unlock request (F_SETLK/F_SETLKW) described by @flock.
 *
 * @type:      server lock-type bits built by cifs_read_flock()
 * @wait_flag: true for blocking requests
 * @posix_lck: use POSIX (unix extensions) locking instead of mandatory
 * @lock, @unlock: action flags set by cifs_read_flock()
 * @xid:       transaction id for this operation
 *
 * For FL_POSIX requests the local VFS bookkeeping is updated at the end
 * via locks_lock_file_wait(), even if the server-side unlock failed on an
 * FL_CLOSE path (both cifs.ko and the server drop locks at close anyway).
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		/*
		 * rc <= 0: request fully handled (or failed) locally —
		 * NOTE(review): confirm against cifs_posix_lock_set.
		 */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/*
		 * rc < 0: error (we still own @lock and must free it);
		 * rc == 0: no server round trip needed, presumably the lock
		 * was handled by cifs_lock_add_if itself — verify there.
		 */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			/* server refused - drop the record we allocated */
			kfree(lock);
			return rc;
		}

		/* server granted the lock - remember it locally */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
1665
1666int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1667{
1668 int rc, xid;
1669 int lock = 0, unlock = 0;
1670 bool wait_flag = false;
1671 bool posix_lck = false;
1672 struct cifs_sb_info *cifs_sb;
1673 struct cifs_tcon *tcon;
1674 struct cifsInodeInfo *cinode;
1675 struct cifsFileInfo *cfile;
1676 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001677 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001678
1679 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001680 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001681
Joe Perchesf96637b2013-05-04 22:12:25 -05001682 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1683 cmd, flock->fl_flags, flock->fl_type,
1684 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001685
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001686 cfile = (struct cifsFileInfo *)file->private_data;
1687 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001688
1689 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1690 tcon->ses->server);
1691
Al Viro7119e222014-10-22 00:25:12 -04001692 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001693 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001694 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001695
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001696 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001697 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1698 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1699 posix_lck = true;
1700 /*
1701 * BB add code here to normalize offset and length to account for
1702 * negative length which we can not accept over the wire.
1703 */
1704 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001705 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001706 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001707 return rc;
1708 }
1709
1710 if (!lock && !unlock) {
1711 /*
1712 * if no lock or unlock then nothing to do since we do not
1713 * know what it is
1714 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001715 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001716 return -EOPNOTSUPP;
1717 }
1718
1719 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1720 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001721 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 return rc;
1723}
1724
Jeff Layton597b0272012-03-23 14:40:56 -04001725/*
1726 * update the file size (if needed) after a write. Should be called with
1727 * the inode->i_lock held
1728 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001729void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001730cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1731 unsigned int bytes_written)
1732{
1733 loff_t end_of_write = offset + bytes_written;
1734
1735 if (end_of_write > cifsi->server_eof)
1736 cifsi->server_eof = end_of_write;
1737}
1738
/*
 * Synchronously write @write_size bytes from @write_data to @open_file
 * starting at *@offset, using the server's ->sync_write op.  Each request
 * is capped at ->wp_retry_size(); -EAGAIN (reconnect) is retried after
 * reopening an invalidated handle if needed.  On progress the cached
 * server EOF and i_size are updated and *@offset is advanced.
 *
 * Returns the number of bytes written, or a negative error only when
 * nothing at all could be written.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		/* seed with -EAGAIN so the retry loop runs at least once */
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each request at the server's retry size */
			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* report the error only if no progress was made */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* i_lock protects both server_eof and i_size */
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1823
/*
 * Find an open file handle on the inode that can be used for reading.
 *
 * @cifs_inode: inode whose open-handle list (openFileList) is scanned
 * @fsuid_only: if true (and the mount is multiuser), only consider handles
 *		opened by the current fsuid
 *
 * Returns a handle with a reference taken (cifsFileInfo_get) which the
 * caller must drop with cifsFileInfo_put(), or NULL if no usable
 * read-capable handle exists.  Invalid handles are skipped; the caller is
 * expected to reopen in that case.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	/* open_file_lock protects the inode's openFileList during the walk */
	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001858
/*
 * Find an open file handle on the inode usable for writing.
 *
 * Preference order: a valid writable handle belonging to the current thread
 * group, then any valid writable handle, and finally an invalidated writable
 * handle which we attempt to reopen (retried up to MAX_REOPEN_ATT times).
 *
 * @cifs_inode: inode whose openFileList is scanned (NULL is tolerated and
 *		logged; see comment below)
 * @fsuid_only: on multiuser mounts, restrict the search to handles opened
 *		by the current fsuid
 *
 * On success returns a handle with a reference held (cifsFileInfo_get) that
 * the caller must drop with cifsFileInfo_put(); returns NULL otherwise.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of on oops (due to
	   it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	/* give up after too many failed reopen attempts */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass: only consider handles owned by this tgid */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				/* remember first invalid handle as fallback */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		/* pin the handle before dropping the lock to reopen it */
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		/* try to revalidate the stale handle outside the spinlock */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			/* reopen failed: move it to the back of the list,
			   drop our reference and rescan for another one */
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}
1941
/*
 * Write the byte range [from, to) of a page-cache page back to the server
 * using any writable open handle found for the inode.
 *
 * Returns 0 on success, -EFAULT for a bad mapping, -EIO for an invalid
 * range or when no writable handle exists, or the negative rc from
 * cifs_write().  A range wholly beyond EOF returns 0 (racing with
 * truncate; nothing to do).
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* the range must lie within a single page */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		/* drop the reference taken by find_writable_file() */
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1995
/*
 * Allocate a cifs_writedata and gather up to @tofind dirty pages from
 * @mapping starting at *@index (bounded by @end).
 *
 * *@found_pages is set to the number of page references collected (each
 * page comes back with a reference from find_get_pages_tag that the caller
 * must release); *@index is advanced past the pages found.  Returns NULL
 * only if the writedata allocation itself fails.
 */
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	unsigned int nr_pages;
	struct page **pages;
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	/*
	 * find_get_pages_tag seems to return a max of 256 on each
	 * iteration, so we must call it several times in order to
	 * fill the array or the wsize is effectively limited to
	 * 256 * PAGE_SIZE.
	 */
	*found_pages = 0;
	pages = wdata->pages;
	do {
		nr_pages = find_get_pages_tag(mapping, index,
					      PAGECACHE_TAG_DIRTY, tofind,
					      pages);
		*found_pages += nr_pages;
		tofind -= nr_pages;
		pages += nr_pages;
	} while (nr_pages && tofind && *index <= end);

	return wdata;
}
2029
/*
 * Lock and mark for writeback as many of the @found_pages gathered pages as
 * form a consecutive, still-dirty run suitable for a single write request.
 *
 * The first page that fails any check (lost its mapping, past @end, not
 * consecutive, still under writeback, no longer dirty, or beyond EOF)
 * terminates the run.  References on unused pages are dropped and their
 * slots cleared.  *@next is set to the index expected of the following
 * page; *@done and *@index feed back into the cifs_writepages() loop.
 * Returns the number of pages prepared (possibly 0).
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		/* block for the first page; never block mid-run */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	/* caller guarantees found_pages > 0, so pages[0] is valid here */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2109
/*
 * Fill in the remaining write-request fields (offset, lengths, handle, pid)
 * and hand the prepared pages to the server's async write path.
 *
 * A writable handle is (re)acquired here; if none is available the request
 * fails with -EBADF without being sent.  All pages are unlocked before
 * returning, whether or not the send was issued; on failure the caller
 * cleans up the writeback state.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* the final page may be partial: clamp its length to EOF */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	/* release any handle left over from a previous attempt */
	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
2144
/*
 * ->writepages() for cifs: write back dirty pages in wsize-sized batches.
 *
 * Each iteration reserves send credits from the server, gathers up to a
 * credit's worth of contiguous dirty pages, and submits them through
 * wdata_send_pages().  On a send failure the pages are redirtied (-EAGAIN)
 * or marked in error; for WB_SYNC_ALL an -EAGAIN batch is retried from the
 * same starting index.  Falls back to generic_writepages() when wsize is
 * smaller than a page.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		/* block until the server grants credits for up to wsize */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		/* number of pages this batch may carry under the credits */
		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			/* return the unused credits to the server pool */
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* data integrity sync: must retry the same range on -EAGAIN */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
/*
 * Write one locked page back to the server via cifs_partialpagewrite().
 *
 * The caller holds the page lock; this helper does not release it (see
 * cifs_writepage()).  An extra page reference is held across the write so
 * the page cannot be freed while writeback is in flight.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	/*
	 * NOTE(review): under WB_SYNC_ALL a persistent -EAGAIN retries
	 * immediately, with no backoff and the page still locked -- looks
	 * like this could spin during a long reconnect; confirm against
	 * later upstream behavior.
	 */
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2296
/* ->writepage(): write the page out, then drop the lock the VFS took. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}
2303
/*
 * ->write_end() for cifs: commit @copied bytes that write_begin placed in
 * @page at @pos.
 *
 * If the page is not uptodate (a partial write into an uncached page), the
 * data is written synchronously to the server with cifs_write(); otherwise
 * the page is just marked dirty for later writeback.  i_size is extended
 * when the write moved past it.  Returns the number of bytes accepted or a
 * negative error, and always unlocks and releases the page.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the opener's pid to the server when the mount asks for it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		/* write_begin deferred the read: uptodate only if the whole
		   requested range was copied in */
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2364
/*
 * Strict fsync: flush the dirty range, and when we hold no read cache
 * lease (CIFS_CACHE_READ) zap the page cache so later reads refetch from
 * the server; then request a server-side flush of the handle unless the
 * mount disables it (CIFS_MOUNT_NOSSYNC).
 *
 * Invalidation failures are logged but deliberately ignored.  Returns 0 or
 * a negative error (including -ENOSYS when the server has no flush op).
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2407
/*
 * Non-strict fsync: flush the dirty range and request a server-side flush
 * of the handle (unless disabled via CIFS_MOUNT_NOSSYNC).  Unlike
 * cifs_strict_fsync(), the page cache is never invalidated here.
 *
 * Returns 0 or a negative error (-ENOSYS when the server has no flush op).
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2441
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442/*
2443 * As file closes, flush all cached write data for this inode checking
2444 * for write behind errors.
2445 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002446int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447{
Al Viro496ad9a2013-01-23 17:07:38 -05002448 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 int rc = 0;
2450
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002451 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002452 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002453
Joe Perchesf96637b2013-05-04 22:12:25 -05002454 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455
2456 return rc;
2457}
2458
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002459static int
2460cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2461{
2462 int rc = 0;
2463 unsigned long i;
2464
2465 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002466 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002467 if (!pages[i]) {
2468 /*
2469 * save number of pages we have already allocated and
2470 * return with ENOMEM error
2471 */
2472 num_pages = i;
2473 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002474 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002475 }
2476 }
2477
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002478 if (rc) {
2479 for (i = 0; i < num_pages; i++)
2480 put_page(pages[i]);
2481 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002482 return rc;
2483}
2484
2485static inline
2486size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2487{
2488 size_t num_pages;
2489 size_t clen;
2490
2491 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002492 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002493
2494 if (cur_len)
2495 *cur_len = clen;
2496
2497 return num_pages;
2498}
2499
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002500static void
Steve French4a5c80d2014-02-07 20:45:12 -06002501cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002502{
2503 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002504 struct cifs_writedata *wdata = container_of(refcount,
2505 struct cifs_writedata, refcount);
2506
2507 for (i = 0; i < wdata->nr_pages; i++)
2508 put_page(wdata->pages[i]);
2509 cifs_writedata_release(refcount);
2510}
2511
/*
 * Work-queue completion for an uncached write: update the cached server
 * EOF and (if it grew) i_size under i_lock, wake anyone waiting on
 * wdata->done, then drop the work item's reference to the writedata.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2530
/*
 * Copy up to *@len bytes from the iov_iter @from into the wdata's pages
 * (at most *@num_pages of them).
 *
 * On return *@len holds the number of bytes actually copied and
 * *@num_pages the number of pages used.  Returns -EFAULT if nothing at all
 * could be copied (bogus iovec address), 0 otherwise.
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2572
/*
 * Split the range [offset, offset + len) of @from into wsize-bounded
 * chunks and issue an asynchronous write for each one. Every request
 * that is successfully sent is appended to @wdata_list; the caller waits
 * for the completions and collects the results.
 *
 * Returns 0 if the whole range was sent, otherwise the error that stopped
 * the loop. Requests already queued on @wdata_list remain the caller's
 * responsibility either way.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	/* Saved so the iterator can be rewound for an -EAGAIN resend. */
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	/* Forward the lock-owner pid only when the mount requests it. */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		/* Blocks until the server grants credits for a write. */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			/* Return the unused credits before bailing out. */
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			/* Copy failed entirely: drop pages and wdata. */
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		/* Last page may be partial; record its valid length. */
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		/* Reopen a stale handle first if reconnect invalidated it. */
		if (!wdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/*
				 * Retryable: rewind the iterator to the
				 * current offset and retry this chunk.
				 */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2668
/*
 * Uncached write path: copy the user's data into private pages, send
 * asynchronous write requests for the whole range, then wait for the
 * replies in offset order, resending any chunk that fails with -EAGAIN.
 *
 * Returns the number of bytes written, or a negative errno if nothing
 * was written at all.
 */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	/* Saved so resends can rewind to any offset within the request. */
	struct iov_iter saved_from = *from;
	int rc;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	/* This path requires async write support in the protocol ops. */
	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
				  open_file, cifs_sb, &wdata_list);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = saved_from;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				/* Position the copy at the failed chunk. */
				iov_iter_advance(&tmp_from,
						 wdata->offset - iocb->ki_pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						open_file, cifs_sb, &tmp_list);

				list_splice(&tmp_list, &wdata_list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				/* List changed under us; start over. */
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	/* Cached pages are now stale; force revalidation on next read. */
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
	cifs_stats_bytes_written(tcon, total_written);
	return total_written;
}
2762
/*
 * Cached write used when we hold a write oplock: go through the page
 * cache via __generic_file_write_iter(), but only after verifying the
 * range does not collide with a mandatory brlock held by someone else.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	inode_lock(inode);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	inode_unlock(inode);

	/*
	 * NOTE(review): generic_write_sync() runs while lock_sem is still
	 * held (it is only released below) — confirm this cannot deadlock
	 * against paths that take lock_sem during writeback.
	 */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	up_read(&cinode->lock_sem);
	return rc;
}
2798
/*
 * Strict-cache write entry point. Chooses between the cached path (when
 * we hold a write oplock/lease) and the uncached path, and invalidates
 * stale cached data after an uncached write when read caching is active.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* Serialize against oplock-break handling; nonzero means "busy". */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		/*
		 * POSIX-capable unix extensions (and no NOPOSIXBRL mount
		 * flag) let us skip the mandatory-brlock conflict check.
		 */
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2848
Jeff Layton0471ca32012-05-16 07:13:16 -04002849static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002850cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002851{
2852 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002853
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002854 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2855 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002856 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002857 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002858 INIT_LIST_HEAD(&rdata->list);
2859 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002860 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002861 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002862
Jeff Layton0471ca32012-05-16 07:13:16 -04002863 return rdata;
2864}
2865
Jeff Layton6993f742012-05-16 07:13:17 -04002866void
2867cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002868{
Jeff Layton6993f742012-05-16 07:13:17 -04002869 struct cifs_readdata *rdata = container_of(refcount,
2870 struct cifs_readdata, refcount);
2871
2872 if (rdata->cfile)
2873 cifsFileInfo_put(rdata->cfile);
2874
Jeff Layton0471ca32012-05-16 07:13:16 -04002875 kfree(rdata);
2876}
2877
Jeff Layton2a1bb132012-05-16 07:13:17 -04002878static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002879cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002880{
2881 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002882 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002883 unsigned int i;
2884
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002885 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002886 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2887 if (!page) {
2888 rc = -ENOMEM;
2889 break;
2890 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002891 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002892 }
2893
2894 if (rc) {
Roberto Bergantinos Corpasdf2b6af2019-05-28 09:38:14 +02002895 unsigned int nr_page_failed = i;
2896
2897 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002898 put_page(rdata->pages[i]);
2899 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002900 }
2901 }
2902 return rc;
2903}
2904
2905static void
2906cifs_uncached_readdata_release(struct kref *refcount)
2907{
Jeff Layton1c892542012-05-16 07:13:17 -04002908 struct cifs_readdata *rdata = container_of(refcount,
2909 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002910 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002911
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002912 for (i = 0; i < rdata->nr_pages; i++) {
2913 put_page(rdata->pages[i]);
2914 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002915 }
2916 cifs_readdata_release(refcount);
2917}
2918
Jeff Layton1c892542012-05-16 07:13:17 -04002919/**
2920 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2921 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05002922 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04002923 *
2924 * This function copies data from a list of pages in a readdata response into
2925 * an array of iovecs. It will first calculate where the data should go
2926 * based on the info in the readdata and then copy the data into that spot.
2927 */
Al Viro7f25bba2014-02-04 14:07:43 -05002928static int
2929cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04002930{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04002931 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002932 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002933
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002934 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002935 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02002936 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovskyc06d74d2017-01-19 13:53:15 -08002937 size_t written;
2938
2939 if (unlikely(iter->type & ITER_PIPE)) {
2940 void *addr = kmap_atomic(page);
2941
2942 written = copy_to_iter(addr, copy, iter);
2943 kunmap_atomic(addr);
2944 } else
2945 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05002946 remaining -= written;
2947 if (written < copy && iov_iter_count(iter) > 0)
2948 break;
Jeff Layton1c892542012-05-16 07:13:17 -04002949 }
Al Viro7f25bba2014-02-04 14:07:43 -05002950 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002951}
2952
/*
 * Work item run when an uncached read finishes: wake the waiter, then
 * drop this work item's reference on the readdata.
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
2962
/*
 * Receive @len bytes of read response data from the server socket into
 * rdata's pages. Pages past the received data are released, the final
 * partial page (if any) is zero-padded and its valid length recorded in
 * rdata->tailsz.
 *
 * Returns the number of bytes received if anything arrived and the
 * connection wasn't aborted, otherwise the last socket-read result.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;

		/* len is unsigned, so this only triggers once it hits 0. */
		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}
		n = len;
		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			n = PAGE_SIZE;
			len -= n;
		} else {
			/* Last, partial page: pad the tail with zeroes. */
			zero_user(page, len, PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		}
		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3004
/*
 * Split the range [offset, offset + len) into rsize-bounded chunks and
 * issue an asynchronous read request for each. Requests that were sent
 * successfully are appended to @rdata_list for the caller to wait on.
 *
 * Returns 0 if the whole range was dispatched, otherwise the error that
 * stopped the loop (-EAGAIN chunks are retried internally).
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	/* Forward the lock-owner pid only when the mount requests it. */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		/* Blocks until the server grants credits for a read. */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/* Return the unused credits before bailing out. */
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->credits = credits;

		/* Reopen a stale handle first if reconnect invalidated it. */
		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3074
/*
 * Uncached read path: issue asynchronous reads for the whole requested
 * range, then wait for the replies in offset order, copying each into
 * the destination iterator and reissuing any chunk that completed with
 * -EAGAIN (reconnect) from the point it got to.
 *
 * Returns the number of bytes read, or a negative errno if nothing was
 * read at all.
 */
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	/* This path requires async read support in the protocol ops. */
	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	len = iov_iter_count(to);
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				/* Reissue only the part we haven't read yet. */
				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list);

				list_splice(&tmp_list, &rdata_list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				/* List changed under us; start over. */
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	total_read = len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, total_read);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3178
/*
 * Strict-cache read entry point. Reads from the server unless we hold a
 * read oplock/lease; with POSIX extensions the brlock conflict check is
 * skipped, otherwise the cached read only proceeds if no mandatory lock
 * conflicts with the requested range.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	/* POSIX-capable mounts need no mandatory-brlock conflict check. */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003219static ssize_t
3220cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221{
3222 int rc = -EACCES;
3223 unsigned int bytes_read = 0;
3224 unsigned int total_read;
3225 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003226 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003228 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003229 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003230 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003231 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003233 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003234 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003235 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003237 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003238 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003239
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003240 /* FIXME: set up handlers for larger reads and/or convert to async */
3241 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3242
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303244 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003245 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303246 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003248 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003249 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003250 server = tcon->ses->server;
3251
3252 if (!server->ops->sync_read) {
3253 free_xid(xid);
3254 return -ENOSYS;
3255 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003257 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3258 pid = open_file->pid;
3259 else
3260 pid = current->tgid;
3261
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003263 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003264
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003265 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3266 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003267 do {
3268 current_read_size = min_t(uint, read_size - total_read,
3269 rsize);
3270 /*
3271 * For windows me and 9x we do not want to request more
3272 * than it negotiated since it will refuse the read
3273 * then.
3274 */
3275 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003276 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003277 current_read_size = min_t(uint,
3278 current_read_size, CIFSMaxBufSize);
3279 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003280 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003281 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282 if (rc != 0)
3283 break;
3284 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003285 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003286 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003287 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003288 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003289 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003290 &bytes_read, &cur_offset,
3291 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003292 } while (rc == -EAGAIN);
3293
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294 if (rc || (bytes_read == 0)) {
3295 if (total_read) {
3296 break;
3297 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003298 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003299 return rc;
3300 }
3301 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003302 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003303 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304 }
3305 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003306 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307 return total_read;
3308}
3309
Jeff Laytonca83ce32011-04-12 09:13:44 -04003310/*
3311 * If the page is mmap'ed into a process' page tables, then we need to make
3312 * sure that it doesn't change while being written back.
3313 */
3314static int
3315cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3316{
3317 struct page *page = vmf->page;
3318
3319 lock_page(page);
3320 return VM_FAULT_LOCKED;
3321}
3322
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003323static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003324 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003325 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003326 .page_mkwrite = cifs_page_mkwrite,
3327};
3328
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003329int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3330{
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003331 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003332 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003333
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003334 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003335
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003336 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003337 rc = cifs_zap_mapping(inode);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003338 if (!rc)
3339 rc = generic_file_mmap(file, vma);
3340 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003341 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003342
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003343 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003344 return rc;
3345}
3346
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3348{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003349 int rc, xid;
3350
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003351 xid = get_xid();
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003352
Jeff Laytonabab0952010-02-12 07:44:18 -05003353 rc = cifs_revalidate_file(file);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003354 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003355 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3356 rc);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003357 if (!rc)
3358 rc = generic_file_mmap(file, vma);
3359 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003360 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003361
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003362 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 return rc;
3364}
3365
Jeff Layton0471ca32012-05-16 07:13:16 -04003366static void
3367cifs_readv_complete(struct work_struct *work)
3368{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003369 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003370 struct cifs_readdata *rdata = container_of(work,
3371 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003372
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003373 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003374 for (i = 0; i < rdata->nr_pages; i++) {
3375 struct page *page = rdata->pages[i];
3376
Jeff Layton0471ca32012-05-16 07:13:16 -04003377 lru_cache_add_file(page);
3378
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003379 if (rdata->result == 0 ||
3380 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003381 flush_dcache_page(page);
3382 SetPageUptodate(page);
3383 }
3384
3385 unlock_page(page);
3386
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003387 if (rdata->result == 0 ||
3388 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003389 cifs_readpage_to_fscache(rdata->mapping->host, page);
3390
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003391 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003392
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003393 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003394 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003395 }
Jeff Layton6993f742012-05-16 07:13:17 -04003396 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003397}
3398
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003399static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003400cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3401 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003402{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003403 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003404 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003405 u64 eof;
3406 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003407 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003408
3409 /* determine the eof that the server (probably) has */
3410 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003411 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05003412 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003413
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003414 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003415 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003416 for (i = 0; i < nr_pages; i++) {
3417 struct page *page = rdata->pages[i];
Linus Torvalds442c9ac2016-05-18 10:17:56 -07003418 size_t n = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003419
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003420 if (len >= PAGE_SIZE) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003421 len -= PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07003422 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003423 /* enough for partial page, fill and zero the rest */
Linus Torvalds442c9ac2016-05-18 10:17:56 -07003424 zero_user(page, len, PAGE_SIZE - len);
Al Viro71335662016-01-09 19:54:50 -05003425 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07003426 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003427 } else if (page->index > eof_index) {
3428 /*
3429 * The VFS will not try to do readahead past the
3430 * i_size, but it's possible that we have outstanding
3431 * writes with gaps in the middle and the i_size hasn't
3432 * caught up yet. Populate those with zeroed out pages
3433 * to prevent the VFS from repeatedly attempting to
3434 * fill them until the writes are flushed.
3435 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003436 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003437 lru_cache_add_file(page);
3438 flush_dcache_page(page);
3439 SetPageUptodate(page);
3440 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003441 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003442 rdata->pages[i] = NULL;
3443 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003444 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003445 } else {
3446 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003447 lru_cache_add_file(page);
3448 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003449 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003450 rdata->pages[i] = NULL;
3451 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003452 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003453 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003454
Al Viro71335662016-01-09 19:54:50 -05003455 result = cifs_read_page_from_socket(server, page, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003456 if (result < 0)
3457 break;
3458
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003459 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003460 }
3461
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003462 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3463 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003464}
3465
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003466static int
3467readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3468 unsigned int rsize, struct list_head *tmplist,
3469 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3470{
3471 struct page *page, *tpage;
3472 unsigned int expected_index;
3473 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07003474 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003475
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003476 INIT_LIST_HEAD(tmplist);
3477
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003478 page = list_entry(page_list->prev, struct page, lru);
3479
3480 /*
3481 * Lock the page and put it in the cache. Since no one else
3482 * should have access to this page, we're safe to simply set
3483 * PG_locked without checking it first.
3484 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003485 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003486 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07003487 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003488
3489 /* give up if we can't stick it in the cache */
3490 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003491 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003492 return rc;
3493 }
3494
3495 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003496 *offset = (loff_t)page->index << PAGE_SHIFT;
3497 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003498 *nr_pages = 1;
3499 list_move_tail(&page->lru, tmplist);
3500
3501 /* now try and add more pages onto the request */
3502 expected_index = page->index + 1;
3503 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3504 /* discontinuity ? */
3505 if (page->index != expected_index)
3506 break;
3507
3508 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003509 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003510 break;
3511
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003512 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07003513 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003514 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003515 break;
3516 }
3517 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003518 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003519 expected_index++;
3520 (*nr_pages)++;
3521 }
3522 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523}
3524
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525static int cifs_readpages(struct file *file, struct address_space *mapping,
3526 struct list_head *page_list, unsigned num_pages)
3527{
Jeff Layton690c5e32011-10-19 15:30:16 -04003528 int rc;
3529 struct list_head tmplist;
3530 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04003531 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003532 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04003533 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003534
Jeff Layton690c5e32011-10-19 15:30:16 -04003535 /*
Suresh Jayaraman56698232010-07-05 18:13:25 +05303536 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3537 * immediately if the cookie is negative
David Howells54afa992013-09-04 17:10:39 +00003538 *
3539 * After this point, every page in the list might have PG_fscache set,
3540 * so we will need to clean that up off of every page we don't use.
Suresh Jayaraman56698232010-07-05 18:13:25 +05303541 */
3542 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3543 &num_pages);
3544 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003545 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303546
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003547 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3548 pid = open_file->pid;
3549 else
3550 pid = current->tgid;
3551
Jeff Layton690c5e32011-10-19 15:30:16 -04003552 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003553 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003554
Joe Perchesf96637b2013-05-04 22:12:25 -05003555 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3556 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04003557
3558 /*
3559 * Start with the page at end of list and move it to private
3560 * list. Do the same with any following pages until we hit
3561 * the rsize limit, hit an index discontinuity, or run out of
3562 * pages. Issue the async read and then start the loop again
3563 * until the list is empty.
3564 *
3565 * Note that list order is important. The page_list is in
3566 * the order of declining indexes. When we put the pages in
3567 * the rdata->pages, then we want them in increasing order.
3568 */
3569 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003570 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04003571 loff_t offset;
3572 struct page *page, *tpage;
3573 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003574 unsigned credits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003576 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3577 &rsize, &credits);
3578 if (rc)
3579 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003580
Jeff Layton690c5e32011-10-19 15:30:16 -04003581 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003582 * Give up immediately if rsize is too small to read an entire
3583 * page. The VFS will fall back to readpage. We should never
3584 * reach this point however since we set ra_pages to 0 when the
3585 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04003586 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003587 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003588 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003589 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003592 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3593 &nr_pages, &offset, &bytes);
3594 if (rc) {
3595 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04003597 }
3598
Jeff Layton0471ca32012-05-16 07:13:16 -04003599 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003600 if (!rdata) {
3601 /* best to give up if we're out of mem */
3602 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3603 list_del(&page->lru);
3604 lru_cache_add_file(page);
3605 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003606 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04003607 }
3608 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003609 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04003610 break;
3611 }
3612
Jeff Layton6993f742012-05-16 07:13:17 -04003613 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003614 rdata->mapping = mapping;
3615 rdata->offset = offset;
3616 rdata->bytes = bytes;
3617 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003618 rdata->pagesz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07003619 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003620 rdata->credits = credits;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003621
3622 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3623 list_del(&page->lru);
3624 rdata->pages[rdata->nr_pages++] = page;
3625 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003626
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003627 if (!rdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01003628 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003629 rc = server->ops->async_readv(rdata);
3630 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003631 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003632 for (i = 0; i < rdata->nr_pages; i++) {
3633 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003634 lru_cache_add_file(page);
3635 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003636 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04003638 /* Fallback to the readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04003639 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 break;
3641 }
Jeff Layton6993f742012-05-16 07:13:17 -04003642
3643 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644 }
3645
David Howells54afa992013-09-04 17:10:39 +00003646 /* Any pages that have been shown to fscache but didn't get added to
3647 * the pagecache must be uncached before they get returned to the
3648 * allocator.
3649 */
3650 cifs_fscache_readpages_cancel(mapping->host, page_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 return rc;
3652}
3653
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01003654/*
3655 * cifs_readpage_worker must be called with the page pinned
3656 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657static int cifs_readpage_worker(struct file *file, struct page *page,
3658 loff_t *poffset)
3659{
3660 char *read_data;
3661 int rc;
3662
Suresh Jayaraman56698232010-07-05 18:13:25 +05303663 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05003664 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303665 if (rc == 0)
3666 goto read_complete;
3667
Linus Torvalds1da177e2005-04-16 15:20:36 -07003668 read_data = kmap(page);
3669 /* for reads over a certain size could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003670
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003671 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003672
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673 if (rc < 0)
3674 goto io_error;
3675 else
Joe Perchesf96637b2013-05-04 22:12:25 -05003676 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003677
Al Viro496ad9a2013-01-23 17:07:38 -05003678 file_inode(file)->i_atime =
Deepa Dinamanic2050a42016-09-14 07:48:06 -07003679 current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003680
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003681 if (PAGE_SIZE > rc)
3682 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683
3684 flush_dcache_page(page);
3685 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303686
3687 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05003688 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303689
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003691
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003693 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01003694 unlock_page(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303695
3696read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697 return rc;
3698}
3699
3700static int cifs_readpage(struct file *file, struct page *page)
3701{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003702 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003704 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003706 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707
3708 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303709 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003710 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303711 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712 }
3713
Joe Perchesf96637b2013-05-04 22:12:25 -05003714 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003715 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716
3717 rc = cifs_readpage_worker(file, page, &offset);
3718
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003719 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720 return rc;
3721}
3722
Steve Frencha403a0a2007-07-26 15:54:16 +00003723static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3724{
3725 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05003726 struct cifs_tcon *tcon =
3727 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00003728
Steve French3afca262016-09-22 18:58:16 -05003729 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003730 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003731 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05003732 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003733 return 1;
3734 }
3735 }
Steve French3afca262016-09-22 18:58:16 -05003736 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003737 return 0;
3738}
3739
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740/* We do not want to update the file size from server for inodes
3741 open for write - to avoid races with writepage extending
3742 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003743 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 but this is tricky to do without racing with writebehind
3745 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003746bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747{
Steve Frencha403a0a2007-07-26 15:54:16 +00003748 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003749 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003750
Steve Frencha403a0a2007-07-26 15:54:16 +00003751 if (is_inode_writable(cifsInode)) {
3752 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003753 struct cifs_sb_info *cifs_sb;
3754
Steve Frenchc32a0b62006-01-12 14:41:28 -08003755 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003756 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003757 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003758 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003759 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003760 }
3761
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003762 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003763 return true;
Steve French7ba52632007-02-08 18:14:13 +00003764
Steve French4b18f2a2008-04-29 00:06:05 +00003765 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003766 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003767 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768}
3769
Nick Piggind9414772008-09-24 11:32:59 -04003770static int cifs_write_begin(struct file *file, struct address_space *mapping,
3771 loff_t pos, unsigned len, unsigned flags,
3772 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773{
Sachin Prabhu466bd312013-09-13 14:11:57 +01003774 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003775 pgoff_t index = pos >> PAGE_SHIFT;
3776 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003777 loff_t page_start = pos & PAGE_MASK;
3778 loff_t i_size;
3779 struct page *page;
3780 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781
Joe Perchesf96637b2013-05-04 22:12:25 -05003782 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003783
Sachin Prabhu466bd312013-09-13 14:11:57 +01003784start:
Nick Piggin54566b22009-01-04 12:00:53 -08003785 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003786 if (!page) {
3787 rc = -ENOMEM;
3788 goto out;
3789 }
Nick Piggind9414772008-09-24 11:32:59 -04003790
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003791 if (PageUptodate(page))
3792 goto out;
Steve French8a236262007-03-06 00:31:00 +00003793
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003794 /*
3795 * If we write a full page it will be up to date, no need to read from
3796 * the server. If the write is short, we'll end up doing a sync write
3797 * instead.
3798 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003799 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003800 goto out;
3801
3802 /*
3803 * optimize away the read when we have an oplock, and we're not
3804 * expecting to use any of the data we'd be reading in. That
3805 * is, when the page lies beyond the EOF, or straddles the EOF
3806 * and the write will cover all of the existing data.
3807 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003808 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003809 i_size = i_size_read(mapping->host);
3810 if (page_start >= i_size ||
3811 (offset == 0 && (pos + len) >= i_size)) {
3812 zero_user_segments(page, 0, offset,
3813 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003814 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003815 /*
3816 * PageChecked means that the parts of the page
3817 * to which we're not writing are considered up
3818 * to date. Once the data is copied to the
3819 * page, it can be set uptodate.
3820 */
3821 SetPageChecked(page);
3822 goto out;
3823 }
3824 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825
Sachin Prabhu466bd312013-09-13 14:11:57 +01003826 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003827 /*
3828 * might as well read a page, it is fast enough. If we get
3829 * an error, we don't need to return it. cifs_write_end will
3830 * do a sync write instead since PG_uptodate isn't set.
3831 */
3832 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003833 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01003834 oncethru = 1;
3835 goto start;
Steve French8a236262007-03-06 00:31:00 +00003836 } else {
3837 /* we could try using another file handle if there is one -
3838 but how would we lock it to prevent close of that handle
3839 racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04003840 this will be written out by write_end so is fine */
Steve French8a236262007-03-06 00:31:00 +00003841 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003842out:
3843 *pagep = page;
3844 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003845}
3846
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303847static int cifs_release_page(struct page *page, gfp_t gfp)
3848{
3849 if (PagePrivate(page))
3850 return 0;
3851
3852 return cifs_fscache_release_page(page, gfp);
3853}
3854
Lukas Czernerd47992f2013-05-21 23:17:23 -04003855static void cifs_invalidate_page(struct page *page, unsigned int offset,
3856 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303857{
3858 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3859
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003860 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303861 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3862}
3863
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003864static int cifs_launder_page(struct page *page)
3865{
3866 int rc = 0;
3867 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003868 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003869 struct writeback_control wbc = {
3870 .sync_mode = WB_SYNC_ALL,
3871 .nr_to_write = 0,
3872 .range_start = range_start,
3873 .range_end = range_end,
3874 };
3875
Joe Perchesf96637b2013-05-04 22:12:25 -05003876 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003877
3878 if (clear_page_dirty_for_io(page))
3879 rc = cifs_writepage_locked(page, &wbc);
3880
3881 cifs_fscache_invalidate_page(page, page->mapping->host);
3882 return rc;
3883}
3884
/*
 * Workqueue handler for a server-initiated oplock break.
 *
 * Downgrades the locally cached oplock state, flushes and (when read
 * caching is lost) invalidates cached data, pushes cached byte-range
 * locks back to the server, and acknowledges the break unless it was
 * cancelled. Finally drops the file reference taken when the work was
 * queued and signals completion of the break.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* wait until in-flight writers are done before downgrading */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/*
	 * Mandatory byte-range locks cannot be cached once we lose write
	 * caching, so drop the oplock to None entirely in that case.
	 */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* propagate the break to any local leases on the inode */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			/* read caching lost: wait for writeback, then zap
			   the pagecache so stale data is not served */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */);
	cifs_done_oplock_break(cinode);
}
3940
Steve Frenchdca69282013-11-11 16:42:37 -06003941/*
3942 * The presence of cifs_direct_io() in the address space ops vector
3943 * allowes open() O_DIRECT flags which would have failed otherwise.
3944 *
3945 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
3946 * so this method should never be called.
3947 *
3948 * Direct IO is not yet supported in the cached mode.
3949 */
3950static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07003951cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06003952{
3953 /*
3954 * FIXME
3955 * Eventually need to support direct IO for non forcedirectio mounts
3956 */
3957 return -EINVAL;
3958}
3959
3960
/*
 * Default address space operations for cifs files. Used when the server
 * supports buffers large enough for a header plus a full page of data;
 * see cifs_addr_ops_smallbuf below for the reduced variant.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003974
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 *
 * Note that .direct_IO is also absent from this small-buffer variant.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};