/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"

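/*
 * Map POSIX open flags (O_RDONLY/O_WRONLY/O_RDWR) to the NT desired
 * access bits sent on the wire. GENERIC_READ | GENERIC_WRITE is used
 * for O_RDWR rather than GENERIC_ALL to avoid spurious access-denied
 * errors on create.
 */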
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

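/*
 * Map POSIX open flags to the SMB_O_* flags used by the CIFS POSIX
 * extensions. O_DSYNC is widened to SMB_O_SYNC to stay on the safe side.
 */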
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

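/*
 * Derive the NT create disposition (FILE_CREATE, FILE_OVERWRITE_IF, ...)
 * from the O_CREAT/O_EXCL/O_TRUNC combination; see the mapping table in
 * cifs_nt_open() below.
 */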
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

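/*
 * Open a file using the CIFS POSIX extensions (CIFSPOSIXCreate) and, when
 * the caller asks for it, instantiate or refresh the inode from the
 * returned FILE_UNIX_BASIC_INFO.
 */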
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

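/*
 * Open a file the traditional (NT) way via server->ops->open and then
 * fill in the inode from the metadata returned by the server.
 */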
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match to disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists);
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}

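/*
 * Return true if any file handle attached to this inode currently holds
 * mandatory byte-range locks; callers use this to decide whether a read
 * oplock can be kept.
 */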
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

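/*
 * Allocate and initialize a cifsFileInfo for a freshly opened handle,
 * link it into the per-inode and per-tcon open file lists, and apply
 * the oplock granted by the server.
 */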
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list */
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

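/* Take an extra reference on a cifsFileInfo under file_info_lock. */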
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/**
 * cifsFileInfo_put - release a reference to file private data
 *
 * Always potentially waits for the oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true);
}

/**
 * _cifsFileInfo_put - release a reference to file private data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock and
 * cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

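/*
 * ->open() for regular files: tries a POSIX-extensions open first when
 * the server supports it and falls back to the NT open path otherwise.
 */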
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fall through to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

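/*
 * Reopen a file handle that was invalidated, e.g. after reconnecting to
 * the server; optionally flush and refresh cached inode data first.
 */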
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * Fall through to retry open the old way on errors;
		 * especially in the reconnect path it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

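/*
 * Walk every open file on the tree connection and reopen any handle
 * marked invalid, so persistent handles survive a reconnect.
 */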
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

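/* ->release() for directories: close the search handle and free buffers. */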
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

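/* Allocate and initialize a byte-range lock record for the current task. */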
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

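/* Wake up everyone blocked on this lock and detach them from its block list. */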
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}

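/*
 * Push all cached byte-range locks for this file to the server, batching
 * as many LOCKING_ANDX ranges into each request as the server's maxBuf
 * allows.
 */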
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

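/* Map a lock owner pointer to a 32-bit value usable as an SMB lock pid. */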
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

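/*
 * Push all cached POSIX byte-range locks for this file to the server. The
 * lock_to_push structures are preallocated so that no memory is allocated
 * while holding the flc_lock spinlock.
 */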
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		el = el->next;
		lck->pid = hash_lockowner(flock->fl_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

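/*
 * Push all cached locks for this file to the server, choosing posix or
 * mandatory style based on the unix extensions and mount flags, then mark
 * the inode as no longer caching brlocks.
 */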
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

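/*
 * Translate the VFS file_lock flags and type into the server's lock type,
 * and note whether the request is a lock, an unlock, and/or blocking.
 */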
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}

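/*
 * Handle F_GETLK: test whether the given range is locked. For mandatory
 * style locks this probes the server with a lock/unlock pair and reports
 * the conflicting type back in the flock structure.
 */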
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}

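/* Move all lock entries from one list to another. */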
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

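/* Wake any waiters and free every lock entry on the list. */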
void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

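/*
 * Remove all cached locks that fall within the unlock range and, when the
 * locks are no longer cacheable, send batched LOCKING_ANDX unlock requests
 * to the server, restoring the locks to the file's list if a request fails.
 */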
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}

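/*
 * Handle F_SETLK/F_SETLKW: set or clear a byte-range lock, posix or
 * mandatory style, caching the lock locally when the server allows it.
 */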
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * page reading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
		    CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}

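/*
 * The file_operations ->lock handler: parse the request and dispatch to
 * cifs_getlk() or cifs_setlk().
 */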
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_FILE_SB(file);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

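/*
 * Write data to the server through an open file handle, retrying on
 * -EAGAIN and reopening an invalidated handle, then update the cached EOF
 * and the inode size on success.
 */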
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written,
						     iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}

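/*
 * Find an open handle for this inode that was opened with read access and
 * take a reference on it; returns NULL if none is usable.
 */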
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}

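/*
 * Find an open handle for this inode that was opened with write access,
 * preferring handles owned by the calling process and trying to reopen an
 * invalidated handle if no valid one is found.
 */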
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_inode->open_file_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_inode->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&cifs_inode->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}

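/*
 * Write the byte range [from, to) of a cached page back to the server
 * through any writable handle for the inode.
 */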
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

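/*
 * Allocate a writedata descriptor and fill it with up to "tofind" dirty
 * pages from the mapping, starting at *index.
 */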
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	*found_pages = find_get_pages_range_tag(mapping, index, end,
				PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
	return wdata;
}

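/*
 * Lock a run of consecutive dirty pages, mark them for writeback, and drop
 * any found pages that cannot be part of this write; returns the number of
 * pages prepared.
 */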
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */

		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}

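/*
 * Fill in the remaining writedata fields, pick a writable handle for the
 * inode, and issue the asynchronous write, unlocking the pages afterwards.
 */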
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}

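/*
 * The address_space ->writepages handler: gather runs of contiguous dirty
 * pages and write them out asynchronously, with the batch size bounded by
 * the negotiated wsize and the available credits.
 */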
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002140 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002142 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002143 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002144 bool done = false, scanned = false, range_whole = false;
2145 pgoff_t end, index;
2146 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07002147 int rc = 0;
Pavel Shilovskyfb2dabe2019-01-08 11:15:28 -08002148 int saved_rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00002149
Steve French37c0eb42005-10-05 14:50:29 -07002150 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002151 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002152 * one page at a time via cifs_writepage
2153 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002154 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002155 return generic_writepages(mapping, wbc);
2156
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002157 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002158 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002159 end = -1;
2160 } else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc != 0) {
			done = true;
			break;
		}

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			done = true;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (is_retryable_error(rc))
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (!is_retryable_error(rc))
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		/* Return immediately if we received a signal during writing */
		if (is_interrupt_error(rc)) {
			done = true;
			break;
		}

		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (saved_rc != 0)
		rc = saved_rc;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

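/*
 * Write a single page that the caller has already locked. -EAGAIN under
 * WB_SYNC_ALL retries the send; other retryable errors redirty the page,
 * and hard errors mark both the page and the mapping with the error.
 */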
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

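/*
 * ->write_end address_space callback: if the page is up to date it is
 * simply dirtied for later writeback; otherwise the copied range is
 * written out synchronously via cifs_write() using the file handle we
 * already hold. i_size is extended under i_lock when the write grew
 * the file.
 */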
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}

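/*
 * Strict cache variant of fsync: flush and wait on dirty pages, zap the
 * page cache when we do not hold a read lease/oplock (the server copy
 * may be newer), then ask the server to flush the file handle.
 */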
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}

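/*
 * Like cifs_strict_fsync() but without the cache invalidation step:
 * only flushes dirty pages and asks the server to flush the handle.
 */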
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);

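/*
 * Work-queue completion for an uncached write: update the cached
 * end-of-file, signal waiters and kick the collector that reaps
 * finished requests for the aio context.
 */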
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}

static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}

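/*
 * Split an uncached write into wsize-sized cifs_writedata requests,
 * copy the user data into freshly allocated pages and send each chunk
 * with the server's async_writev op. Successfully sent requests are
 * queued on wdata_list for the collector.
 */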
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);

		if (!wdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}

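/*
 * Reap completed uncached write requests in offset order, resending
 * any chunk that failed with -EAGAIN, then complete the aio context
 * (or the iocb for a true async caller).
 */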
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	unsigned int i;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);

				list_splice(&tmp_list, &ctx->list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	for (i = 0; i < ctx->npages; i++)
		put_page(ctx->bv[i].bv_page);

	cifs_stats_bytes_written(tcon, ctx->total_len);
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}

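/*
 * Uncached write entry point: wrap the iterator in a cifs_aio_ctx,
 * fan the data out via cifs_write_from_iter() and either wait for the
 * collector (sync iocb) or return -EIOCBQUEUED (async).
 */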
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	int rc;

	/*
	 * BB - optimize the path when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for
	 * constructing the write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	rc = setup_aio_ctx_iter(ctx, from, WRITE);
	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	/* grab a lock here because response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}

static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}

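/*
 * Strict cache write entry point: with a write lease/oplock the page
 * cache can be used (plus brlock conflict checking in cifs_writev());
 * otherwise the data goes straight to the server and any read-cached
 * pages are zapped afterwards.
 */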
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from pos to pos+len-1 rather than flush all
	 * affected pages, because flushing may cause an error with mandatory
	 * locks on these pages but not on the region from pos to pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}

static struct cifs_readdata *
cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (rdata != NULL) {
		rdata->pages = pages;
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct page **pages =
		kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	struct cifs_readdata *ret = NULL;

	if (pages) {
		ret = cifs_readdata_direct_alloc(pages, complete);
		if (!ret)
			kfree(pages);
	}

	return ret;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kvfree(rdata->pages);
	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		unsigned int nr_page_failed = i;

		for (i = 0; i < nr_page_failed; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written;

		if (unlikely(iter->type & ITER_PIPE)) {
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}

static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	collect_uncached_read_data(rdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

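/*
 * Fill the pages of a readdata request either from an iov_iter (cached
 * copy), directly from the socket, or, for SMB Direct, by accounting
 * for bytes already placed by RDMA. The final partially filled page
 * determines tailsz and any wholly unused pages are released.
 */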
static int
uncached_fill_pages(struct TCP_Server_Info *server,
		    struct cifs_readdata *rdata, struct iov_iter *iter,
		    unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;
		unsigned int segment_size = rdata->pagesz;

		if (i == 0)
			segment_size -= page_offset;
		else
			page_offset = 0;

		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		n = len;
		if (len >= segment_size)
			/* enough data to fill the page */
			n = segment_size;
		else
			rdata->tailsz = len;
		len -= n;

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
		rdata->got_bytes : result;
}

static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	return uncached_fill_pages(server, rdata, NULL, len);
}

static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata,
			      struct iov_iter *iter)
{
	return uncached_fill_pages(server, rdata, iter, iter->count);
}

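/*
 * Issue a series of async reads covering [offset, offset + len), each
 * at most rsize bytes, queueing every successfully sent request on
 * rdata_list for the collector.
 */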
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->tailsz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
		rdata->credits = credits;
		rdata->ctx = ctx;
		kref_get(&ctx->refcount);

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}

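/*
 * Read-side counterpart of collect_uncached_write_data(): reap finished
 * read requests in offset order, copy their pages into the user
 * iterator, resend after reconnects and finally complete the context.
 */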
static void
collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata, *tmp;
	struct iov_iter *to = &ctx->iter;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int i;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
		if (!rc) {
			if (!try_wait_for_completion(&rdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list, ctx);

				list_splice(&tmp_list, &ctx->list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	for (i = 0; i < ctx->npages; i++) {
		if (ctx->should_dirty)
			set_page_dirty(ctx->bv[i].bv_page);
		put_page(ctx->bv[i].bv_page);
	}

	ctx->total_len = ctx->len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, ctx->total_len);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}

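/*
 * Uncached read entry point, mirroring cifs_user_writev(): build an
 * aio context around the destination iterator, send the async reads
 * and wait (or return -EIOCBQUEUED for async callers).
 */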
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	struct cifs_aio_ctx *ctx;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	if (to->type == ITER_IOVEC)
		ctx->should_dirty = true;

	rc = setup_aio_ctx_iter(ctx, to, READ);
	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	len = ctx->len;

	/* grab a lock here because read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_read = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_read = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}

ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock because the server can delay
	 * mtime changes - so we can't make a decision about invalidating the
	 * inode. We can also fail a page read if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}

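/*
 * Legacy synchronous read path: loop issuing sync_read calls of at
 * most rsize bytes, reopening the file handle if it has been
 * invalidated by a reconnect.
 */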
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003464static ssize_t
3465cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466{
3467 int rc = -EACCES;
3468 unsigned int bytes_read = 0;
3469 unsigned int total_read;
3470 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003471 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003473 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003474 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003475 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003476 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003477 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003478 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003479 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003480 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003482 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003483 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003485 /* FIXME: set up handlers for larger reads and/or convert to async */
3486 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3487
Linus Torvalds1da177e2005-04-16 15:20:36 -07003488 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303489 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003490 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303491 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003493 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003494 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003495 server = tcon->ses->server;
3496
3497 if (!server->ops->sync_read) {
3498 free_xid(xid);
3499 return -ENOSYS;
3500 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003502 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3503 pid = open_file->pid;
3504 else
3505 pid = current->tgid;
3506
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003508 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003510 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3511 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003512 do {
3513 current_read_size = min_t(uint, read_size - total_read,
3514 rsize);
3515 /*
3516			 * For Windows ME and 9x we do not want to request more
3517			 * than was negotiated, since the server will refuse
3518			 * the read otherwise.
3519 */
3520 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003521 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003522 current_read_size = min_t(uint,
3523 current_read_size, CIFSMaxBufSize);
3524 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003525 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003526 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 if (rc != 0)
3528 break;
3529 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003530 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003531 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003532 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003533 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003534 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003535 &bytes_read, &cur_offset,
3536 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003537 } while (rc == -EAGAIN);
3538
Linus Torvalds1da177e2005-04-16 15:20:36 -07003539 if (rc || (bytes_read == 0)) {
3540 if (total_read) {
3541 break;
3542 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003543 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544 return rc;
3545 }
3546 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003547 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003548 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549 }
3550 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003551 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552 return total_read;
3553}
3554
Jeff Laytonca83ce32011-04-12 09:13:44 -04003555/*
3556 * If the page is mmap'ed into a process' page tables, then we need to make
3557 * sure that it doesn't change while being written back.
3558 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05303559static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08003560cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003561{
3562 struct page *page = vmf->page;
3563
3564 lock_page(page);
3565 return VM_FAULT_LOCKED;
3566}
3567
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003568static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003569 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003570 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003571 .page_mkwrite = cifs_page_mkwrite,
3572};
3573
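/*
 * mmap for strict cache mode: unless we hold a read oplock/lease we
 * zap the currently cached pages first, so the mapping starts out
 * consistent with the server's view of the file.
 */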
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003574int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3575{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003576 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003577 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003578
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003579 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003580
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003581 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003582 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003583 if (!rc)
3584 rc = generic_file_mmap(file, vma);
3585 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003586 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003587
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003588 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003589 return rc;
3590}
3591
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3593{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594 int rc, xid;
3595
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003596 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003597
Jeff Laytonabab0952010-02-12 07:44:18 -05003598 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003599 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003600 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3601 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003602 if (!rc)
3603 rc = generic_file_mmap(file, vma);
3604 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003605 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003606
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003607 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003608 return rc;
3609}
3610
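/*
 * Work item run when an async read completes: mark each page uptodate
 * if its data arrived, hand it to fscache, unlock it, and drop the
 * reference the read held on the readdata.
 */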
Jeff Layton0471ca32012-05-16 07:13:16 -04003611static void
3612cifs_readv_complete(struct work_struct *work)
3613{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003614 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003615 struct cifs_readdata *rdata = container_of(work,
3616 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003617
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003618 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003619 for (i = 0; i < rdata->nr_pages; i++) {
3620 struct page *page = rdata->pages[i];
3621
Jeff Layton0471ca32012-05-16 07:13:16 -04003622 lru_cache_add_file(page);
3623
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003624 if (rdata->result == 0 ||
3625 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003626 flush_dcache_page(page);
3627 SetPageUptodate(page);
3628 }
3629
3630 unlock_page(page);
3631
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003632 if (rdata->result == 0 ||
3633 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003634 cifs_readpage_to_fscache(rdata->mapping->host, page);
3635
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003636 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003637
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003638 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003639 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003640 }
Jeff Layton6993f742012-05-16 07:13:17 -04003641 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003642}
3643
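/*
 * Fill the pages attached to an rdata, either from the socket or, when
 * the data has already been received into a buffer, from an iov_iter.
 * The tail of a short read is zeroed; pages past the server's EOF are
 * zero-filled to stop the VFS retrying them, and other unneeded pages
 * are released.
 */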
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003644static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003645readpages_fill_pages(struct TCP_Server_Info *server,
3646 struct cifs_readdata *rdata, struct iov_iter *iter,
3647 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003648{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003649 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003650 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003651 u64 eof;
3652 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003653 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003654 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003655
3656 /* determine the eof that the server (probably) has */
3657 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003658 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05003659 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003660
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003661 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003662 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003663 for (i = 0; i < nr_pages; i++) {
3664 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07003665 unsigned int to_read = rdata->pagesz;
3666 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003667
Long Li1dbe3462018-05-30 12:47:55 -07003668 if (i == 0)
3669 to_read -= page_offset;
3670 else
3671 page_offset = 0;
3672
3673 n = to_read;
3674
3675 if (len >= to_read) {
3676 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07003677 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003678 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07003679 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05003680 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07003681 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003682 } else if (page->index > eof_index) {
3683 /*
3684 * The VFS will not try to do readahead past the
3685 * i_size, but it's possible that we have outstanding
3686 * writes with gaps in the middle and the i_size hasn't
3687 * caught up yet. Populate those with zeroed out pages
3688 * to prevent the VFS from repeatedly attempting to
3689 * fill them until the writes are flushed.
3690 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003691 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003692 lru_cache_add_file(page);
3693 flush_dcache_page(page);
3694 SetPageUptodate(page);
3695 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003696 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003697 rdata->pages[i] = NULL;
3698 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003699 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003700 } else {
3701 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003702 lru_cache_add_file(page);
3703 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003704 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003705 rdata->pages[i] = NULL;
3706 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003707 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003708 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003709
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003710 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003711 result = copy_page_from_iter(
3712 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003713#ifdef CONFIG_CIFS_SMB_DIRECT
3714 else if (rdata->mr)
3715 result = n;
3716#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003717 else
Long Li1dbe3462018-05-30 12:47:55 -07003718 result = cifs_read_page_from_socket(
3719 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003720 if (result < 0)
3721 break;
3722
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003723 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003724 }
3725
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003726 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3727 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003728}
3729
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003730static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003731cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3732 struct cifs_readdata *rdata, unsigned int len)
3733{
3734 return readpages_fill_pages(server, rdata, NULL, len);
3735}
3736
3737static int
3738cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
3739 struct cifs_readdata *rdata,
3740 struct iov_iter *iter)
3741{
3742 return readpages_fill_pages(server, rdata, iter, iter->count);
3743}
3744
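/*
 * Take a run of contiguously-indexed pages from the tail of page_list,
 * add each to the page cache locked, and collect them on tmplist until
 * adding another page would exceed rsize. The resulting run's offset,
 * byte count and page count are returned through the out parameters.
 */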
3745static int
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003746readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3747 unsigned int rsize, struct list_head *tmplist,
3748 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3749{
3750 struct page *page, *tpage;
3751 unsigned int expected_index;
3752 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07003753 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003754
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003755 INIT_LIST_HEAD(tmplist);
3756
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003757 page = list_entry(page_list->prev, struct page, lru);
3758
3759 /*
3760 * Lock the page and put it in the cache. Since no one else
3761 * should have access to this page, we're safe to simply set
3762 * PG_locked without checking it first.
3763 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003764 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003765 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07003766 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003767
3768 /* give up if we can't stick it in the cache */
3769 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003770 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003771 return rc;
3772 }
3773
3774 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003775 *offset = (loff_t)page->index << PAGE_SHIFT;
3776 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003777 *nr_pages = 1;
3778 list_move_tail(&page->lru, tmplist);
3779
3780 /* now try and add more pages onto the request */
3781 expected_index = page->index + 1;
3782 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3783		/* discontinuity? */
3784 if (page->index != expected_index)
3785 break;
3786
3787 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003788 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003789 break;
3790
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003791 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07003792 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003793 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003794 break;
3795 }
3796 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003797 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003798 expected_index++;
3799 (*nr_pages)++;
3800 }
3801 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003802}
3803
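/*
 * Readahead entrypoint: serve what we can from fscache, then batch the
 * remaining pages into rsize-sized chunks and issue an async read for
 * each batch, with cifs_readv_complete finishing the pages.
 */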
Linus Torvalds1da177e2005-04-16 15:20:36 -07003804static int cifs_readpages(struct file *file, struct address_space *mapping,
3805 struct list_head *page_list, unsigned num_pages)
3806{
Jeff Layton690c5e32011-10-19 15:30:16 -04003807 int rc;
3808 struct list_head tmplist;
3809 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04003810 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003811 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04003812 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003813
Jeff Layton690c5e32011-10-19 15:30:16 -04003814 /*
Suresh Jayaraman56698232010-07-05 18:13:25 +05303815 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3816	 * immediately if the cookie is negative.
David Howells54afa992013-09-04 17:10:39 +00003817 *
3818 * After this point, every page in the list might have PG_fscache set,
3819	 * so we will need to clear that flag on every page we don't use.
Suresh Jayaraman56698232010-07-05 18:13:25 +05303820 */
3821 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3822 &num_pages);
3823 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003824 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303825
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003826 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3827 pid = open_file->pid;
3828 else
3829 pid = current->tgid;
3830
Jeff Layton690c5e32011-10-19 15:30:16 -04003831 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003832 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833
Joe Perchesf96637b2013-05-04 22:12:25 -05003834 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3835 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04003836
3837 /*
3838 * Start with the page at end of list and move it to private
3839 * list. Do the same with any following pages until we hit
3840 * the rsize limit, hit an index discontinuity, or run out of
3841 * pages. Issue the async read and then start the loop again
3842 * until the list is empty.
3843 *
3844 * Note that list order is important. The page_list is in
3845 * the order of declining indexes. When we put the pages in
3846 * the rdata->pages, then we want them in increasing order.
3847 */
3848 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003849 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04003850 loff_t offset;
3851 struct page *page, *tpage;
3852 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003853 unsigned credits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003855 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3856 &rsize, &credits);
3857 if (rc)
3858 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859
Jeff Layton690c5e32011-10-19 15:30:16 -04003860 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003861 * Give up immediately if rsize is too small to read an entire
3862 * page. The VFS will fall back to readpage. We should never
3863		 * reach this point, however, since we set ra_pages to 0 when the
3864 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04003865 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003866 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003867 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003868 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003869 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003871 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3872 &nr_pages, &offset, &bytes);
3873 if (rc) {
3874 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04003876 }
3877
Jeff Layton0471ca32012-05-16 07:13:16 -04003878 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003879 if (!rdata) {
3880 /* best to give up if we're out of mem */
3881 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3882 list_del(&page->lru);
3883 lru_cache_add_file(page);
3884 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003885 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04003886 }
3887 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003888 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04003889 break;
3890 }
3891
Jeff Layton6993f742012-05-16 07:13:17 -04003892 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003893 rdata->mapping = mapping;
3894 rdata->offset = offset;
3895 rdata->bytes = bytes;
3896 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003897 rdata->pagesz = PAGE_SIZE;
Long Li1dbe3462018-05-30 12:47:55 -07003898 rdata->tailsz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07003899 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003900 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003901 rdata->credits = credits;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003902
3903 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3904 list_del(&page->lru);
3905 rdata->pages[rdata->nr_pages++] = page;
3906 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003907
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003908 if (!rdata->cfile->invalidHandle ||
Germano Percossi1fa839b2017-04-07 12:29:38 +01003909 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003910 rc = server->ops->async_readv(rdata);
3911 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003912 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003913 for (i = 0; i < rdata->nr_pages; i++) {
3914 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003915 lru_cache_add_file(page);
3916 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003917 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003918 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04003919			/* Fall back to readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04003920 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921 break;
3922 }
Jeff Layton6993f742012-05-16 07:13:17 -04003923
3924 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925 }
3926
David Howells54afa992013-09-04 17:10:39 +00003927 /* Any pages that have been shown to fscache but didn't get added to
3928 * the pagecache must be uncached before they get returned to the
3929 * allocator.
3930 */
3931 cifs_fscache_readpages_cancel(mapping->host, page_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 return rc;
3933}
3934
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01003935/*
3936 * cifs_readpage_worker must be called with the page pinned
3937 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938static int cifs_readpage_worker(struct file *file, struct page *page,
3939 loff_t *poffset)
3940{
3941 char *read_data;
3942 int rc;
3943
Suresh Jayaraman56698232010-07-05 18:13:25 +05303944 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05003945 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303946 if (rc == 0)
3947 goto read_complete;
3948
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 read_data = kmap(page);
3950	/* for reads over a certain size we could initiate async readahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003951
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003952 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003953
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954 if (rc < 0)
3955 goto io_error;
3956 else
Joe Perchesf96637b2013-05-04 22:12:25 -05003957 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003958
Al Viro496ad9a2013-01-23 17:07:38 -05003959 file_inode(file)->i_atime =
Deepa Dinamanic2050a42016-09-14 07:48:06 -07003960 current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003961
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003962 if (PAGE_SIZE > rc)
3963 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964
3965 flush_dcache_page(page);
3966 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303967
3968 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05003969 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303970
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003972
Linus Torvalds1da177e2005-04-16 15:20:36 -07003973io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003974 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01003975 unlock_page(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303976
3977read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 return rc;
3979}
3980
3981static int cifs_readpage(struct file *file, struct page *page)
3982{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003983 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003985 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003987 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988
3989 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303990 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003991 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303992 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993 }
3994
Joe Perchesf96637b2013-05-04 22:12:25 -05003995 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003996 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997
3998 rc = cifs_readpage_worker(file, page, &offset);
3999
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004000 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001 return rc;
4002}
4003
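/* Return 1 if the inode has at least one open file with write access. */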
Steve Frencha403a0a2007-07-26 15:54:16 +00004004static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4005{
4006 struct cifsFileInfo *open_file;
4007
Dave Wysochanskia8de7092019-10-03 15:16:27 +10004008 spin_lock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004009 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004010 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Dave Wysochanskia8de7092019-10-03 15:16:27 +10004011 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004012 return 1;
4013 }
4014 }
Dave Wysochanskia8de7092019-10-03 15:16:27 +10004015 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004016 return 0;
4017}
4018
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019/* We do not want to update the file size from the server for inodes
4020   open for write, to avoid races with writepage extending the file.
4021   In the future we could consider allowing a refresh of the inode
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004022   only on increases in the file size, but this is tricky to do
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023   without racing with writebehind page caching in the current
4024   Linux kernel design. */
Steve French4b18f2a2008-04-29 00:06:05 +00004025bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026{
Steve Frencha403a0a2007-07-26 15:54:16 +00004027 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004028 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004029
Steve Frencha403a0a2007-07-26 15:54:16 +00004030 if (is_inode_writable(cifsInode)) {
4031 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004032 struct cifs_sb_info *cifs_sb;
4033
Steve Frenchc32a0b62006-01-12 14:41:28 -08004034 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004035 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004036			/* since there is no page cache to corrupt on directio,
Steve Frenchc32a0b62006-01-12 14:41:28 -08004037			   we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004038 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004039 }
4040
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004041 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004042 return true;
Steve French7ba52632007-02-08 18:14:13 +00004043
Steve French4b18f2a2008-04-29 00:06:05 +00004044 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004045 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004046 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047}
4048
Nick Piggind9414772008-09-24 11:32:59 -04004049static int cifs_write_begin(struct file *file, struct address_space *mapping,
4050 loff_t pos, unsigned len, unsigned flags,
4051 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052{
Sachin Prabhu466bd312013-09-13 14:11:57 +01004053 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004054 pgoff_t index = pos >> PAGE_SHIFT;
4055 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004056 loff_t page_start = pos & PAGE_MASK;
4057 loff_t i_size;
4058 struct page *page;
4059 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004060
Joe Perchesf96637b2013-05-04 22:12:25 -05004061 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04004062
Sachin Prabhu466bd312013-09-13 14:11:57 +01004063start:
Nick Piggin54566b22009-01-04 12:00:53 -08004064 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004065 if (!page) {
4066 rc = -ENOMEM;
4067 goto out;
4068 }
Nick Piggind9414772008-09-24 11:32:59 -04004069
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004070 if (PageUptodate(page))
4071 goto out;
Steve French8a236262007-03-06 00:31:00 +00004072
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004073 /*
4074 * If we write a full page it will be up to date, no need to read from
4075 * the server. If the write is short, we'll end up doing a sync write
4076 * instead.
4077 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004078 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004079 goto out;
4080
4081 /*
4082 * optimize away the read when we have an oplock, and we're not
4083 * expecting to use any of the data we'd be reading in. That
4084 * is, when the page lies beyond the EOF, or straddles the EOF
4085 * and the write will cover all of the existing data.
4086 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004087 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004088 i_size = i_size_read(mapping->host);
4089 if (page_start >= i_size ||
4090 (offset == 0 && (pos + len) >= i_size)) {
4091 zero_user_segments(page, 0, offset,
4092 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004093 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004094 /*
4095 * PageChecked means that the parts of the page
4096 * to which we're not writing are considered up
4097 * to date. Once the data is copied to the
4098 * page, it can be set uptodate.
4099 */
4100 SetPageChecked(page);
4101 goto out;
4102 }
4103 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104
Sachin Prabhu466bd312013-09-13 14:11:57 +01004105 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004106 /*
4107 * might as well read a page, it is fast enough. If we get
4108 * an error, we don't need to return it. cifs_write_end will
4109 * do a sync write instead since PG_uptodate isn't set.
4110 */
4111 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004112 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004113 oncethru = 1;
4114 goto start;
Steve French8a236262007-03-06 00:31:00 +00004115 } else {
4116		/* we could try using another file handle if there is one,
4117		   but how would we lock it to prevent a close of that handle
4118		   racing with this read? In any case,
Nick Piggind9414772008-09-24 11:32:59 -04004119		   this will be written out by write_end, so it is fine */
Steve French8a236262007-03-06 00:31:00 +00004120 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004121out:
4122 *pagep = page;
4123 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124}
4125
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304126static int cifs_release_page(struct page *page, gfp_t gfp)
4127{
4128 if (PagePrivate(page))
4129 return 0;
4130
4131 return cifs_fscache_release_page(page, gfp);
4132}
4133
Lukas Czernerd47992f2013-05-21 23:17:23 -04004134static void cifs_invalidate_page(struct page *page, unsigned int offset,
4135 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304136{
4137 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4138
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004139 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304140 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4141}
4142
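/*
 * Called by the VFS to write out a dirty page before invalidating it;
 * flush the page synchronously and drop it from fscache.
 */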
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004143static int cifs_launder_page(struct page *page)
4144{
4145 int rc = 0;
4146 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004147 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004148 struct writeback_control wbc = {
4149 .sync_mode = WB_SYNC_ALL,
4150 .nr_to_write = 0,
4151 .range_start = range_start,
4152 .range_end = range_end,
4153 };
4154
Joe Perchesf96637b2013-05-04 22:12:25 -05004155 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004156
4157 if (clear_page_dirty_for_io(page))
4158 rc = cifs_writepage_locked(page, &wbc);
4159
4160 cifs_fscache_invalidate_page(page, page->mapping->host);
4161 return rc;
4162}
4163
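/*
 * Work item run when the server breaks our oplock: wait out pending
 * writers, downgrade the cached oplock state, flush (and, if we lost
 * read caching, zap) the page cache, re-push byte-range locks, and
 * acknowledge the break unless it was cancelled.
 */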
Tejun Heo9b646972010-07-20 22:09:02 +02004164void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04004165{
4166 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4167 oplock_break);
David Howells2b0143b2015-03-17 22:25:59 +00004168 struct inode *inode = d_inode(cfile->dentry);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004169 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004170 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004171 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004172 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04004173
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004174 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
NeilBrown74316202014-07-07 15:16:04 +10004175 TASK_UNINTERRUPTIBLE);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004176
4177 server->ops->downgrade_oplock(server, cinode,
4178 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
4179
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004180 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004181 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05004182 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4183 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004184 cinode->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004185 }
4186
Jeff Layton3bc303c2009-09-21 06:47:50 -04004187 if (inode && S_ISREG(inode->i_mode)) {
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004188 if (CIFS_CACHE_READ(cinode))
Al Viro8737c932009-12-24 06:47:55 -05004189 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00004190 else
Al Viro8737c932009-12-24 06:47:55 -05004191 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004192 rc = filemap_fdatawrite(inode->i_mapping);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004193 if (!CIFS_CACHE_READ(cinode)) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004194 rc = filemap_fdatawait(inode->i_mapping);
4195 mapping_set_error(inode->i_mapping, rc);
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004196 cifs_zap_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004197 }
Joe Perchesf96637b2013-05-04 22:12:25 -05004198 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004199 }
4200
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004201 rc = cifs_push_locks(cfile);
4202 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004203 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004204
Jeff Layton3bc303c2009-09-21 06:47:50 -04004205 /*
4206	 * Releasing a stale oplock after a recent reconnect of the SMB session,
4207	 * using a now-incorrect file handle, is not a data integrity issue; but do
4208	 * not bother sending an oplock release if the session to the server is
4209	 * still disconnected, since the oplock was already released by the server.
4210 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00004211 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004212 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4213 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05004214 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004215 }
Aurelien Aptel8092ecc2019-03-29 10:49:12 +01004216 _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004217 cifs_done_oplock_break(cinode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004218}
4219
Steve Frenchdca69282013-11-11 16:42:37 -06004220/*
4221 * The presence of cifs_direct_io() in the address space ops vector
4222 * allows open() O_DIRECT flags which would have failed otherwise.
4223 *
4224 * In the non-cached mode (mount with cache=none) we shunt off direct
4225 * read and write requests, so this method should never be called.
4226 *
4227 * Direct IO is not yet supported in the cached mode.
4228 */
4229static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004230cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004231{
4232 /*
4233 * FIXME
4234 * Eventually need to support direct IO for non forcedirectio mounts
4235	 * Eventually we need to support direct IO for non-forcedirectio mounts.
4236 return -EINVAL;
4237}
4238
4239
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004240const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241 .readpage = cifs_readpage,
4242 .readpages = cifs_readpages,
4243 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07004244 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004245 .write_begin = cifs_write_begin,
4246 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304248 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06004249 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304250 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004251 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004253
4254/*
4255 * cifs_readpages requires the server to support a buffer large enough to
4256 * contain the header plus one complete page of data. Otherwise, we need
4257 * to leave cifs_readpages out of the address space operations.
4258 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004259const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004260 .readpage = cifs_readpage,
4261 .writepage = cifs_writepage,
4262 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004263 .write_begin = cifs_write_begin,
4264 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004265 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304266 .releasepage = cifs_release_page,
4267 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004268 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004269};