/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"

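/*
 * Convert the fcntl access mode in @flags to the NT generic access bits
 * requested in the SMB open. Note that O_RDWR deliberately requests
 * GENERIC_READ | GENERIC_WRITE rather than GENERIC_ALL; see the comment
 * in the body below.
 */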
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

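/*
 * Convert fcntl create/truncate bits to the SMB create disposition.
 * The mapping is the table documented in the block comment inside
 * cifs_nt_open() below.
 */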
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

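/*
 * Open a file via the POSIX extensions (CIFSPOSIXCreate). On success the
 * returned FILE_UNIX_BASIC_INFO is used to instantiate or refresh *pinode,
 * unless the server reports Type == -1, in which case the caller is
 * expected to query the path info itself.
 */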
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct POSIX match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}

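/*
 * Return true if any open instance of this inode holds at least one cached
 * byte-range (mandatory) lock. Walks cinode->llist under lock_sem.
 */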
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

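/*
 * Allocate and initialize the per-open cifsFileInfo: attach its lock list
 * to the inode, apply the granted oplock, and publish the file on the tcon
 * and inode open-file lists. Takes the initial reference (count = 1) that
 * is dropped by cifsFileInfo_put().
 */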
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

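/*
 * VFS ->open entry point. Tries a POSIX open first when the server
 * advertises CIFS_UNIX_POSIX_PATH_OPS_CAP, falls back to cifs_nt_open()
 * otherwise, and wires the resulting handle up via cifs_new_fileinfo().
 */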
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

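/*
 * Reopen a file handle that was invalidated, typically after a reconnect.
 * If @can_flush is set, dirty pages are written back and the inode info is
 * refreshed before the handle is reused; byte-range locks are reacquired
 * when the open was a reconnect.
 */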
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called; if the server was down, that means we
	 * end up here, and we can never tell if the caller already holds
	 * the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

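/*
 * After a reconnect, reopen any invalidated handles on this tree
 * connection that were opened with persistent semantics. A handle that
 * fails to reopen leaves need_reopen_files set so that a later call
 * retries.
 */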
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

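/*
 * Allocate a cifsLockInfo describing a byte-range lock request. The pid
 * recorded is the caller's tgid; blist and block_q are used to queue and
 * wake up waiters blocked on a conflicting lock.
 */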
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->fl_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

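/*
 * Append a lock to this open file's lock list, serialized by the inode's
 * lock_sem.
 */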
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001083/*
1084 * Set the byte-range lock (posix style). Returns:
1085 * 1) 0, if we set the lock and don't need to request to the server;
1086 * 2) 1, if we need to request to the server;
1087 * 3) <0, if the error occurs while setting the lock.
1088 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001089static int
1090cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1091{
Al Viro496ad9a2013-01-23 17:07:38 -05001092 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001093 int rc = 1;
1094
1095 if ((flock->fl_flags & FL_POSIX) == 0)
1096 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001097
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001098try_again:
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001099 down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001100 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001101 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001102 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001103 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001104
1105 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001106 up_write(&cinode->lock_sem);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001107 if (rc == FILE_LOCK_DEFERRED) {
NeilBrownada5c1d2018-11-30 10:04:08 +11001108 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001109 if (!rc)
1110 goto try_again;
NeilBrowncb03f942018-11-30 10:04:08 +11001111 locks_delete_block(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001112 }
Steve French9ebb3892012-04-01 13:52:54 -05001113 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001114}
1115
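/*
 * Push this file's cached byte-range locks to the server as SMB
 * mandatory locks, batched into LOCKING_ANDX_RANGE arrays of at most
 * max_num entries, one pass for exclusive locks and one for shared.
 */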
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001116int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001117cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001118{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001119 unsigned int xid;
1120 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001121 struct cifsLockInfo *li, *tmp;
1122 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001123 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001124 LOCKING_ANDX_RANGE *buf, *cur;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001125 static const int types[] = {
1126 LOCKING_ANDX_LARGE_FILES,
1127 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1128 };
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001129 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001130
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001131 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001132 tcon = tlink_tcon(cfile->tlink);
1133
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001134 /*
1135 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001136 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001137 */
1138 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001139 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001140 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001141 return -EINVAL;
1142 }
1143
Ross Lagerwall92a81092019-01-08 18:30:56 +00001144 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1145 PAGE_SIZE);
1146 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1147 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001148 max_num = (max_buf - sizeof(struct smb_hdr)) /
1149 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001150 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001151 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001152 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001153 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001154 }
1155
1156 for (i = 0; i < 2; i++) {
1157 cur = buf;
1158 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001159 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001160 if (li->type != types[i])
1161 continue;
1162 cur->Pid = cpu_to_le16(li->pid);
1163 cur->LengthLow = cpu_to_le32((u32)li->length);
1164 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1165 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1166 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1167 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001168 stored_rc = cifs_lockv(xid, tcon,
1169 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001170 (__u8)li->type, 0, num,
1171 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001172 if (stored_rc)
1173 rc = stored_rc;
1174 cur = buf;
1175 num = 0;
1176 } else
1177 cur++;
1178 }
1179
1180 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001181 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001182 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001183 if (stored_rc)
1184 rc = stored_rc;
1185 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001186 }
1187
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001188 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001189 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001190 return rc;
1191}
1192
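/*
 * Mix the lock owner pointer with a random secret, presumably so raw
 * kernel pointer values are not used as on-the-wire lock owner ids.
 */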
Jeff Layton3d224622016-05-24 06:27:44 -04001193static __u32
1194hash_lockowner(fl_owner_t owner)
1195{
1196 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1197}
1198
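/*
 * Snapshot of a POSIX lock, queued so the request can be sent to the
 * server outside the flc_lock spinlock.
 */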
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001199struct lock_to_push {
1200 struct list_head llist;
1201 __u64 offset;
1202 __u64 length;
1203 __u32 pid;
1204 __u16 netfid;
1205 __u8 type;
1206};
1207
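/*
 * Push the inode's cached POSIX locks to the server. lock_to_push
 * structures are preallocated (we cannot sleep under the flc_lock
 * spinlock), filled in while walking flc_posix, then sent with one
 * CIFSSMBPosixLock call per lock.
 */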
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001208static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001209cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001210{
David Howells2b0143b2015-03-17 22:25:59 +00001211 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001212 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001213 struct file_lock *flock;
1214 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001215 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001216 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001217 struct list_head locks_to_send, *el;
1218 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001219 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001220
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001221 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001222
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001223 if (!flctx)
1224 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001225
Jeff Laytone084c1b2015-02-16 14:32:03 -05001226 spin_lock(&flctx->flc_lock);
1227 list_for_each(el, &flctx->flc_posix) {
1228 count++;
1229 }
1230 spin_unlock(&flctx->flc_lock);
1231
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001232 INIT_LIST_HEAD(&locks_to_send);
1233
1234 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001235 * Allocating count lock structures is enough because no FL_POSIX locks
1236 * can be added to the list while we are holding cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001237 * protects the locking operations on this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001238 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001239 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001240 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1241 if (!lck) {
1242 rc = -ENOMEM;
1243 goto err_out;
1244 }
1245 list_add_tail(&lck->llist, &locks_to_send);
1246 }
1247
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001248 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001249 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001250 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001251 if (el == &locks_to_send) {
1252 /*
1253 * The list ended. We don't have enough allocated
1254 * structures - something is really wrong.
1255 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001256 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001257 break;
1258 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001259 length = 1 + flock->fl_end - flock->fl_start;
1260 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1261 type = CIFS_RDLCK;
1262 else
1263 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001264 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001265 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001266 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001267 lck->length = length;
1268 lck->type = type;
1269 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001270 }
Jeff Layton6109c852015-01-16 15:05:57 -05001271 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001272
1273 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001274 int stored_rc;
1275
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001276 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001277 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001278 lck->type, 0);
1279 if (stored_rc)
1280 rc = stored_rc;
1281 list_del(&lck->llist);
1282 kfree(lck);
1283 }
1284
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001285out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001286 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001287 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001288err_out:
1289 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1290 list_del(&lck->llist);
1291 kfree(lck);
1292 }
1293 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001294}
1295
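/*
 * Push all cached locks for this file to the server, using POSIX or
 * mandatory semantics as negotiated, and mark the inode as no longer
 * caching byte-range locks.
 */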
1296static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001297cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001298{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001299 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001300 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001301 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001302 int rc = 0;
1303
1304 /* we are going to update can_cache_brlcks here - need write access */
1305 down_write(&cinode->lock_sem);
1306 if (!cinode->can_cache_brlcks) {
1307 up_write(&cinode->lock_sem);
1308 return rc;
1309 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001310
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001311 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001312 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1313 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001314 rc = cifs_push_posix_locks(cfile);
1315 else
1316 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001317
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001318 cinode->can_cache_brlcks = false;
1319 up_write(&cinode->lock_sem);
1320 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001321}
1322
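/*
 * Decode a struct file_lock into the server's lock type plus lock,
 * unlock and wait flags, logging any lock flags we do not handle.
 */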
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001323static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001324cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001325 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001327 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001328 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001329 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001330 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001331 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001332 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001333 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001335 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001336 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001337 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001338 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001339 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001340 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001341 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001342 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001344 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001345 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001346 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001347 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001348 *lock = 1;
1349 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001350 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001351 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001352 *unlock = 1;
1353 /* Check if unlock includes more than one lock range */
1354 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001355 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001356 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001357 *lock = 1;
1358 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001359 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001360 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001361 *lock = 1;
1362 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001363 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001364 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001365 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001366 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001367 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001368}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369
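/*
 * Handle F_GETLK: check whether the requested range could be locked.
 * For mandatory locking this may mean posting a probe lock to the
 * server and releasing it again when it succeeds.
 */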
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001370static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001371cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001372 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001373{
1374 int rc = 0;
1375 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001376 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1377 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001378 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001379 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001381 if (posix_lck) {
1382 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001383
1384 rc = cifs_posix_lock_test(file, flock);
1385 if (!rc)
1386 return rc;
1387
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001388 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001389 posix_lock_type = CIFS_RDLCK;
1390 else
1391 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001392 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1393 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001394 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001395 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 return rc;
1397 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001398
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001399 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001400 if (!rc)
1401 return rc;
1402
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001403 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001404 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1405 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001406 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001407 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1408 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001409 flock->fl_type = F_UNLCK;
1410 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001411 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1412 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001413 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001414 }
1415
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001416 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001417 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001418 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001419 }
1420
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001421 type &= ~server->vals->exclusive_lock_type;
1422
1423 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1424 type | server->vals->shared_lock_type,
1425 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001426 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001427 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1428 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001429 flock->fl_type = F_RDLCK;
1430 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001431 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1432 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001433 } else
1434 flock->fl_type = F_WRLCK;
1435
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001436 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001437}
1438
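/* Move every lock entry from one list to another. */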
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001439void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001440cifs_move_llist(struct list_head *source, struct list_head *dest)
1441{
1442 struct list_head *li, *tmp;
1443 list_for_each_safe(li, tmp, source)
1444 list_move(li, dest);
1445}
1446
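/* Free every lock on the list, waking any waiters blocked on each. */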
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001447void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001448cifs_free_llist(struct list_head *llist)
1449{
1450 struct cifsLockInfo *li, *tmp;
1451 list_for_each_entry_safe(li, tmp, llist, llist) {
1452 cifs_del_lock_waiters(li);
1453 list_del(&li->llist);
1454 kfree(li);
1455 }
1456}
1457
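/*
 * Handle an unlock for a mandatory-locking file: remove every cached
 * lock inside the range, batching the corresponding unlock ranges to
 * the server; removed locks are parked on a temporary list so they can
 * be restored if the server call fails.
 */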
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001458int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001459cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1460 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001461{
1462 int rc = 0, stored_rc;
Colin Ian King4d61eda2017-09-19 16:27:39 +01001463 static const int types[] = {
1464 LOCKING_ANDX_LARGE_FILES,
1465 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1466 };
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001467 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001468 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001469 LOCKING_ANDX_RANGE *buf, *cur;
1470 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001471 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001472 struct cifsLockInfo *li, *tmp;
1473 __u64 length = 1 + flock->fl_end - flock->fl_start;
1474 struct list_head tmp_llist;
1475
1476 INIT_LIST_HEAD(&tmp_llist);
1477
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001478 /*
1479 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001480 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001481 */
1482 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallb9a74cd2019-01-08 18:30:57 +00001483 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001484 return -EINVAL;
1485
Ross Lagerwall92a81092019-01-08 18:30:56 +00001486 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1487 PAGE_SIZE);
1488 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1489 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001490 max_num = (max_buf - sizeof(struct smb_hdr)) /
1491 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001492 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001493 if (!buf)
1494 return -ENOMEM;
1495
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001496 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001497 for (i = 0; i < 2; i++) {
1498 cur = buf;
1499 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001500 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001501 if (flock->fl_start > li->offset ||
1502 (flock->fl_start + length) <
1503 (li->offset + li->length))
1504 continue;
1505 if (current->tgid != li->pid)
1506 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001507 if (types[i] != li->type)
1508 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001509 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001510 /*
1511 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001512 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001513 */
1514 list_del(&li->llist);
1515 cifs_del_lock_waiters(li);
1516 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001517 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001518 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001519 cur->Pid = cpu_to_le16(li->pid);
1520 cur->LengthLow = cpu_to_le32((u32)li->length);
1521 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1522 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1523 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1524 /*
1525 * We need to save a lock here so that we can add it back
1526 * to the file's list if the unlock range request fails on
1527 * the server.
1528 */
1529 list_move(&li->llist, &tmp_llist);
1530 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001531 stored_rc = cifs_lockv(xid, tcon,
1532 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001533 li->type, num, 0, buf);
1534 if (stored_rc) {
1535 /*
1536 * We failed on the unlock range
1537 * request - add all locks from the tmp
1538 * list to the head of the file's list.
1539 */
1540 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001541 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001542 rc = stored_rc;
1543 } else
1544 /*
1545 * The unlock range request succeeded -
1546 * free the tmp list.
1547 */
1548 cifs_free_llist(&tmp_llist);
1549 cur = buf;
1550 num = 0;
1551 } else
1552 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001553 }
1554 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001555 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001556 types[i], num, 0, buf);
1557 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001558 cifs_move_llist(&tmp_llist,
1559 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001560 rc = stored_rc;
1561 } else
1562 cifs_free_llist(&tmp_llist);
1563 }
1564 }
1565
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001566 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001567 kfree(buf);
1568 return rc;
1569}
1570
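/*
 * Handle F_SETLK/F_SETLKW: set or clear a byte-range lock, using POSIX
 * semantics when the protocol allows it and mandatory SMB locks
 * otherwise.
 */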
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001571static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001572cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001573 bool wait_flag, bool posix_lck, int lock, int unlock,
1574 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001575{
1576 int rc = 0;
1577 __u64 length = 1 + flock->fl_end - flock->fl_start;
1578 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1579 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001580 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001581 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001582
1583 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001584 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001585
1586 rc = cifs_posix_lock_set(file, flock);
1587 if (!rc || rc < 0)
1588 return rc;
1589
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001590 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001591 posix_lock_type = CIFS_RDLCK;
1592 else
1593 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001594
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001595 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001596 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001597
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001598 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001599 hash_lockowner(flock->fl_owner),
1600 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001601 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001602 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001603 }
1604
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001605 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001606 struct cifsLockInfo *lock;
1607
Ronnie Sahlberg96457592018-10-04 09:24:38 +10001608 lock = cifs_lock_init(flock->fl_start, length, type,
1609 flock->fl_flags);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001610 if (!lock)
1611 return -ENOMEM;
1612
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001613 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001614 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001615 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001616 return rc;
1617 }
1618 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001619 goto out;
1620
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001621 /*
1622 * A Windows 7 server can delay breaking a lease from read to None
1623 * if we set a byte-range lock on a file - break it explicitly
1624 * before sending the lock to the server to be sure the next
1625 * read won't conflict with non-overlapped locks due to
1626 * page reading.
1627 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001628 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1629 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001630 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001631 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1632 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001633 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001634 }
1635
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001636 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1637 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001638 if (rc) {
1639 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001640 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001641 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001642
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001643 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001644 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001645 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001646
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001647out:
Chengyu Song00b8c952015-03-24 20:18:49 -04001648 if (flock->fl_flags & FL_POSIX && !rc)
Benjamin Coddington4f656362015-10-22 13:38:14 -04001649 rc = locks_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001650 return rc;
1651}
1652
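/*
 * Entry point for the ->lock file operation: decode the request and
 * dispatch to cifs_getlk() or cifs_setlk().
 */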
1653int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1654{
1655 int rc, xid;
1656 int lock = 0, unlock = 0;
1657 bool wait_flag = false;
1658 bool posix_lck = false;
1659 struct cifs_sb_info *cifs_sb;
1660 struct cifs_tcon *tcon;
1661 struct cifsInodeInfo *cinode;
1662 struct cifsFileInfo *cfile;
1663 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001664 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001665
1666 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001667 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001668
Joe Perchesf96637b2013-05-04 22:12:25 -05001669 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1670 cmd, flock->fl_flags, flock->fl_type,
1671 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001672
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001673 cfile = (struct cifsFileInfo *)file->private_data;
1674 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001675
1676 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1677 tcon->ses->server);
Al Viro7119e222014-10-22 00:25:12 -04001678 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001679 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001680 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001681
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001682 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001683 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1684 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1685 posix_lck = true;
1686 /*
1687 * BB add code here to normalize offset and length to account for
1688 * a negative length, which we cannot accept over the wire.
1689 */
1690 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001691 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001692 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001693 return rc;
1694 }
1695
1696 if (!lock && !unlock) {
1697 /*
1698 * if this is neither a lock nor an unlock request, there is
1699 * nothing to do since we do not know what it is
1700 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001701 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001702 return -EOPNOTSUPP;
1703 }
1704
1705 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1706 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001707 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 return rc;
1709}
1710
Jeff Layton597b0272012-03-23 14:40:56 -04001711/*
1712 * update the file size (if needed) after a write. Should be called with
1713 * the inode->i_lock held
1714 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001715void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001716cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1717 unsigned int bytes_written)
1718{
1719 loff_t end_of_write = offset + bytes_written;
1720
1721 if (end_of_write > cifsi->server_eof)
1722 cifsi->server_eof = end_of_write;
1723}
1724
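/*
 * Write data out through an open file handle, retrying on -EAGAIN and
 * reopening an invalidated handle when needed, then update the cached
 * server EOF and local inode size.
 */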
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001725static ssize_t
1726cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1727 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728{
1729 int rc = 0;
1730 unsigned int bytes_written = 0;
1731 unsigned int total_written;
1732 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001733 struct cifs_tcon *tcon;
1734 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001735 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001736 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001737 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001738 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739
Jeff Layton7da4b492010-10-15 15:34:00 -04001740 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741
Al Viro35c265e2014-08-19 20:25:34 -04001742 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1743 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001745 tcon = tlink_tcon(open_file->tlink);
1746 server = tcon->ses->server;
1747
1748 if (!server->ops->sync_write)
1749 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001750
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001751 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 for (total_written = 0; write_size > total_written;
1754 total_written += bytes_written) {
1755 rc = -EAGAIN;
1756 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001757 struct kvec iov[2];
1758 unsigned int len;
1759
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 /* we could deadlock if we called
1762 filemap_fdatawait from here, so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001763 cifs_reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001765 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766 if (rc != 0)
1767 break;
1768 }
Steve French3e844692005-10-03 13:37:24 -07001769
David Howells2b0143b2015-03-17 22:25:59 +00001770 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001771 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001772 /* iov[0] is reserved for smb header */
1773 iov[1].iov_base = (char *)write_data + total_written;
1774 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001775 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001776 io_parms.tcon = tcon;
1777 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001778 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001779 rc = server->ops->sync_write(xid, &open_file->fid,
1780 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 }
1782 if (rc || (bytes_written == 0)) {
1783 if (total_written)
1784 break;
1785 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001786 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 return rc;
1788 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001789 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001790 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001791 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001792 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001793 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001794 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 }
1796
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001797 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798
Jeff Layton7da4b492010-10-15 15:34:00 -04001799 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001800 spin_lock(&d_inode(dentry)->i_lock);
1801 if (*offset > d_inode(dentry)->i_size)
1802 i_size_write(d_inode(dentry), *offset);
1803 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 }
David Howells2b0143b2015-03-17 22:25:59 +00001805 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001806 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 return total_written;
1808}
1809
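/*
 * Find an open handle on this inode that can be used for reading,
 * optionally restricted to the current fsuid, and take a reference
 * on it.
 */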
Jeff Layton6508d902010-09-29 19:51:11 -04001810struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1811 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001812{
1813 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001814 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001815 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Layton6508d902010-09-29 19:51:11 -04001816
1817 /* only filter by fsuid on multiuser mounts */
1818 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1819 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001820
Steve French3afca262016-09-22 18:58:16 -05001821 spin_lock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001822 /* we could simply get the first list entry since write-only entries
1823 are always at the end of the list, but since the first entry might
1824 have a close pending, we go through the whole list */
1825 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001826 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001827 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001828 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001829 if (!open_file->invalidHandle) {
1830 /* found a good file */
1831 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001832 cifsFileInfo_get(open_file);
1833 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001834 return open_file;
1835 } /* else might as well continue, and look for
1836 another, or simply have the caller reopen it
1837 again rather than trying to fix this handle */
1838 } else /* write only file */
1839 break; /* write only files are last so must be done */
1840 }
Steve French3afca262016-09-22 18:58:16 -05001841 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001842 return NULL;
1843}
Steve French630f3f0c2007-10-25 21:17:17 +00001844
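/*
 * Find an open handle on this inode that can be used for writing and
 * take a reference on it. Valid handles owned by the caller are
 * preferred; as a last resort an invalidated handle is reopened,
 * retrying up to MAX_REOPEN_ATT times.
 */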
Jeff Layton6508d902010-09-29 19:51:11 -04001845struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1846 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001847{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001848 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001849 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001850 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001851 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001852 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001853 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001854
Steve French60808232006-04-22 15:53:05 +00001855 /* Having a null inode here (because mapping->host was set to zero by
1856 the VFS or MM) should not happen but we had reports of an oops (due to
1857 it being zero) during stress testcases so we need to check for it */
1858
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001859 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001860 cifs_dbg(VFS, "Null inode passed to find_writable_file\n");
Steve French60808232006-04-22 15:53:05 +00001861 dump_stack();
1862 return NULL;
1863 }
1864
Jeff Laytond3892292010-11-02 16:22:50 -04001865 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001866 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001867
Jeff Layton6508d902010-09-29 19:51:11 -04001868 /* only filter by fsuid on multiuser mounts */
1869 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1870 fsuid_only = false;
1871
Steve French3afca262016-09-22 18:58:16 -05001872 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001873refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001874 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001875 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001876 return NULL;
1877 }
Steve French6148a742005-10-05 12:23:19 -07001878 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001879 if (!any_available && open_file->pid != current->tgid)
1880 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001881 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001882 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001883 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001884 if (!open_file->invalidHandle) {
1885 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001886 cifsFileInfo_get(open_file);
1887 spin_unlock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001888 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001889 } else {
1890 if (!inv_file)
1891 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001892 }
Steve French6148a742005-10-05 12:23:19 -07001893 }
1894 }
Jeff Layton2846d382008-09-22 21:33:33 -04001895 /* couldn't find a usable FH with the same pid, try any available */
1896 if (!any_available) {
1897 any_available = true;
1898 goto refind_writable;
1899 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001900
1901 if (inv_file) {
1902 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001903 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001904 }
1905
Steve French3afca262016-09-22 18:58:16 -05001906 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001907
1908 if (inv_file) {
1909 rc = cifs_reopen_file(inv_file, false);
1910 if (!rc)
1911 return inv_file;
1912 else {
Steve French3afca262016-09-22 18:58:16 -05001913 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001914 list_move_tail(&inv_file->flist,
1915 &cifs_inode->openFileList);
Steve French3afca262016-09-22 18:58:16 -05001916 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001917 cifsFileInfo_put(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001918 ++refind;
David Disseldorpe1e9bda2015-03-13 14:20:29 +01001919 inv_file = NULL;
Steve French3afca262016-09-22 18:58:16 -05001920 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001921 goto refind_writable;
1922 }
1923 }
1924
Steve French6148a742005-10-05 12:23:19 -07001925 return NULL;
1926}
1927
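/*
 * Write the byte range [from, to) of a cached page back to the server
 * through any writable handle on the inode.
 */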
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1929{
1930 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001931 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 char *write_data;
1933 int rc = -EFAULT;
1934 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001936 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
1938 if (!mapping || !mapping->host)
1939 return -EFAULT;
1940
1941 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
1943 offset += (loff_t)from;
1944 write_data = kmap(page);
1945 write_data += from;
1946
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001947 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 kunmap(page);
1949 return -EIO;
1950 }
1951
1952 /* racing with truncate? */
1953 if (offset > mapping->host->i_size) {
1954 kunmap(page);
1955 return 0; /* don't care */
1956 }
1957
1958 /* check to make sure that we are not extending the file */
1959 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001960 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
Jeff Layton6508d902010-09-29 19:51:11 -04001962 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001963 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001964 bytes_written = cifs_write(open_file, open_file->pid,
1965 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001966 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07001968 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001969 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001970 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001971 else if (bytes_written < 0)
1972 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001973 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05001974 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 rc = -EIO;
1976 }
1977
1978 kunmap(page);
1979 return rc;
1980}
1981
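/* Allocate a write request and gather up to tofind dirty pages. */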
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001982static struct cifs_writedata *
1983wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1984 pgoff_t end, pgoff_t *index,
1985 unsigned int *found_pages)
1986{
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001987 struct cifs_writedata *wdata;
1988
1989 wdata = cifs_writedata_alloc((unsigned int)tofind,
1990 cifs_writev_complete);
1991 if (!wdata)
1992 return NULL;
1993
Jan Kara9c19a9c2017-11-15 17:35:26 -08001994 *found_pages = find_get_pages_range_tag(mapping, index, end,
1995 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001996 return wdata;
1997}
1998
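/*
 * From the pages gathered into wdata, lock a run of consecutive dirty
 * pages and mark each one for writeback; leftover pages are released.
 */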
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04001999static unsigned int
2000wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2001 struct address_space *mapping,
2002 struct writeback_control *wbc,
2003 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2004{
2005 unsigned int nr_pages = 0, i;
2006 struct page *page;
2007
2008 for (i = 0; i < found_pages; i++) {
2009 page = wdata->pages[i];
2010 /*
Matthew Wilcoxb93b0162018-04-10 16:36:56 -07002011 * At this point we hold neither the i_pages lock nor the
2012 * page lock: the page may be truncated or invalidated
2013 * (changing page->mapping to NULL), or even swizzled
2014 * back from swapper_space to tmpfs file mapping
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002015 */
2016
2017 if (nr_pages == 0)
2018 lock_page(page);
2019 else if (!trylock_page(page))
2020 break;
2021
2022 if (unlikely(page->mapping != mapping)) {
2023 unlock_page(page);
2024 break;
2025 }
2026
2027 if (!wbc->range_cyclic && page->index > end) {
2028 *done = true;
2029 unlock_page(page);
2030 break;
2031 }
2032
2033 if (*next && (page->index != *next)) {
2034 /* Not next consecutive page */
2035 unlock_page(page);
2036 break;
2037 }
2038
2039 if (wbc->sync_mode != WB_SYNC_NONE)
2040 wait_on_page_writeback(page);
2041
2042 if (PageWriteback(page) ||
2043 !clear_page_dirty_for_io(page)) {
2044 unlock_page(page);
2045 break;
2046 }
2047
2048 /*
2049 * This actually clears the dirty bit in the radix tree.
2050 * See cifs_writepage() for more commentary.
2051 */
2052 set_page_writeback(page);
2053 if (page_offset(page) >= i_size_read(mapping->host)) {
2054 *done = true;
2055 unlock_page(page);
2056 end_page_writeback(page);
2057 break;
2058 }
2059
2060 wdata->pages[i] = page;
2061 *next = page->index + 1;
2062 ++nr_pages;
2063 }
2064
2065 /* reset index to refind any pages skipped */
2066 if (nr_pages == 0)
2067 *index = wdata->pages[0]->index + 1;
2068
2069 /* put any pages we aren't going to use */
2070 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002071 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002072 wdata->pages[i] = NULL;
2073 }
2074
2075 return nr_pages;
2076}
2077
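/*
 * Finish filling in the write request (sizes, pid, a writable handle)
 * and pass it to the transport's async write routine, unlocking the
 * pages once the request has been issued.
 */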
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002078static int
2079wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2080 struct address_space *mapping, struct writeback_control *wbc)
2081{
2082 int rc = 0;
2083 struct TCP_Server_Info *server;
2084 unsigned int i;
2085
2086 wdata->sync_mode = wbc->sync_mode;
2087 wdata->nr_pages = nr_pages;
2088 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002089 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002090 wdata->tailsz = min(i_size_read(mapping->host) -
2091 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002092 (loff_t)PAGE_SIZE);
2093 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002094
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002095 if (wdata->cfile != NULL)
2096 cifsFileInfo_put(wdata->cfile);
2097 wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
2098 if (!wdata->cfile) {
2099 cifs_dbg(VFS, "No writable handles for inode\n");
2100 rc = -EBADF;
2101 } else {
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002102 wdata->pid = wdata->cfile->pid;
2103 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2104 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002105 }
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002106
2107 for (i = 0; i < nr_pages; ++i)
2108 unlock_page(wdata->pages[i]);
2109
2110 return rc;
2111}
2112
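/*
 * The ->writepages handler: repeatedly reserve credits, collect a batch
 * of contiguous dirty pages and send them as one async write,
 * redirtying the pages on retryable errors.
 */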
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002114 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002116 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002117 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002118 bool done = false, scanned = false, range_whole = false;
2119 pgoff_t end, index;
2120 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07002121 int rc = 0;
Pavel Shilovsky9a663962019-01-08 11:15:28 -08002122 int saved_rc = 0;
Steve French0cb012d2018-10-11 01:01:02 -05002123 unsigned int xid;
Steve French50c2f752007-07-13 00:33:32 +00002124
Steve French37c0eb42005-10-05 14:50:29 -07002125 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002126 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002127 * one page at a time via cifs_writepage
2128 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002129 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002130 return generic_writepages(mapping, wbc);
2131
Steve French0cb012d2018-10-11 01:01:02 -05002132 xid = get_xid();
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002133 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002134 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002135 end = -1;
2136 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002137 index = wbc->range_start >> PAGE_SHIFT;
2138 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002139 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002140 range_whole = true;
2141 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002142 }
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002143 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc != 0) {
			done = true;
			break;
		}

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			done = true;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (is_retryable_error(rc))
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (!is_retryable_error(rc))
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		/* Return immediately if we received a signal during writing */
		if (is_interrupt_error(rc)) {
			done = true;
			break;
		}

		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (saved_rc != 0)
		rc = saved_rc;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	free_xid(xid);
	return rc;
}

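/*
 * Write one locked page to the server. On a retryable error the write is
 * retried in place only for WB_SYNC_ALL callers hitting -EAGAIN; otherwise
 * the page is redirtied (retryable) or marked in error (fatal).
 */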
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

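/*
 * Called after data has been copied into a pagecache page. If the page
 * never became uptodate, the copied range is written through to the
 * server synchronously; otherwise the page is just marked dirty for
 * later writeback.
 */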
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known, which we might as well leverage */
		/* BB check if anything else is missing out of ppw,
		   such as updating the last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}

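/*
 * fsync for strict cache mode: flush dirty pages, drop the page cache if
 * we hold no read oplock/lease (forcing the next read to go to the
 * server), then ask the server to flush the file.
 */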
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}

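/*
 * Plain fsync: flush dirty pages and ask the server to flush the file,
 * leaving the local page cache intact.
 */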
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}

/*
 * As the file closes, flush all cached write data for this inode and
 * check for write-behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

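/*
 * Clamp an uncached write segment to the negotiated wsize and report how
 * many pages it needs; *cur_len receives the clamped length in bytes.
 */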
static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}

static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);
	collect_uncached_write_data(wdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}

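/*
 * Copy up to *len bytes of user data into the pages of @wdata. On return,
 * *len is the number of bytes actually copied and *num_pages the number
 * of pages used.
 */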
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}

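/*
 * Resend a whole wdata after a reconnect: wait until the server grants
 * enough credits to cover the entire request, reopen the file handle if
 * it was invalidated, then reissue the async write.
 */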
static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
	struct cifs_aio_ctx *ctx)
{
	unsigned int wsize, credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(wdata->cfile->tlink)->ses->server;

	/*
	 * Wait for credits to resend this wdata.
	 * Note: we are attempting to resend the whole wdata, not in segments
	 */
	do {
		rc = server->ops->wait_mtu_credits(
			server, wdata->bytes, &wsize, &credits);

		if (rc)
			goto out;

		if (wsize < wdata->bytes) {
			add_credits_and_wake_if(server, credits, 0);
			msleep(1000);
		}
	} while (wsize < wdata->bytes);

	rc = -EAGAIN;
	while (rc == -EAGAIN) {
		rc = 0;
		if (wdata->cfile->invalidHandle)
			rc = cifs_reopen_file(wdata->cfile, false);
		if (!rc)
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
	}

	if (!rc) {
		list_add_tail(&wdata->list, wdata_list);
		return 0;
	}

	add_credits_and_wake_if(server, wdata->credits, 0);
out:
	kref_put(&wdata->refcount, cifs_uncached_writedata_release);

	return rc;
}

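/*
 * Split an uncached write into wsize-sized segments and issue an async
 * write for each. For direct I/O the user pages are pinned and sent in
 * place; otherwise the data is first copied into freshly allocated pages.
 * Every wdata sent is queued on @wdata_list for the caller to collect.
 */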
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
		     struct cifs_aio_ctx *ctx)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, wsize);

		if (ctx->direct_io) {
			ssize_t result;

			result = iov_iter_get_pages_alloc(
				from, &pagevec, cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					"direct_writev couldn't get user pages "
					"(rc=%zd) iter type %d iov_offset %zd "
					"count %zd\n",
					result, from->type,
					from->iov_offset, from->count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(from, cur_len);

			nr_pages =
				(cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;

			wdata = cifs_writedata_direct_alloc(pagevec,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			wdata->page_offset = start;
			wdata->tailsz =
				nr_pages > 1 ?
					cur_len - (PAGE_SIZE - start) -
					(nr_pages - 2) * PAGE_SIZE :
					cur_len;
		} else {
			nr_pages = get_numpages(wsize, len, &cur_len);
			wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
			if (!wdata) {
				rc = -ENOMEM;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
			if (rc) {
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			num_pages = nr_pages;
			rc = wdata_fill_from_iovec(
				wdata, from, &cur_len, &num_pages);
			if (rc) {
				for (i = 0; i < nr_pages; i++)
					put_page(wdata->pages[i]);
				kvfree(wdata->pages);
				kfree(wdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			/*
			 * Bring nr_pages down to the number of pages we
			 * actually used, and free any pages that we didn't use.
			 */
			for ( ; nr_pages > num_pages; nr_pages--)
				put_page(wdata->pages[nr_pages - 1]);

			wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		}

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->credits = credits;
		wdata->ctx = ctx;
		kref_get(&ctx->refcount);

		if (!wdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}

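/*
 * Reap completed wdatas in order of increasing offset, accumulating the
 * byte count, resending any segment that failed with -EAGAIN, and finally
 * completing the aio or synchronous caller.
 */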
static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_writedata *wdata, *tmp;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct dentry *dentry = ctx->cfile->dentry;
	unsigned int i;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit, then return without waiting
	 * for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
		if (!rc) {
			if (!try_wait_for_completion(&wdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (wdata->result)
				rc = wdata->result;
			else
				ctx->total_len += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = ctx->iter;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				if (ctx->direct_io)
					rc = cifs_resend_wdata(
						wdata, &tmp_list, ctx);
				else {
					iov_iter_advance(&tmp_from,
						 wdata->offset - ctx->pos);

					rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						ctx->cfile, cifs_sb, &tmp_list,
						ctx);
				}

				list_splice(&tmp_list, &ctx->list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (!ctx->direct_io)
		for (i = 0; i < ctx->npages; i++)
			put_page(ctx->bv[i].bv_page);

	cifs_stats_bytes_written(tcon, ctx->total_len);
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}

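/*
 * Common body of the uncached and direct write paths: build a
 * cifs_aio_ctx, kick off the async writes, then either return
 * -EIOCBQUEUED for an async kiocb or wait for completion and return the
 * number of bytes written.
 */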
static ssize_t __cifs_writev(
	struct kiocb *iocb, struct iov_iter *from, bool direct)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *cfile;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_aio_ctx *ctx;
	struct iov_iter saved_from = *from;
	size_t len = iov_iter_count(from);
	int rc;

	/*
	 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
	 * In this case, fall back to the non-direct write function.
	 * This could be improved by getting pages directly in ITER_KVEC.
	 */
	if (direct && from->type & ITER_KVEC) {
		cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
		direct = false;
	}

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	ctx->pos = iocb->ki_pos;

	if (direct) {
		ctx->direct_io = true;
		ctx->iter = *from;
		ctx->len = len;
	} else {
		rc = setup_aio_ctx_iter(ctx, from, WRITE);
		if (rc) {
			kref_put(&ctx->refcount, cifs_aio_ctx_release);
			return rc;
		}
	}

	/* grab a lock here; the write completion handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
				  cfile, cifs_sb, &ctx->list, ctx);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_written = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_written = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	return total_written;
}

ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, true);
}

ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	return __cifs_writev(iocb, from, false);
}

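/*
 * Cached write path for shares that may carry mandatory byte-range locks:
 * hold lock_sem so the lock list cannot change under us, refuse the write
 * if a conflicting brlock exists, and otherwise go through the generic
 * pagecache write path.
 */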
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	inode_lock(inode);
	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, 0,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	up_read(&cinode->lock_sem);
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	return rc;
}

ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because flushing may cause an error with
	 * mandatory locks on these pages but not on the region from pos to
	 * pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * A Windows 7 server can delay breaking a level2 oplock when a
		 * write request comes in - break it on the client to prevent
		 * reading stale data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}

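/*
 * Allocate a cifs_readdata that reads into a caller-supplied page array;
 * cifs_readdata_alloc() below also allocates the page pointer array.
 */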
static struct cifs_readdata *
cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (rdata != NULL) {
		rdata->pages = pages;
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct page **pages =
		kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	struct cifs_readdata *ret = NULL;

	if (pages) {
		ret = cifs_readdata_direct_alloc(pages, complete);
		if (!ret)
			kfree(pages);
	}

	return ret;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
#ifdef CONFIG_CIFS_SMB_DIRECT
	if (rdata->mr) {
		smbd_deregister_mr(rdata->mr);
		rdata->mr = NULL;
	}
#endif
	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kvfree(rdata->pages);
	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		unsigned int nr_page_failed = i;

		/* only put the pages that were actually allocated */
		for (i = 0; i < nr_page_failed; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
	}
	cifs_readdata_release(refcount);
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written;

		if (unlikely(iov_iter_is_pipe(iter))) {
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}

static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	collect_uncached_read_data(rdata->ctx);
	/* the below call can possibly free the last ref to aio ctx */
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

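/*
 * Fill the pages of @rdata with up to @len bytes of response data, copied
 * from @iter when one is supplied or received straight from the socket
 * otherwise. Pages that turn out not to be needed are released and the
 * tail size is trimmed to match the data actually placed.
 */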
static int
uncached_fill_pages(struct TCP_Server_Info *server,
		    struct cifs_readdata *rdata, struct iov_iter *iter,
		    unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;
		unsigned int segment_size = rdata->pagesz;

		if (i == 0)
			segment_size -= page_offset;
		else
			page_offset = 0;

		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		n = len;
		if (len >= segment_size)
			/* enough data to fill the page */
			n = segment_size;
		else
			rdata->tailsz = len;
		len -= n;

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
		rdata->got_bytes : result;
}

static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	return uncached_fill_pages(server, rdata, NULL, len);
}

static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata,
			      struct iov_iter *iter)
{
	return uncached_fill_pages(server, rdata, iter, iter->count);
}

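/*
 * Read-side counterpart of cifs_resend_wdata(): wait for enough credits
 * to cover the whole rdata, reopen the handle if needed, and reissue the
 * async read, queueing it on @rdata_list on success.
 */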
static int cifs_resend_rdata(struct cifs_readdata *rdata,
			struct list_head *rdata_list,
			struct cifs_aio_ctx *ctx)
{
	unsigned int rsize, credits;
	int rc;
	struct TCP_Server_Info *server =
		tlink_tcon(rdata->cfile->tlink)->ses->server;

	/*
	 * Wait for credits to resend this rdata.
	 * Note: we are attempting to resend the whole rdata, not in segments
	 */
	do {
		rc = server->ops->wait_mtu_credits(server, rdata->bytes,
						&rsize, &credits);

		if (rc)
			goto out;

		if (rsize < rdata->bytes) {
			add_credits_and_wake_if(server, credits, 0);
			msleep(1000);
		}
	} while (rsize < rdata->bytes);

	rc = -EAGAIN;
	while (rc == -EAGAIN) {
		rc = 0;
		if (rdata->cfile->invalidHandle)
			rc = cifs_reopen_file(rdata->cfile, true);
		if (!rc)
			rc = server->ops->async_readv(rdata);
	}

	if (!rc) {
		/* Add to aio pending list */
		list_add_tail(&rdata->list, rdata_list);
		return 0;
	}

	add_credits_and_wake_if(server, rdata->credits, 0);
out:
	kref_put(&rdata->refcount,
		cifs_uncached_readdata_release);

	return rc;
}

static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;
	struct page **pagevec;
	size_t start;
	struct iov_iter direct_iov = ctx->iter;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if (ctx->direct_io)
		iov_iter_advance(&direct_iov, offset - ctx->pos);

	do {
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);

		if (ctx->direct_io) {
			ssize_t result;

			result = iov_iter_get_pages_alloc(
					&direct_iov, &pagevec,
					cur_len, &start);
			if (result < 0) {
				cifs_dbg(VFS,
					"couldn't get user pages (rc=%zd)"
					" iter type %d"
					" iov_offset %zd count %zd\n",
					result, direct_iov.type,
					direct_iov.iov_offset,
					direct_iov.count);
				dump_stack();

				rc = result;
				add_credits_and_wake_if(server, credits, 0);
				break;
			}
			cur_len = (size_t)result;
			iov_iter_advance(&direct_iov, cur_len);

			rdata = cifs_readdata_direct_alloc(
					pagevec, cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
			rdata->page_offset = start;
			rdata->tailsz = npages > 1 ?
				cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
				cur_len;

		} else {

			npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
			/* allocate a readdata struct */
			rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
			if (!rdata) {
				add_credits_and_wake_if(server, credits, 0);
				rc = -ENOMEM;
				break;
			}

			rc = cifs_read_allocate_pages(rdata, npages);
			if (rc) {
				kvfree(rdata->pages);
				kfree(rdata);
				add_credits_and_wake_if(server, credits, 0);
				break;
			}

			rdata->tailsz = PAGE_SIZE;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
		rdata->credits = credits;
		rdata->ctx = ctx;
		kref_get(&ctx->refcount);

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				cifs_uncached_readdata_release);
			if (rc == -EAGAIN) {
				iov_iter_revert(&direct_iov, cur_len);
				continue;
			}
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}

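/*
 * Reap completed rdatas in order of increasing offset: copy their data to
 * the user buffer (in the non-direct case), resend anything that failed
 * with -EAGAIN after a reconnect, and complete the aio or synchronous
 * caller with the total bytes read.
 */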
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003435static void
3436collect_uncached_read_data(struct cifs_aio_ctx *ctx)
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003437{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003438 struct cifs_readdata *rdata, *tmp;
3439 struct iov_iter *to = &ctx->iter;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003440 struct cifs_sb_info *cifs_sb;
3441 struct cifs_tcon *tcon;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003442 unsigned int i;
3443 int rc;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003444
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003445 tcon = tlink_tcon(ctx->cfile->tlink);
3446 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003447
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003448 mutex_lock(&ctx->aio_mutex);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003449
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003450 if (list_empty(&ctx->list)) {
3451 mutex_unlock(&ctx->aio_mutex);
3452 return;
3453 }
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003454
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003455 rc = ctx->rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003456 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003457again:
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003458 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
Jeff Layton1c892542012-05-16 07:13:17 -04003459 if (!rc) {
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003460 if (!try_wait_for_completion(&rdata->done)) {
3461 mutex_unlock(&ctx->aio_mutex);
3462 return;
3463 }
3464
3465 if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003466 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003467 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003468 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003469
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003470 list_del_init(&rdata->list);
3471 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003472
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003473 /*
3474 * Got a part of data and then reconnect has
3475 * happened -- fill the buffer and continue
3476 * reading.
3477 */
3478 if (got_bytes && got_bytes < rdata->bytes) {
Long Li6e6e2b82018-10-31 22:13:09 +00003479 rc = 0;
3480 if (!ctx->direct_io)
3481 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003482 if (rc) {
3483 kref_put(&rdata->refcount,
Long Li6e6e2b82018-10-31 22:13:09 +00003484 cifs_uncached_readdata_release);
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003485 continue;
3486 }
3487 }
3488
Long Li6e6e2b82018-10-31 22:13:09 +00003489 if (ctx->direct_io) {
3490 /*
3491 * Re-use rdata as this is a
3492 * direct I/O
3493 */
3494 rc = cifs_resend_rdata(
3495 rdata,
3496 &tmp_list, ctx);
3497 } else {
3498 rc = cifs_send_async_read(
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003499 rdata->offset + got_bytes,
3500 rdata->bytes - got_bytes,
3501 rdata->cfile, cifs_sb,
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003502 &tmp_list, ctx);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003503
Long Li6e6e2b82018-10-31 22:13:09 +00003504 kref_put(&rdata->refcount,
3505 cifs_uncached_readdata_release);
3506 }
3507
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003508 list_splice(&tmp_list, &ctx->list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003509
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003510 goto again;
3511 } else if (rdata->result)
3512 rc = rdata->result;
Long Li6e6e2b82018-10-31 22:13:09 +00003513 else if (!ctx->direct_io)
Jeff Layton1c892542012-05-16 07:13:17 -04003514 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003515
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003516 /* if there was a short read -- discard anything left */
3517 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3518 rc = -ENODATA;
Long Li6e6e2b82018-10-31 22:13:09 +00003519
3520 ctx->total_len += rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003521 }
3522 list_del_init(&rdata->list);
3523 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003525
Long Li6e6e2b82018-10-31 22:13:09 +00003526 if (!ctx->direct_io) {
3527 for (i = 0; i < ctx->npages; i++) {
3528 if (ctx->should_dirty)
3529 set_page_dirty(ctx->bv[i].bv_page);
3530 put_page(ctx->bv[i].bv_page);
3531 }
Al Viro7f25bba2014-02-04 14:07:43 -05003532
Long Li6e6e2b82018-10-31 22:13:09 +00003533 ctx->total_len = ctx->len - iov_iter_count(to);
3534 }
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003535
3536 cifs_stats_bytes_read(tcon, ctx->total_len);
Jeff Layton1c892542012-05-16 07:13:17 -04003537
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003538 /* mask nodata case */
3539 if (rc == -ENODATA)
3540 rc = 0;
3541
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003542 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3543
3544 mutex_unlock(&ctx->aio_mutex);
3545
3546 if (ctx->iocb && ctx->iocb->ki_complete)
3547 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
3548 else
3549 complete(&ctx->done);
3550}
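/*
 * collect_uncached_read_data() runs whenever a chunk completes (and
 * once from the submitting thread): it returns early if some chunk is
 * still in flight, resends chunks that failed with -EAGAIN after a
 * reconnect (reusing the rdata for direct I/O, re-reading the remaining
 * bytes otherwise), copies completed data into ctx->iter on the
 * buffered path, and finally completes the iocb or wakes the
 * synchronous waiter with the byte count in ctx->rc.
 */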
3551
Long Li6e6e2b82018-10-31 22:13:09 +00003552static ssize_t __cifs_readv(
3553 struct kiocb *iocb, struct iov_iter *to, bool direct)
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003554{
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003555 size_t len;
Long Li6e6e2b82018-10-31 22:13:09 +00003556 struct file *file = iocb->ki_filp;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003557 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003558 struct cifsFileInfo *cfile;
Long Li6e6e2b82018-10-31 22:13:09 +00003559 struct cifs_tcon *tcon;
3560 ssize_t rc, total_read = 0;
3561 loff_t offset = iocb->ki_pos;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003562 struct cifs_aio_ctx *ctx;
3563
Long Li6e6e2b82018-10-31 22:13:09 +00003564 /*
3565	 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
3566	 * so fall back to the data copy read path. This could be
3567	 * improved by getting pages directly in ITER_KVEC.
3568 */
3569 if (direct && to->type & ITER_KVEC) {
3570 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
3571 direct = false;
3572 }
3573
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003574 len = iov_iter_count(to);
3575 if (!len)
3576 return 0;
3577
3578 cifs_sb = CIFS_FILE_SB(file);
3579 cfile = file->private_data;
3580 tcon = tlink_tcon(cfile->tlink);
3581
3582 if (!tcon->ses->server->ops->async_readv)
3583 return -ENOSYS;
3584
3585 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3586 cifs_dbg(FYI, "attempting read on write only file instance\n");
3587
3588 ctx = cifs_aio_ctx_alloc();
3589 if (!ctx)
3590 return -ENOMEM;
3591
3592 ctx->cfile = cifsFileInfo_get(cfile);
3593
3594 if (!is_sync_kiocb(iocb))
3595 ctx->iocb = iocb;
3596
David Howells00e23702018-10-22 13:07:28 +01003597 if (iter_is_iovec(to))
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003598 ctx->should_dirty = true;
3599
Long Li6e6e2b82018-10-31 22:13:09 +00003600 if (direct) {
3601 ctx->pos = offset;
3602 ctx->direct_io = true;
3603 ctx->iter = *to;
3604 ctx->len = len;
3605 } else {
3606 rc = setup_aio_ctx_iter(ctx, to, READ);
3607 if (rc) {
3608 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3609 return rc;
3610 }
3611 len = ctx->len;
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003612 }
3613
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003614	/* grab a lock here because read response handlers can access ctx */
3615 mutex_lock(&ctx->aio_mutex);
3616
3617 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
3618
3619	/* if sending at least one read request succeeded, then reset rc */
3620 if (!list_empty(&ctx->list))
3621 rc = 0;
3622
3623 mutex_unlock(&ctx->aio_mutex);
3624
3625 if (rc) {
3626 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3627 return rc;
3628 }
3629
3630 if (!is_sync_kiocb(iocb)) {
3631 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3632 return -EIOCBQUEUED;
3633 }
3634
3635 rc = wait_for_completion_killable(&ctx->done);
3636 if (rc) {
3637 mutex_lock(&ctx->aio_mutex);
3638 ctx->rc = rc = -EINTR;
3639 total_read = ctx->total_len;
3640 mutex_unlock(&ctx->aio_mutex);
3641 } else {
3642 rc = ctx->rc;
3643 total_read = ctx->total_len;
3644 }
3645
3646 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3647
Al Viro0165e812014-02-04 14:19:48 -05003648 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003649 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003650 return total_read;
3651 }
3652 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003653}
3654
Long Li6e6e2b82018-10-31 22:13:09 +00003655ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
3656{
3657 return __cifs_readv(iocb, to, true);
3658}
3659
3660ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3661{
3662 return __cifs_readv(iocb, to, false);
3663}
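/*
 * Sketch of the assumed wiring, which lives outside this file: these
 * entry points back the ->read_iter file operation for the different
 * caching modes, roughly
 *
 *	.read_iter = cifs_direct_readv		cache=none / O_DIRECT
 *	.read_iter = cifs_user_readv		uncached reads
 *	.read_iter = cifs_strict_readv		cache=strict (below)
 */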
3664
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003665ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003666cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003667{
Al Viro496ad9a2013-01-23 17:07:38 -05003668 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003669 struct cifsInodeInfo *cinode = CIFS_I(inode);
3670 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3671 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3672 iocb->ki_filp->private_data;
3673 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3674 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003675
3676 /*
3677 * In strict cache mode we need to read from the server all the time
3678	 * if we don't have a level II oplock, because the server can delay the
3679	 * mtime change and so we can't decide whether to invalidate the inode.
3680	 * Page reading can also fail if there are mandatory locks on pages
3681	 * affected by this read but not on the region from pos to
3682	 * pos+len-1.
3683 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003684 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003685 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003686
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003687 if (cap_unix(tcon->ses) &&
3688 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3689 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003690 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003691
3692 /*
3693	 * We need to hold the sem to be sure nobody modifies the lock list
3694 * with a brlock that prevents reading.
3695 */
3696 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003697 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003698 tcon->ses->server->vals->shared_lock_type,
Ronnie Sahlberg96457592018-10-04 09:24:38 +10003699 0, NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003700 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003701 up_read(&cinode->lock_sem);
3702 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003703}
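/*
 * cifs_strict_readv() thus degrades in three steps: without a cached
 * read oplock it reads from the server via cifs_user_readv(); with
 * POSIX byte-range lock support it can trust the page cache and use
 * generic_file_read_iter(); otherwise it reads from the page cache only
 * after verifying, under lock_sem, that no mandatory brlock conflicts
 * with the requested range.
 */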
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003705static ssize_t
3706cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707{
3708 int rc = -EACCES;
3709 unsigned int bytes_read = 0;
3710 unsigned int total_read;
3711 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003712 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003713 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003714 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003715 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003716 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003717 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003718 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003719 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003720 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003721 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003723 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003724 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003726 /* FIXME: set up handlers for larger reads and/or convert to async */
3727 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3728
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303730 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003731 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303732 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003734 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003735 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003736 server = tcon->ses->server;
3737
3738 if (!server->ops->sync_read) {
3739 free_xid(xid);
3740 return -ENOSYS;
3741 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003743 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3744 pid = open_file->pid;
3745 else
3746 pid = current->tgid;
3747
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003749 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003751 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3752 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003753 do {
3754 current_read_size = min_t(uint, read_size - total_read,
3755 rsize);
3756 /*
3757	 * For Windows ME and 9x we do not want to request more
3758	 * than was negotiated, since the server will refuse the
3759	 * read otherwise.
3760 */
3761 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003762 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003763 current_read_size = min_t(uint,
3764 current_read_size, CIFSMaxBufSize);
3765 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003766 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003767 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768 if (rc != 0)
3769 break;
3770 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003771 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003772 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003773 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003774 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003775 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003776 &bytes_read, &cur_offset,
3777 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003778 } while (rc == -EAGAIN);
3779
Linus Torvalds1da177e2005-04-16 15:20:36 -07003780 if (rc || (bytes_read == 0)) {
3781 if (total_read) {
3782 break;
3783 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003784 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003785 return rc;
3786 }
3787 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003788 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003789 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790 }
3791 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003792 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793 return total_read;
3794}
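/*
 * cifs_read() is the legacy synchronous path: it walks the request in
 * rsize-sized pieces, capping each piece at CIFSMaxBufSize for servers
 * that did not negotiate large file support, reopens an invalidated
 * handle, and retries -EAGAIN on the same piece until the server
 * returns data or a hard error.
 */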
3795
Jeff Laytonca83ce32011-04-12 09:13:44 -04003796/*
3797 * If the page is mmap'ed into a process' page tables, then we need to make
3798 * sure that it doesn't change while being written back.
3799 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05303800static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08003801cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003802{
3803 struct page *page = vmf->page;
3804
3805 lock_page(page);
3806 return VM_FAULT_LOCKED;
3807}
3808
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003809static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003810 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003811 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003812 .page_mkwrite = cifs_page_mkwrite,
3813};
3814
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003815int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3816{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003817 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003818 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003819
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003820 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003821
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003822 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003823 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003824 if (!rc)
3825 rc = generic_file_mmap(file, vma);
3826 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003827 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003828
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003829 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003830 return rc;
3831}
3832
Linus Torvalds1da177e2005-04-16 15:20:36 -07003833int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3834{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835 int rc, xid;
3836
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003837 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003838
Jeff Laytonabab0952010-02-12 07:44:18 -05003839 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003840 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003841 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3842 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003843 if (!rc)
3844 rc = generic_file_mmap(file, vma);
3845 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003846 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003847
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003848 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003849 return rc;
3850}
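/*
 * Both mmap variants install cifs_file_vm_ops, whose page_mkwrite
 * handler merely locks the page and returns VM_FAULT_LOCKED: the page
 * is dirtied in the page cache and written back later by writepages.
 * The strict variant zaps the mapping first when no read oplock is
 * cached, while the plain variant revalidates the file instead.
 */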
3851
Jeff Layton0471ca32012-05-16 07:13:16 -04003852static void
3853cifs_readv_complete(struct work_struct *work)
3854{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003855 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003856 struct cifs_readdata *rdata = container_of(work,
3857 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003858
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003859 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003860 for (i = 0; i < rdata->nr_pages; i++) {
3861 struct page *page = rdata->pages[i];
3862
Jeff Layton0471ca32012-05-16 07:13:16 -04003863 lru_cache_add_file(page);
3864
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003865 if (rdata->result == 0 ||
3866 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003867 flush_dcache_page(page);
3868 SetPageUptodate(page);
3869 }
3870
3871 unlock_page(page);
3872
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003873 if (rdata->result == 0 ||
3874 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003875 cifs_readpage_to_fscache(rdata->mapping->host, page);
3876
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003877 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003878
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003879 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003880 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003881 }
Jeff Layton6993f742012-05-16 07:13:17 -04003882 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003883}
3884
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003885static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003886readpages_fill_pages(struct TCP_Server_Info *server,
3887 struct cifs_readdata *rdata, struct iov_iter *iter,
3888 unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003889{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003890 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003891 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003892 u64 eof;
3893 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003894 unsigned int nr_pages = rdata->nr_pages;
Long Li1dbe3462018-05-30 12:47:55 -07003895 unsigned int page_offset = rdata->page_offset;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003896
3897 /* determine the eof that the server (probably) has */
3898 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003899 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05003900 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003901
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003902 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003903 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003904 for (i = 0; i < nr_pages; i++) {
3905 struct page *page = rdata->pages[i];
Long Li1dbe3462018-05-30 12:47:55 -07003906 unsigned int to_read = rdata->pagesz;
3907 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003908
Long Li1dbe3462018-05-30 12:47:55 -07003909 if (i == 0)
3910 to_read -= page_offset;
3911 else
3912 page_offset = 0;
3913
3914 n = to_read;
3915
3916 if (len >= to_read) {
3917 len -= to_read;
Jeff Layton8321fec2012-09-19 06:22:32 -07003918 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003919 /* enough for partial page, fill and zero the rest */
Long Li1dbe3462018-05-30 12:47:55 -07003920 zero_user(page, len + page_offset, to_read - len);
Al Viro71335662016-01-09 19:54:50 -05003921 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07003922 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003923 } else if (page->index > eof_index) {
3924 /*
3925 * The VFS will not try to do readahead past the
3926 * i_size, but it's possible that we have outstanding
3927 * writes with gaps in the middle and the i_size hasn't
3928 * caught up yet. Populate those with zeroed out pages
3929 * to prevent the VFS from repeatedly attempting to
3930 * fill them until the writes are flushed.
3931 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003932 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003933 lru_cache_add_file(page);
3934 flush_dcache_page(page);
3935 SetPageUptodate(page);
3936 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003937 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003938 rdata->pages[i] = NULL;
3939 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003940 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003941 } else {
3942 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003943 lru_cache_add_file(page);
3944 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003945 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003946 rdata->pages[i] = NULL;
3947 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003948 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003949 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003950
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003951 if (iter)
Long Li1dbe3462018-05-30 12:47:55 -07003952 result = copy_page_from_iter(
3953 page, page_offset, n, iter);
Long Libd3dcc62017-11-22 17:38:47 -07003954#ifdef CONFIG_CIFS_SMB_DIRECT
3955 else if (rdata->mr)
3956 result = n;
3957#endif
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003958 else
Long Li1dbe3462018-05-30 12:47:55 -07003959 result = cifs_read_page_from_socket(
3960 server, page, page_offset, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003961 if (result < 0)
3962 break;
3963
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003964 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003965 }
3966
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003967 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3968 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003969}
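/*
 * readpages_fill_pages() consumes "len" bytes of server data into the
 * rdata page array: only the first page honours rdata->page_offset, a
 * short response zero-fills the tail of the last partial page, and
 * pages past the server's likely EOF are zeroed, marked uptodate and
 * released early. For SMB Direct (rdata->mr) the payload is assumed to
 * have been placed by RDMA already, so no copy is performed.
 */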
3970
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003971static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003972cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3973 struct cifs_readdata *rdata, unsigned int len)
3974{
3975 return readpages_fill_pages(server, rdata, NULL, len);
3976}
3977
3978static int
3979cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
3980 struct cifs_readdata *rdata,
3981 struct iov_iter *iter)
3982{
3983 return readpages_fill_pages(server, rdata, iter, iter->count);
3984}
3985
3986static int
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003987readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3988 unsigned int rsize, struct list_head *tmplist,
3989 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3990{
3991 struct page *page, *tpage;
3992 unsigned int expected_index;
3993 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07003994 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003995
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003996 INIT_LIST_HEAD(tmplist);
3997
Nikolay Borisovf86196e2019-01-03 15:29:02 -08003998 page = lru_to_page(page_list);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003999
4000 /*
4001 * Lock the page and put it in the cache. Since no one else
4002 * should have access to this page, we're safe to simply set
4003 * PG_locked without checking it first.
4004 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004005 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004006 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07004007 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004008
4009 /* give up if we can't stick it in the cache */
4010 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004011 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004012 return rc;
4013 }
4014
4015 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004016 *offset = (loff_t)page->index << PAGE_SHIFT;
4017 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004018 *nr_pages = 1;
4019 list_move_tail(&page->lru, tmplist);
4020
4021 /* now try and add more pages onto the request */
4022 expected_index = page->index + 1;
4023 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
4024 /* discontinuity ? */
4025 if (page->index != expected_index)
4026 break;
4027
4028 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004029 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004030 break;
4031
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004032 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07004033 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08004034 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004035 break;
4036 }
4037 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004038 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04004039 expected_index++;
4040 (*nr_pages)++;
4041 }
4042 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043}
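/*
 * readpages_get_pages() peels a contiguous run of pages off page_list
 * onto tmplist: it stops at the first index discontinuity, at the first
 * page that cannot be added to the page cache, or when the run would
 * exceed rsize, and reports the resulting offset, byte count and page
 * count to the caller.
 */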
4044
Linus Torvalds1da177e2005-04-16 15:20:36 -07004045static int cifs_readpages(struct file *file, struct address_space *mapping,
4046 struct list_head *page_list, unsigned num_pages)
4047{
Jeff Layton690c5e32011-10-19 15:30:16 -04004048 int rc;
4049 struct list_head tmplist;
4050 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04004051 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004052 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04004053 pid_t pid;
Steve French0cb012d2018-10-11 01:01:02 -05004054 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055
Steve French0cb012d2018-10-11 01:01:02 -05004056 xid = get_xid();
Jeff Layton690c5e32011-10-19 15:30:16 -04004057 /*
Suresh Jayaraman566982362010-07-05 18:13:25 +05304058 * Reads as many pages as possible from fscache. Returns -ENOBUFS
4059	 * immediately if the cookie is negative.
David Howells54afa992013-09-04 17:10:39 +00004060 *
4061 * After this point, every page in the list might have PG_fscache set,
4062	 * so we will need to clean it up from every page we don't use.
Suresh Jayaraman566982362010-07-05 18:13:25 +05304063 */
4064 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
4065 &num_pages);
Steve French0cb012d2018-10-11 01:01:02 -05004066 if (rc == 0) {
4067 free_xid(xid);
Jeff Layton690c5e32011-10-19 15:30:16 -04004068 return rc;
Steve French0cb012d2018-10-11 01:01:02 -05004069 }
Suresh Jayaraman566982362010-07-05 18:13:25 +05304070
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00004071 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4072 pid = open_file->pid;
4073 else
4074 pid = current->tgid;
4075
Jeff Layton690c5e32011-10-19 15:30:16 -04004076 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004077 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078
Joe Perchesf96637b2013-05-04 22:12:25 -05004079 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4080 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04004081
4082 /*
4083 * Start with the page at end of list and move it to private
4084 * list. Do the same with any following pages until we hit
4085 * the rsize limit, hit an index discontinuity, or run out of
4086 * pages. Issue the async read and then start the loop again
4087 * until the list is empty.
4088 *
4089 * Note that list order is important. The page_list is in
4090 * the order of declining indexes. When we put the pages in
4091 * the rdata->pages, then we want them in increasing order.
4092 */
4093 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004094 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04004095 loff_t offset;
4096 struct page *page, *tpage;
4097 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004098 unsigned credits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004100 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
4101 &rsize, &credits);
4102 if (rc)
4103 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104
Jeff Layton690c5e32011-10-19 15:30:16 -04004105 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004106 * Give up immediately if rsize is too small to read an entire
4107 * page. The VFS will fall back to readpage. We should never
4108 * reach this point however since we set ra_pages to 0 when the
4109 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04004110 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004111 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004112 add_credits_and_wake_if(server, credits, 0);
Steve French0cb012d2018-10-11 01:01:02 -05004113 free_xid(xid);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004114 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004117 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
4118 &nr_pages, &offset, &bytes);
4119 if (rc) {
4120 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04004122 }
4123
Jeff Layton0471ca32012-05-16 07:13:16 -04004124 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04004125 if (!rdata) {
4126 /* best to give up if we're out of mem */
4127 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4128 list_del(&page->lru);
4129 lru_cache_add_file(page);
4130 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004131 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04004132 }
4133 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004134 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04004135 break;
4136 }
4137
Jeff Layton6993f742012-05-16 07:13:17 -04004138 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04004139 rdata->mapping = mapping;
4140 rdata->offset = offset;
4141 rdata->bytes = bytes;
4142 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004143 rdata->pagesz = PAGE_SIZE;
Long Li1dbe3462018-05-30 12:47:55 -07004144 rdata->tailsz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07004145 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08004146 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004147 rdata->credits = credits;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004148
4149 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
4150 list_del(&page->lru);
4151 rdata->pages[rdata->nr_pages++] = page;
4152 }
Jeff Layton690c5e32011-10-19 15:30:16 -04004153
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004154 if (!rdata->cfile->invalidHandle ||
Germano Percossi1fa839b2017-04-07 12:29:38 +01004155 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04004156 rc = server->ops->async_readv(rdata);
4157 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04004158 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07004159 for (i = 0; i < rdata->nr_pages; i++) {
4160 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04004161 lru_cache_add_file(page);
4162 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004163 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004164 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04004165		/* Fall back to readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04004166 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004167 break;
4168 }
Jeff Layton6993f742012-05-16 07:13:17 -04004169
4170 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171 }
4172
David Howells54afa992013-09-04 17:10:39 +00004173 /* Any pages that have been shown to fscache but didn't get added to
4174 * the pagecache must be uncached before they get returned to the
4175 * allocator.
4176 */
4177 cifs_fscache_readpages_cancel(mapping->host, page_list);
Steve French0cb012d2018-10-11 01:01:02 -05004178 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179 return rc;
4180}
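/*
 * cifs_readpages() therefore services readahead in stages: satisfy what
 * it can from fscache, then repeatedly build an rsize-bounded
 * contiguous run with readpages_get_pages() and issue it through
 * server->ops->async_readv() under the credit system; on failure the
 * remaining pages go back to the LRU so the VFS falls back to readpage
 * for them.
 */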
4181
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01004182/*
4183 * cifs_readpage_worker must be called with the page pinned
4184 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185static int cifs_readpage_worker(struct file *file, struct page *page,
4186 loff_t *poffset)
4187{
4188 char *read_data;
4189 int rc;
4190
Suresh Jayaraman566982362010-07-05 18:13:25 +05304191 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05004192 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304193 if (rc == 0)
4194 goto read_complete;
4195
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 read_data = kmap(page);
4197	/* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004198
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004199 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004200
Linus Torvalds1da177e2005-04-16 15:20:36 -07004201 if (rc < 0)
4202 goto io_error;
4203 else
Joe Perchesf96637b2013-05-04 22:12:25 -05004204 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004205
Steve French9b9c5be2018-09-22 12:07:06 -05004206 /* we do not want atime to be less than mtime, it broke some apps */
4207 file_inode(file)->i_atime = current_time(file_inode(file));
4208 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4209 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4210 else
4211 file_inode(file)->i_atime = current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004212
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004213 if (PAGE_SIZE > rc)
4214 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215
4216 flush_dcache_page(page);
4217 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304218
4219 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05004220 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05304221
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004223
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004225 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004226 unlock_page(page);
Suresh Jayaraman566982362010-07-05 18:13:25 +05304227
4228read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229 return rc;
4230}
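/*
 * The worker above backs both readpage and the read side of
 * write_begin: it tries fscache first, then performs a synchronous
 * cifs_read() of one page, zeroes whatever the server did not return,
 * marks the page uptodate and feeds it back to fscache.
 */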
4231
4232static int cifs_readpage(struct file *file, struct page *page)
4233{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004234 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004236 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004237
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004238 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239
4240 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304241 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004242 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05304243 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244 }
4245
Joe Perchesf96637b2013-05-04 22:12:25 -05004246 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00004247 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248
4249 rc = cifs_readpage_worker(file, page, &offset);
4250
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004251 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252 return rc;
4253}
4254
Steve Frencha403a0a2007-07-26 15:54:16 +00004255static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4256{
4257 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05004258 struct cifs_tcon *tcon =
4259 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00004260
Steve French3afca262016-09-22 18:58:16 -05004261 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004262 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004263 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05004264 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004265 return 1;
4266 }
4267 }
Steve French3afca262016-09-22 18:58:16 -05004268 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004269 return 0;
4270}
4271
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272/* We do not want to update the file size from the server for inodes
4273   open for write - to avoid races with writepage extending
4274   the file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004275   refreshing the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276 but this is tricky to do without racing with writebehind
4277 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004278bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279{
Steve Frencha403a0a2007-07-26 15:54:16 +00004280 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004281 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004282
Steve Frencha403a0a2007-07-26 15:54:16 +00004283 if (is_inode_writable(cifsInode)) {
4284 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004285 struct cifs_sb_info *cifs_sb;
4286
Steve Frenchc32a0b62006-01-12 14:41:28 -08004287 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004288 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004289			/* since there is no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004290			we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004291 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004292 }
4293
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004294 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004295 return true;
Steve French7ba526312007-02-08 18:14:13 +00004296
Steve French4b18f2a2008-04-29 00:06:05 +00004297 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004298 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004299 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300}
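/*
 * In other words, a server-reported size is rejected only when the
 * inode is open for write on a cached (non-directio) mount and the new
 * end of file would not extend the locally cached i_size.
 */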
4301
Nick Piggind9414772008-09-24 11:32:59 -04004302static int cifs_write_begin(struct file *file, struct address_space *mapping,
4303 loff_t pos, unsigned len, unsigned flags,
4304 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305{
Sachin Prabhu466bd312013-09-13 14:11:57 +01004306 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004307 pgoff_t index = pos >> PAGE_SHIFT;
4308 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004309 loff_t page_start = pos & PAGE_MASK;
4310 loff_t i_size;
4311 struct page *page;
4312 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313
Joe Perchesf96637b2013-05-04 22:12:25 -05004314 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04004315
Sachin Prabhu466bd312013-09-13 14:11:57 +01004316start:
Nick Piggin54566b22009-01-04 12:00:53 -08004317 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004318 if (!page) {
4319 rc = -ENOMEM;
4320 goto out;
4321 }
Nick Piggind9414772008-09-24 11:32:59 -04004322
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004323 if (PageUptodate(page))
4324 goto out;
Steve French8a236262007-03-06 00:31:00 +00004325
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004326 /*
4327 * If we write a full page it will be up to date, no need to read from
4328 * the server. If the write is short, we'll end up doing a sync write
4329 * instead.
4330 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004331 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004332 goto out;
4333
4334 /*
4335 * optimize away the read when we have an oplock, and we're not
4336 * expecting to use any of the data we'd be reading in. That
4337 * is, when the page lies beyond the EOF, or straddles the EOF
4338 * and the write will cover all of the existing data.
4339 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004340 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004341 i_size = i_size_read(mapping->host);
4342 if (page_start >= i_size ||
4343 (offset == 0 && (pos + len) >= i_size)) {
4344 zero_user_segments(page, 0, offset,
4345 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004346 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004347 /*
4348 * PageChecked means that the parts of the page
4349 * to which we're not writing are considered up
4350 * to date. Once the data is copied to the
4351 * page, it can be set uptodate.
4352 */
4353 SetPageChecked(page);
4354 goto out;
4355 }
4356 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357
Sachin Prabhu466bd312013-09-13 14:11:57 +01004358 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004359 /*
4360 * might as well read a page, it is fast enough. If we get
4361 * an error, we don't need to return it. cifs_write_end will
4362 * do a sync write instead since PG_uptodate isn't set.
4363 */
4364 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004365 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01004366 oncethru = 1;
4367 goto start;
Steve French8a236262007-03-06 00:31:00 +00004368 } else {
4369		/* we could try using another file handle if there is one -
4370		   but how would we lock it to prevent a close of that handle
4371		   racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04004372		   this will be written out by write_end so it is fine */
Steve French8a236262007-03-06 00:31:00 +00004373 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00004374out:
4375 *pagep = page;
4376 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004377}
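/*
 * cifs_write_begin() avoids the read-modify-write read whenever it can:
 * a full-page write, an already uptodate page, or (with a cached read
 * oplock) a page beyond EOF or a write covering all of the page's
 * existing data lets it just zero the untouched parts and set
 * PageChecked. Only otherwise is the page read in, once, via
 * cifs_readpage_worker().
 */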
4378
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304379static int cifs_release_page(struct page *page, gfp_t gfp)
4380{
4381 if (PagePrivate(page))
4382 return 0;
4383
4384 return cifs_fscache_release_page(page, gfp);
4385}
4386
Lukas Czernerd47992f2013-05-21 23:17:23 -04004387static void cifs_invalidate_page(struct page *page, unsigned int offset,
4388 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304389{
4390 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4391
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004392 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304393 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4394}
4395
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004396static int cifs_launder_page(struct page *page)
4397{
4398 int rc = 0;
4399 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004400 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004401 struct writeback_control wbc = {
4402 .sync_mode = WB_SYNC_ALL,
4403 .nr_to_write = 0,
4404 .range_start = range_start,
4405 .range_end = range_end,
4406 };
4407
Joe Perchesf96637b2013-05-04 22:12:25 -05004408 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004409
4410 if (clear_page_dirty_for_io(page))
4411 rc = cifs_writepage_locked(page, &wbc);
4412
4413 cifs_fscache_invalidate_page(page, page->mapping->host);
4414 return rc;
4415}
4416
Tejun Heo9b646972010-07-20 22:09:02 +02004417void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04004418{
4419 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
4420 oplock_break);
David Howells2b0143b2015-03-17 22:25:59 +00004421 struct inode *inode = d_inode(cfile->dentry);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004422 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004423 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004424 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004425 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04004426
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004427 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
NeilBrown74316202014-07-07 15:16:04 +10004428 TASK_UNINTERRUPTIBLE);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004429
4430 server->ops->downgrade_oplock(server, cinode,
4431 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
4432
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004433 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004434 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05004435 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
4436 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004437 cinode->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04004438 }
4439
Jeff Layton3bc303c2009-09-21 06:47:50 -04004440 if (inode && S_ISREG(inode->i_mode)) {
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004441 if (CIFS_CACHE_READ(cinode))
Al Viro8737c932009-12-24 06:47:55 -05004442 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00004443 else
Al Viro8737c932009-12-24 06:47:55 -05004444 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004445 rc = filemap_fdatawrite(inode->i_mapping);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04004446 if (!CIFS_CACHE_READ(cinode)) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04004447 rc = filemap_fdatawait(inode->i_mapping);
4448 mapping_set_error(inode->i_mapping, rc);
Jeff Layton4f73c7d2014-04-30 09:31:47 -04004449 cifs_zap_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004450 }
Joe Perchesf96637b2013-05-04 22:12:25 -05004451 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004452 }
4453
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004454 rc = cifs_push_locks(cfile);
4455 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05004456 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04004457
Jeff Layton3bc303c2009-09-21 06:47:50 -04004458 /*
4459	 * Releasing a stale oplock after a recent reconnect of the SMB session,
4460	 * using a now incorrect file handle, is not a data integrity issue, but
4461	 * do not bother sending an oplock release if the session to the server
4462	 * is still disconnected, since the oplock was already released by the server
4463 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00004464 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07004465 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
4466 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05004467 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004468 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00004469 cifs_done_oplock_break(cinode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04004470}
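/*
 * Oplock break handling above: wait for pending writers to drain,
 * downgrade the cached oplock state, drop a read oplock that would
 * conflict with mandatory brlocks, flush (and, if reads can no longer
 * be cached, invalidate) the mapping, re-push byte-range locks, and
 * acknowledge the break to the server unless it was already cancelled.
 */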
4471
Steve Frenchdca69282013-11-11 16:42:37 -06004472/*
4473 * The presence of cifs_direct_io() in the address space ops vector
4474	 * allows open() O_DIRECT flags which would have failed otherwise.
4475 *
4476 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4477 * so this method should never be called.
4478 *
4479 * Direct IO is not yet supported in the cached mode.
4480 */
4481static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004482cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004483{
4484 /*
4485 * FIXME
4486 * Eventually need to support direct IO for non forcedirectio mounts
4487 */
4488 return -EINVAL;
4489}
4490
4491
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004492const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004493 .readpage = cifs_readpage,
4494 .readpages = cifs_readpages,
4495 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07004496 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004497 .write_begin = cifs_write_begin,
4498 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304500 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06004501 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304502 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004503 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004505
4506/*
4507 * cifs_readpages requires the server to support a buffer large enough to
4508 * contain the header plus one complete page of data. Otherwise, we need
4509 * to leave cifs_readpages out of the address space operations.
4510 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07004511const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004512 .readpage = cifs_readpage,
4513 .writepage = cifs_writepage,
4514 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04004515 .write_begin = cifs_write_begin,
4516 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004517 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304518 .releasepage = cifs_release_page,
4519 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004520 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004521};