blob: a8e2bc47dcf27b8e1449ecb6b260aa5d5673f8cd [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Long Libd3dcc62017-11-22 17:38:47 -070045#include "smbdirect.h"
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
/*
 * Open/create @full_path on the server via the SMB1 Unix/POSIX
 * extensions (CIFSPOSIXCreate).
 *
 * @pinode:  if non-NULL, on success the inode is instantiated from the
 *           returned FILE_UNIX_BASIC_INFO (when *pinode == NULL) or the
 *           existing inode's attributes are refreshed.
 * @poplock: oplock state granted by the server.
 * @pnetfid: network file handle returned by the server.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the caller's umask before sending the mode to the server */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type == -1 means the server returned no usable metadata */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* inode already exists; refresh its attributes in place */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
173
/*
 * Open @full_path using the regular (non-POSIX) NT create path via the
 * server's ops->open hook, then fetch inode metadata for it.  On a
 * metadata fetch failure the just-opened handle is closed again so no
 * server handle leaks.
 *
 * Returns 0 on success, -ENOSYS if the dialect has no open op, or a
 * negative errno (with -ESTALE mapped to -EOPENSTALE for the VFS
 * open retry logic).
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer for the FILE_ALL_INFO the open call returns */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		/* metadata fetch failed: close the handle we just opened */
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}
265
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400266static bool
267cifs_has_mand_locks(struct cifsInodeInfo *cinode)
268{
269 struct cifs_fid_locks *cur;
270 bool has_locks = false;
271
272 down_read(&cinode->lock_sem);
273 list_for_each_entry(cur, &cinode->llist, llist) {
274 if (!list_empty(&cur->locks)) {
275 has_locks = true;
276 break;
277 }
278 }
279 up_read(&cinode->lock_sem);
280 return has_locks;
281}
282
/*
 * Acquire @sem for write by polling: retry down_write_trylock() with a
 * 10ms sleep between attempts instead of blocking in down_write().
 */
void
cifs_down_write(struct rw_semaphore *sem)
{
	for (;;) {
		if (down_write_trylock(sem))
			return;
		msleep(10);
	}
}
289
/*
 * Allocate and initialize a cifsFileInfo for a freshly opened handle,
 * link it onto the tcon and inode open-file lists, resolve the final
 * oplock level (taking pending lease breaks and existing mandatory
 * brlocks into account) and attach it to @file->private_data.
 *
 * Returns the new cifsFileInfo, or NULL on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-handle byte-range lock list, hung off the inode's llist */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;	/* initial reference, dropped by cifsFileInfo_put() */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	/* lock order: tcon->open_file_lock, then cinode->open_file_lock */
	spin_lock(&tcon->open_file_lock);
	/* a lease break may have arrived while the open was in flight */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	/* set_fid() may have flagged the pagecache for invalidation */
	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
366
/*
 * Take an extra reference on @cifs_file (under file_info_lock) and
 * return it.  Paired with cifsFileInfo_put().
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
375
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * @cifs_file: the file info to release
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true);
}
385
/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock and
 * cifs_file->file_info_lock.
 *
 * If @wait_for_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	/*
	 * Lock order here: tcon->open_file_lock, then
	 * cifsi->open_file_lock, then cifs_file->file_info_lock.
	 */
	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		/* not the last reference; nothing else to tear down */
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close  because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	/* see function kerneldoc: callers in the break handler pass false */
	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	/* drop the references taken in cifs_new_fileinfo() */
	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
486
/*
 * VFS ->open() entry point.  Tries the SMB1 Unix/POSIX open first when
 * the tcon advertises POSIX path operations, falling back to the
 * regular NT open (cifs_nt_open) otherwise, then builds the
 * cifsFileInfo for the handle.  A pending-open record is registered
 * before the handle exists so a lease break racing with the open is
 * not missed.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* strict-cache O_DIRECT opens get the direct file operations */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server rejects the POSIX open call outright:
			   disable it for the rest of this tcon's life */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* register the open before the handle exists; see header comment */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server open and the pending-open record */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
613
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 *
 * Picks the POSIX or mandatory push path based on the tcon's Unix
 * fcntl capability and the noposixbrl mount flag.  Returns 0 on
 * success or the error from the push helper.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* _nested: lock_sem may already be held elsewhere on this task */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
645
/*
 * Reopen a file whose handle was invalidated (e.g. by session reconnect).
 * If @can_flush is true, dirty pages are written back and the inode metadata
 * is refreshed from the server after the reopen succeeds.  Returns 0 on
 * success or a negative error code.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	/* fh_mutex serializes reopen attempts on this handle */
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened it - nothing to do */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* prefer the POSIX open path when the server supports it */
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		/* interrupted writeback is retried later; don't taint mapping */
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	/* re-push byte-range locks that the server forgot across reconnect */
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
804
805int cifs_close(struct inode *inode, struct file *file)
806{
Jeff Layton77970692011-04-05 16:23:47 -0700807 if (file->private_data != NULL) {
808 cifsFileInfo_put(file->private_data);
809 file->private_data = NULL;
810 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700811
Steve Frenchcdff08e2010-10-21 22:46:14 +0000812 /* return code from the ->release op is always ignored */
813 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814}
815
Steve French52ace1e2016-09-22 19:23:56 -0500816void
817cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
818{
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700819 struct cifsFileInfo *open_file;
Steve French52ace1e2016-09-22 19:23:56 -0500820 struct list_head *tmp;
821 struct list_head *tmp1;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700822 struct list_head tmp_list;
823
Pavel Shilovsky96a988f2016-11-29 11:31:23 -0800824 if (!tcon->use_persistent || !tcon->need_reopen_files)
825 return;
826
827 tcon->need_reopen_files = false;
828
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700829 cifs_dbg(FYI, "Reopen persistent handles");
830 INIT_LIST_HEAD(&tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500831
832 /* list all files open on tree connection, reopen resilient handles */
833 spin_lock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700834 list_for_each(tmp, &tcon->openFileList) {
Steve French52ace1e2016-09-22 19:23:56 -0500835 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700836 if (!open_file->invalidHandle)
837 continue;
838 cifsFileInfo_get(open_file);
839 list_add_tail(&open_file->rlist, &tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500840 }
841 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700842
843 list_for_each_safe(tmp, tmp1, &tmp_list) {
844 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
Pavel Shilovsky96a988f2016-11-29 11:31:23 -0800845 if (cifs_reopen_file(open_file, false /* do not flush */))
846 tcon->need_reopen_files = true;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700847 list_del_init(&open_file->rlist);
848 cifsFileInfo_put(open_file);
849 }
Steve French52ace1e2016-09-22 19:23:56 -0500850}
851
/*
 * ->release() for cifs directories: close the search handle on the server
 * (if still needed), free any cached network buffer from the in-progress
 * readdir, and release the private cifsFileInfo.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	/* file_info_lock guards invalidHandle against concurrent users */
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* free the SMB response buffer still held by an unfinished search */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
902
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400903static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300904cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000905{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400906 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000907 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400908 if (!lock)
909 return lock;
910 lock->offset = offset;
911 lock->length = length;
912 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400913 lock->pid = current->tgid;
914 INIT_LIST_HEAD(&lock->blist);
915 init_waitqueue_head(&lock->block_q);
916 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400917}
918
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700919void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400920cifs_del_lock_waiters(struct cifsLockInfo *lock)
921{
922 struct cifsLockInfo *li, *tmp;
923 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
924 list_del_init(&li->blist);
925 wake_up(&li->block_q);
926 }
927}
928
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
/*
 * Scan one file handle's lock list for a lock overlapping
 * [offset, offset+length) that conflicts with a lock of @type requested
 * through @cfile.  On conflict, store the conflicting lock in *conf_lock
 * (if non-NULL) and return true.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip locks that don't overlap the requested range */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		/*
		 * For read/write checks, our own locks taken through the same
		 * fid don't conflict - except a shared lock still blocks a
		 * write through that fid.
		 */
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* shared locks are compatible with our own/other shared locks */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
964
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700965bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300966cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700967 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400968 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400969{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300970 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700971 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +0000972 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300973
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700974 list_for_each_entry(cur, &cinode->llist, llist) {
975 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700976 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300977 if (rc)
978 break;
979 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300980
981 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400982}
983
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300984/*
985 * Check if there is another lock that prevents us to set the lock (mandatory
986 * style). If such a lock exists, update the flock structure with its
987 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
988 * or leave it the same if we can't. Returns 0 if we don't need to request to
989 * the server or 1 otherwise.
990 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400991static int
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300992cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
993 __u8 type, struct file_lock *flock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400994{
995 int rc = 0;
996 struct cifsLockInfo *conf_lock;
David Howells2b0143b2015-03-17 22:25:59 +0000997 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky106dc532012-02-28 14:23:34 +0300998 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400999 bool exist;
1000
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001001 down_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001002
Pavel Shilovsky55157df2012-02-28 14:04:17 +03001003 exist = cifs_find_lock_conflict(cfile, offset, length, type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04001004 &conf_lock, CIFS_LOCK_OP);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001005 if (exist) {
1006 flock->fl_start = conf_lock->offset;
1007 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
1008 flock->fl_pid = conf_lock->pid;
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001009 if (conf_lock->type & server->vals->shared_lock_type)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001010 flock->fl_type = F_RDLCK;
1011 else
1012 flock->fl_type = F_WRLCK;
1013 } else if (!cinode->can_cache_brlcks)
1014 rc = 1;
1015 else
1016 flock->fl_type = F_UNLCK;
1017
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001018 up_read(&cinode->lock_sem);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001019 return rc;
1020}
1021
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001022static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001023cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001024{
David Howells2b0143b2015-03-17 22:25:59 +00001025 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Dave Wysochanski80b42f42019-10-23 05:02:33 -04001026 cifs_down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001027 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001028 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001029}
1030
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed - take the lock locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue on the conflicting lock's blocked list and sleep until
		 * cifs_del_lock_waiters() unhooks us (blist becomes empty,
		 * i.e. points at itself), then retry the whole check.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - remove ourselves from the blocked list */
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1077
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001078/*
1079 * Check if there is another lock that prevents us to set the lock (posix
1080 * style). If such a lock exists, update the flock structure with its
1081 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1082 * or leave it the same if we can't. Returns 0 if we don't need to request to
1083 * the server or 1 otherwise.
1084 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001085static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001086cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1087{
1088 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001089 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001090 unsigned char saved_type = flock->fl_type;
1091
Pavel Shilovsky50792762011-10-29 17:17:57 +04001092 if ((flock->fl_flags & FL_POSIX) == 0)
1093 return 1;
1094
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001095 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001096 posix_test_lock(file, flock);
1097
1098 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1099 flock->fl_type = saved_type;
1100 rc = 1;
1101 }
1102
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001103 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001104 return rc;
1105}
1106
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* caching disabled - caller must send the lock to the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/* blocked on another lock - wait for it, then retry */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		/* interrupted - take ourselves off the blocked list */
		posix_unblock_lock(flock);
	}
	return rc;
}
1139
/*
 * Push all locally cached mandatory byte-range locks for @cfile to the
 * server, batching them into LOCKING_ANDX_RANGE arrays sized to fit the
 * negotiated buffer.  Exclusive and shared locks are sent in separate
 * passes because each request carries a single lock type.  Returns 0 or
 * the last error seen from the server.
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	/* clamp so the batch buffer never exceeds one page */
	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	/* pass 0: exclusive locks, pass 1: shared locks */
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* buffer full - flush this batch to the server */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* flush the final partial batch, if any */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1216
Jeff Layton3d224622016-05-24 06:27:44 -04001217static __u32
1218hash_lockowner(fl_owner_t owner)
1219{
1220 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1221}
1222
/*
 * Snapshot of one POSIX byte-range lock, copied out of the inode's lock
 * list so it can be sent to the server (see cifs_push_posix_locks).
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the locks_to_send list */
	__u64 offset;		/* start of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* lock owner id sent to the server */
	__u16 netfid;		/* file handle the lock applies to */
	__u8 type;		/* read/write lock type */
};
1231
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001232static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001233cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001234{
David Howells2b0143b2015-03-17 22:25:59 +00001235 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001236 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001237 struct file_lock *flock;
1238 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001239 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001240 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001241 struct list_head locks_to_send, *el;
1242 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001243 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001244
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001245 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001246
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001247 if (!flctx)
1248 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001249
Jeff Laytone084c1b2015-02-16 14:32:03 -05001250 spin_lock(&flctx->flc_lock);
1251 list_for_each(el, &flctx->flc_posix) {
1252 count++;
1253 }
1254 spin_unlock(&flctx->flc_lock);
1255
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001256 INIT_LIST_HEAD(&locks_to_send);
1257
1258 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001259 * Allocating count locks is enough because no FL_POSIX locks can be
1260 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001261 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001262 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001263 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001264 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1265 if (!lck) {
1266 rc = -ENOMEM;
1267 goto err_out;
1268 }
1269 list_add_tail(&lck->llist, &locks_to_send);
1270 }
1271
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001272 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001273 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001274 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001275 if (el == &locks_to_send) {
1276 /*
1277 * The list ended. We don't have enough allocated
1278 * structures - something is really wrong.
1279 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001280 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001281 break;
1282 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001283 length = 1 + flock->fl_end - flock->fl_start;
1284 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1285 type = CIFS_RDLCK;
1286 else
1287 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001288 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001289 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001290 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001291 lck->length = length;
1292 lck->type = type;
1293 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001294 }
Jeff Layton6109c852015-01-16 15:05:57 -05001295 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001296
1297 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001298 int stored_rc;
1299
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001300 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001301 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001302 lck->type, 0);
1303 if (stored_rc)
1304 rc = stored_rc;
1305 list_del(&lck->llist);
1306 kfree(lck);
1307 }
1308
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001309out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001310 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001311 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001312err_out:
1313 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1314 list_del(&lck->llist);
1315 kfree(lck);
1316 }
1317 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001318}
1319
/*
 * Push all cached byte-range locks for @cfile to the server, then mark
 * the inode as no longer caching brlocks.  POSIX locks are used when
 * the server advertises the Unix FCNTL capability and posix brlocks are
 * not disabled by the mount flags; otherwise the protocol-specific
 * mandatory-lock push operation is used.
 */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* Locks were already pushed for this inode - nothing to do. */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		/* NOTE(review): assumes push_mand_locks is non-NULL for
		 * non-POSIX servers - defined by the protocol ops table. */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
1346
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001347static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001348cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001349 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001351 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001352 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001353 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001354 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001355 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001356 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001357 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001359 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001360 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001361 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001362 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001363 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001364 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1365 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001366 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001368 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001369 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001370 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001371 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001372 *lock = 1;
1373 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001374 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001375 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001376 *unlock = 1;
1377 /* Check if unlock includes more than one lock range */
1378 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001379 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001380 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001381 *lock = 1;
1382 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001383 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001384 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001385 *lock = 1;
1386 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001387 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001388 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001389 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001391 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001392}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393
/*
 * Handle an F_GETLK-style query: report whether @flock's range conflicts
 * with an existing lock.  For POSIX-capable mounts the query is answered
 * locally and then, if needed, by the server.  For mandatory locking we
 * probe by actually taking and immediately releasing a lock of the
 * requested type, then (if that fails) a shared lock, and encode the
 * answer in flock->fl_type (F_UNLCK = no conflict).
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* A zero return means the question was answered locally. */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* First consult the locally cached mandatory locks. */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* Probe lock succeeded - no conflict; undo it. */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* A shared probe failed: an exclusive lock is in the way. */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/*
	 * The exclusive probe failed; retry with a shared lock to tell a
	 * read-lock conflict apart from a write-lock conflict.
	 */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1462
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001463void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001464cifs_move_llist(struct list_head *source, struct list_head *dest)
1465{
1466 struct list_head *li, *tmp;
1467 list_for_each_safe(li, tmp, source)
1468 list_move(li, dest);
1469}
1470
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001471void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001472cifs_free_llist(struct list_head *llist)
1473{
1474 struct cifsLockInfo *li, *tmp;
1475 list_for_each_entry_safe(li, tmp, llist, llist) {
1476 cifs_del_lock_waiters(li);
1477 list_del(&li->llist);
1478 kfree(li);
1479 }
1480}
1481
/*
 * Unlock every cached mandatory lock of the current process that lies
 * inside @flock's range.  Locks are batched into a LOCKING_ANDX_RANGE
 * array (at most max_num per request) and sent with cifs_lockv(), one
 * pass per lock type (exclusive, then shared).  Locks being unlocked are
 * parked on tmp_llist so they can be re-added to the file's list if the
 * server rejects the unlock request.  Runs with cinode->lock_sem held
 * for write.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	/* How many lock ranges fit in one request buffer. */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	/* One pass per lock type: types[0] exclusive, types[1] shared. */
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* Skip locks not fully contained in the unlock range. */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* Buffer full - flush this batch now. */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* Flush the final partial batch for this type. */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1594
/*
 * Handle an F_SETLK/F_SETLKW-style request: set or clear a byte-range
 * lock.  POSIX-capable mounts go through CIFSSMBPosixLock(); mandatory
 * locking first records the lock locally (cifs_lock_add_if) and only
 * sends it to the server when it cannot be cached.  FL_POSIX requests
 * are additionally recorded in the VFS lock table on success.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		/* rc == 0 or a negative error means the request is done. */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/*
		 * rc < 0: error; rc == 0: lock was cached locally, done;
		 * rc > 0: must be sent to the server.
		 */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* Server accepted the lock - record it on the file's list. */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
1687
1688int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1689{
1690 int rc, xid;
1691 int lock = 0, unlock = 0;
1692 bool wait_flag = false;
1693 bool posix_lck = false;
1694 struct cifs_sb_info *cifs_sb;
1695 struct cifs_tcon *tcon;
1696 struct cifsInodeInfo *cinode;
1697 struct cifsFileInfo *cfile;
1698 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001699 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001700
1701 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001702 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001703
Joe Perchesf96637b2013-05-04 22:12:25 -05001704 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1705 cmd, flock->fl_flags, flock->fl_type,
1706 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001707
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001708 cfile = (struct cifsFileInfo *)file->private_data;
1709 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001710
1711 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1712 tcon->ses->server);
1713
Al Viro7119e222014-10-22 00:25:12 -04001714 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001715 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001716 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001717
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001718 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001719 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1720 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1721 posix_lck = true;
1722 /*
1723 * BB add code here to normalize offset and length to account for
1724 * negative length which we can not accept over the wire.
1725 */
1726 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001727 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001728 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001729 return rc;
1730 }
1731
1732 if (!lock && !unlock) {
1733 /*
1734 * if no lock or unlock then nothing to do since we do not
1735 * know what it is
1736 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001737 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001738 return -EOPNOTSUPP;
1739 }
1740
1741 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1742 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001743 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 return rc;
1745}
1746
Jeff Layton597b0272012-03-23 14:40:56 -04001747/*
1748 * update the file size (if needed) after a write. Should be called with
1749 * the inode->i_lock held
1750 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001751void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001752cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1753 unsigned int bytes_written)
1754{
1755 loff_t end_of_write = offset + bytes_written;
1756
1757 if (end_of_write > cifsi->server_eof)
1758 cifsi->server_eof = end_of_write;
1759}
1760
/*
 * Synchronously write @write_size bytes from @write_data to the server
 * at *@offset using the protocol's sync_write operation, reopening the
 * handle and retrying on -EAGAIN.  On success advances *@offset, updates
 * the cached server EOF and the inode size, and returns the number of
 * bytes written; on total failure returns a negative errno.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	/* NOTE(review): cifs_sb appears unused below - candidate for removal */
	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	/* Loop until everything is written or a non-retryable error occurs. */
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* Cap each request at the server's retryable size. */
			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* Return partial progress if any, else the error. */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* Extend the in-core inode size if we wrote past it. */
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1845
/*
 * Find an open, valid handle on @cifs_inode that was opened with read
 * access.  On success the handle is returned with an extra reference
 * (cifsFileInfo_get) so it cannot be closed under the caller; returns
 * NULL when no usable handle exists.  @fsuid_only restricts the search
 * to handles owned by the current fsuid, but only on multiuser mounts.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001879
/*
 * Find an open handle on @cifs_inode that was opened with write access.
 *
 * Search order:
 *  1. valid writable handles belonging to the current thread group;
 *  2. (after setting any_available) valid writable handles from any owner;
 *  3. as a last resort, an invalidated writable handle (inv_file) which we
 *     try to reopen via cifs_reopen_file(), retrying up to MAX_REOPEN_ATT
 *     times.
 *
 * @fsuid_only restricts the search to handles opened by the current fsuid,
 * but only on multiuser mounts.
 *
 * The returned handle carries an extra reference (cifsFileInfo_get) that the
 * caller must drop.  Returns NULL when no writable handle can be found or
 * revived.
 *
 * Locking: open_file_lock is held while walking openFileList and is
 * deliberately dropped around cifs_reopen_file() (which may sleep), then
 * reacquired before jumping back to refind_writable.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	/* give up after MAX_REOPEN_ATT failed reopen attempts */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} else {
				/* remember the first stale handle as a
				   reopen candidate of last resort */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		/* pin inv_file so it survives the unlocked reopen below */
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			/* reopen failed: park the handle at the tail so the
			   next scan tries other entries first, drop our
			   reference, and rescan */
			spin_lock(&cifs_inode->open_file_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&cifs_inode->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&cifs_inode->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}
1960
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1962{
1963 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001964 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 char *write_data;
1966 int rc = -EFAULT;
1967 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001969 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
1971 if (!mapping || !mapping->host)
1972 return -EFAULT;
1973
1974 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
1976 offset += (loff_t)from;
1977 write_data = kmap(page);
1978 write_data += from;
1979
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001980 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 kunmap(page);
1982 return -EIO;
1983 }
1984
1985 /* racing with truncate? */
1986 if (offset > mapping->host->i_size) {
1987 kunmap(page);
1988 return 0; /* don't care */
1989 }
1990
1991 /* check to make sure that we are not extending the file */
1992 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001993 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994
Jeff Layton6508d902010-09-29 19:51:11 -04001995 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001996 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001997 bytes_written = cifs_write(open_file, open_file->pid,
1998 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001999 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07002001 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002002 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07002003 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00002004 else if (bytes_written < 0)
2005 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07002006 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05002007 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 rc = -EIO;
2009 }
2010
2011 kunmap(page);
2012 return rc;
2013}
2014
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002015static struct cifs_writedata *
2016wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2017 pgoff_t end, pgoff_t *index,
2018 unsigned int *found_pages)
2019{
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002020 struct cifs_writedata *wdata;
2021
2022 wdata = cifs_writedata_alloc((unsigned int)tofind,
2023 cifs_writev_complete);
2024 if (!wdata)
2025 return NULL;
2026
Jan Kara9c19a9c2017-11-15 17:35:26 -08002027 *found_pages = find_get_pages_range_tag(mapping, index, end,
2028 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002029 return wdata;
2030}
2031
/*
 * From the @found_pages pages already collected in @wdata, lock and claim a
 * run of consecutive dirty pages suitable for a single write request.
 *
 * Each accepted page is locked, has its dirty bit cleared
 * (clear_page_dirty_for_io) and is marked under writeback.  The scan stops
 * at the first page that is non-consecutive, no longer belongs to @mapping,
 * lies past @end / past EOF, or cannot be claimed.
 *
 * Side effects on out-parameters:
 *   *@next  - index expected for the following page (consecutiveness check);
 *   *@done  - set when writeback for this range should stop entirely;
 *   *@index - reset past the first found page when nothing was claimed, so
 *             the caller rescans from there.
 *
 * Pages not claimed are released (put_page) and NULLed in wdata->pages.
 * Returns the number of pages claimed (possibly 0).
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */

		/* block on the first page only; never stall mid-batch */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		/* for data-integrity syncs, wait out an in-flight write */
		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2110
/*
 * Finish filling in @wdata for the @nr_pages pages prepared by
 * wdata_prepare_pages() and submit it as an asynchronous write.
 *
 * The tail page is typically partial: tailsz is clamped to the bytes
 * between its offset and EOF, and wdata->bytes accounts for full pages
 * plus that tail.
 *
 * A writable handle is (re)acquired here — any previously cached
 * wdata->cfile is dropped first.  All pages are unlocked before return,
 * whether or not the send succeeded (writeback state is cleaned up by the
 * caller on error, or by the completion handler on success).
 *
 * Returns 0 on successful submission, -EBADF when no writable handle
 * exists, or a negative error from the transport's async_writev.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* last page may extend past EOF; send only the valid bytes */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
2145
/*
 * ->writepages() for cifs: write dirty pages of @mapping back to the server
 * in wsize-sized batches via asynchronous writes.
 *
 * Per iteration: obtain send credits (wait_mtu_credits), gather up to
 * tofind dirty pages, trim them to a consecutive locked run
 * (wdata_prepare_pages) and submit (wdata_send_pages).  Credits are
 * returned on every early-exit path that does not hand them to a wdata.
 *
 * Error policy: retryable errors redirty the pages; -EAGAIN under
 * WB_SYNC_ALL restarts the same range; an interrupt error stops
 * immediately; the first other error is remembered in saved_rc and
 * reported after the scan completes.  For cyclic writeback the scan wraps
 * to the start of the file once (scanned flag).
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;
	int saved_rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		/* block until the server grants credits for up to wsize */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc != 0) {
			done = true;
			break;
		}

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			done = true;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (is_retryable_error(rc))
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (!is_retryable_error(rc))
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* data-integrity sync must not skip a failed range */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		/* Return immediately if we received a signal during writing */
		if (is_interrupt_error(rc)) {
			done = true;
			break;
		}

		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (saved_rc != 0)
		rc = saved_rc;

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274
/*
 * Write a single (already locked) page back to the server.
 *
 * Marks the page under writeback, pushes the whole page via
 * cifs_partialpagewrite(), then clears writeback.  Retryable errors
 * redirty the page; under WB_SYNC_ALL an -EAGAIN is retried in place.
 * Non-retryable errors set PageError and record the error on the mapping.
 *
 * The page is returned still locked; callers (e.g. cifs_writepage) unlock
 * it.  An extra page reference is held across the operation.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (is_retryable_error(rc)) {
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
			goto retry_write;
		redirty_page_for_writepage(wbc, page);
	} else if (rc != 0) {
		SetPageError(page);
		mapping_set_error(page->mapping, rc);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2315
/*
 * ->writepage() entry point: write the page out via the locked variant,
 * then release the page lock (cifs_writepage_locked() leaves it held).
 */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);

	return rc;
}
2322
/*
 * ->write_end() for cifs: commit @copied bytes written into @page at @pos.
 *
 * If the page ends up fully uptodate (PageChecked short-circuit from
 * write_begin, or a whole-page copy), the data is simply left dirty in the
 * page cache.  Otherwise the copied range is written synchronously to the
 * server via cifs_write() using the open handle from @file (with the
 * originating pid when the mount forwards pids for read/write).
 *
 * On success the inode size is extended under i_lock if the write grew the
 * file.  The page is unlocked and released before returning the number of
 * bytes committed (or a negative error from cifs_write()).
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the writer's pid to the server if the mount asks for it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	/* PageChecked is set by write_begin when it skipped reading the
	   page; a full-length copy now makes the page uptodate */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2383
/*
 * fsync for strict cache mode: flush dirty pages in [start, end], and if we
 * do not hold a read (cache) lease on the inode, zap the pagecache so stale
 * cached data cannot be served later.  Finally ask the server to flush its
 * copy (server->ops->flush) unless the mount disables server-side sync
 * (CIFS_MOUNT_NOSSYNC).
 *
 * A failure to zap the cache is logged but deliberately not reported — the
 * data flush is what fsync guarantees.  Runs under inode_lock.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = file_write_and_wait_range(file, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	/* no read caching rights: drop possibly-stale cached pages */
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2426
Josef Bacik02c24a82011-07-16 20:44:56 -04002427int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002428{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002429 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002430 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002431 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002432 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002433 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002434 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002435 struct inode *inode = file->f_mapping->host;
2436
Jeff Layton3b49c9a2017-07-07 15:20:52 -04002437 rc = file_write_and_wait_range(file, start, end);
Josef Bacik02c24a82011-07-16 20:44:56 -04002438 if (rc)
2439 return rc;
Al Viro59551022016-01-22 15:40:57 -05002440 inode_lock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002441
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002442 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002443
Al Viro35c265e2014-08-19 20:25:34 -04002444 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2445 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002446
2447 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002448 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2449 server = tcon->ses->server;
2450 if (server->ops->flush)
2451 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2452 else
2453 rc = -ENOSYS;
2454 }
Steve Frenchb298f222009-02-21 21:17:43 +00002455
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002456 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002457 inode_unlock(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 return rc;
2459}
2460
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461/*
2462 * As file closes, flush all cached write data for this inode checking
2463 * for write behind errors.
2464 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002465int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466{
Al Viro496ad9a2013-01-23 17:07:38 -05002467 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 int rc = 0;
2469
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002470 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002471 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002472
Joe Perchesf96637b2013-05-04 22:12:25 -05002473 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474
2475 return rc;
2476}
2477
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002478static int
2479cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2480{
2481 int rc = 0;
2482 unsigned long i;
2483
2484 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002485 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002486 if (!pages[i]) {
2487 /*
2488 * save number of pages we have already allocated and
2489 * return with ENOMEM error
2490 */
2491 num_pages = i;
2492 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002493 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002494 }
2495 }
2496
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002497 if (rc) {
2498 for (i = 0; i < num_pages; i++)
2499 put_page(pages[i]);
2500 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002501 return rc;
2502}
2503
2504static inline
2505size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2506{
2507 size_t num_pages;
2508 size_t clen;
2509
2510 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002511 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002512
2513 if (cur_len)
2514 *cur_len = clen;
2515
2516 return num_pages;
2517}
2518
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002519static void
Steve French4a5c80d2014-02-07 20:45:12 -06002520cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002521{
2522 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002523 struct cifs_writedata *wdata = container_of(refcount,
2524 struct cifs_writedata, refcount);
2525
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002526 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
Steve French4a5c80d2014-02-07 20:45:12 -06002527 for (i = 0; i < wdata->nr_pages; i++)
2528 put_page(wdata->pages[i]);
2529 cifs_writedata_release(refcount);
2530}
2531
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002532static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
2533
Steve French4a5c80d2014-02-07 20:45:12 -06002534static void
2535cifs_uncached_writev_complete(struct work_struct *work)
2536{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002537 struct cifs_writedata *wdata = container_of(work,
2538 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002539 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002540 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2541
2542 spin_lock(&inode->i_lock);
2543 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2544 if (cifsi->server_eof > inode->i_size)
2545 i_size_write(inode, cifsi->server_eof);
2546 spin_unlock(&inode->i_lock);
2547
2548 complete(&wdata->done);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002549 collect_uncached_write_data(wdata->ctx);
2550 /* the below call can possibly free the last ref to aio ctx */
Steve French4a5c80d2014-02-07 20:45:12 -06002551 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002552}
2553
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002554static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002555wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2556 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002557{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002558 size_t save_len, copied, bytes, cur_len = *len;
2559 unsigned long i, nr_pages = *num_pages;
2560
2561 save_len = cur_len;
2562 for (i = 0; i < nr_pages; i++) {
2563 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2564 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2565 cur_len -= copied;
2566 /*
2567 * If we didn't copy as much as we expected, then that
2568 * may mean we trod into an unmapped area. Stop copying
2569 * at that point. On the next pass through the big
2570 * loop, we'll likely end up getting a zero-length
2571 * write and bailing out of it.
2572 */
2573 if (copied < bytes)
2574 break;
2575 }
2576 cur_len = save_len - cur_len;
2577 *len = cur_len;
2578
2579 /*
2580 * If we have no data to send, then that probably means that
2581 * the copy above failed altogether. That's most likely because
2582 * the address in the iovec was bogus. Return -EFAULT and let
2583 * the caller free anything we allocated and bail out.
2584 */
2585 if (!cur_len)
2586 return -EFAULT;
2587
2588 /*
2589 * i + 1 now represents the number of pages we actually used in
2590 * the copy phase above.
2591 */
2592 *num_pages = i + 1;
2593 return 0;
2594}
2595
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002596static int
2597cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2598 struct cifsFileInfo *open_file,
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002599 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
2600 struct cifs_aio_ctx *ctx)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002601{
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002602 int rc = 0;
2603 size_t cur_len;
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002604 unsigned long nr_pages, num_pages, i;
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002605 struct cifs_writedata *wdata;
Al Virofc56b982016-09-21 18:18:23 -04002606 struct iov_iter saved_from = *from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002607 loff_t saved_offset = offset;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002608 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002609 struct TCP_Server_Info *server;
2610
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002611 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2612 pid = open_file->pid;
2613 else
2614 pid = current->tgid;
2615
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002616 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002617
2618 do {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002619 unsigned int wsize, credits;
2620
2621 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2622 &wsize, &credits);
2623 if (rc)
2624 break;
2625
2626 nr_pages = get_numpages(wsize, len, &cur_len);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002627 wdata = cifs_writedata_alloc(nr_pages,
2628 cifs_uncached_writev_complete);
2629 if (!wdata) {
2630 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002631 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002632 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002633 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002634
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002635 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2636 if (rc) {
2637 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002638 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002639 break;
2640 }
2641
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002642 num_pages = nr_pages;
2643 rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
2644 if (rc) {
Jeff Layton5d81de82014-02-14 07:20:35 -05002645 for (i = 0; i < nr_pages; i++)
2646 put_page(wdata->pages[i]);
2647 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002648 add_credits_and_wake_if(server, credits, 0);
Jeff Layton5d81de82014-02-14 07:20:35 -05002649 break;
2650 }
2651
2652 /*
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002653 * Bring nr_pages down to the number of pages we actually used,
2654 * and free any pages that we didn't use.
Jeff Layton5d81de82014-02-14 07:20:35 -05002655 */
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002656 for ( ; nr_pages > num_pages; nr_pages--)
Jeff Layton5d81de82014-02-14 07:20:35 -05002657 put_page(wdata->pages[nr_pages - 1]);
2658
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002659 wdata->sync_mode = WB_SYNC_ALL;
2660 wdata->nr_pages = nr_pages;
2661 wdata->offset = (__u64)offset;
2662 wdata->cfile = cifsFileInfo_get(open_file);
2663 wdata->pid = pid;
2664 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002665 wdata->pagesz = PAGE_SIZE;
2666 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002667 wdata->credits = credits;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002668 wdata->ctx = ctx;
2669 kref_get(&ctx->refcount);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002670
2671 if (!wdata->cfile->invalidHandle ||
Germano Percossi1fa839b2017-04-07 12:29:38 +01002672 !(rc = cifs_reopen_file(wdata->cfile, false)))
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002673 rc = server->ops->async_writev(wdata,
2674 cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002675 if (rc) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002676 add_credits_and_wake_if(server, wdata->credits, 0);
Steve French4a5c80d2014-02-07 20:45:12 -06002677 kref_put(&wdata->refcount,
2678 cifs_uncached_writedata_release);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002679 if (rc == -EAGAIN) {
Al Virofc56b982016-09-21 18:18:23 -04002680 *from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002681 iov_iter_advance(from, offset - saved_offset);
2682 continue;
2683 }
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002684 break;
2685 }
2686
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002687 list_add_tail(&wdata->list, wdata_list);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002688 offset += cur_len;
2689 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002690 } while (len > 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002691
2692 return rc;
2693}
2694
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002695static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
2696{
2697 struct cifs_writedata *wdata, *tmp;
2698 struct cifs_tcon *tcon;
2699 struct cifs_sb_info *cifs_sb;
2700 struct dentry *dentry = ctx->cfile->dentry;
2701 unsigned int i;
2702 int rc;
2703
2704 tcon = tlink_tcon(ctx->cfile->tlink);
2705 cifs_sb = CIFS_SB(dentry->d_sb);
2706
2707 mutex_lock(&ctx->aio_mutex);
2708
2709 if (list_empty(&ctx->list)) {
2710 mutex_unlock(&ctx->aio_mutex);
2711 return;
2712 }
2713
2714 rc = ctx->rc;
2715 /*
2716 * Wait for and collect replies for any successful sends in order of
2717 * increasing offset. Once an error is hit, then return without waiting
2718 * for any more replies.
2719 */
2720restart_loop:
2721 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
2722 if (!rc) {
2723 if (!try_wait_for_completion(&wdata->done)) {
2724 mutex_unlock(&ctx->aio_mutex);
2725 return;
2726 }
2727
2728 if (wdata->result)
2729 rc = wdata->result;
2730 else
2731 ctx->total_len += wdata->bytes;
2732
2733 /* resend call if it's a retryable error */
2734 if (rc == -EAGAIN) {
2735 struct list_head tmp_list;
2736 struct iov_iter tmp_from = ctx->iter;
2737
2738 INIT_LIST_HEAD(&tmp_list);
2739 list_del_init(&wdata->list);
2740
2741 iov_iter_advance(&tmp_from,
2742 wdata->offset - ctx->pos);
2743
2744 rc = cifs_write_from_iter(wdata->offset,
2745 wdata->bytes, &tmp_from,
2746 ctx->cfile, cifs_sb, &tmp_list,
2747 ctx);
2748
2749 list_splice(&tmp_list, &ctx->list);
2750
2751 kref_put(&wdata->refcount,
2752 cifs_uncached_writedata_release);
2753 goto restart_loop;
2754 }
2755 }
2756 list_del_init(&wdata->list);
2757 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2758 }
2759
2760 for (i = 0; i < ctx->npages; i++)
2761 put_page(ctx->bv[i].bv_page);
2762
2763 cifs_stats_bytes_written(tcon, ctx->total_len);
2764 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
2765
2766 ctx->rc = (rc == 0) ? ctx->total_len : rc;
2767
2768 mutex_unlock(&ctx->aio_mutex);
2769
2770 if (ctx->iocb && ctx->iocb->ki_complete)
2771 ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
2772 else
2773 complete(&ctx->done);
2774}
2775
Al Viroe9d15932015-04-06 22:44:11 -04002776ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002777{
Al Viroe9d15932015-04-06 22:44:11 -04002778 struct file *file = iocb->ki_filp;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002779 ssize_t total_written = 0;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002780 struct cifsFileInfo *cfile;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002781 struct cifs_tcon *tcon;
2782 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002783 struct cifs_aio_ctx *ctx;
Al Virofc56b982016-09-21 18:18:23 -04002784 struct iov_iter saved_from = *from;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002785 int rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002786
Al Viroe9d15932015-04-06 22:44:11 -04002787 /*
2788 * BB - optimize the way when signing is disabled. We can drop this
2789 * extra memory-to-memory copying and use iovec buffers for constructing
2790 * write request.
2791 */
2792
Al Viro3309dd02015-04-09 12:55:47 -04002793 rc = generic_write_checks(iocb, from);
2794 if (rc <= 0)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002795 return rc;
2796
Al Viro7119e222014-10-22 00:25:12 -04002797 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002798 cfile = file->private_data;
2799 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002800
2801 if (!tcon->ses->server->ops->async_writev)
2802 return -ENOSYS;
2803
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002804 ctx = cifs_aio_ctx_alloc();
2805 if (!ctx)
2806 return -ENOMEM;
2807
2808 ctx->cfile = cifsFileInfo_get(cfile);
2809
2810 if (!is_sync_kiocb(iocb))
2811 ctx->iocb = iocb;
2812
2813 ctx->pos = iocb->ki_pos;
2814
2815 rc = setup_aio_ctx_iter(ctx, from, WRITE);
2816 if (rc) {
2817 kref_put(&ctx->refcount, cifs_aio_ctx_release);
2818 return rc;
2819 }
2820
2821 /* grab a lock here due to read response handlers can access ctx */
2822 mutex_lock(&ctx->aio_mutex);
2823
2824 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
2825 cfile, cifs_sb, &ctx->list, ctx);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002826
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002827 /*
2828 * If at least one write was successfully sent, then discard any rc
2829 * value from the later writes. If the other write succeeds, then
2830 * we'll end up returning whatever was written. If it fails, then
2831 * we'll get a new rc value from that.
2832 */
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002833 if (!list_empty(&ctx->list))
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002834 rc = 0;
2835
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002836 mutex_unlock(&ctx->aio_mutex);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002837
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002838 if (rc) {
2839 kref_put(&ctx->refcount, cifs_aio_ctx_release);
2840 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002841 }
2842
Pavel Shilovskyc610c4b2017-04-25 11:52:31 -07002843 if (!is_sync_kiocb(iocb)) {
2844 kref_put(&ctx->refcount, cifs_aio_ctx_release);
2845 return -EIOCBQUEUED;
2846 }
2847
2848 rc = wait_for_completion_killable(&ctx->done);
2849 if (rc) {
2850 mutex_lock(&ctx->aio_mutex);
2851 ctx->rc = rc = -EINTR;
2852 total_written = ctx->total_len;
2853 mutex_unlock(&ctx->aio_mutex);
2854 } else {
2855 rc = ctx->rc;
2856 total_written = ctx->total_len;
2857 }
2858
2859 kref_put(&ctx->refcount, cifs_aio_ctx_release);
2860
Al Viroe9d15932015-04-06 22:44:11 -04002861 if (unlikely(!total_written))
2862 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002863
Al Viroe9d15932015-04-06 22:44:11 -04002864 iocb->ki_pos += total_written;
Al Viroe9d15932015-04-06 22:44:11 -04002865 return total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002866}
2867
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002868static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002869cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002870{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002871 struct file *file = iocb->ki_filp;
2872 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2873 struct inode *inode = file->f_mapping->host;
2874 struct cifsInodeInfo *cinode = CIFS_I(inode);
2875 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04002876 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002877
Rabin Vincent966681c2017-06-29 16:01:42 +02002878 inode_lock(inode);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002879 /*
2880 * We need to hold the sem to be sure nobody modifies lock list
2881 * with a brlock that prevents writing.
2882 */
2883 down_read(&cinode->lock_sem);
Al Viro5f380c72015-04-07 11:28:12 -04002884
Al Viro3309dd02015-04-09 12:55:47 -04002885 rc = generic_write_checks(iocb, from);
2886 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04002887 goto out;
2888
Al Viro5f380c72015-04-07 11:28:12 -04002889 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002890 server->vals->exclusive_lock_type, NULL,
Al Viro5f380c72015-04-07 11:28:12 -04002891 CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04002892 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04002893 else
2894 rc = -EACCES;
2895out:
Rabin Vincent966681c2017-06-29 16:01:42 +02002896 up_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05002897 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04002898
Christoph Hellwige2592212016-04-07 08:52:01 -07002899 if (rc > 0)
2900 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002901 return rc;
2902}
2903
/*
 * Strict-cache write entry point. Chooses the write path based on the
 * caching state: with a write oplock/lease the data goes through the
 * page cache (via generic_file_write_iter when POSIX byte-range locks
 * are usable, otherwise via cifs_writev which checks brlock conflicts);
 * without one, the data is sent uncached and any read-cached pages are
 * zapped so subsequent reads fetch fresh data from the server.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
				     iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* returns non-zero if oplock handling is in progress */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2953
Jeff Layton0471ca32012-05-16 07:13:16 -04002954static struct cifs_readdata *
Long Lif9f5aca2018-05-30 12:47:54 -07002955cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002956{
2957 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002958
Long Lif9f5aca2018-05-30 12:47:54 -07002959 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002960 if (rdata != NULL) {
Long Lif9f5aca2018-05-30 12:47:54 -07002961 rdata->pages = pages;
Jeff Layton6993f742012-05-16 07:13:17 -04002962 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002963 INIT_LIST_HEAD(&rdata->list);
2964 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002965 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002966 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002967
Jeff Layton0471ca32012-05-16 07:13:16 -04002968 return rdata;
2969}
2970
Long Lif9f5aca2018-05-30 12:47:54 -07002971static struct cifs_readdata *
2972cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
2973{
2974 struct page **pages =
Kees Cook6396bb22018-06-12 14:03:40 -07002975 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
Long Lif9f5aca2018-05-30 12:47:54 -07002976 struct cifs_readdata *ret = NULL;
2977
2978 if (pages) {
2979 ret = cifs_readdata_direct_alloc(pages, complete);
2980 if (!ret)
2981 kfree(pages);
2982 }
2983
2984 return ret;
2985}
2986
Jeff Layton6993f742012-05-16 07:13:17 -04002987void
2988cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002989{
Jeff Layton6993f742012-05-16 07:13:17 -04002990 struct cifs_readdata *rdata = container_of(refcount,
2991 struct cifs_readdata, refcount);
Long Libd3dcc62017-11-22 17:38:47 -07002992#ifdef CONFIG_CIFS_SMB_DIRECT
2993 if (rdata->mr) {
2994 smbd_deregister_mr(rdata->mr);
2995 rdata->mr = NULL;
2996 }
2997#endif
Jeff Layton6993f742012-05-16 07:13:17 -04002998 if (rdata->cfile)
2999 cifsFileInfo_put(rdata->cfile);
3000
Long Lif9f5aca2018-05-30 12:47:54 -07003001 kvfree(rdata->pages);
Jeff Layton0471ca32012-05-16 07:13:16 -04003002 kfree(rdata);
3003}
3004
Jeff Layton2a1bb132012-05-16 07:13:17 -04003005static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003006cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04003007{
3008 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003009 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04003010 unsigned int i;
3011
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003012 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04003013 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3014 if (!page) {
3015 rc = -ENOMEM;
3016 break;
3017 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003018 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04003019 }
3020
3021 if (rc) {
Roberto Bergantinos Corpas297a2512019-05-28 09:38:14 +02003022 unsigned int nr_page_failed = i;
3023
3024 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003025 put_page(rdata->pages[i]);
3026 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04003027 }
3028 }
3029 return rc;
3030}
3031
3032static void
3033cifs_uncached_readdata_release(struct kref *refcount)
3034{
Jeff Layton1c892542012-05-16 07:13:17 -04003035 struct cifs_readdata *rdata = container_of(refcount,
3036 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003037 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003038
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003039 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003040 for (i = 0; i < rdata->nr_pages; i++) {
3041 put_page(rdata->pages[i]);
3042 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04003043 }
3044 cifs_readdata_release(refcount);
3045}
3046
Jeff Layton1c892542012-05-16 07:13:17 -04003047/**
3048 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3049 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05003050 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04003051 *
3052 * This function copies data from a list of pages in a readdata response into
3053 * an array of iovecs. It will first calculate where the data should go
3054 * based on the info in the readdata and then copy the data into that spot.
3055 */
Al Viro7f25bba2014-02-04 14:07:43 -05003056static int
3057cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04003058{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04003059 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003060 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04003061
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003062 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003063 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02003064 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovsky9c257022017-01-19 13:53:15 -08003065 size_t written;
3066
3067 if (unlikely(iter->type & ITER_PIPE)) {
3068 void *addr = kmap_atomic(page);
3069
3070 written = copy_to_iter(addr, copy, iter);
3071 kunmap_atomic(addr);
3072 } else
3073 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05003074 remaining -= written;
3075 if (written < copy && iov_iter_count(iter) > 0)
3076 break;
Jeff Layton1c892542012-05-16 07:13:17 -04003077 }
Al Viro7f25bba2014-02-04 14:07:43 -05003078 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04003079}
3080
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003081static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3082
Jeff Layton1c892542012-05-16 07:13:17 -04003083static void
3084cifs_uncached_readv_complete(struct work_struct *work)
3085{
3086 struct cifs_readdata *rdata = container_of(work,
3087 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04003088
3089 complete(&rdata->done);
Pavel Shilovsky6685c5e2017-04-25 11:52:30 -07003090 collect_uncached_read_data(rdata->ctx);
3091 /* the below call can possibly free the last ref to aio ctx */
Jeff Layton1c892542012-05-16 07:13:17 -04003092 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3093}
3094
/*
 * Fill rdata's pages with up to @len bytes: copied from @iter when one
 * is supplied, otherwise read from the transport socket (or counted as
 * already placed by SMB-Direct when rdata->mr is set). Pages beyond
 * @len are released and rdata->nr_pages shrunk accordingly; rdata->tailsz
 * records the size of the final, possibly partial, page. Returns
 * rdata->got_bytes when any data was received (unless the connection
 * aborted), otherwise the error from the last transfer.
 */
static int
uncached_fill_pages(struct TCP_Server_Info *server,
		    struct cifs_readdata *rdata, struct iov_iter *iter,
		    unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;
		unsigned int segment_size = rdata->pagesz;

		/* only the first page starts at a non-zero offset */
		if (i == 0)
			segment_size -= page_offset;
		else
			page_offset = 0;


		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		n = len;
		if (len >= segment_size)
			/* enough data to fill the page */
			n = segment_size;
		else
			rdata->tailsz = len;
		len -= n;

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3153
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003154static int
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003155cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3156 struct cifs_readdata *rdata, unsigned int len)
3157{
3158 return uncached_fill_pages(server, rdata, NULL, len);
3159}
3160
/*
 * Copy an already-received response from @iter into rdata's pages,
 * consuming the whole iterator.
 */
static int
cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata,
			      struct iov_iter *iter)
{
	return uncached_fill_pages(server, rdata, iter, iter->count);
}
3168
/*
 * Split an uncached read into rsize-sized chunks: for each chunk,
 * reserve credits, allocate a cifs_readdata with backing pages and
 * issue an async read. Submitted units are queued on @rdata_list and
 * reaped later by collect_uncached_read_data(). A transient -EAGAIN
 * from the send path retries the same chunk.
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
		     struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	/* with RWPIDFORWARD, send under the pid that opened the file */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		/* block until credits are available for up to rsize bytes */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->tailsz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->copy_into_pages = cifs_uncached_copy_into_pages;
		rdata->credits = credits;
		rdata->ctx = ctx;
		kref_get(&ctx->refcount);

		/* reopen a stale handle before sending, if needed */
		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3243
/*
 * Gather the results of the uncached read requests hanging off ctx->list.
 * Completed chunks are copied into ctx->iter in order of increasing offset;
 * a request that finished with -EAGAIN is resent via cifs_send_async_read()
 * and its replacement requests are spliced back into ctx->list.  Once all
 * requests are consumed, the pages in ctx->bv are released (and dirtied if
 * ctx->should_dirty), ctx->rc is set to the byte count or error, and the
 * waiter is notified through ctx->iocb->ki_complete() or ctx->done.
 */
static void
collect_uncached_read_data(struct cifs_aio_ctx *ctx)
{
	struct cifs_readdata *rdata, *tmp;
	struct iov_iter *to = &ctx->iter;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int i;
	int rc;

	tcon = tlink_tcon(ctx->cfile->tlink);
	cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);

	mutex_lock(&ctx->aio_mutex);

	/* nothing left to collect -- another invocation finished the job */
	if (list_empty(&ctx->list)) {
		mutex_unlock(&ctx->aio_mutex);
		return;
	}

	rc = ctx->rc;
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
		if (!rc) {
			/*
			 * This request hasn't completed yet; return without
			 * waiting.  NOTE(review): presumably we are re-invoked
			 * when the outstanding request completes -- confirm
			 * against the completion callback.
			 */
			if (!try_wait_for_completion(&rdata->done)) {
				mutex_unlock(&ctx->aio_mutex);
				return;
			}

			if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				/* reissue only the part that wasn't received */
				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list, ctx);

				list_splice(&tmp_list, &ctx->list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				/* restart: the list was modified under us */
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	/* release (and optionally dirty) the pinned user pages */
	for (i = 0; i < ctx->npages; i++) {
		if (ctx->should_dirty)
			set_page_dirty(ctx->bv[i].bv_page);
		put_page(ctx->bv[i].bv_page);
	}

	ctx->total_len = ctx->len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, ctx->total_len);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	ctx->rc = (rc == 0) ? ctx->total_len : rc;

	mutex_unlock(&ctx->aio_mutex);

	/* async caller gets its completion callback, sync caller is woken */
	if (ctx->iocb && ctx->iocb->ki_complete)
		ctx->iocb->ki_complete(ctx->iocb, ctx->rc, 0);
	else
		complete(&ctx->done);
}
3343
/*
 * Uncached read entry point: reads into @to bypassing the page cache.
 * Builds a cifs_aio_ctx around the iterator, splits the request into one
 * or more async reads via cifs_send_async_read(), then either waits for
 * the result (sync kiocb) or returns -EIOCBQUEUED (async kiocb, which is
 * completed later through ctx->iocb).  Returns the number of bytes read,
 * 0 for an empty iterator, or a negative error code.
 */
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	struct cifs_aio_ctx *ctx;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	cifs_sb = CIFS_FILE_SB(file);
	cfile = file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	/* this path depends entirely on the async read op */
	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	ctx = cifs_aio_ctx_alloc();
	if (!ctx)
		return -ENOMEM;

	/* ctx holds its own reference on the open file */
	ctx->cfile = cifsFileInfo_get(cfile);

	if (!is_sync_kiocb(iocb))
		ctx->iocb = iocb;

	/* user-backed iovecs must be dirtied when we release the pages */
	if (to->type == ITER_IOVEC)
		ctx->should_dirty = true;

	rc = setup_aio_ctx_iter(ctx, to, READ);
	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	len = ctx->len;

	/* grab a lock here due to read response handlers can access ctx */
	mutex_lock(&ctx->aio_mutex);

	rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&ctx->list))
		rc = 0;

	mutex_unlock(&ctx->aio_mutex);

	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}

	/* async caller: completion is reported via ctx->iocb later */
	if (!is_sync_kiocb(iocb)) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return -EIOCBQUEUED;
	}

	rc = wait_for_completion_killable(&ctx->done);
	if (rc) {
		/* killed while waiting: record -EINTR in the ctx as well */
		mutex_lock(&ctx->aio_mutex);
		ctx->rc = rc = -EINTR;
		total_read = ctx->total_len;
		mutex_unlock(&ctx->aio_mutex);
	} else {
		rc = ctx->rc;
		total_read = ctx->total_len;
	}

	kref_put(&ctx->refcount, cifs_aio_ctx_release);

	/* partial success wins over a trailing error */
	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3430
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003431ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003432cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003433{
Al Viro496ad9a2013-01-23 17:07:38 -05003434 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003435 struct cifsInodeInfo *cinode = CIFS_I(inode);
3436 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3437 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3438 iocb->ki_filp->private_data;
3439 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3440 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003441
3442 /*
3443 * In strict cache mode we need to read from the server all the time
3444 * if we don't have level II oplock because the server can delay mtime
3445 * change - so we can't make a decision about inode invalidating.
3446 * And we can also fail with pagereading if there are mandatory locks
3447 * on pages affected by this read but not on the region from pos to
3448 * pos+len-1.
3449 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003450 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003451 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003452
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003453 if (cap_unix(tcon->ses) &&
3454 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3455 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003456 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003457
3458 /*
3459 * We need to hold the sem to be sure nobody modifies lock list
3460 * with a brlock that prevents reading.
3461 */
3462 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003463 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003464 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04003465 NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003466 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003467 up_read(&cinode->lock_sem);
3468 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003469}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470
/*
 * Synchronous read into a kernel buffer.  Issues server->ops->sync_read()
 * calls of at most rsize bytes, retrying each chunk on -EAGAIN (reopening
 * an invalidated handle first), until read_size bytes were read, an error
 * occurred, or a zero-byte read (presumably EOF) was returned.  *offset is
 * advanced by the bytes read.  Returns the total number of bytes read, or
 * a negative error code if nothing was read at all.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	/* pid forwarding: attribute the I/O to the opener, not the reader */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		/* retry the current chunk for as long as we get -EAGAIN */
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): this bumps the stats by the cumulative
			 * total on every iteration, which looks like it
			 * over-counts for multi-chunk reads -- confirm intent.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3561
Jeff Laytonca83ce32011-04-12 09:13:44 -04003562/*
3563 * If the page is mmap'ed into a process' page tables, then we need to make
3564 * sure that it doesn't change while being written back.
3565 */
Souptick Joardera5240cb2018-04-15 00:58:25 +05303566static vm_fault_t
Dave Jiang11bac802017-02-24 14:56:41 -08003567cifs_page_mkwrite(struct vm_fault *vmf)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003568{
3569 struct page *page = vmf->page;
3570
3571 lock_page(page);
3572 return VM_FAULT_LOCKED;
3573}
3574
/*
 * VM operations for cifs mmap: generic filemap fault paths plus our
 * page_mkwrite hook that locks the page before it is written.
 */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3580
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003581int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3582{
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003583 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003584 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003585
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003586 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003587
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003588 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003589 rc = cifs_zap_mapping(inode);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003590 if (!rc)
3591 rc = generic_file_mmap(file, vma);
3592 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003593 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003594
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003595 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003596 return rc;
3597}
3598
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3600{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601 int rc, xid;
3602
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003603 xid = get_xid();
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003604
Jeff Laytonabab0952010-02-12 07:44:18 -05003605 rc = cifs_revalidate_file(file);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003606 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003607 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3608 rc);
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003609 if (!rc)
3610 rc = generic_file_mmap(file, vma);
3611 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003612 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxf04a7032017-12-15 12:48:32 -08003613
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003614 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003615 return rc;
3616}
3617
/*
 * Work handler run when an async readpages request finishes.  For a
 * successful (or partially successful -EAGAIN) read, marks the filled
 * pages uptodate and pushes them to fscache; every page is then added to
 * the LRU, unlocked and released.  Drops this work's reference on the
 * readdata when done.
 */
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i, got_bytes;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	got_bytes = rdata->got_bytes;
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		/* only pages covered by received data become uptodate */
		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes)) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes))
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		/* got_bytes counts down page by page (floors at zero) */
		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);

		put_page(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}
3650
/*
 * Fill the pages of a readpages request with up to @len bytes, either
 * copied from @iter (when non-NULL) or read from the server socket.
 * A trailing partial page is zero-filled beyond the received data.
 * Pages entirely past both the data and the server's known EOF are
 * zeroed and marked uptodate; other surplus pages are just released.
 * Returns rdata->got_bytes when any data was placed (unless the
 * connection aborted), otherwise the last error from the copy/read.
 */
static int
readpages_fill_pages(struct TCP_Server_Info *server,
		     struct cifs_readdata *rdata, struct iov_iter *iter,
		     unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	unsigned int page_offset = rdata->page_offset;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		unsigned int to_read = rdata->pagesz;
		size_t n;

		/* only the first page starts at a non-zero page offset */
		if (i == 0)
			to_read -= page_offset;
		else
			page_offset = 0;

		n = to_read;

		if (len >= to_read) {
			len -= to_read;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len + page_offset, to_read - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		if (iter)
			result = copy_page_from_iter(
					page, page_offset, n, iter);
#ifdef CONFIG_CIFS_SMB_DIRECT
		else if (rdata->mr)
			/* data already landed via RDMA; just account for it */
			result = n;
#endif
		else
			result = cifs_read_page_from_socket(
					server, page, page_offset, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3736
/* Fill rdata->pages from the server socket (iter == NULL path). */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	return readpages_fill_pages(server, rdata, NULL, len);
}
3743
/* Fill rdata->pages by copying from an already-received iov_iter. */
static int
cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata,
			       struct iov_iter *iter)
{
	return readpages_fill_pages(server, rdata, iter, iter->count);
}
3751
/*
 * Peel a contiguous run of pages off the tail of @page_list (which is in
 * order of declining index), insert them into the page cache locked, and
 * move them to @tmplist.  Stops at an index discontinuity, when the run
 * would exceed @rsize bytes, or when a page can't be added to the cache.
 * On success *offset, *bytes and *nr_pages describe the resulting read
 * request.  Returns 0 or the error from the first cache insertion.
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
3810
Linus Torvalds1da177e2005-04-16 15:20:36 -07003811static int cifs_readpages(struct file *file, struct address_space *mapping,
3812 struct list_head *page_list, unsigned num_pages)
3813{
Jeff Layton690c5e32011-10-19 15:30:16 -04003814 int rc;
3815 struct list_head tmplist;
3816 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04003817 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003818 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04003819 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820
Jeff Layton690c5e32011-10-19 15:30:16 -04003821 /*
Suresh Jayaraman56698232010-07-05 18:13:25 +05303822 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3823 * immediately if the cookie is negative
David Howells54afa992013-09-04 17:10:39 +00003824 *
3825 * After this point, every page in the list might have PG_fscache set,
3826 * so we will need to clean that up off of every page we don't use.
Suresh Jayaraman56698232010-07-05 18:13:25 +05303827 */
3828 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3829 &num_pages);
3830 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003831 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303832
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003833 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3834 pid = open_file->pid;
3835 else
3836 pid = current->tgid;
3837
Jeff Layton690c5e32011-10-19 15:30:16 -04003838 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003839 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840
Joe Perchesf96637b2013-05-04 22:12:25 -05003841 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3842 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04003843
3844 /*
3845 * Start with the page at end of list and move it to private
3846 * list. Do the same with any following pages until we hit
3847 * the rsize limit, hit an index discontinuity, or run out of
3848 * pages. Issue the async read and then start the loop again
3849 * until the list is empty.
3850 *
3851 * Note that list order is important. The page_list is in
3852 * the order of declining indexes. When we put the pages in
3853 * the rdata->pages, then we want them in increasing order.
3854 */
3855 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003856 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04003857 loff_t offset;
3858 struct page *page, *tpage;
3859 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003860 unsigned credits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003862 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3863 &rsize, &credits);
3864 if (rc)
3865 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003866
Jeff Layton690c5e32011-10-19 15:30:16 -04003867 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003868 * Give up immediately if rsize is too small to read an entire
3869 * page. The VFS will fall back to readpage. We should never
3870 * reach this point however since we set ra_pages to 0 when the
3871 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04003872 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003873 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003874 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003875 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003876 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003877
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003878 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3879 &nr_pages, &offset, &bytes);
3880 if (rc) {
3881 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04003883 }
3884
Jeff Layton0471ca32012-05-16 07:13:16 -04003885 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003886 if (!rdata) {
3887 /* best to give up if we're out of mem */
3888 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3889 list_del(&page->lru);
3890 lru_cache_add_file(page);
3891 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003892 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04003893 }
3894 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003895 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04003896 break;
3897 }
3898
Jeff Layton6993f742012-05-16 07:13:17 -04003899 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003900 rdata->mapping = mapping;
3901 rdata->offset = offset;
3902 rdata->bytes = bytes;
3903 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003904 rdata->pagesz = PAGE_SIZE;
Long Li1dbe3462018-05-30 12:47:55 -07003905 rdata->tailsz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07003906 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskyd70b9102016-11-17 16:20:18 -08003907 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003908 rdata->credits = credits;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003909
3910 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3911 list_del(&page->lru);
3912 rdata->pages[rdata->nr_pages++] = page;
3913 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003914
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003915 if (!rdata->cfile->invalidHandle ||
Germano Percossi1fa839b2017-04-07 12:29:38 +01003916 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003917 rc = server->ops->async_readv(rdata);
3918 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003919 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003920 for (i = 0; i < rdata->nr_pages; i++) {
3921 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003922 lru_cache_add_file(page);
3923 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003924 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04003926 /* Fallback to the readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04003927 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 break;
3929 }
Jeff Layton6993f742012-05-16 07:13:17 -04003930
3931 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932 }
3933
David Howells54afa992013-09-04 17:10:39 +00003934 /* Any pages that have been shown to fscache but didn't get added to
3935 * the pagecache must be uncached before they get returned to the
3936 * allocator.
3937 */
3938 cifs_fscache_readpages_cancel(mapping->host, page_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939 return rc;
3940}
3941
/*
 * cifs_readpage_worker - fill one page cache page with data from the server
 *
 * Must be called with the page pinned (caller holds a reference).
 * On the synchronous read path the page is kunmap'ed and unlocked here
 * on both success and error.  On an fscache hit (rc == 0 from
 * cifs_readpage_from_fscache) we return immediately; unlocking is then
 * handled by the fscache read path — NOTE(review): confirm against
 * cifs_readpage_from_fscache's contract.
 *
 * @file:    open file the read is issued against
 * @page:    locked page cache page to fill
 * @poffset: file offset to read from (updated by cifs_read)
 *
 * Returns 0 once the page is uptodate, or a negative errno from cifs_read.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached?  rc == 0 means fscache satisfied the read. */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_time(file_inode(file));

	/* short read: zero the tail so no stale data is exposed to userspace */
	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
3987
3988static int cifs_readpage(struct file *file, struct page *page)
3989{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003990 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003992 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003994 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003995
3996 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303997 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003998 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303999 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000 }
4001
Joe Perchesf96637b2013-05-04 22:12:25 -05004002 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00004003 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004
4005 rc = cifs_readpage_worker(file, page, &offset);
4006
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04004007 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008 return rc;
4009}
4010
Steve Frencha403a0a2007-07-26 15:54:16 +00004011static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4012{
4013 struct cifsFileInfo *open_file;
4014
Dave Wysochanskia8de7092019-10-03 15:16:27 +10004015 spin_lock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004016 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04004017 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Dave Wysochanskia8de7092019-10-03 15:16:27 +10004018 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004019 return 1;
4020 }
4021 }
Dave Wysochanskia8de7092019-10-03 15:16:27 +10004022 spin_unlock(&cifs_inode->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00004023 return 0;
4024}
4025
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026/* We do not want to update the file size from server for inodes
4027 open for write - to avoid races with writepage extending
4028 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004029 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004030 but this is tricky to do without racing with writebehind
4031 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00004032bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033{
Steve Frencha403a0a2007-07-26 15:54:16 +00004034 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00004035 return true;
Steve French23e7dd72005-10-20 13:44:56 -07004036
Steve Frencha403a0a2007-07-26 15:54:16 +00004037 if (is_inode_writable(cifsInode)) {
4038 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08004039 struct cifs_sb_info *cifs_sb;
4040
Steve Frenchc32a0b62006-01-12 14:41:28 -08004041 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00004042 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004043 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08004044 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00004045 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08004046 }
4047
Steve Frenchfb8c4b12007-07-10 01:16:18 +00004048 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00004049 return true;
Steve French7ba52632007-02-08 18:14:13 +00004050
Steve French4b18f2a2008-04-29 00:06:05 +00004051 return false;
Steve French23e7dd72005-10-20 13:44:56 -07004052 } else
Steve French4b18f2a2008-04-29 00:06:05 +00004053 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054}
4055
/*
 * cifs_write_begin - ->write_begin handler: prepare a page for buffered write
 *
 * Grabs and locks the page covering @pos and makes sure the parts of the
 * page NOT covered by the upcoming copy are valid before the copy runs.
 * Four ways out: the page is already uptodate; the write covers a full
 * page (it will be uptodate afterwards); we hold a read oplock and the
 * write sits at/over EOF so the uncovered regions can simply be zeroed;
 * or the page is read in synchronously via cifs_readpage_worker() and the
 * lookup retried once.
 *
 * Returns 0 with *pagep set to the locked page, or -ENOMEM if the page
 * could not be obtained.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;	/* ensures the read-in retry happens at most once */
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 * cifs_readpage_worker unlocks the page, so drop our
		 * reference and re-grab it from the top.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
4132
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304133static int cifs_release_page(struct page *page, gfp_t gfp)
4134{
4135 if (PagePrivate(page))
4136 return 0;
4137
4138 return cifs_fscache_release_page(page, gfp);
4139}
4140
Lukas Czernerd47992f2013-05-21 23:17:23 -04004141static void cifs_invalidate_page(struct page *page, unsigned int offset,
4142 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304143{
4144 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
4145
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004146 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304147 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
4148}
4149
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004150static int cifs_launder_page(struct page *page)
4151{
4152 int rc = 0;
4153 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03004154 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004155 struct writeback_control wbc = {
4156 .sync_mode = WB_SYNC_ALL,
4157 .nr_to_write = 0,
4158 .range_start = range_start,
4159 .range_end = range_end,
4160 };
4161
Joe Perchesf96637b2013-05-04 22:12:25 -05004162 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004163
4164 if (clear_page_dirty_for_io(page))
4165 rc = cifs_writepage_locked(page, &wbc);
4166
4167 cifs_fscache_invalidate_page(page, page->mapping->host);
4168 return rc;
4169}
4170
/*
 * cifs_oplock_break - work handler for a server-initiated oplock break
 *
 * Runs from the oplock_break work_struct embedded in a cifsFileInfo.
 * Waits for pending writers (CIFS_INODE_PENDING_WRITERS), downgrades the
 * cached oplock state, propagates the break to local leases, flushes
 * (and, when read caching is lost, invalidates) cached data, pushes
 * byte-range locks back to the server, and finally acknowledges the
 * break to the server unless it was cancelled.  Drops the file reference
 * held for the duration of the work item on exit.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* don't downgrade until in-flight writers have drained */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/*
	 * Mandatory byte-range locks need write caching; if we kept only a
	 * read oplock while holding such locks, drop the oplock entirely.
	 */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* break any local lease to match the new caching state */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			/* read caching lost: wait for writeback, then zap */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */);
	cifs_done_oplock_break(cinode);
}
4226
Steve Frenchdca69282013-11-11 16:42:37 -06004227/*
4228 * The presence of cifs_direct_io() in the address space ops vector
4229 * allowes open() O_DIRECT flags which would have failed otherwise.
4230 *
4231 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
4232 * so this method should never be called.
4233 *
4234 * Direct IO is not yet supported in the cached mode.
4235 */
4236static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07004237cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06004238{
4239 /*
4240 * FIXME
4241 * Eventually need to support direct IO for non forcedirectio mounts
4242 */
4243 return -EINVAL;
4244}
4245
4246
/*
 * Address space operations for regular CIFS mounts: full page cache
 * support including multi-page reads (->readpages) and direct I/O.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004260
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 *
 * This variant therefore omits ->readpages (and ->direct_IO); single-page
 * reads fall back to ->readpage.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};