/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

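/*
 * Illustrative sketch (not compiled into the driver): how the helpers
 * above map one hypothetical open(2) call.  The constants are the real
 * SMB/CIFS values used in this file; the scenario itself is made up.
 */
#if 0
	unsigned int f_flags = O_RDWR | O_CREAT | O_EXCL;

	/* NT-style open: access mask and disposition derived separately */
	int access = cifs_convert_flags(f_flags);   /* GENERIC_READ | GENERIC_WRITE */
	int disp   = cifs_get_disposition(f_flags); /* FILE_CREATE */

	/* POSIX extensions: a single bitmask carries both pieces */
	u32 pflags = cifs_posix_convert_flags(f_flags);
				/* SMB_O_RDWR | SMB_O_CREAT | SMB_O_EXCL */
#endif
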
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

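/*
 * Illustrative call sequence (not compiled): how a caller such as
 * cifs_open() below drives cifs_posix_open().  The variable values
 * here are hypothetical.
 */
#if 0
	__u32 oplock = 0;
	struct cifs_fid fid;

	rc = cifs_posix_open(full_path, &inode, inode->i_sb,
			     cifs_sb->mnt_file_mode /* ignored on open */,
			     file->f_flags, &oplock, &fid.netfid, xid);
	/* on success, fid.netfid is wrapped via cifs_new_fileinfo() */
#endif
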
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the FILE_SUPERSEDE
 *	disposition (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing
 *	file rather than creating a new one the way FILE_SUPERSEDE
 *	does (FILE_SUPERSEDE uses the attributes / metadata passed
 *	in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW, and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

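/*
 * Illustrative sketch (not compiled): the typical caller pattern for
 * cifs_has_mand_locks(), used when deciding whether a read oplock can
 * be kept; see cifs_new_fileinfo() and cifs_reopen_file() below.
 */
#if 0
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		/* cached read data could bypass mandatory byte-range locks */
		oplock = 0;
	}
#endif
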
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

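/*
 * Note: cifs_down_write() polls with down_write_trylock() in 10 ms
 * steps instead of sleeping in down_write(); presumably this avoids a
 * queued writer on lock_sem blocking later readers during reconnect.
 * Callers in this file use it wherever lock_sem is taken for write,
 * e.g. (sketch, mirroring cifs_lock_add() below):
 */
#if 0
	cifs_down_write(&cinode->lock_sem);	/* instead of down_write() */
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
#endif
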
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

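/*
 * Illustrative refcount lifecycle (not compiled): a hypothetical caller
 * that needs the handle to outlive file->private_data.
 */
#if 0
	struct cifsFileInfo *cfile = cifsFileInfo_get(file->private_data);

	/* ... cfile stays valid here even if the file is closed ... */

	cifsFileInfo_put(cfile);	/* last ref may send an SMB close */
#endif
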
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock and
 * cifs_file->file_info_lock.
 *
 * If @wait_for_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

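/*
 * Illustrative sketch (not compiled): cifs_relock_file() is driven from
 * cifs_reopen_file() below once a handle has been reestablished after a
 * reconnect, roughly as follows.
 */
#if 0
	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);	/* replay cached brlocks */
#endif
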
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen persistent handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

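/*
 * Illustrative sketch (not compiled): allocating a byte-range lock and
 * queueing it with cifs_lock_add_if() below.  The offset/length values
 * are hypothetical; the lock type comes from server->vals.
 */
#if 0
	struct cifsLockInfo *lock =
		cifs_lock_init(0 /* offset */, 4096 /* length */,
			       server->vals->exclusive_lock_type);
	if (!lock)
		return -ENOMEM;
	rc = cifs_lock_add_if(cfile, lock, true /* wait */);
	if (rc < 0)
		kfree(lock);
#endif
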
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

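/*
 * Illustrative sketch (not compiled): how an I/O path might consult the
 * cached locks before a write, using the rw_check values defined above.
 * The offset/len variables here are hypothetical.
 */
#if 0
	struct cifsLockInfo *conf = NULL;

	if (cifs_find_lock_conflict(cfile, offset, len,
				    server->vals->exclusive_lock_type,
				    &conf, CIFS_WRITE_OP))
		rc = -EACCES;	/* a mandatory lock blocks this write */
#endif
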
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we
 * can cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001102/*
1103 * Set the byte-range lock (posix style). Returns:
 1104 * 1) 0, if we set the lock and don't need to ask the server;
 1105 * 2) 1, if we need to ask the server;
 1106 * 3) <0, if an error occurs while setting the lock.
1107 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001108static int
1109cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1110{
Al Viro496ad9a2013-01-23 17:07:38 -05001111 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001112 int rc = 1;
1113
1114 if ((flock->fl_flags & FL_POSIX) == 0)
1115 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001116
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001117try_again:
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001118 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001119 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001120 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001121 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001122 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001123
1124 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001125 up_write(&cinode->lock_sem);
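	/*
	 * FILE_LOCK_DEFERRED means a conflicting lock was found and this
	 * request was queued behind it; fl_next stays non-NULL while we are
	 * blocked, so wait for it to clear and retry, or unblock ourselves
	 * if the wait is interrupted by a signal.
	 */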
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001126 if (rc == FILE_LOCK_DEFERRED) {
1127 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
1128 if (!rc)
1129 goto try_again;
Jeff Layton1a9e64a2013-06-21 08:58:10 -04001130 posix_unblock_lock(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001131 }
Steve French9ebb3892012-04-01 13:52:54 -05001132 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001133}
1134
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001135int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001136cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001137{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001138 unsigned int xid;
1139 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001140 struct cifsLockInfo *li, *tmp;
1141 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001142 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001143 LOCKING_ANDX_RANGE *buf, *cur;
1144 int types[] = {LOCKING_ANDX_LARGE_FILES,
1145 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1146 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001147
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001148 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001149 tcon = tlink_tcon(cfile->tlink);
1150
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001151 /*
 1152 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001153 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001154 */
1155 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001156 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001157 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001158 return -EINVAL;
1159 }
1160
Ross Lagerwall04d76802019-01-08 18:30:56 +00001161 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1162 PAGE_SIZE);
1163 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1164 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001165 max_num = (max_buf - sizeof(struct smb_hdr)) /
1166 sizeof(LOCKING_ANDX_RANGE);
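	/*
	 * Rough arithmetic for the packing above (sizes are approximate and
	 * arch-dependent): with a ~32-byte smb_hdr and a 20-byte
	 * LOCKING_ANDX_RANGE, the PAGE_SIZE cap allows on the order of 200
	 * lock ranges per LOCKING_ANDX request.
	 */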
Fabian Frederick4b99d392014-12-10 15:41:17 -08001167 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001168 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001169 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001170 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001171 }
1172
1173 for (i = 0; i < 2; i++) {
1174 cur = buf;
1175 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001176 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001177 if (li->type != types[i])
1178 continue;
1179 cur->Pid = cpu_to_le16(li->pid);
1180 cur->LengthLow = cpu_to_le32((u32)li->length);
1181 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1182 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1183 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1184 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001185 stored_rc = cifs_lockv(xid, tcon,
1186 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001187 (__u8)li->type, 0, num,
1188 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001189 if (stored_rc)
1190 rc = stored_rc;
1191 cur = buf;
1192 num = 0;
1193 } else
1194 cur++;
1195 }
1196
1197 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001198 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001199 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001200 if (stored_rc)
1201 rc = stored_rc;
1202 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001203 }
1204
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001205 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001206 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001207 return rc;
1208}
1209
Jeff Layton3d224622016-05-24 06:27:44 -04001210static __u32
1211hash_lockowner(fl_owner_t owner)
1212{
1213 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1214}
1215
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001216struct lock_to_push {
1217 struct list_head llist;
1218 __u64 offset;
1219 __u64 length;
1220 __u32 pid;
1221 __u16 netfid;
1222 __u8 type;
1223};
1224
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001225static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001226cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001227{
David Howells2b0143b2015-03-17 22:25:59 +00001228 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001229 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001230 struct file_lock *flock;
1231 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001232 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001233 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001234 struct list_head locks_to_send, *el;
1235 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001236 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001237
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001238 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001239
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001240 if (!flctx)
1241 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001242
Jeff Laytone084c1b2015-02-16 14:32:03 -05001243 spin_lock(&flctx->flc_lock);
1244 list_for_each(el, &flctx->flc_posix) {
1245 count++;
1246 }
1247 spin_unlock(&flctx->flc_lock);
1248
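	/*
	 * Two passes are needed over the lock list: the count above is taken
	 * under flc_lock, but the GFP_KERNEL allocations below may sleep and
	 * so cannot run with the spinlock held. lock_sem keeps the count
	 * valid in between.
	 */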
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001249 INIT_LIST_HEAD(&locks_to_send);
1250
1251 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001252 * Allocating count locks is enough because no FL_POSIX locks can be
 1253 * added to the list while we are holding cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001254 * protects the locking operations on this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001255 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001256 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001257 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1258 if (!lck) {
1259 rc = -ENOMEM;
1260 goto err_out;
1261 }
1262 list_add_tail(&lck->llist, &locks_to_send);
1263 }
1264
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001265 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001266 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001267 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001268 if (el == &locks_to_send) {
1269 /*
1270 * The list ended. We don't have enough allocated
1271 * structures - something is really wrong.
1272 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001273 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001274 break;
1275 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001276 length = 1 + flock->fl_end - flock->fl_start;
1277 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1278 type = CIFS_RDLCK;
1279 else
1280 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001281 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001282 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001283 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001284 lck->length = length;
1285 lck->type = type;
1286 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001287 }
Jeff Layton6109c852015-01-16 15:05:57 -05001288 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001289
1290 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001291 int stored_rc;
1292
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001293 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001294 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001295 lck->type, 0);
1296 if (stored_rc)
1297 rc = stored_rc;
1298 list_del(&lck->llist);
1299 kfree(lck);
1300 }
1301
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001302out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001303 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001304 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001305err_out:
1306 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1307 list_del(&lck->llist);
1308 kfree(lck);
1309 }
1310 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001311}
1312
1313static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001314cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001315{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001316 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001317 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001318 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001319 int rc = 0;
1320
1321 /* we are going to update can_cache_brlcks here - need a write access */
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001322 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001323 if (!cinode->can_cache_brlcks) {
1324 up_write(&cinode->lock_sem);
1325 return rc;
1326 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001327
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001328 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001329 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1330 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001331 rc = cifs_push_posix_locks(cfile);
1332 else
1333 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001334
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001335 cinode->can_cache_brlcks = false;
1336 up_write(&cinode->lock_sem);
1337 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001338}
1339
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001340static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001341cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001342 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001344 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001345 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001346 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001347 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001348 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001349 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001350 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001352 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001353 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001354 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001355 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001356 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001357 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1358 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001359 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001361 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001362 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001363 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001364 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001365 *lock = 1;
1366 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001367 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001368 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001369 *unlock = 1;
1370 /* Check if unlock includes more than one lock range */
1371 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001372 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001373 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001374 *lock = 1;
1375 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001376 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001377 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001378 *lock = 1;
1379 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001380 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001381 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001382 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001384 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001385}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001387static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001388cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001389 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001390{
1391 int rc = 0;
1392 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001393 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1394 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001395 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001396 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001398 if (posix_lck) {
1399 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001400
1401 rc = cifs_posix_lock_test(file, flock);
1402 if (!rc)
1403 return rc;
1404
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001405 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001406 posix_lock_type = CIFS_RDLCK;
1407 else
1408 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001409 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1410 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001411 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001412 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 return rc;
1414 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001415
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001416 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001417 if (!rc)
1418 return rc;
1419
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001420 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001421 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1422 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001423 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001424 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1425 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001426 flock->fl_type = F_UNLCK;
1427 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001428 cifs_dbg(VFS, "Error unlocking previously locked range during test of lock, rc = %d\n",
 1429 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001430 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001431 }
1432
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001433 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001434 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001435 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001436 }
1437
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001438 type &= ~server->vals->exclusive_lock_type;
1439
1440 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1441 type | server->vals->shared_lock_type,
1442 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001443 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001444 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1445 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001446 flock->fl_type = F_RDLCK;
1447 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001448 cifs_dbg(VFS, "Error unlocking previously locked range during test of lock, rc = %d\n",
 1449 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001450 } else
1451 flock->fl_type = F_WRLCK;
1452
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001453 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001454}
1455
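/*
 * Note on the mandatory path in cifs_getlk() above: there is no
 * straightforward "test lock" operation for mandatory byte-range locks,
 * so the code probes by actually requesting the lock and, on success,
 * unlocking again and reporting F_UNLCK. If an exclusive probe fails, a
 * second shared-mode probe distinguishes a read-held range (report
 * F_RDLCK) from a write-held one (report F_WRLCK).
 */
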
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001456void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001457cifs_move_llist(struct list_head *source, struct list_head *dest)
1458{
1459 struct list_head *li, *tmp;
1460 list_for_each_safe(li, tmp, source)
1461 list_move(li, dest);
1462}
1463
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001464void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001465cifs_free_llist(struct list_head *llist)
1466{
1467 struct cifsLockInfo *li, *tmp;
1468 list_for_each_entry_safe(li, tmp, llist, llist) {
1469 cifs_del_lock_waiters(li);
1470 list_del(&li->llist);
1471 kfree(li);
1472 }
1473}
1474
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001475int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001476cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1477 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001478{
1479 int rc = 0, stored_rc;
1480 int types[] = {LOCKING_ANDX_LARGE_FILES,
1481 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1482 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001483 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001484 LOCKING_ANDX_RANGE *buf, *cur;
1485 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001486 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001487 struct cifsLockInfo *li, *tmp;
1488 __u64 length = 1 + flock->fl_end - flock->fl_start;
1489 struct list_head tmp_llist;
1490
1491 INIT_LIST_HEAD(&tmp_llist);
1492
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001493 /*
 1494 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001495 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001496 */
1497 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001498 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001499 return -EINVAL;
1500
Ross Lagerwall04d76802019-01-08 18:30:56 +00001501 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1502 PAGE_SIZE);
1503 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1504 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001505 max_num = (max_buf - sizeof(struct smb_hdr)) /
1506 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001507 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001508 if (!buf)
1509 return -ENOMEM;
1510
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001511 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001512 for (i = 0; i < 2; i++) {
1513 cur = buf;
1514 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001515 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
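			/*
			 * Unlock only locks that are wholly contained in the
			 * requested range, belong to this process and match
			 * the lock type of the current pass.
			 */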
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001516 if (flock->fl_start > li->offset ||
1517 (flock->fl_start + length) <
1518 (li->offset + li->length))
1519 continue;
1520 if (current->tgid != li->pid)
1521 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001522 if (types[i] != li->type)
1523 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001524 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001525 /*
1526 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001527 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001528 */
1529 list_del(&li->llist);
1530 cifs_del_lock_waiters(li);
1531 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001532 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001533 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001534 cur->Pid = cpu_to_le16(li->pid);
1535 cur->LengthLow = cpu_to_le32((u32)li->length);
1536 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1537 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1538 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1539 /*
1540 * We need to save a lock here to let us add it again to
1541 * the file's list if the unlock range request fails on
1542 * the server.
1543 */
1544 list_move(&li->llist, &tmp_llist);
1545 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001546 stored_rc = cifs_lockv(xid, tcon,
1547 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001548 li->type, num, 0, buf);
1549 if (stored_rc) {
1550 /*
1551 * We failed on the unlock range
1552 * request - add all locks from the tmp
1553 * list to the head of the file's list.
1554 */
1555 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001556 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001557 rc = stored_rc;
1558 } else
1559 /*
 1560 * The unlock range request succeeded -
1561 * free the tmp list.
1562 */
1563 cifs_free_llist(&tmp_llist);
1564 cur = buf;
1565 num = 0;
1566 } else
1567 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001568 }
1569 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001570 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001571 types[i], num, 0, buf);
1572 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001573 cifs_move_llist(&tmp_llist,
1574 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001575 rc = stored_rc;
1576 } else
1577 cifs_free_llist(&tmp_llist);
1578 }
1579 }
1580
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001581 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001582 kfree(buf);
1583 return rc;
1584}
1585
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001586static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001587cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001588 bool wait_flag, bool posix_lck, int lock, int unlock,
1589 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001590{
1591 int rc = 0;
1592 __u64 length = 1 + flock->fl_end - flock->fl_start;
1593 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1594 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001595 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001596 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001597
1598 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001599 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001600
1601 rc = cifs_posix_lock_set(file, flock);
1602 if (!rc || rc < 0)
1603 return rc;
1604
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001605 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001606 posix_lock_type = CIFS_RDLCK;
1607 else
1608 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001609
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001610 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001611 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001612
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001613 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001614 hash_lockowner(flock->fl_owner),
1615 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001616 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001617 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001618 }
1619
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001620 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001621 struct cifsLockInfo *lock;
1622
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001623 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001624 if (!lock)
1625 return -ENOMEM;
1626
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001627 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001628 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001629 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001630 return rc;
1631 }
1632 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001633 goto out;
1634
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001635 /*
 1636 * A Windows 7 server can delay breaking a lease from read to None
 1637 * if we set a byte-range lock on a file - break it explicitly
 1638 * before sending the lock to the server to be sure the next
 1639 * read won't conflict with non-overlapping locks due to
 1640 * page reading.
1641 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001642 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1643 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001644 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001645 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1646 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001647 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001648 }
1649
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001650 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1651 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001652 if (rc) {
1653 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001654 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001655 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001656
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001657 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001658 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001659 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001660
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001661out:
Aurelien Aptel56300d32019-03-14 18:44:16 +01001662 if (flock->fl_flags & FL_POSIX) {
1663 /*
1664 * If this is a request to remove all locks because we
1665 * are closing the file, it doesn't matter if the
1666 * unlocking failed as both cifs.ko and the SMB server
1667 * remove the lock on file close
1668 */
1669 if (rc) {
1670 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1671 if (!(flock->fl_flags & FL_CLOSE))
1672 return rc;
1673 }
Benjamin Coddington4f656362015-10-22 13:38:14 -04001674 rc = locks_lock_file_wait(file, flock);
Aurelien Aptel56300d32019-03-14 18:44:16 +01001675 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001676 return rc;
1677}
1678
1679int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1680{
1681 int rc, xid;
1682 int lock = 0, unlock = 0;
1683 bool wait_flag = false;
1684 bool posix_lck = false;
1685 struct cifs_sb_info *cifs_sb;
1686 struct cifs_tcon *tcon;
1687 struct cifsInodeInfo *cinode;
1688 struct cifsFileInfo *cfile;
1689 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001690 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001691
1692 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001693 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001694
Joe Perchesf96637b2013-05-04 22:12:25 -05001695 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1696 cmd, flock->fl_flags, flock->fl_type,
1697 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001698
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001699 cfile = (struct cifsFileInfo *)file->private_data;
1700 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001701
1702 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1703 tcon->ses->server);
1704
Al Viro7119e222014-10-22 00:25:12 -04001705 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001706 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001707 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001708
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001709 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001710 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1711 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1712 posix_lck = true;
1713 /*
1714 * BB add code here to normalize offset and length to account for
 1715 * negative length, which we cannot accept over the wire.
1716 */
1717 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001718 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001719 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001720 return rc;
1721 }
1722
1723 if (!lock && !unlock) {
1724 /*
 1725 * if this is neither a lock nor an unlock request, there is
 1726 * nothing to do since we do not know what it is
1727 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001728 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001729 return -EOPNOTSUPP;
1730 }
1731
1732 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1733 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001734 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001735 return rc;
1736}
1737
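/*
 * User-space view of cifs_lock(): a blocking request (F_SETLKW) arrives
 * with FL_SLEEP set, so wait_flag is true above and the call sleeps until
 * the range is free. A minimal sketch - the helper name and path are
 * hypothetical - kept under #if 0 since it is user-space code.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int lock_first_100_bytes(const char *path)
{
	struct flock fl = {
		.l_type = F_WRLCK, .l_whence = SEEK_SET,
		.l_start = 0, .l_len = 100,
	};
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	/* blocks until no conflicting lock remains on bytes 0-99 */
	if (fcntl(fd, F_SETLKW, &fl) == -1) {
		close(fd);
		return -1;
	}
	return fd;	/* closing fd later releases the lock */
}
#endif
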
Jeff Layton597b0272012-03-23 14:40:56 -04001738/*
1739 * update the file size (if needed) after a write. Should be called with
1740 * the inode->i_lock held
1741 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001742void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001743cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1744 unsigned int bytes_written)
1745{
1746 loff_t end_of_write = offset + bytes_written;
1747
1748 if (end_of_write > cifsi->server_eof)
1749 cifsi->server_eof = end_of_write;
1750}
1751
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001752static ssize_t
1753cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1754 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755{
1756 int rc = 0;
1757 unsigned int bytes_written = 0;
1758 unsigned int total_written;
1759 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001760 struct cifs_tcon *tcon;
1761 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001762 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001763 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001764 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001765 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
Jeff Layton7da4b492010-10-15 15:34:00 -04001767 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
Al Viro35c265e2014-08-19 20:25:34 -04001769 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1770 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001772 tcon = tlink_tcon(open_file->tlink);
1773 server = tcon->ses->server;
1774
1775 if (!server->ops->sync_write)
1776 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001777
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001778 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 for (total_written = 0; write_size > total_written;
1781 total_written += bytes_written) {
1782 rc = -EAGAIN;
1783 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001784 struct kvec iov[2];
1785 unsigned int len;
1786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 /* we could deadlock if we called
 1789 filemap_fdatawait from here, so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001790 reopen_file not to flush data to the
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001792 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793 if (rc != 0)
1794 break;
1795 }
Steve French3e844692005-10-03 13:37:24 -07001796
David Howells2b0143b2015-03-17 22:25:59 +00001797 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001798 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001799 /* iov[0] is reserved for smb header */
1800 iov[1].iov_base = (char *)write_data + total_written;
1801 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001802 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001803 io_parms.tcon = tcon;
1804 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001805 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001806 rc = server->ops->sync_write(xid, &open_file->fid,
1807 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 }
1809 if (rc || (bytes_written == 0)) {
1810 if (total_written)
1811 break;
1812 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001813 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 return rc;
1815 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001816 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001817 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001818 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001819 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001820 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001821 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 }
1823
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001824 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825
Jeff Layton7da4b492010-10-15 15:34:00 -04001826 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001827 spin_lock(&d_inode(dentry)->i_lock);
1828 if (*offset > d_inode(dentry)->i_size)
1829 i_size_write(d_inode(dentry), *offset);
1830 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 }
David Howells2b0143b2015-03-17 22:25:59 +00001832 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001833 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 return total_written;
1835}
1836
Jeff Layton6508d902010-09-29 19:51:11 -04001837struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1838 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001839{
1840 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001841 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001842 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Layton6508d902010-09-29 19:51:11 -04001843
1844 /* only filter by fsuid on multiuser mounts */
1845 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1846 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001847
Steve French3afca262016-09-22 18:58:16 -05001848 spin_lock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001849 /* we could simply get the first_list_entry since write-only entries
 1850 are always at the end of the list, but since the first entry might
1851 have a close pending, we go through the whole list */
1852 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001853 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001854 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001855 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001856 if (!open_file->invalidHandle) {
1857 /* found a good file */
1858 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001859 cifsFileInfo_get(open_file);
1860 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001861 return open_file;
1862 } /* else might as well continue, and look for
1863 another, or simply have the caller reopen it
1864 again rather than trying to fix this handle */
1865 } else /* write only file */
1866 break; /* write only files are last so must be done */
1867 }
Steve French3afca262016-09-22 18:58:16 -05001868 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001869 return NULL;
1870}
Steve French630f3f0c2007-10-25 21:17:17 +00001871
Jeff Layton6508d902010-09-29 19:51:11 -04001872struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1873 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001874{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001875 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001876 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001877 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001878 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001879 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001880 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001881
Steve French60808232006-04-22 15:53:05 +00001882 /* Having a null inode here (because mapping->host was set to zero by
 1883 the VFS or MM) should not happen, but we had reports of an oops (due to
 1884 it being zero) during stress test cases, so we need to check for it */
1885
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001886 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001887 cifs_dbg(VFS, "Null inode passed to find_writable_file\n");
Steve French60808232006-04-22 15:53:05 +00001888 dump_stack();
1889 return NULL;
1890 }
1891
Jeff Laytond3892292010-11-02 16:22:50 -04001892 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001893 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001894
Jeff Layton6508d902010-09-29 19:51:11 -04001895 /* only filter by fsuid on multiuser mounts */
1896 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1897 fsuid_only = false;
1898
Steve French3afca262016-09-22 18:58:16 -05001899 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001900refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001901 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001902 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001903 return NULL;
1904 }
Steve French6148a742005-10-05 12:23:19 -07001905 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001906 if (!any_available && open_file->pid != current->tgid)
1907 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001908 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001909 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001910 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001911 if (!open_file->invalidHandle) {
1912 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001913 cifsFileInfo_get(open_file);
1914 spin_unlock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001915 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001916 } else {
1917 if (!inv_file)
1918 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001919 }
Steve French6148a742005-10-05 12:23:19 -07001920 }
1921 }
Jeff Layton2846d382008-09-22 21:33:33 -04001922 /* couldn't find a usable FH with the same pid, try any available */
1923 if (!any_available) {
1924 any_available = true;
1925 goto refind_writable;
1926 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001927
1928 if (inv_file) {
1929 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001930 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001931 }
1932
Steve French3afca262016-09-22 18:58:16 -05001933 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001934
1935 if (inv_file) {
1936 rc = cifs_reopen_file(inv_file, false);
1937 if (!rc)
1938 return inv_file;
1939 else {
Steve French3afca262016-09-22 18:58:16 -05001940 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001941 list_move_tail(&inv_file->flist,
1942 &cifs_inode->openFileList);
Steve French3afca262016-09-22 18:58:16 -05001943 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001944 cifsFileInfo_put(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001945 ++refind;
David Disseldorpe1e9bda2015-03-13 14:20:29 +01001946 inv_file = NULL;
Steve French3afca262016-09-22 18:58:16 -05001947 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001948 goto refind_writable;
1949 }
1950 }
1951
Steve French6148a742005-10-05 12:23:19 -07001952 return NULL;
1953}
1954
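/*
 * Note on the reopen dance above: a handle found with invalidHandle set
 * (e.g. after a reconnect) is remembered in inv_file and reopened outside
 * open_file_lock; if the reopen fails, the handle is moved to the list
 * tail and another refind pass runs, bounded by MAX_REOPEN_ATT.
 */
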
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1956{
1957 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001958 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 char *write_data;
1960 int rc = -EFAULT;
1961 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001963 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
1965 if (!mapping || !mapping->host)
1966 return -EFAULT;
1967
1968 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
1970 offset += (loff_t)from;
1971 write_data = kmap(page);
1972 write_data += from;
1973
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001974 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 kunmap(page);
1976 return -EIO;
1977 }
1978
1979 /* racing with truncate? */
1980 if (offset > mapping->host->i_size) {
1981 kunmap(page);
1982 return 0; /* don't care */
1983 }
1984
1985 /* check to make sure that we are not extending the file */
1986 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001987 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
Jeff Layton6508d902010-09-29 19:51:11 -04001989 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001990 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001991 bytes_written = cifs_write(open_file, open_file->pid,
1992 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001993 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07001995 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001996 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001997 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001998 else if (bytes_written < 0)
1999 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07002000 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05002001 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 rc = -EIO;
2003 }
2004
2005 kunmap(page);
2006 return rc;
2007}
2008
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002009static struct cifs_writedata *
2010wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2011 pgoff_t end, pgoff_t *index,
2012 unsigned int *found_pages)
2013{
2014 unsigned int nr_pages;
2015 struct page **pages;
2016 struct cifs_writedata *wdata;
2017
2018 wdata = cifs_writedata_alloc((unsigned int)tofind,
2019 cifs_writev_complete);
2020 if (!wdata)
2021 return NULL;
2022
2023 /*
2024 * find_get_pages_tag seems to return a max of 256 on each
2025 * iteration, so we must call it several times in order to
 2026 * fill the array, or else the wsize is effectively limited to
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03002027 * 256 * PAGE_SIZE.
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002028 */
2029 *found_pages = 0;
2030 pages = wdata->pages;
2031 do {
2032 nr_pages = find_get_pages_tag(mapping, index,
2033 PAGECACHE_TAG_DIRTY, tofind,
2034 pages);
2035 *found_pages += nr_pages;
2036 tofind -= nr_pages;
2037 pages += nr_pages;
2038 } while (nr_pages && tofind && *index <= end);
2039
2040 return wdata;
2041}
2042
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002043static unsigned int
2044wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2045 struct address_space *mapping,
2046 struct writeback_control *wbc,
2047 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2048{
2049 unsigned int nr_pages = 0, i;
2050 struct page *page;
2051
2052 for (i = 0; i < found_pages; i++) {
2053 page = wdata->pages[i];
2054 /*
2055 * At this point we hold neither mapping->tree_lock nor
2056 * lock on the page itself: the page may be truncated or
2057 * invalidated (changing page->mapping to NULL), or even
2058 * swizzled back from swapper_space to tmpfs file
2059 * mapping
2060 */
2061
2062 if (nr_pages == 0)
2063 lock_page(page);
2064 else if (!trylock_page(page))
2065 break;
2066
2067 if (unlikely(page->mapping != mapping)) {
2068 unlock_page(page);
2069 break;
2070 }
2071
2072 if (!wbc->range_cyclic && page->index > end) {
2073 *done = true;
2074 unlock_page(page);
2075 break;
2076 }
2077
2078 if (*next && (page->index != *next)) {
2079 /* Not next consecutive page */
2080 unlock_page(page);
2081 break;
2082 }
2083
2084 if (wbc->sync_mode != WB_SYNC_NONE)
2085 wait_on_page_writeback(page);
2086
2087 if (PageWriteback(page) ||
2088 !clear_page_dirty_for_io(page)) {
2089 unlock_page(page);
2090 break;
2091 }
2092
2093 /*
2094 * This actually clears the dirty bit in the radix tree.
2095 * See cifs_writepage() for more commentary.
2096 */
2097 set_page_writeback(page);
2098 if (page_offset(page) >= i_size_read(mapping->host)) {
2099 *done = true;
2100 unlock_page(page);
2101 end_page_writeback(page);
2102 break;
2103 }
2104
2105 wdata->pages[i] = page;
2106 *next = page->index + 1;
2107 ++nr_pages;
2108 }
2109
2110 /* reset index to refind any pages skipped */
2111 if (nr_pages == 0)
2112 *index = wdata->pages[0]->index + 1;
2113
2114 /* put any pages we aren't going to use */
2115 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002116 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002117 wdata->pages[i] = NULL;
2118 }
2119
2120 return nr_pages;
2121}
2122
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002123static int
2124wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2125 struct address_space *mapping, struct writeback_control *wbc)
2126{
2127 int rc = 0;
2128 struct TCP_Server_Info *server;
2129 unsigned int i;
2130
2131 wdata->sync_mode = wbc->sync_mode;
2132 wdata->nr_pages = nr_pages;
2133 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002134 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002135 wdata->tailsz = min(i_size_read(mapping->host) -
2136 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002137 (loff_t)PAGE_SIZE);
2138 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002139
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002140 if (wdata->cfile != NULL)
2141 cifsFileInfo_put(wdata->cfile);
2142 wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
2143 if (!wdata->cfile) {
2144 cifs_dbg(VFS, "No writable handles for inode\n");
2145 rc = -EBADF;
2146 } else {
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002147 wdata->pid = wdata->cfile->pid;
2148 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2149 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002150 }
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002151
2152 for (i = 0; i < nr_pages; ++i)
2153 unlock_page(wdata->pages[i]);
2154
2155 return rc;
2156}
2157
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002159 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002161 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002162 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002163 bool done = false, scanned = false, range_whole = false;
2164 pgoff_t end, index;
2165 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07002166 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00002167
Steve French37c0eb42005-10-05 14:50:29 -07002168 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002169 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002170 * one page at a time via cifs_writepage
2171 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002172 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002173 return generic_writepages(mapping, wbc);
2174
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002175 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002176 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002177 end = -1;
2178 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002179 index = wbc->range_start >> PAGE_SHIFT;
2180 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002181 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002182 range_whole = true;
2183 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002184 }
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002185 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
Steve French37c0eb42005-10-05 14:50:29 -07002186retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002187 while (!done && index <= end) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002188 unsigned int i, nr_pages, found_pages, wsize, credits;
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002189 pgoff_t next = 0, tofind, saved_index = index;
Steve French37c0eb42005-10-05 14:50:29 -07002190
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002191 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2192 &wsize, &credits);
2193 if (rc)
2194 break;
Steve French37c0eb42005-10-05 14:50:29 -07002195
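		/*
		 * Written as min(a - 1, b) + 1 rather than min(a, b + 1):
		 * with range_cyclic writeback end is (pgoff_t)-1, so
		 * end - index + 1 could wrap to 0.
		 */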
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002196 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07002197
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002198 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2199 &found_pages);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002200 if (!wdata) {
2201 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002202 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002203 break;
2204 }
2205
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002206 if (found_pages == 0) {
2207 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002208 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002209 break;
2210 }
2211
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002212 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2213 end, &index, &next, &done);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002214
2215 /* nothing to write? */
2216 if (nr_pages == 0) {
2217 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002218 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002219 continue;
2220 }
2221
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002222 wdata->credits = credits;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002223
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002224 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
Jeff Layton941b8532011-01-11 07:24:01 -05002225
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002226 /* send failure -- clean up the mess */
2227 if (rc != 0) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002228 add_credits_and_wake_if(server, wdata->credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002229 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002230 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002231 redirty_page_for_writepage(wbc,
2232 wdata->pages[i]);
2233 else
2234 SetPageError(wdata->pages[i]);
2235 end_page_writeback(wdata->pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002236 put_page(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002237 }
Jeff Layton941b8532011-01-11 07:24:01 -05002238 if (rc != -EAGAIN)
2239 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002240 }
2241 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002242
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002243 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2244 index = saved_index;
2245 continue;
2246 }
2247
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002248 wbc->nr_to_write -= nr_pages;
2249 if (wbc->nr_to_write <= 0)
2250 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002251
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002252 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002253 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002254
Steve French37c0eb42005-10-05 14:50:29 -07002255 if (!scanned && !done) {
2256 /*
2257 * We hit the last page and there is more work to be done: wrap
2258 * back to the start of the file
2259 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002260 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002261 index = 0;
2262 goto retry;
2263 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002264
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002265 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002266 mapping->writeback_index = index;
2267
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 return rc;
2269}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270
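/*
 * Write a single locked page synchronously. For data-integrity
 * (WB_SYNC_ALL) writeback the write is retried for as long as it fails
 * with -EAGAIN; otherwise the page is redirtied and left for a later pass.
 */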
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

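/*
 * ->write_end address_space operation: commit the bytes copied into the
 * page by the preceding write_begin. If the page never became uptodate
 * (a short copy into a partially cached page), the data is written to the
 * server synchronously via cifs_write(); otherwise the page is simply
 * marked dirty and written back later.
 */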
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/*
		 * This is better than calling the partial-page write path
		 * directly, since here the file handle is already known and
		 * we might as well leverage it.
		 */
		/* BB check if anything else is missing out of ppw,
		   such as updating the last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}

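/*
 * Strict cache mode fsync: write back and wait on dirty pages in the
 * given range, invalidate the mapping when we no longer hold a read
 * oplock (the cached pages may be stale), then ask the server to flush
 * the open handle unless CIFS_MOUNT_NOSSYNC is set.
 */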
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}

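/*
 * Non-strict fsync: the same as cifs_strict_fsync() except the page cache
 * is trusted, so the mapping is never invalidated before the server flush.
 */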
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);

	return rc;
}

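/* Allocate num_pages pages for an uncached write; all-or-nothing. */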
static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

static void
cifs_uncached_writedata_release(struct kref *refcount)
{
	int i;
	struct cifs_writedata *wdata = container_of(refcount,
					struct cifs_writedata, refcount);

	for (i = 0; i < wdata->nr_pages; i++)
		put_page(wdata->pages[i]);
	cifs_writedata_release(refcount);
}

static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}

static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}

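/*
 * Slice an uncached write into wsize-sized pieces. Each piece waits for
 * send credits, copies the user data into freshly allocated pages and is
 * issued as an asynchronous write; the resulting wdata structures are
 * collected on wdata_list for the caller to wait on. On -EAGAIN (e.g. a
 * reconnect) the iterator is rewound and the same piece is resent.
 */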
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		if (!wdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}

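/*
 * Uncached write entry point: issue the async writes, then reap their
 * completions in offset order, resending any piece that failed with
 * -EAGAIN. Finally mark the inode's cached pages invalid so that later
 * cached reads refetch the data from the server.
 */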
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	struct iov_iter saved_from = *from;
	int rc;

	/*
	 * BB - optimize for the case when signing is disabled: we could drop
	 * this extra memory-to-memory copy and use iovec buffers to construct
	 * the write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
				  open_file, cifs_sb, &wdata_list);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = saved_from;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				iov_iter_advance(&tmp_from,
						 wdata->offset - iocb->ki_pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						open_file, cifs_sb, &tmp_list);

				list_splice(&tmp_list, &wdata_list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
	cifs_stats_bytes_written(tcon, total_written);
	return total_written;
}

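/*
 * Cached write path used when we hold a write oplock but mandatory
 * byte-range locks may exist: lock_sem is held across the write so no
 * brlock can be added underneath us, and the write is refused with
 * -EACCES if it conflicts with an existing lock.
 */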
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	inode_lock(inode);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	inode_unlock(inode);

	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	up_read(&cinode->lock_sem);
	return rc;
}

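/*
 * Dispatcher for strict cache mode writes: write through the page cache
 * when we hold a write oplock (via the generic path when the server
 * supports POSIX byte-range locks), otherwise fall back to an uncached
 * write and drop any read oplock, since our cached copy is now stale.
 */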
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because doing so may cause an error with
	 * mandatory locks on those pages but not on the region from pos to
	 * pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (CIFS_CACHE_READ(cinode)) {
		/*
		 * We have read level caching and we have just sent a write
		 * request to the server thus making data in the cache stale.
		 * Zap the cache and set oplock/lease level to NONE to avoid
		 * reading stale data from the cache. All subsequent read
		 * operations will read new data from the server.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}

static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		unsigned int nr_page_failed = i;

		for (i = 0; i < nr_page_failed; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written;

		if (unlikely(iter->type & ITER_PIPE)) {
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

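/*
 * Receive "len" bytes from the transport socket into rdata's pages,
 * zero-filling the tail of the last partial page and releasing any pages
 * beyond the data the server actually sent.
 */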
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;

		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}
		n = len;
		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			n = PAGE_SIZE;
			len -= n;
		} else {
			zero_user(page, len, PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		}
		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}

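/*
 * Build and send async read requests covering [offset, offset + len),
 * each capped at rsize and backed by its own page array and credits; the
 * rdata structures are queued on rdata_list for the caller to wait on.
 */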
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->credits = credits;

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}

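/*
 * Uncached read entry point: fire off the async reads, then wait for each
 * reply in offset order, copying the returned pages into the user
 * iterator. A request that failed with -EAGAIN after a reconnect is
 * reissued for whatever range is still missing.
 */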
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	len = iov_iter_count(to);
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list);

				list_splice(&tmp_list, &rdata_list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	total_read = len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, total_read);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}

ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock, because the server can delay
	 * the mtime change and so we cannot safely decide whether to
	 * invalidate the inode. Reading cached pages can also fail if there
	 * are mandatory locks on pages affected by this read but not on the
	 * region from pos to pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}

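/*
 * Synchronous read helper: issue blocking SMB reads of at most rsize
 * bytes, reopening the handle and retrying on -EAGAIN, until the request
 * is satisfied or the server returns an error or end of file.
 */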
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For Windows ME and 9x we do not want to request
			 * more than it negotiated, since it will then refuse
			 * the read.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};

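/*
 * mmap in strict cache mode: without a read oplock the cached pages
 * cannot be trusted, so zap the mapping before handing off to
 * generic_file_mmap().
 */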
int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int xid, rc = 0;
	struct inode *inode = file_inode(file);

	xid = get_xid();

	if (!CIFS_CACHE_READ(CIFS_I(inode)))
		rc = cifs_zap_mapping(inode);
	if (!rc)
		rc = generic_file_mmap(file, vma);
	if (!rc)
		vma->vm_ops = &cifs_file_vm_ops;

	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();

	rc = cifs_revalidate_file(file);
	if (rc)
		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
			 rc);
	if (!rc)
		rc = generic_file_mmap(file, vma);
	if (!rc)
		vma->vm_ops = &cifs_file_vm_ops;

	free_xid(xid);
	return rc;
}

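/*
 * Completion work for readpages: mark pages uptodate when the read
 * succeeded (or partially succeeded before a reconnect), add them to the
 * LRU, mirror them into fscache, then drop the page and rdata references.
 */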
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i, got_bytes;
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	got_bytes = rdata->got_bytes;
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes)) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes))
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);

		put_page(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

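/*
 * Fill page-cache pages from the transport socket for a readpages
 * request: zero the tail of a partial page, synthesize zeroed uptodate
 * pages beyond the server's EOF, and release pages the reply did not
 * cover.
 */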
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n = PAGE_SIZE;

		if (len >= PAGE_SIZE) {
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len, PAGE_SIZE - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3476 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003477}
3478
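/*
 * Peel a run of index-contiguous pages off the tail of @page_list into
 * @tmplist, adding each one to the page cache already locked, and stop
 * at the first discontinuity or once another page would push the read
 * past @rsize bytes. On success, *offset, *bytes and *nr_pages describe
 * the contiguous byte range the caller should read.
 */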
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003479static int
3480readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3481 unsigned int rsize, struct list_head *tmplist,
3482 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3483{
3484 struct page *page, *tpage;
3485 unsigned int expected_index;
3486 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07003487 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003488
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003489 INIT_LIST_HEAD(tmplist);
3490
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003491 page = list_entry(page_list->prev, struct page, lru);
3492
3493 /*
3494 * Lock the page and put it in the cache. Since no one else
3495 * should have access to this page, we're safe to simply set
3496 * PG_locked without checking it first.
3497 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003498 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003499 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07003500 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003501
3502 /* give up if we can't stick it in the cache */
3503 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003504 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003505 return rc;
3506 }
3507
3508 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003509 *offset = (loff_t)page->index << PAGE_SHIFT;
3510 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003511 *nr_pages = 1;
3512 list_move_tail(&page->lru, tmplist);
3513
3514 /* now try and add more pages onto the request */
3515 expected_index = page->index + 1;
3516 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3517		/* discontinuity? */
3518 if (page->index != expected_index)
3519 break;
3520
3521 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003522 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003523 break;
3524
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003525 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07003526 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003527 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003528 break;
3529 }
3530 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003531 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003532 expected_index++;
3533 (*nr_pages)++;
3534 }
3535 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536}
3537
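/*
 * ->readpages() for cifs. Each iteration of the loop below turns a run of
 * contiguous pages into one async read of at most rsize bytes, so e.g.
 * with 4KiB pages and a negotiated rsize of 1MiB a single rdata can cover
 * up to 256 pages (the exact rsize is negotiated per server and mount).
 */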
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538static int cifs_readpages(struct file *file, struct address_space *mapping,
3539 struct list_head *page_list, unsigned num_pages)
3540{
Jeff Layton690c5e32011-10-19 15:30:16 -04003541 int rc;
3542 struct list_head tmplist;
3543 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04003544 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003545 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04003546 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547
Jeff Layton690c5e32011-10-19 15:30:16 -04003548 /*
Suresh Jayaraman56698232010-07-05 18:13:25 +05303549	 * Read as many pages as possible from fscache; this returns -ENOBUFS
3550	 * immediately if the cookie is negative.
David Howells54afa992013-09-04 17:10:39 +00003551 *
3552 * After this point, every page in the list might have PG_fscache set,
3553	 * so we will need to clear that from every page we don't use.
Suresh Jayaraman56698232010-07-05 18:13:25 +05303554 */
3555 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3556 &num_pages);
3557 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003558 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303559
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003560 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3561 pid = open_file->pid;
3562 else
3563 pid = current->tgid;
3564
Jeff Layton690c5e32011-10-19 15:30:16 -04003565 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003566 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567
Joe Perchesf96637b2013-05-04 22:12:25 -05003568 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3569 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04003570
3571 /*
3572	 * Start with the page at the end of the list and move it to the
3573	 * private list. Do the same with any following pages until we hit
3574	 * the rsize limit, hit an index discontinuity, or run out of
3575	 * pages. Issue the async read and then start the loop again
3576	 * until the list is empty.
3577	 *
3578	 * Note that list order is important. The page_list is in
3579	 * the order of declining indexes, but we want the pages in
3580	 * rdata->pages in increasing order.
3581 */
3582 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003583 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04003584 loff_t offset;
3585 struct page *page, *tpage;
3586 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003587 unsigned credits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003588
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003589 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3590 &rsize, &credits);
3591 if (rc)
3592 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003593
Jeff Layton690c5e32011-10-19 15:30:16 -04003594 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003595		 * Give up immediately if rsize is too small to read an entire
3596		 * page. The VFS will fall back to readpage. We should never
3597		 * reach this point, however, since we set ra_pages to 0 when the
3598		 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04003599 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003600 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003601 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003602 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003605 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3606 &nr_pages, &offset, &bytes);
3607 if (rc) {
3608 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04003610 }
3611
Jeff Layton0471ca32012-05-16 07:13:16 -04003612 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003613 if (!rdata) {
3614 /* best to give up if we're out of mem */
3615 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3616 list_del(&page->lru);
3617 lru_cache_add_file(page);
3618 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003619 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04003620 }
3621 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003622 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04003623 break;
3624 }
3625
Jeff Layton6993f742012-05-16 07:13:17 -04003626 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003627 rdata->mapping = mapping;
3628 rdata->offset = offset;
3629 rdata->bytes = bytes;
3630 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003631 rdata->pagesz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07003632 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003633 rdata->credits = credits;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003634
3635 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3636 list_del(&page->lru);
3637 rdata->pages[rdata->nr_pages++] = page;
3638 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003639
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003640 if (!rdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01003641 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003642 rc = server->ops->async_readv(rdata);
3643 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003644 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003645 for (i = 0; i < rdata->nr_pages; i++) {
3646 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003647 lru_cache_add_file(page);
3648 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003649 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04003651			/* Fall back to readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04003652 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653 break;
3654 }
Jeff Layton6993f742012-05-16 07:13:17 -04003655
3656 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657 }
3658
David Howells54afa992013-09-04 17:10:39 +00003659 /* Any pages that have been shown to fscache but didn't get added to
3660 * the pagecache must be uncached before they get returned to the
3661 * allocator.
3662 */
3663 cifs_fscache_readpages_cancel(mapping->host, page_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 return rc;
3665}
3666
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01003667/*
3668 * cifs_readpage_worker must be called with the page pinned
3669 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670static int cifs_readpage_worker(struct file *file, struct page *page,
3671 loff_t *poffset)
3672{
3673 char *read_data;
3674 int rc;
3675
Suresh Jayaraman56698232010-07-05 18:13:25 +05303676 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05003677 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303678 if (rc == 0)
3679 goto read_complete;
3680
Linus Torvalds1da177e2005-04-16 15:20:36 -07003681 read_data = kmap(page);
3682	/* for reads over a certain size we could initiate async read-ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003683
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003684 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003685
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686 if (rc < 0)
3687 goto io_error;
3688 else
Joe Perchesf96637b2013-05-04 22:12:25 -05003689 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003690
Al Viro496ad9a2013-01-23 17:07:38 -05003691 file_inode(file)->i_atime =
Deepa Dinamanic2050a42016-09-14 07:48:06 -07003692 current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003693
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003694 if (PAGE_SIZE > rc)
3695 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696
3697 flush_dcache_page(page);
3698 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303699
3700 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05003701 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303702
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003704
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003706 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01003707 unlock_page(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303708
3709read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 return rc;
3711}
3712
3713static int cifs_readpage(struct file *file, struct page *page)
3714{
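	/*
	 * Cast before shifting: page->index is an unsigned long, so on
	 * 32-bit kernels index << PAGE_SHIFT would overflow for offsets
	 * at or beyond 4GiB; widening to loff_t first keeps the math 64-bit.
	 */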
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003715 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003717 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003718
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003719 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003720
3721 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303722 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003723 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303724 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725 }
3726
Joe Perchesf96637b2013-05-04 22:12:25 -05003727 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003728 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729
3730 rc = cifs_readpage_worker(file, page, &offset);
3731
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003732 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 return rc;
3734}
3735
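/* Return 1 if any open handle on the inode was opened with write access. */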
Steve Frencha403a0a2007-07-26 15:54:16 +00003736static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3737{
3738 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05003739 struct cifs_tcon *tcon =
3740 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00003741
Steve French3afca262016-09-22 18:58:16 -05003742 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003743 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003744 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05003745 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003746 return 1;
3747 }
3748 }
Steve French3afca262016-09-22 18:58:16 -05003749 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003750 return 0;
3751}
3752
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753/* We do not want to update the file size from the server for inodes
3754   open for write, to avoid races with writepage extending the
3755   file. In the future we could consider allowing a refresh of
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003756   the inode only on increases in the file size, but this is
3757   tricky to do without racing with writebehind page caching in
3758   the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003759bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003760{
Steve Frencha403a0a2007-07-26 15:54:16 +00003761 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003762 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003763
Steve Frencha403a0a2007-07-26 15:54:16 +00003764 if (is_inode_writable(cifsInode)) {
3765 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003766 struct cifs_sb_info *cifs_sb;
3767
Steve Frenchc32a0b62006-01-12 14:41:28 -08003768 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003769 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003770			/* with directio there is no page cache to corrupt,
Steve Frenchc32a0b62006-01-12 14:41:28 -08003771			   so we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003772 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003773 }
3774
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003775 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003776 return true;
Steve French7ba52632007-02-08 18:14:13 +00003777
Steve French4b18f2a2008-04-29 00:06:05 +00003778 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003779 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003780 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781}
3782
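/*
 * A rough sketch of the caller's side of the ->write_begin/->write_end
 * contract (the VFS generic write path, not code from this file; the
 * copy step is abbreviated as pseudocode):
 *
 *	status = a_ops->write_begin(file, mapping, pos, len, flags,
 *				    &page, &fsdata);
 *	if (!status) {
 *		copied = <copy user data into @page at @pos>;
 *		status = a_ops->write_end(file, mapping, pos, len,
 *					  copied, page, fsdata);
 *	}
 *
 * This is why a short or unprimed copy can be fixed up later: if the page
 * was not read up to date here, cifs_write_end() does a sync write instead.
 */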
Nick Piggind9414772008-09-24 11:32:59 -04003783static int cifs_write_begin(struct file *file, struct address_space *mapping,
3784 loff_t pos, unsigned len, unsigned flags,
3785 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786{
Sachin Prabhu466bd312013-09-13 14:11:57 +01003787 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003788 pgoff_t index = pos >> PAGE_SHIFT;
3789 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003790 loff_t page_start = pos & PAGE_MASK;
3791 loff_t i_size;
3792 struct page *page;
3793 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794
Joe Perchesf96637b2013-05-04 22:12:25 -05003795 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003796
Sachin Prabhu466bd312013-09-13 14:11:57 +01003797start:
Nick Piggin54566b22009-01-04 12:00:53 -08003798 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003799 if (!page) {
3800 rc = -ENOMEM;
3801 goto out;
3802 }
Nick Piggind9414772008-09-24 11:32:59 -04003803
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003804 if (PageUptodate(page))
3805 goto out;
Steve French8a236262007-03-06 00:31:00 +00003806
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003807 /*
3808 * If we write a full page it will be up to date, no need to read from
3809 * the server. If the write is short, we'll end up doing a sync write
3810 * instead.
3811 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003812 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003813 goto out;
3814
3815 /*
3816 * optimize away the read when we have an oplock, and we're not
3817 * expecting to use any of the data we'd be reading in. That
3818 * is, when the page lies beyond the EOF, or straddles the EOF
3819 * and the write will cover all of the existing data.
3820 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003821 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003822 i_size = i_size_read(mapping->host);
3823 if (page_start >= i_size ||
3824 (offset == 0 && (pos + len) >= i_size)) {
3825 zero_user_segments(page, 0, offset,
3826 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003827 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003828 /*
3829 * PageChecked means that the parts of the page
3830 * to which we're not writing are considered up
3831 * to date. Once the data is copied to the
3832 * page, it can be set uptodate.
3833 */
3834 SetPageChecked(page);
3835 goto out;
3836 }
3837 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838
Sachin Prabhu466bd312013-09-13 14:11:57 +01003839 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003840 /*
3841 * might as well read a page, it is fast enough. If we get
3842 * an error, we don't need to return it. cifs_write_end will
3843 * do a sync write instead since PG_uptodate isn't set.
3844 */
3845 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003846 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01003847 oncethru = 1;
3848 goto start;
Steve French8a236262007-03-06 00:31:00 +00003849 } else {
3850		/* We could try using another file handle if there is one,
3851		   but how would we lock it to prevent a close of that handle
3852		   from racing with this read? In any case, the data will be
Nick Piggind9414772008-09-24 11:32:59 -04003853		   written out by write_end, so this is fine. */
Steve French8a236262007-03-06 00:31:00 +00003854 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003855out:
3856 *pagep = page;
3857 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858}
3859
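/* Refuse to release a page that still carries private data; otherwise
   let fscache decide whether its mark on the page can be dropped. */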
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303860static int cifs_release_page(struct page *page, gfp_t gfp)
3861{
3862 if (PagePrivate(page))
3863 return 0;
3864
3865 return cifs_fscache_release_page(page, gfp);
3866}
3867
Lukas Czernerd47992f2013-05-21 23:17:23 -04003868static void cifs_invalidate_page(struct page *page, unsigned int offset,
3869 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303870{
3871 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3872
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003873 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303874 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3875}
3876
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003877static int cifs_launder_page(struct page *page)
3878{
3879 int rc = 0;
3880 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003881 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003882 struct writeback_control wbc = {
3883 .sync_mode = WB_SYNC_ALL,
3884 .nr_to_write = 0,
3885 .range_start = range_start,
3886 .range_end = range_end,
3887 };
3888
Joe Perchesf96637b2013-05-04 22:12:25 -05003889 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003890
3891 if (clear_page_dirty_for_io(page))
3892 rc = cifs_writepage_locked(page, &wbc);
3893
3894 cifs_fscache_invalidate_page(page, page->mapping->host);
3895 return rc;
3896}
3897
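/*
 * Work item run when the server sends an oplock break: wait out pending
 * writers, downgrade the cached oplock state, flush dirty pages (and zap
 * the mapping if read caching was lost), re-push byte-range locks, and
 * finally acknowledge the break to the server unless it was cancelled.
 */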
Tejun Heo9b646972010-07-20 22:09:02 +02003898void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003899{
3900 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3901 oplock_break);
David Howells2b0143b2015-03-17 22:25:59 +00003902 struct inode *inode = d_inode(cfile->dentry);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003903 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003904 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003905 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003906 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003907
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003908 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
NeilBrown74316202014-07-07 15:16:04 +10003909 TASK_UNINTERRUPTIBLE);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003910
3911 server->ops->downgrade_oplock(server, cinode,
3912 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
3913
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003914 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003915 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003916 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3917 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003918 cinode->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003919 }
3920
Jeff Layton3bc303c2009-09-21 06:47:50 -04003921 if (inode && S_ISREG(inode->i_mode)) {
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003922 if (CIFS_CACHE_READ(cinode))
Al Viro8737c932009-12-24 06:47:55 -05003923 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003924 else
Al Viro8737c932009-12-24 06:47:55 -05003925 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003926 rc = filemap_fdatawrite(inode->i_mapping);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003927 if (!CIFS_CACHE_READ(cinode)) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003928 rc = filemap_fdatawait(inode->i_mapping);
3929 mapping_set_error(inode->i_mapping, rc);
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003930 cifs_zap_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003931 }
Joe Perchesf96637b2013-05-04 22:12:25 -05003932 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003933 }
3934
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003935 rc = cifs_push_locks(cfile);
3936 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003937 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003938
Jeff Layton3bc303c2009-09-21 06:47:50 -04003939 /*
3940	 * Releasing a stale oplock after a recent reconnect of the SMB session,
3941	 * using a now-incorrect file handle, is not a data integrity issue. But
3942	 * do not bother sending an oplock release if the session to the server
3943	 * is still disconnected, since the server has already released the oplock.
3944 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003945 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003946 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3947 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05003948 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003949 }
Aurelien Aptel1ee4f2d2019-03-29 10:49:12 +01003950 _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003951 cifs_done_oplock_break(cinode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003952}
3953
Steve Frenchdca69282013-11-11 16:42:37 -06003954/*
3955 * The presence of cifs_direct_io() in the address space ops vector
3956 * allows the O_DIRECT flag on open(), which would have failed otherwise.
3957 *
3958 * In the non-cached mode (mount with cache=none), we shunt off direct
3959 * read and write requests so this method should never be called.
3960 *
3961 * Direct IO is not yet supported in the cached mode.
3962 */
3963static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07003964cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06003965{
3966 /*
3967 * FIXME
3968 * Eventually need to support direct IO for non forcedirectio mounts
3969 */
3970 return -EINVAL;
3971}
3972
3973
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003974const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975 .readpage = cifs_readpage,
3976 .readpages = cifs_readpages,
3977 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003978 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003979 .write_begin = cifs_write_begin,
3980 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303982 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06003983 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303984 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003985 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003986};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003987
3988/*
3989 * cifs_readpages requires the server to support a buffer large enough to
3990 * contain the header plus one complete page of data. Otherwise, we need
3991 * to leave cifs_readpages out of the address space operations.
3992 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003993const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003994 .readpage = cifs_readpage,
3995 .writepage = cifs_writepage,
3996 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003997 .write_begin = cifs_write_begin,
3998 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003999 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05304000 .releasepage = cifs_release_page,
4001 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04004002 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00004003};
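/*
 * A minimal sketch of how one of the two ops tables above gets picked,
 * assuming the selection logic lives in cifs_set_ops() in inode.c (it is
 * not part of this file): the small-buffer table is used when the
 * server's negotiated buffer cannot hold a header plus a full page.
 *
 *	if (server->maxBuf < PAGE_SIZE + MAX_CIFS_HDR_SIZE)
 *		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *	else
 *		inode->i_data.a_ops = &cifs_addr_ops;
 */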