/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it
		   can cause unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

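/*
 * Example (illustrative only, not used by this file): for a typical
 * exclusive-create open, the mapping above yields
 *
 *	u32 pf = cifs_posix_convert_flags(O_WRONLY | O_CREAT | O_EXCL);
 *	// pf == (SMB_O_WRONLY | SMB_O_CREAT | SMB_O_EXCL)
 *
 * while O_EXCL without O_CREAT is only logged and otherwise ignored.
 */
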
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

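/*
 * Worked example (illustrative only): an open(2) with
 * O_RDWR | O_CREAT | O_TRUNC maps to
 *
 *	int access = cifs_convert_flags(O_RDWR | O_CREAT | O_TRUNC);
 *	// access == (GENERIC_READ | GENERIC_WRITE)
 *	int disp = cifs_get_disposition(O_RDWR | O_CREAT | O_TRUNC);
 *	// disp == FILE_OVERWRITE_IF
 *
 * which matches the open flag mapping table in cifs_nt_open() below.
 */
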
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}

struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* readable file instances go first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/**
 * cifsFileInfo_put - release a reference to file private data
 *
 * Always potentially waits for the oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true);
}

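/*
 * Reference-counting sketch (illustrative): a reference taken with
 * cifsFileInfo_get() must be balanced by cifsFileInfo_put(), as
 * cifs_reopen_persistent_handles() below does:
 *
 *	struct cifsFileInfo *cfile = cifsFileInfo_get(open_file);
 *	// ... cfile can now be used without being freed underneath us ...
 *	cifsFileInfo_put(cfile);
 */
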
/**
 * _cifsFileInfo_put - release a reference to file private data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock and
 * cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because stale pages may cause an error when
		 * we open this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fall through to retry the open the old way on network
		 * i/o or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

612
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700613/*
614 * Try to reacquire byte range locks that were released when session
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400615 * to server was lost.
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700616 */
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400617static int
618cifs_relock_file(struct cifsFileInfo *cfile)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619{
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400620 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +0000621 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400622 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623 int rc = 0;
624
Rabin Vincent6ed24ef2017-05-03 17:17:21 +0200625 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400626 if (cinode->can_cache_brlcks) {
Pavel Shilovsky689c3db2013-07-11 11:17:45 +0400627 /* can cache locks - no need to relock */
628 up_read(&cinode->lock_sem);
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400629 return rc;
630 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400632 if (cap_unix(tcon->ses) &&
633 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
634 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
635 rc = cifs_push_posix_locks(cfile);
636 else
637 rc = tcon->ses->server->ops->push_mand_locks(cfile);
638
Pavel Shilovsky689c3db2013-07-11 11:17:45 +0400639 up_read(&cinode->lock_sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640 return rc;
641}
642
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700643static int
644cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645{
646 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400647 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400648 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +0000650 struct cifs_tcon *tcon;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700651 struct TCP_Server_Info *server;
652 struct cifsInodeInfo *cinode;
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000653 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 char *full_path = NULL;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700655 int desired_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656 int disposition = FILE_OPEN;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500657 int create_options = CREATE_NOT_DIR;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400658 struct cifs_open_parms oparms;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400660 xid = get_xid();
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700661 mutex_lock(&cfile->fh_mutex);
662 if (!cfile->invalidHandle) {
663 mutex_unlock(&cfile->fh_mutex);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530664 rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400665 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530666 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667 }
668
David Howells2b0143b2015-03-17 22:25:59 +0000669 inode = d_inode(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700671 tcon = tlink_tcon(cfile->tlink);
672 server = tcon->ses->server;
Steve French3a9f4622007-04-04 17:10:24 +0000673
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700674 /*
675 * Can not grab rename sem here because various ops, including those
676 * that already have the rename sem can end up causing writepage to get
677 * called and if the server was down that means we end up here, and we
678 * can never tell if the caller already has the rename_sem.
679 */
680 full_path = build_path_from_dentry(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681 if (full_path == NULL) {
Steve French3a9f4622007-04-04 17:10:24 +0000682 rc = -ENOMEM;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700683 mutex_unlock(&cfile->fh_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400684 free_xid(xid);
Steve French3a9f4622007-04-04 17:10:24 +0000685 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686 }
687
Joe Perchesf96637b2013-05-04 22:12:25 -0500688 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
689 inode, cfile->f_flags, full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690
Pavel Shilovsky10b9b982012-03-20 12:55:09 +0300691 if (tcon->ses->server->oplocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 oplock = REQ_OPLOCK;
693 else
Steve French4b18f2a2008-04-29 00:06:05 +0000694 oplock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400696 if (tcon->unix_ext && cap_unix(tcon->ses) &&
Steve French7fc8f4e2009-02-23 20:43:11 +0000697 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400698 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Jeff Layton608712f2010-10-15 15:33:56 -0400699 /*
700 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
701 * original open. Must mask them off for a reopen.
702 */
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700703 unsigned int oflags = cfile->f_flags &
Jeff Layton15886172010-10-15 15:33:59 -0400704 ~(O_CREAT | O_EXCL | O_TRUNC);
Jeff Layton608712f2010-10-15 15:33:56 -0400705
Jeff Layton2422f672010-06-16 13:40:16 -0400706 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700707 cifs_sb->mnt_file_mode /* ignored */,
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400708 oflags, &oplock, &cfile->fid.netfid, xid);
Steve French7fc8f4e2009-02-23 20:43:11 +0000709 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500710 cifs_dbg(FYI, "posix reopen succeeded\n");
Andi Shytife090e42013-07-29 20:04:35 +0200711 oparms.reconnect = true;
Steve French7fc8f4e2009-02-23 20:43:11 +0000712 goto reopen_success;
713 }
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700714 /*
715 * fallthrough to retry open the old way on errors, especially
716 * in the reconnect path it is important to retry hard
717 */
Steve French7fc8f4e2009-02-23 20:43:11 +0000718 }
719
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700720 desired_access = cifs_convert_flags(cfile->f_flags);
Steve French7fc8f4e2009-02-23 20:43:11 +0000721
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500722 if (backup_cred(cifs_sb))
723 create_options |= CREATE_OPEN_BACKUP_INTENT;
724
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700725 if (server->ops->get_lease_key)
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400726 server->ops->get_lease_key(inode, &cfile->fid);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700727
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400728 oparms.tcon = tcon;
729 oparms.cifs_sb = cifs_sb;
730 oparms.desired_access = desired_access;
731 oparms.create_options = create_options;
732 oparms.disposition = disposition;
733 oparms.path = full_path;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400734 oparms.fid = &cfile->fid;
735 oparms.reconnect = true;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400736
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700737 /*
738 * Can not refresh inode by passing in file_info buf to be returned by
Pavel Shilovskyd81b8a42014-01-16 15:53:36 +0400739 * ops->open and then calling get_inode_info with returned buf since
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700740 * file might have write behind data that needs to be flushed and server
741 * version of file size can be stale. If we knew for sure that inode was
742 * not dirty locally we could do this.
743 */
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400744 rc = server->ops->open(xid, &oparms, &oplock, NULL);
Pavel Shilovskyb33fcf12013-07-11 10:58:30 +0400745 if (rc == -ENOENT && oparms.reconnect == false) {
746 /* durable handle timeout is expired - open the file again */
747 rc = server->ops->open(xid, &oparms, &oplock, NULL);
748 /* indicate that we need to relock the file */
749 oparms.reconnect = true;
750 }
751
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 if (rc) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700753 mutex_unlock(&cfile->fh_mutex);
Joe Perchesf96637b2013-05-04 22:12:25 -0500754 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
755 cifs_dbg(FYI, "oplock: %d\n", oplock);
Jeff Layton15886172010-10-15 15:33:59 -0400756 goto reopen_error_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757 }
Jeff Layton15886172010-10-15 15:33:59 -0400758
759reopen_success:
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700760 cfile->invalidHandle = false;
761 mutex_unlock(&cfile->fh_mutex);
762 cinode = CIFS_I(inode);
Jeff Layton15886172010-10-15 15:33:59 -0400763
764 if (can_flush) {
765 rc = filemap_write_and_wait(inode->i_mapping);
Jeff Laytoneb4b7562010-10-22 14:52:29 -0400766 mapping_set_error(inode->i_mapping, rc);
Jeff Layton15886172010-10-15 15:33:59 -0400767
Jeff Layton15886172010-10-15 15:33:59 -0400768 if (tcon->unix_ext)
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700769 rc = cifs_get_inode_info_unix(&inode, full_path,
770 inode->i_sb, xid);
Jeff Layton15886172010-10-15 15:33:59 -0400771 else
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700772 rc = cifs_get_inode_info(&inode, full_path, NULL,
773 inode->i_sb, xid, NULL);
774 }
775 /*
776 * Else we are writing out data to server already and could deadlock if
777 * we tried to flush data, and since we do not know if we have data that
778 * would invalidate the current end of file on the server we can not go
779 * to the server to get the new inode info.
780 */
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300781
Pavel Shilovskyde740252016-10-11 15:34:07 -0700782 /*
783 * If the server returned a read oplock and we have mandatory brlocks,
784 * set oplock level to None.
785 */
786 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
787 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
788 oplock = 0;
789 }
790
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400791 server->ops->set_fid(cfile, &cfile->fid, oplock);
792 if (oparms.reconnect)
793 cifs_relock_file(cfile);
Jeff Layton15886172010-10-15 15:33:59 -0400794
795reopen_error_exit:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400797 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798 return rc;
799}
800
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen persistent handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

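/*
 * Caller sketch (illustrative only; the server-request step is left as a
 * placeholder): the 0/1/-EACCES convention above tells a setlk-style
 * caller whether a server round trip is still needed:
 *
 *	rc = cifs_lock_add_if(cfile, lock, wait);
 *	if (rc < 0)
 *		return rc;	// conflicting lock and wait == false
 *	if (rc == 1)
 *		rc = ...;	// send the lock request to the server
 */
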
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001103/*
1104 * Set the byte-range lock (posix style). Returns:
1105 * 1) 0, if we set the lock and don't need to request to the server;
1106 * 2) 1, if we need to request to the server;
1107 * 3) <0, if the error occurs while setting the lock.
1108 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001109static int
1110cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1111{
Al Viro496ad9a2013-01-23 17:07:38 -05001112 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001113 int rc = 1;
1114
1115 if ((flock->fl_flags & FL_POSIX) == 0)
1116 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001117
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001118try_again:
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001119 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001120 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001121 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001122 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001123 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001124
1125 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001126 up_write(&cinode->lock_sem);
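	/*
	 * posix_lock_file() returns FILE_LOCK_DEFERRED for a blocking
	 * request that hit a conflict; wait until the lock stops being
	 * blocked (fl_next cleared) and retry.
	 */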
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001127 if (rc == FILE_LOCK_DEFERRED) {
1128 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
1129 if (!rc)
1130 goto try_again;
Jeff Layton1a9e64a2013-06-21 08:58:10 -04001131 posix_unblock_lock(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001132 }
Steve French9ebb3892012-04-01 13:52:54 -05001133 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001134}
1135
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001136int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001137cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001138{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001139 unsigned int xid;
1140 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001141 struct cifsLockInfo *li, *tmp;
1142 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001143 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001144 LOCKING_ANDX_RANGE *buf, *cur;
1145 int types[] = {LOCKING_ANDX_LARGE_FILES,
1146 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1147 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001148
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001149 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001150 tcon = tlink_tcon(cfile->tlink);
1151
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001152 /*
 1153 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001154 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001155 */
1156 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001157 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001158 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001159 return -EINVAL;
1160 }
1161
Ross Lagerwall04d76802019-01-08 18:30:56 +00001162 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1163 PAGE_SIZE);
1164 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1165 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001166 max_num = (max_buf - sizeof(struct smb_hdr)) /
1167 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001168 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001169 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001170 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001171 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001172 }
1173
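	/*
	 * Two passes, one per lock type (exclusive, then shared), packing
	 * up to max_num ranges into a single LOCKING_ANDX request.
	 */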
1174 for (i = 0; i < 2; i++) {
1175 cur = buf;
1176 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001177 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001178 if (li->type != types[i])
1179 continue;
1180 cur->Pid = cpu_to_le16(li->pid);
1181 cur->LengthLow = cpu_to_le32((u32)li->length);
1182 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1183 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1184 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1185 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001186 stored_rc = cifs_lockv(xid, tcon,
1187 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001188 (__u8)li->type, 0, num,
1189 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001190 if (stored_rc)
1191 rc = stored_rc;
1192 cur = buf;
1193 num = 0;
1194 } else
1195 cur++;
1196 }
1197
1198 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001199 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001200 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001201 if (stored_rc)
1202 rc = stored_rc;
1203 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001204 }
1205
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001206 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001207 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001208 return rc;
1209}
1210
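/*
 * Hash the lock owner with a random secret (cifs_lock_secret) so the
 * raw kernel pointer value is not sent to the server as a lock pid.
 */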
Jeff Layton3d224622016-05-24 06:27:44 -04001211static __u32
1212hash_lockowner(fl_owner_t owner)
1213{
1214 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1215}
1216
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001217struct lock_to_push {
1218 struct list_head llist;
1219 __u64 offset;
1220 __u64 length;
1221 __u32 pid;
1222 __u16 netfid;
1223 __u8 type;
1224};
1225
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001226static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001227cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001228{
David Howells2b0143b2015-03-17 22:25:59 +00001229 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001230 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001231 struct file_lock *flock;
1232 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001233 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001234 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001235 struct list_head locks_to_send, *el;
1236 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001237 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001238
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001239 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001240
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001241 if (!flctx)
1242 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001243
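	/*
	 * Count the posix locks first so we know how many lock_to_push
	 * structures to allocate outside of flc_lock.
	 */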
Jeff Laytone084c1b2015-02-16 14:32:03 -05001244 spin_lock(&flctx->flc_lock);
1245 list_for_each(el, &flctx->flc_posix) {
1246 count++;
1247 }
1248 spin_unlock(&flctx->flc_lock);
1249
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001250 INIT_LIST_HEAD(&locks_to_send);
1251
1252 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001253 * Allocating count locks is enough because no FL_POSIX locks can be
 1254 * added to the list while we are holding cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001255 * protects the locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001256 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001257 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001258 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1259 if (!lck) {
1260 rc = -ENOMEM;
1261 goto err_out;
1262 }
1263 list_add_tail(&lck->llist, &locks_to_send);
1264 }
1265
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001266 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001267 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001268 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001269 if (el == &locks_to_send) {
1270 /*
1271 * The list ended. We don't have enough allocated
1272 * structures - something is really wrong.
1273 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001274 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001275 break;
1276 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001277 length = 1 + flock->fl_end - flock->fl_start;
1278 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1279 type = CIFS_RDLCK;
1280 else
1281 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001282 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001283 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001284 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001285 lck->length = length;
1286 lck->type = type;
1287 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001288 }
Jeff Layton6109c852015-01-16 15:05:57 -05001289 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001290
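	/*
	 * Push the collected locks to the server outside of flc_lock,
	 * since the lock calls can block.
	 */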
1291 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001292 int stored_rc;
1293
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001294 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001295 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001296 lck->type, 0);
1297 if (stored_rc)
1298 rc = stored_rc;
1299 list_del(&lck->llist);
1300 kfree(lck);
1301 }
1302
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001303out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001304 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001305 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001306err_out:
1307 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1308 list_del(&lck->llist);
1309 kfree(lck);
1310 }
1311 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001312}
1313
1314static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001315cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001316{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001317 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001318 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001319 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001320 int rc = 0;
1321
 1322 /* we are going to update can_cache_brlcks here - we need write access */
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001323 cifs_down_write(&cinode->lock_sem);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001324 if (!cinode->can_cache_brlcks) {
1325 up_write(&cinode->lock_sem);
1326 return rc;
1327 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001328
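	/* push posix locks if the server supports them, mandatory otherwise */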
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001329 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001330 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1331 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001332 rc = cifs_push_posix_locks(cfile);
1333 else
1334 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001335
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001336 cinode->can_cache_brlcks = false;
1337 up_write(&cinode->lock_sem);
1338 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001339}
1340
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001341static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001342cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001343 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001345 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001346 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001347 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001348 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001349 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001350 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001351 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001353 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001354 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001355 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001356 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001357 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001358 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1359 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001360 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001362 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001363 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001364 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001365 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001366 *lock = 1;
1367 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001368 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001369 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001370 *unlock = 1;
1371 /* Check if unlock includes more than one lock range */
1372 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001373 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001374 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001375 *lock = 1;
1376 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001377 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001378 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001379 *lock = 1;
1380 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001381 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001382 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001383 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001385 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001386}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001388static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001389cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001390 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001391{
1392 int rc = 0;
1393 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001394 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1395 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001396 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001397 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001399 if (posix_lck) {
1400 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001401
1402 rc = cifs_posix_lock_test(file, flock);
1403 if (!rc)
1404 return rc;
1405
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001406 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001407 posix_lock_type = CIFS_RDLCK;
1408 else
1409 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001410 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1411 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001412 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001413 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 return rc;
1415 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001416
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001417 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001418 if (!rc)
1419 return rc;
1420
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001421 /* BB we could chain these into one lock request BB */
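	/*
	 * Probe for conflicts by trying to set the lock on the server: if
	 * it is granted there is no conflict, so release it again and
	 * report the range as unlocked.
	 */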
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001422 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1423 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001424 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001425 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1426 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001427 flock->fl_type = F_UNLCK;
1428 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001429 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1430 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001431 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001432 }
1433
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001434 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001435 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001436 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001437 }
1438
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001439 type &= ~server->vals->exclusive_lock_type;
1440
1441 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1442 type | server->vals->shared_lock_type,
1443 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001444 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001445 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1446 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001447 flock->fl_type = F_RDLCK;
1448 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001449 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1450 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001451 } else
1452 flock->fl_type = F_WRLCK;
1453
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001454 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001455}
1456
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001457void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001458cifs_move_llist(struct list_head *source, struct list_head *dest)
1459{
1460 struct list_head *li, *tmp;
1461 list_for_each_safe(li, tmp, source)
1462 list_move(li, dest);
1463}
1464
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001465void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001466cifs_free_llist(struct list_head *llist)
1467{
1468 struct cifsLockInfo *li, *tmp;
1469 list_for_each_entry_safe(li, tmp, llist, llist) {
1470 cifs_del_lock_waiters(li);
1471 list_del(&li->llist);
1472 kfree(li);
1473 }
1474}
1475
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001476int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001477cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1478 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001479{
1480 int rc = 0, stored_rc;
1481 int types[] = {LOCKING_ANDX_LARGE_FILES,
1482 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1483 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001484 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001485 LOCKING_ANDX_RANGE *buf, *cur;
1486 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001487 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001488 struct cifsLockInfo *li, *tmp;
1489 __u64 length = 1 + flock->fl_end - flock->fl_start;
1490 struct list_head tmp_llist;
1491
1492 INIT_LIST_HEAD(&tmp_llist);
1493
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001494 /*
 1495 * Accessing maxBuf is racy with cifs_reconnect - need to store the value
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001496 * and check it before use.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001497 */
1498 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001499 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001500 return -EINVAL;
1501
Ross Lagerwall04d76802019-01-08 18:30:56 +00001502 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1503 PAGE_SIZE);
1504 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1505 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001506 max_num = (max_buf - sizeof(struct smb_hdr)) /
1507 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001508 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001509 if (!buf)
1510 return -ENOMEM;
1511
Dave Wysochanskifa25e112019-10-23 05:02:33 -04001512 cifs_down_write(&cinode->lock_sem);
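	/*
	 * Walk the cached locks in two passes (one per lock type),
	 * batching the ranges contained in the unlock request into
	 * LOCKING_ANDX unlock calls.
	 */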
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001513 for (i = 0; i < 2; i++) {
1514 cur = buf;
1515 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001516 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001517 if (flock->fl_start > li->offset ||
1518 (flock->fl_start + length) <
1519 (li->offset + li->length))
1520 continue;
1521 if (current->tgid != li->pid)
1522 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001523 if (types[i] != li->type)
1524 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001525 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001526 /*
1527 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001528 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001529 */
1530 list_del(&li->llist);
1531 cifs_del_lock_waiters(li);
1532 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001533 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001534 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001535 cur->Pid = cpu_to_le16(li->pid);
1536 cur->LengthLow = cpu_to_le32((u32)li->length);
1537 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1538 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1539 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1540 /*
 1541 * We need to save the lock here so that we can add it back
 1542 * to the file's list if the unlock range request fails on
 1543 * the server.
1544 */
1545 list_move(&li->llist, &tmp_llist);
1546 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001547 stored_rc = cifs_lockv(xid, tcon,
1548 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001549 li->type, num, 0, buf);
1550 if (stored_rc) {
1551 /*
1552 * We failed on the unlock range
1553 * request - add all locks from the tmp
1554 * list to the head of the file's list.
1555 */
1556 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001557 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001558 rc = stored_rc;
1559 } else
1560 /*
 1561 * The unlock range request succeeded -
1562 * free the tmp list.
1563 */
1564 cifs_free_llist(&tmp_llist);
1565 cur = buf;
1566 num = 0;
1567 } else
1568 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001569 }
1570 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001571 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001572 types[i], num, 0, buf);
1573 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001574 cifs_move_llist(&tmp_llist,
1575 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001576 rc = stored_rc;
1577 } else
1578 cifs_free_llist(&tmp_llist);
1579 }
1580 }
1581
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001582 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001583 kfree(buf);
1584 return rc;
1585}
1586
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001587static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001588cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001589 bool wait_flag, bool posix_lck, int lock, int unlock,
1590 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001591{
1592 int rc = 0;
1593 __u64 length = 1 + flock->fl_end - flock->fl_start;
1594 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1595 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001596 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001597 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001598
1599 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001600 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001601
1602 rc = cifs_posix_lock_set(file, flock);
1603 if (!rc || rc < 0)
1604 return rc;
1605
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001606 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001607 posix_lock_type = CIFS_RDLCK;
1608 else
1609 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001610
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001611 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001612 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001613
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001614 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001615 hash_lockowner(flock->fl_owner),
1616 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001617 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001618 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001619 }
1620
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001621 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001622 struct cifsLockInfo *lock;
1623
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001624 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001625 if (!lock)
1626 return -ENOMEM;
1627
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001628 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001629 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001630 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001631 return rc;
1632 }
1633 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001634 goto out;
1635
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001636 /*
 1637 * A Windows 7 server can delay breaking a lease from read to None
 1638 * if we set a byte-range lock on a file - break it explicitly
 1639 * before sending the lock to the server to be sure the next
 1640 * read won't conflict with non-overlapping locks due to
 1641 * page reading.
1642 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001643 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1644 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001645 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001646 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1647 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001648 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001649 }
1650
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001651 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1652 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001653 if (rc) {
1654 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001655 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001656 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001657
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001658 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001659 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001660 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001661
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001662out:
Aurelien Aptel56300d32019-03-14 18:44:16 +01001663 if (flock->fl_flags & FL_POSIX) {
1664 /*
1665 * If this is a request to remove all locks because we
1666 * are closing the file, it doesn't matter if the
1667 * unlocking failed as both cifs.ko and the SMB server
1668 * remove the lock on file close
1669 */
1670 if (rc) {
1671 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1672 if (!(flock->fl_flags & FL_CLOSE))
1673 return rc;
1674 }
Benjamin Coddington4f656362015-10-22 13:38:14 -04001675 rc = locks_lock_file_wait(file, flock);
Aurelien Aptel56300d32019-03-14 18:44:16 +01001676 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001677 return rc;
1678}
1679
1680int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1681{
1682 int rc, xid;
1683 int lock = 0, unlock = 0;
1684 bool wait_flag = false;
1685 bool posix_lck = false;
1686 struct cifs_sb_info *cifs_sb;
1687 struct cifs_tcon *tcon;
1688 struct cifsInodeInfo *cinode;
1689 struct cifsFileInfo *cfile;
1690 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001691 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001692
1693 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001694 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001695
Joe Perchesf96637b2013-05-04 22:12:25 -05001696 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1697 cmd, flock->fl_flags, flock->fl_type,
1698 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001699
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001700 cfile = (struct cifsFileInfo *)file->private_data;
1701 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001702
1703 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1704 tcon->ses->server);
1705
Al Viro7119e222014-10-22 00:25:12 -04001706 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001707 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001708 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001709
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001710 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001711 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1712 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1713 posix_lck = true;
1714 /*
1715 * BB add code here to normalize offset and length to account for
1716 * negative length which we can not accept over the wire.
1717 */
1718 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001719 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001720 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001721 return rc;
1722 }
1723
1724 if (!lock && !unlock) {
1725 /*
 1726 * if the request is neither a lock nor an unlock, there is
 1727 * nothing to do since we do not know what it is
1728 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001729 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001730 return -EOPNOTSUPP;
1731 }
1732
1733 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1734 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001735 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 return rc;
1737}
1738
Jeff Layton597b0272012-03-23 14:40:56 -04001739/*
1740 * update the file size (if needed) after a write. Should be called with
1741 * the inode->i_lock held
1742 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001743void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001744cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1745 unsigned int bytes_written)
1746{
1747 loff_t end_of_write = offset + bytes_written;
1748
1749 if (end_of_write > cifsi->server_eof)
1750 cifsi->server_eof = end_of_write;
1751}
1752
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001753static ssize_t
1754cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1755 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756{
1757 int rc = 0;
1758 unsigned int bytes_written = 0;
1759 unsigned int total_written;
1760 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001761 struct cifs_tcon *tcon;
1762 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001763 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001764 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001765 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001766 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767
Jeff Layton7da4b492010-10-15 15:34:00 -04001768 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
Al Viro35c265e2014-08-19 20:25:34 -04001770 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1771 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001773 tcon = tlink_tcon(open_file->tlink);
1774 server = tcon->ses->server;
1775
1776 if (!server->ops->sync_write)
1777 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001778
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001779 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780
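	/*
	 * Issue synchronous writes of at most wp_retry_size() bytes each,
	 * reopening the handle and retrying on -EAGAIN, until all the
	 * data has been written.
	 */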
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 for (total_written = 0; write_size > total_written;
1782 total_written += bytes_written) {
1783 rc = -EAGAIN;
1784 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001785 struct kvec iov[2];
1786 unsigned int len;
1787
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789 /* we could deadlock if we called
1790 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001791 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001793 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 if (rc != 0)
1795 break;
1796 }
Steve French3e844692005-10-03 13:37:24 -07001797
David Howells2b0143b2015-03-17 22:25:59 +00001798 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001799 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001800 /* iov[0] is reserved for smb header */
1801 iov[1].iov_base = (char *)write_data + total_written;
1802 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001803 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001804 io_parms.tcon = tcon;
1805 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001806 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001807 rc = server->ops->sync_write(xid, &open_file->fid,
1808 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 }
1810 if (rc || (bytes_written == 0)) {
1811 if (total_written)
1812 break;
1813 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001814 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 return rc;
1816 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001817 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001818 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001819 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001820 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001821 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001822 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 }
1824
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001825 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
Jeff Layton7da4b492010-10-15 15:34:00 -04001827 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001828 spin_lock(&d_inode(dentry)->i_lock);
1829 if (*offset > d_inode(dentry)->i_size)
1830 i_size_write(d_inode(dentry), *offset);
1831 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 }
David Howells2b0143b2015-03-17 22:25:59 +00001833 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001834 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 return total_written;
1836}
1837
Jeff Layton6508d902010-09-29 19:51:11 -04001838struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1839 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001840{
1841 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001842 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001843 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Layton6508d902010-09-29 19:51:11 -04001844
1845 /* only filter by fsuid on multiuser mounts */
1846 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1847 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001848
Steve French3afca262016-09-22 18:58:16 -05001849 spin_lock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001850 /* we could simply get the first_list_entry since write-only entries
1851 are always at the end of the list but since the first entry might
1852 have a close pending, we go through the whole list */
1853 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001854 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001855 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001856 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001857 if (!open_file->invalidHandle) {
1858 /* found a good file */
1859 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001860 cifsFileInfo_get(open_file);
1861 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001862 return open_file;
1863 } /* else might as well continue, and look for
1864 another, or simply have the caller reopen it
1865 again rather than trying to fix this handle */
1866 } else /* write only file */
1867 break; /* write only files are last so must be done */
1868 }
Steve French3afca262016-09-22 18:58:16 -05001869 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001870 return NULL;
1871}
Steve French630f3f0c2007-10-25 21:17:17 +00001872
Jeff Layton6508d902010-09-29 19:51:11 -04001873struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1874 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001875{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001876 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001877 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001878 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001879 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001880 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001881 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001882
Steve French60808232006-04-22 15:53:05 +00001883 /* Having a null inode here (because mapping->host was set to zero by
 1884 the VFS or MM) should not happen but we had reports of an oops (due to
1885 it being zero) during stress testcases so we need to check for it */
1886
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001887 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001888 cifs_dbg(VFS, "Null inode passed to find_writable_file\n");
Steve French60808232006-04-22 15:53:05 +00001889 dump_stack();
1890 return NULL;
1891 }
1892
Jeff Laytond3892292010-11-02 16:22:50 -04001893 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001894 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001895
Jeff Layton6508d902010-09-29 19:51:11 -04001896 /* only filter by fsuid on multiuser mounts */
1897 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1898 fsuid_only = false;
1899
Steve French3afca262016-09-22 18:58:16 -05001900 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001901refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001902 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001903 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001904 return NULL;
1905 }
Steve French6148a742005-10-05 12:23:19 -07001906 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001907 if (!any_available && open_file->pid != current->tgid)
1908 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001909 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001910 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001911 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001912 if (!open_file->invalidHandle) {
1913 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001914 cifsFileInfo_get(open_file);
1915 spin_unlock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001916 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001917 } else {
1918 if (!inv_file)
1919 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001920 }
Steve French6148a742005-10-05 12:23:19 -07001921 }
1922 }
Jeff Layton2846d382008-09-22 21:33:33 -04001923 /* couldn't find a usable FH with the same pid, try any available */
1924 if (!any_available) {
1925 any_available = true;
1926 goto refind_writable;
1927 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001928
1929 if (inv_file) {
1930 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001931 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001932 }
1933
Steve French3afca262016-09-22 18:58:16 -05001934 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001935
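	/* no valid writable handle found - try to reopen an invalidated one */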
1936 if (inv_file) {
1937 rc = cifs_reopen_file(inv_file, false);
1938 if (!rc)
1939 return inv_file;
1940 else {
Steve French3afca262016-09-22 18:58:16 -05001941 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001942 list_move_tail(&inv_file->flist,
1943 &cifs_inode->openFileList);
Steve French3afca262016-09-22 18:58:16 -05001944 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001945 cifsFileInfo_put(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001946 ++refind;
David Disseldorpe1e9bda2015-03-13 14:20:29 +01001947 inv_file = NULL;
Steve French3afca262016-09-22 18:58:16 -05001948 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001949 goto refind_writable;
1950 }
1951 }
1952
Steve French6148a742005-10-05 12:23:19 -07001953 return NULL;
1954}
1955
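/*
 * Write the [from, to) range of the given page back to the server
 * through any available writable handle for the inode.
 */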
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1957{
1958 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001959 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 char *write_data;
1961 int rc = -EFAULT;
1962 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001964 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
1966 if (!mapping || !mapping->host)
1967 return -EFAULT;
1968
1969 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
1971 offset += (loff_t)from;
1972 write_data = kmap(page);
1973 write_data += from;
1974
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001975 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 kunmap(page);
1977 return -EIO;
1978 }
1979
1980 /* racing with truncate? */
1981 if (offset > mapping->host->i_size) {
1982 kunmap(page);
1983 return 0; /* don't care */
1984 }
1985
1986 /* check to make sure that we are not extending the file */
1987 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001988 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
Jeff Layton6508d902010-09-29 19:51:11 -04001990 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001991 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001992 bytes_written = cifs_write(open_file, open_file->pid,
1993 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001994 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07001996 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001997 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001998 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001999 else if (bytes_written < 0)
2000 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07002001 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05002002 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 rc = -EIO;
2004 }
2005
2006 kunmap(page);
2007 return rc;
2008}
2009
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002010static struct cifs_writedata *
2011wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2012 pgoff_t end, pgoff_t *index,
2013 unsigned int *found_pages)
2014{
2015 unsigned int nr_pages;
2016 struct page **pages;
2017 struct cifs_writedata *wdata;
2018
2019 wdata = cifs_writedata_alloc((unsigned int)tofind,
2020 cifs_writev_complete);
2021 if (!wdata)
2022 return NULL;
2023
2024 /*
2025 * find_get_pages_tag seems to return a max of 256 on each
2026 * iteration, so we must call it several times in order to
2027 * fill the array or the wsize is effectively limited to
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03002028 * 256 * PAGE_SIZE.
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002029 */
2030 *found_pages = 0;
2031 pages = wdata->pages;
2032 do {
2033 nr_pages = find_get_pages_tag(mapping, index,
2034 PAGECACHE_TAG_DIRTY, tofind,
2035 pages);
2036 *found_pages += nr_pages;
2037 tofind -= nr_pages;
2038 pages += nr_pages;
2039 } while (nr_pages && tofind && *index <= end);
2040
2041 return wdata;
2042}
2043
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002044static unsigned int
2045wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2046 struct address_space *mapping,
2047 struct writeback_control *wbc,
2048 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2049{
2050 unsigned int nr_pages = 0, i;
2051 struct page *page;
2052
2053 for (i = 0; i < found_pages; i++) {
2054 page = wdata->pages[i];
2055 /*
 2056 * At this point we hold neither mapping->tree_lock nor the
 2057 * lock on the page itself: the page may be truncated or
 2058 * invalidated (changing page->mapping to NULL), or even
 2059 * swizzled back from swapper_space to tmpfs file
 2060 * mapping.
2061 */
2062
2063 if (nr_pages == 0)
2064 lock_page(page);
2065 else if (!trylock_page(page))
2066 break;
2067
2068 if (unlikely(page->mapping != mapping)) {
2069 unlock_page(page);
2070 break;
2071 }
2072
2073 if (!wbc->range_cyclic && page->index > end) {
2074 *done = true;
2075 unlock_page(page);
2076 break;
2077 }
2078
2079 if (*next && (page->index != *next)) {
2080 /* Not next consecutive page */
2081 unlock_page(page);
2082 break;
2083 }
2084
2085 if (wbc->sync_mode != WB_SYNC_NONE)
2086 wait_on_page_writeback(page);
2087
2088 if (PageWriteback(page) ||
2089 !clear_page_dirty_for_io(page)) {
2090 unlock_page(page);
2091 break;
2092 }
2093
2094 /*
2095 * This actually clears the dirty bit in the radix tree.
2096 * See cifs_writepage() for more commentary.
2097 */
2098 set_page_writeback(page);
2099 if (page_offset(page) >= i_size_read(mapping->host)) {
2100 *done = true;
2101 unlock_page(page);
2102 end_page_writeback(page);
2103 break;
2104 }
2105
2106 wdata->pages[i] = page;
2107 *next = page->index + 1;
2108 ++nr_pages;
2109 }
2110
2111 /* reset index to refind any pages skipped */
2112 if (nr_pages == 0)
2113 *index = wdata->pages[0]->index + 1;
2114
2115 /* put any pages we aren't going to use */
2116 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002117 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002118 wdata->pages[i] = NULL;
2119 }
2120
2121 return nr_pages;
2122}
2123
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002124static int
2125wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2126 struct address_space *mapping, struct writeback_control *wbc)
2127{
2128 int rc = 0;
2129 struct TCP_Server_Info *server;
2130 unsigned int i;
2131
2132 wdata->sync_mode = wbc->sync_mode;
2133 wdata->nr_pages = nr_pages;
2134 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002135 wdata->pagesz = PAGE_SIZE;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002136 wdata->tailsz = min(i_size_read(mapping->host) -
2137 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002138 (loff_t)PAGE_SIZE);
2139 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002140
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002141 if (wdata->cfile != NULL)
2142 cifsFileInfo_put(wdata->cfile);
2143 wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
2144 if (!wdata->cfile) {
2145 cifs_dbg(VFS, "No writable handles for inode\n");
2146 rc = -EBADF;
2147 } else {
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002148 wdata->pid = wdata->cfile->pid;
2149 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2150 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002151 }
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002152
2153 for (i = 0; i < nr_pages; ++i)
2154 unlock_page(wdata->pages[i]);
2155
2156 return rc;
2157}
2158
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002160 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002162 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002163 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002164 bool done = false, scanned = false, range_whole = false;
2165 pgoff_t end, index;
2166 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07002167 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00002168
Steve French37c0eb42005-10-05 14:50:29 -07002169 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002170 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002171 * one page at a time via cifs_writepage
2172 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002173 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002174 return generic_writepages(mapping, wbc);
2175
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002176 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002177 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002178 end = -1;
2179 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002180 index = wbc->range_start >> PAGE_SHIFT;
2181 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002182 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002183 range_whole = true;
2184 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002185 }
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002186 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
Steve French37c0eb42005-10-05 14:50:29 -07002187retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002188 while (!done && index <= end) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002189 unsigned int i, nr_pages, found_pages, wsize, credits;
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002190 pgoff_t next = 0, tofind, saved_index = index;
Steve French37c0eb42005-10-05 14:50:29 -07002191
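		/*
		 * Reserve credits for a wsize-sized write before picking
		 * pages; the failure paths below return them via
		 * add_credits_and_wake_if().
		 */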
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002192 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2193 &wsize, &credits);
2194 if (rc)
2195 break;
Steve French37c0eb42005-10-05 14:50:29 -07002196
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002197 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07002198
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002199 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2200 &found_pages);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002201 if (!wdata) {
2202 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002203 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002204 break;
2205 }
2206
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002207 if (found_pages == 0) {
2208 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002209 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002210 break;
2211 }
2212
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002213 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2214 end, &index, &next, &done);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002215
2216 /* nothing to write? */
2217 if (nr_pages == 0) {
2218 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002219 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002220 continue;
2221 }
2222
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002223 wdata->credits = credits;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002224
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002225 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
Jeff Layton941b8532011-01-11 07:24:01 -05002226
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002227 /* send failure -- clean up the mess */
2228 if (rc != 0) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002229 add_credits_and_wake_if(server, wdata->credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002230 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002231 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002232 redirty_page_for_writepage(wbc,
2233 wdata->pages[i]);
2234 else
2235 SetPageError(wdata->pages[i]);
2236 end_page_writeback(wdata->pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002237 put_page(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002238 }
Jeff Layton941b8532011-01-11 07:24:01 -05002239 if (rc != -EAGAIN)
2240 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002241 }
2242 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002243
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002244 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2245 index = saved_index;
2246 continue;
2247 }
2248
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002249 wbc->nr_to_write -= nr_pages;
2250 if (wbc->nr_to_write <= 0)
2251 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002252
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002253 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002254 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002255
Steve French37c0eb42005-10-05 14:50:29 -07002256 if (!scanned && !done) {
2257 /*
2258 * We hit the last page and there is more work to be done: wrap
2259 * back to the start of the file
2260 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002261 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002262 index = 0;
2263 goto retry;
2264 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002265
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002266 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002267 mapping->writeback_index = index;
2268
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 return rc;
2270}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
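/*
 * Write a single locked page to the server via cifs_partialpagewrite().
 * -EAGAIN is retried in place for WB_SYNC_ALL and redirties the page
 * otherwise; any other error sets PG_error. The caller is responsible
 * for unlocking the page.
 */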
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002272static int
2273cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002275 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002276 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002278 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279/* BB add check for wbc flags */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002280 get_page(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002281 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002282 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002283
2284 /*
2285 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2286 *
2287 * A writepage() implementation always needs to do either this,
2288 * or re-dirty the page with "redirty_page_for_writepage()" in
2289 * the case of a failure.
2290 *
2291	 * Just unlocking the page would leave the radix-tree tag bits
2292	 * out of sync with the actual state of the page.
2293 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002294 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002295retry_write:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002296 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002297 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2298 goto retry_write;
2299 else if (rc == -EAGAIN)
2300 redirty_page_for_writepage(wbc, page);
2301 else if (rc != 0)
2302 SetPageError(page);
2303 else
2304 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002305 end_page_writeback(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002306 put_page(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002307 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308 return rc;
2309}
2310
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002311static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2312{
2313 int rc = cifs_writepage_locked(page, wbc);
2314 unlock_page(page);
2315 return rc;
2316}
2317
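/*
 * ->write_end() for the generic write path. A page that was fully
 * overwritten is marked uptodate and left dirty for writeback; a
 * partial write to a non-uptodate page is instead written through
 * synchronously with cifs_write(), using the file handle we already
 * hold. The inode size is extended under i_lock if the write ended
 * past the old EOF.
 */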
Nick Piggind9414772008-09-24 11:32:59 -04002318static int cifs_write_end(struct file *file, struct address_space *mapping,
2319 loff_t pos, unsigned len, unsigned copied,
2320 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321{
Nick Piggind9414772008-09-24 11:32:59 -04002322 int rc;
2323 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002324 struct cifsFileInfo *cfile = file->private_data;
2325 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2326 __u32 pid;
2327
2328 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2329 pid = cfile->pid;
2330 else
2331 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
Joe Perchesf96637b2013-05-04 22:12:25 -05002333 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002334 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002335
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002336 if (PageChecked(page)) {
2337 if (copied == len)
2338 SetPageUptodate(page);
2339 ClearPageChecked(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002340 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002341 SetPageUptodate(page);
2342
Linus Torvalds1da177e2005-04-16 15:20:36 -07002343 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002344 char *page_data;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002345 unsigned offset = pos & (PAGE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002346 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002347
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002348 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349		/* this is probably better than directly calling
2350		   partialpage_write, since here the file handle is
2351		   already known and we might as well leverage it */
2352		/* BB check if anything else is missing from ppw,
2353		   such as updating the last write time */
2354 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002355 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002356 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002358
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002359 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002360 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002361 rc = copied;
2362 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002363 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 }
2365
Nick Piggind9414772008-09-24 11:32:59 -04002366 if (rc > 0) {
2367 spin_lock(&inode->i_lock);
2368 if (pos > inode->i_size)
2369 i_size_write(inode, pos);
2370 spin_unlock(&inode->i_lock);
2371 }
2372
2373 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002374 put_page(page);
Nick Piggind9414772008-09-24 11:32:59 -04002375
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 return rc;
2377}
2378
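/*
 * Strict cache mode fsync: write back and wait on the dirty range,
 * invalidate the page cache when we hold no read oplock (so the next
 * read refetches from the server), then issue a server-side flush
 * unless CIFS_MOUNT_NOSSYNC is set.
 */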
Josef Bacik02c24a82011-07-16 20:44:56 -04002379int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2380 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002382 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002383 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002384 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002385 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002386 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002387 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002388 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389
Josef Bacik02c24a82011-07-16 20:44:56 -04002390 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2391 if (rc)
2392 return rc;
Al Viro59551022016-01-22 15:40:57 -05002393 inode_lock(inode);
Josef Bacik02c24a82011-07-16 20:44:56 -04002394
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002395 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396
Al Viro35c265e2014-08-19 20:25:34 -04002397 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2398 file, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002399
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002400 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002401 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002402 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002403 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002404 rc = 0; /* don't care about it in fsync */
2405 }
2406 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002407
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002408 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002409 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2410 server = tcon->ses->server;
2411 if (server->ops->flush)
2412 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2413 else
2414 rc = -ENOSYS;
2415 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002416
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002417 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002418 inode_unlock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002419 return rc;
2420}
2421
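/*
 * Plain fsync: identical to cifs_strict_fsync() except that the page
 * cache is never invalidated here.
 */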
Josef Bacik02c24a82011-07-16 20:44:56 -04002422int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002423{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002424 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002425 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002426 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002427 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002428 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002429 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002430 struct inode *inode = file->f_mapping->host;
2431
2432 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2433 if (rc)
2434 return rc;
Al Viro59551022016-01-22 15:40:57 -05002435 inode_lock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002436
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002437 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002438
Al Viro35c265e2014-08-19 20:25:34 -04002439 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2440 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002441
2442 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002443 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2444 server = tcon->ses->server;
2445 if (server->ops->flush)
2446 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2447 else
2448 rc = -ENOSYS;
2449 }
Steve Frenchb298f222009-02-21 21:17:43 +00002450
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002451 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002452 inode_unlock(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 return rc;
2454}
2455
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456/*
2457 * As the file closes, flush all cached write data for this inode, checking
2458 * for write-behind errors.
2459 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002460int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461{
Al Viro496ad9a2013-01-23 17:07:38 -05002462 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 int rc = 0;
2464
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002465 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002466 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002467
Joe Perchesf96637b2013-05-04 22:12:25 -05002468 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469
2470 return rc;
2471}
2472
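/*
 * Allocate the page array for an uncached write; on failure, release
 * every page allocated so far and return -ENOMEM.
 */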
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002473static int
2474cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2475{
2476 int rc = 0;
2477 unsigned long i;
2478
2479 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002480 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002481 if (!pages[i]) {
2482 /*
2483 * save number of pages we have already allocated and
2484 * return with ENOMEM error
2485 */
2486 num_pages = i;
2487 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002488 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002489 }
2490 }
2491
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002492 if (rc) {
2493 for (i = 0; i < num_pages; i++)
2494 put_page(pages[i]);
2495 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002496 return rc;
2497}
2498
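/*
 * Size the next chunk of an uncached write: the chunk covers
 * min(len, wsize) bytes, rounded up to whole pages. For example,
 * assuming 4K pages, wsize = 65536 and len = 70000 yield
 * *cur_len = 65536 and 16 pages.
 */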
2499static inline
2500size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2501{
2502 size_t num_pages;
2503 size_t clen;
2504
2505 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002506 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002507
2508 if (cur_len)
2509 *cur_len = clen;
2510
2511 return num_pages;
2512}
2513
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002514static void
Steve French4a5c80d2014-02-07 20:45:12 -06002515cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002516{
2517 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002518 struct cifs_writedata *wdata = container_of(refcount,
2519 struct cifs_writedata, refcount);
2520
2521 for (i = 0; i < wdata->nr_pages; i++)
2522 put_page(wdata->pages[i]);
2523 cifs_writedata_release(refcount);
2524}
2525
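/*
 * Work-queue completion for an uncached write: advance the cached
 * server EOF (and i_size, if it grew) past the bytes just written,
 * wake the waiter, and drop the writedata reference.
 */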
2526static void
2527cifs_uncached_writev_complete(struct work_struct *work)
2528{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002529 struct cifs_writedata *wdata = container_of(work,
2530 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002531 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002532 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2533
2534 spin_lock(&inode->i_lock);
2535 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2536 if (cifsi->server_eof > inode->i_size)
2537 i_size_write(inode, cifsi->server_eof);
2538 spin_unlock(&inode->i_lock);
2539
2540 complete(&wdata->done);
2541
Steve French4a5c80d2014-02-07 20:45:12 -06002542 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002543}
2544
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002545static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002546wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2547 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002548{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002549 size_t save_len, copied, bytes, cur_len = *len;
2550 unsigned long i, nr_pages = *num_pages;
2551
2552 save_len = cur_len;
2553 for (i = 0; i < nr_pages; i++) {
2554 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2555 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2556 cur_len -= copied;
2557 /*
2558 * If we didn't copy as much as we expected, then that
2559 * may mean we trod into an unmapped area. Stop copying
2560 * at that point. On the next pass through the big
2561 * loop, we'll likely end up getting a zero-length
2562 * write and bailing out of it.
2563 */
2564 if (copied < bytes)
2565 break;
2566 }
2567 cur_len = save_len - cur_len;
2568 *len = cur_len;
2569
2570 /*
2571 * If we have no data to send, then that probably means that
2572 * the copy above failed altogether. That's most likely because
2573 * the address in the iovec was bogus. Return -EFAULT and let
2574 * the caller free anything we allocated and bail out.
2575 */
2576 if (!cur_len)
2577 return -EFAULT;
2578
2579 /*
2580 * i + 1 now represents the number of pages we actually used in
2581 * the copy phase above.
2582 */
2583 *num_pages = i + 1;
2584 return 0;
2585}
2586
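/*
 * Carve an uncached write into wsize-sized cifs_writedata units: wait
 * for send credits, allocate pages and fill them from the source
 * iterator, then dispatch an async write for each unit, queueing it on
 * wdata_list for the caller to collect. On -EAGAIN the iterator is
 * rewound to the failed offset and the unit is rebuilt.
 */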
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002587static int
2588cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2589 struct cifsFileInfo *open_file,
2590 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002591{
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002592 int rc = 0;
2593 size_t cur_len;
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002594 unsigned long nr_pages, num_pages, i;
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002595 struct cifs_writedata *wdata;
Al Virofc56b982016-09-21 18:18:23 -04002596 struct iov_iter saved_from = *from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002597 loff_t saved_offset = offset;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002598 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002599 struct TCP_Server_Info *server;
2600
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002601 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2602 pid = open_file->pid;
2603 else
2604 pid = current->tgid;
2605
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002606 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002607
2608 do {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002609 unsigned int wsize, credits;
2610
2611 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2612 &wsize, &credits);
2613 if (rc)
2614 break;
2615
2616 nr_pages = get_numpages(wsize, len, &cur_len);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002617 wdata = cifs_writedata_alloc(nr_pages,
2618 cifs_uncached_writev_complete);
2619 if (!wdata) {
2620 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002621 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002622 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002623 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002624
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002625 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2626 if (rc) {
2627 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002628 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002629 break;
2630 }
2631
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002632 num_pages = nr_pages;
2633 rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
2634 if (rc) {
Jeff Layton5d81de82014-02-14 07:20:35 -05002635 for (i = 0; i < nr_pages; i++)
2636 put_page(wdata->pages[i]);
2637 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002638 add_credits_and_wake_if(server, credits, 0);
Jeff Layton5d81de82014-02-14 07:20:35 -05002639 break;
2640 }
2641
2642 /*
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002643 * Bring nr_pages down to the number of pages we actually used,
2644 * and free any pages that we didn't use.
Jeff Layton5d81de82014-02-14 07:20:35 -05002645 */
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002646 for ( ; nr_pages > num_pages; nr_pages--)
Jeff Layton5d81de82014-02-14 07:20:35 -05002647 put_page(wdata->pages[nr_pages - 1]);
2648
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002649 wdata->sync_mode = WB_SYNC_ALL;
2650 wdata->nr_pages = nr_pages;
2651 wdata->offset = (__u64)offset;
2652 wdata->cfile = cifsFileInfo_get(open_file);
2653 wdata->pid = pid;
2654 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002655 wdata->pagesz = PAGE_SIZE;
2656 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002657 wdata->credits = credits;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002658
2659 if (!wdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01002660 !(rc = cifs_reopen_file(wdata->cfile, false)))
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002661 rc = server->ops->async_writev(wdata,
2662 cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002663 if (rc) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002664 add_credits_and_wake_if(server, wdata->credits, 0);
Steve French4a5c80d2014-02-07 20:45:12 -06002665 kref_put(&wdata->refcount,
2666 cifs_uncached_writedata_release);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002667 if (rc == -EAGAIN) {
Al Virofc56b982016-09-21 18:18:23 -04002668 *from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002669 iov_iter_advance(from, offset - saved_offset);
2670 continue;
2671 }
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002672 break;
2673 }
2674
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002675 list_add_tail(&wdata->list, wdata_list);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002676 offset += cur_len;
2677 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002678 } while (len > 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002679
2680 return rc;
2681}
2682
Al Viroe9d15932015-04-06 22:44:11 -04002683ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002684{
Al Viroe9d15932015-04-06 22:44:11 -04002685 struct file *file = iocb->ki_filp;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002686 ssize_t total_written = 0;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002687 struct cifsFileInfo *open_file;
2688 struct cifs_tcon *tcon;
2689 struct cifs_sb_info *cifs_sb;
2690 struct cifs_writedata *wdata, *tmp;
2691 struct list_head wdata_list;
Al Virofc56b982016-09-21 18:18:23 -04002692 struct iov_iter saved_from = *from;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002693 int rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002694
Al Viroe9d15932015-04-06 22:44:11 -04002695 /*
2696	 * BB - optimize for the case when signing is disabled: we can drop this
2697	 * extra memory-to-memory copy and use iovec buffers to construct the
2698	 * write request.
2699 */
2700
Al Viro3309dd02015-04-09 12:55:47 -04002701 rc = generic_write_checks(iocb, from);
2702 if (rc <= 0)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002703 return rc;
2704
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002705 INIT_LIST_HEAD(&wdata_list);
Al Viro7119e222014-10-22 00:25:12 -04002706 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002707 open_file = file->private_data;
2708 tcon = tlink_tcon(open_file->tlink);
2709
2710 if (!tcon->ses->server->ops->async_writev)
2711 return -ENOSYS;
2712
Al Viro3309dd02015-04-09 12:55:47 -04002713 rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
2714 open_file, cifs_sb, &wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002715
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002716 /*
2717 * If at least one write was successfully sent, then discard any rc
2718	 * value from the later writes. If the later writes succeed, then
2719	 * we'll end up returning whatever was written. If one fails, then
2720	 * we'll get a new rc value from it.
2721 */
2722 if (!list_empty(&wdata_list))
2723 rc = 0;
2724
2725 /*
2726 * Wait for and collect replies for any successful sends in order of
2727 * increasing offset. Once an error is hit or we get a fatal signal
2728 * while waiting, then return without waiting for any more replies.
2729 */
2730restart_loop:
2731 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2732 if (!rc) {
2733 /* FIXME: freezable too? */
2734 rc = wait_for_completion_killable(&wdata->done);
2735 if (rc)
2736 rc = -EINTR;
2737 else if (wdata->result)
2738 rc = wdata->result;
2739 else
2740 total_written += wdata->bytes;
2741
2742 /* resend call if it's a retryable error */
2743 if (rc == -EAGAIN) {
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002744 struct list_head tmp_list;
Al Virofc56b982016-09-21 18:18:23 -04002745 struct iov_iter tmp_from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002746
2747 INIT_LIST_HEAD(&tmp_list);
2748 list_del_init(&wdata->list);
2749
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002750 iov_iter_advance(&tmp_from,
Al Viroe9d15932015-04-06 22:44:11 -04002751 wdata->offset - iocb->ki_pos);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002752
2753 rc = cifs_write_from_iter(wdata->offset,
2754 wdata->bytes, &tmp_from,
2755 open_file, cifs_sb, &tmp_list);
2756
2757 list_splice(&tmp_list, &wdata_list);
2758
2759 kref_put(&wdata->refcount,
2760 cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002761 goto restart_loop;
2762 }
2763 }
2764 list_del_init(&wdata->list);
Steve French4a5c80d2014-02-07 20:45:12 -06002765 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002766 }
2767
Al Viroe9d15932015-04-06 22:44:11 -04002768 if (unlikely(!total_written))
2769 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002770
Al Viroe9d15932015-04-06 22:44:11 -04002771 iocb->ki_pos += total_written;
2772 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002773 cifs_stats_bytes_written(tcon, total_written);
Al Viroe9d15932015-04-06 22:44:11 -04002774 return total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002775}
2776
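/*
 * Cached write for strict cache mode: hold lock_sem shared so the
 * brlock list cannot change underneath us, return -EACCES if a
 * mandatory lock conflicts with the write range, and otherwise go
 * through the generic page cache write path and sync the result.
 */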
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002777static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002778cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002779{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002780 struct file *file = iocb->ki_filp;
2781 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2782 struct inode *inode = file->f_mapping->host;
2783 struct cifsInodeInfo *cinode = CIFS_I(inode);
2784 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04002785 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002786
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002787 /*
2788	 * We need to hold the sem to be sure nobody modifies the lock list
2789 * with a brlock that prevents writing.
2790 */
2791 down_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05002792 inode_lock(inode);
Al Viro5f380c72015-04-07 11:28:12 -04002793
Al Viro3309dd02015-04-09 12:55:47 -04002794 rc = generic_write_checks(iocb, from);
2795 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04002796 goto out;
2797
Al Viro5f380c72015-04-07 11:28:12 -04002798 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002799 server->vals->exclusive_lock_type, NULL,
Al Viro5f380c72015-04-07 11:28:12 -04002800 CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04002801 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04002802 else
2803 rc = -EACCES;
2804out:
Al Viro59551022016-01-22 15:40:57 -05002805 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04002806
Christoph Hellwige2592212016-04-07 08:52:01 -07002807 if (rc > 0)
2808 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002809 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002810 return rc;
2811}
2812
2813ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002814cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002815{
Al Viro496ad9a2013-01-23 17:07:38 -05002816 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002817 struct cifsInodeInfo *cinode = CIFS_I(inode);
2818 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2819 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2820 iocb->ki_filp->private_data;
2821 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002822 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002823
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002824 written = cifs_get_writer(cinode);
2825 if (written)
2826 return written;
2827
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002828 if (CIFS_CACHE_WRITE(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002829 if (cap_unix(tcon->ses) &&
2830 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002831 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
Al Viro3dae8752014-04-03 12:05:17 -04002832 written = generic_file_write_iter(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002833 goto out;
2834 }
Al Viro3dae8752014-04-03 12:05:17 -04002835 written = cifs_writev(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002836 goto out;
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04002837 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002838 /*
2839 * For non-oplocked files in strict cache mode we need to write the data
2840	 * to the server exactly from pos to pos+len-1 rather than flush all
2841	 * affected pages, because it may cause an error with mandatory locks on
2842	 * these pages but not on the region from pos to pos+len-1.
2843 */
Al Viro3dae8752014-04-03 12:05:17 -04002844 written = cifs_user_writev(iocb, from);
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002845 if (CIFS_CACHE_READ(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002846 /*
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002847 * We have read level caching and we have just sent a write
2848 * request to the server thus making data in the cache stale.
2849 * Zap the cache and set oplock/lease level to NONE to avoid
2850 * reading stale data from the cache. All subsequent read
2851 * operations will read new data from the server.
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002852 */
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002853 cifs_zap_mapping(inode);
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002854 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05002855 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002856 cinode->oplock = 0;
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002857 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002858out:
2859 cifs_put_writer(cinode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002860 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002861}
2862
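/* Allocate a readdata with room for nr_pages page pointers. */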
Jeff Layton0471ca32012-05-16 07:13:16 -04002863static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002864cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002865{
2866 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002867
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002868 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2869 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002870 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002871 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002872 INIT_LIST_HEAD(&rdata->list);
2873 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002874 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002875 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002876
Jeff Layton0471ca32012-05-16 07:13:16 -04002877 return rdata;
2878}
2879
Jeff Layton6993f742012-05-16 07:13:17 -04002880void
2881cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002882{
Jeff Layton6993f742012-05-16 07:13:17 -04002883 struct cifs_readdata *rdata = container_of(refcount,
2884 struct cifs_readdata, refcount);
2885
2886 if (rdata->cfile)
2887 cifsFileInfo_put(rdata->cfile);
2888
Jeff Layton0471ca32012-05-16 07:13:16 -04002889 kfree(rdata);
2890}
2891
Jeff Layton2a1bb132012-05-16 07:13:17 -04002892static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002893cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002894{
2895 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002896 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002897 unsigned int i;
2898
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002899 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002900 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2901 if (!page) {
2902 rc = -ENOMEM;
2903 break;
2904 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002905 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002906 }
2907
2908 if (rc) {
Roberto Bergantinos Corpasdf2b6af2019-05-28 09:38:14 +02002909 unsigned int nr_page_failed = i;
2910
2911 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002912 put_page(rdata->pages[i]);
2913 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002914 }
2915 }
2916 return rc;
2917}
2918
2919static void
2920cifs_uncached_readdata_release(struct kref *refcount)
2921{
Jeff Layton1c892542012-05-16 07:13:17 -04002922 struct cifs_readdata *rdata = container_of(refcount,
2923 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002924 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002925
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002926 for (i = 0; i < rdata->nr_pages; i++) {
2927 put_page(rdata->pages[i]);
2928 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002929 }
2930 cifs_readdata_release(refcount);
2931}
2932
Jeff Layton1c892542012-05-16 07:13:17 -04002933/**
2934 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2935 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05002936 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04002937 *
2938 * This function copies data from a list of pages in a readdata response into
2939 * the destination iterator. It will first calculate where the data should go
2940 * based on the info in the readdata and then copy the data into that spot.
2941 */
Al Viro7f25bba2014-02-04 14:07:43 -05002942static int
2943cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04002944{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04002945 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002946 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002947
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002948 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002949 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02002950 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovskyc06d74d2017-01-19 13:53:15 -08002951 size_t written;
2952
2953 if (unlikely(iter->type & ITER_PIPE)) {
2954 void *addr = kmap_atomic(page);
2955
2956 written = copy_to_iter(addr, copy, iter);
2957 kunmap_atomic(addr);
2958 } else
2959 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05002960 remaining -= written;
2961 if (written < copy && iov_iter_count(iter) > 0)
2962 break;
Jeff Layton1c892542012-05-16 07:13:17 -04002963 }
Al Viro7f25bba2014-02-04 14:07:43 -05002964 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002965}
2966
2967static void
2968cifs_uncached_readv_complete(struct work_struct *work)
2969{
2970 struct cifs_readdata *rdata = container_of(work,
2971 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002972
2973 complete(&rdata->done);
2974 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2975}
2976
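/*
 * Receive "len" bytes from the socket into the readdata's pages.
 * Pages past the received length are released; a short final page has
 * its tail zeroed and rdata->tailsz trimmed to match.
 */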
2977static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002978cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2979 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002980{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002981 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002982 unsigned int i;
2983 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002984
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002985 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07002986 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002987 for (i = 0; i < nr_pages; i++) {
2988 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05002989 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002990
Al Viro71335662016-01-09 19:54:50 -05002991 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002992 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002993 rdata->pages[i] = NULL;
2994 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002995 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002996 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002997 }
Al Viro71335662016-01-09 19:54:50 -05002998 n = len;
2999 if (len >= PAGE_SIZE) {
3000 /* enough data to fill the page */
3001 n = PAGE_SIZE;
3002 len -= n;
3003 } else {
3004 zero_user(page, len, PAGE_SIZE - len);
3005 rdata->tailsz = len;
3006 len = 0;
3007 }
3008 result = cifs_read_page_from_socket(server, page, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003009 if (result < 0)
3010 break;
3011
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003012 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003013 }
3014
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003015 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3016 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003017}
3018
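/*
 * Carve an uncached read into rsize-sized cifs_readdata units: wait
 * for receive credits, allocate pages, and dispatch an async read for
 * each unit, queueing it on rdata_list for the caller to collect.
 */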
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003019static int
3020cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3021 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003023 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003024 unsigned int npages, rsize, credits;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003025 size_t cur_len;
3026 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003027 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003028 struct TCP_Server_Info *server;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003029
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003030 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07003031
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003032 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3033 pid = open_file->pid;
3034 else
3035 pid = current->tgid;
3036
Jeff Layton1c892542012-05-16 07:13:17 -04003037 do {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003038 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3039 &rsize, &credits);
3040 if (rc)
3041 break;
3042
3043 cur_len = min_t(const size_t, len, rsize);
Jeff Layton1c892542012-05-16 07:13:17 -04003044 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003045
Jeff Layton1c892542012-05-16 07:13:17 -04003046 /* allocate a readdata struct */
3047 rdata = cifs_readdata_alloc(npages,
3048 cifs_uncached_readv_complete);
3049 if (!rdata) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003050 add_credits_and_wake_if(server, credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003051 rc = -ENOMEM;
Jeff Laytonbae9f742014-04-15 12:48:49 -04003052 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003054
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003055 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04003056 if (rc)
3057 goto error;
3058
3059 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003060 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003061 rdata->offset = offset;
3062 rdata->bytes = cur_len;
3063 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003064 rdata->pagesz = PAGE_SIZE;
3065 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003066 rdata->credits = credits;
Jeff Layton1c892542012-05-16 07:13:17 -04003067
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003068 if (!rdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01003069 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003070 rc = server->ops->async_readv(rdata);
Jeff Layton1c892542012-05-16 07:13:17 -04003071error:
3072 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003073 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003074 kref_put(&rdata->refcount,
3075 cifs_uncached_readdata_release);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003076 if (rc == -EAGAIN)
3077 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003078 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079 }
Jeff Layton1c892542012-05-16 07:13:17 -04003080
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003081 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003082 offset += cur_len;
3083 len -= cur_len;
3084 } while (len > 0);
3085
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003086 return rc;
3087}
3088
3089ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3090{
3091 struct file *file = iocb->ki_filp;
3092 ssize_t rc;
3093 size_t len;
3094 ssize_t total_read = 0;
3095 loff_t offset = iocb->ki_pos;
3096 struct cifs_sb_info *cifs_sb;
3097 struct cifs_tcon *tcon;
3098 struct cifsFileInfo *open_file;
3099 struct cifs_readdata *rdata, *tmp;
3100 struct list_head rdata_list;
3101
3102 len = iov_iter_count(to);
3103 if (!len)
3104 return 0;
3105
3106 INIT_LIST_HEAD(&rdata_list);
Al Viro7119e222014-10-22 00:25:12 -04003107 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003108 open_file = file->private_data;
3109 tcon = tlink_tcon(open_file->tlink);
3110
3111 if (!tcon->ses->server->ops->async_readv)
3112 return -ENOSYS;
3113
3114 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3115 cifs_dbg(FYI, "attempting read on write only file instance\n");
3116
3117 rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
3118
Jeff Layton1c892542012-05-16 07:13:17 -04003119 /* if at least one read request send succeeded, then reset rc */
3120 if (!list_empty(&rdata_list))
3121 rc = 0;
3122
Al Viroe6a7bcb2014-04-02 19:53:36 -04003123 len = iov_iter_count(to);
Jeff Layton1c892542012-05-16 07:13:17 -04003124 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003125again:
Jeff Layton1c892542012-05-16 07:13:17 -04003126 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
3127 if (!rc) {
Jeff Layton1c892542012-05-16 07:13:17 -04003128 /* FIXME: freezable sleep too? */
3129 rc = wait_for_completion_killable(&rdata->done);
3130 if (rc)
3131 rc = -EINTR;
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003132 else if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003133 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003134 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003135 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003136
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003137 list_del_init(&rdata->list);
3138 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003139
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003140 /*
3141			 * Got part of the data and then a reconnect
3142			 * happened -- fill the buffer and continue
3143 * reading.
3144 */
3145 if (got_bytes && got_bytes < rdata->bytes) {
3146 rc = cifs_readdata_to_iov(rdata, to);
3147 if (rc) {
3148 kref_put(&rdata->refcount,
3149 cifs_uncached_readdata_release);
3150 continue;
3151 }
3152 }
3153
3154 rc = cifs_send_async_read(
3155 rdata->offset + got_bytes,
3156 rdata->bytes - got_bytes,
3157 rdata->cfile, cifs_sb,
3158 &tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003159
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003160 list_splice(&tmp_list, &rdata_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003161
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003162 kref_put(&rdata->refcount,
3163 cifs_uncached_readdata_release);
3164 goto again;
3165 } else if (rdata->result)
3166 rc = rdata->result;
3167 else
Jeff Layton1c892542012-05-16 07:13:17 -04003168 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003169
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003170 /* if there was a short read -- discard anything left */
3171 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3172 rc = -ENODATA;
Jeff Layton1c892542012-05-16 07:13:17 -04003173 }
3174 list_del_init(&rdata->list);
3175 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003177
Al Viroe6a7bcb2014-04-02 19:53:36 -04003178 total_read = len - iov_iter_count(to);
Al Viro7f25bba2014-02-04 14:07:43 -05003179
Jeff Layton1c892542012-05-16 07:13:17 -04003180 cifs_stats_bytes_read(tcon, total_read);
Jeff Layton1c892542012-05-16 07:13:17 -04003181
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003182 /* mask nodata case */
3183 if (rc == -ENODATA)
3184 rc = 0;
3185
Al Viro0165e812014-02-04 14:19:48 -05003186 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003187 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003188 return total_read;
3189 }
3190 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003191}
3192
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003193ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003194cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003195{
Al Viro496ad9a2013-01-23 17:07:38 -05003196 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003197 struct cifsInodeInfo *cinode = CIFS_I(inode);
3198 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3199 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3200 iocb->ki_filp->private_data;
3201 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3202 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003203
3204 /*
3205 * In strict cache mode we need to read from the server all the time
3206	 * if we don't have a level II oplock, because the server can delay the
3207	 * mtime change - so we can't decide whether to invalidate the inode.
3208	 * We can also fail at page reading if there are mandatory locks
3209 * on pages affected by this read but not on the region from pos to
3210 * pos+len-1.
3211 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003212 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003213 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003214
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003215 if (cap_unix(tcon->ses) &&
3216 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3217 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003218 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003219
3220 /*
3221	 * We need to hold the sem to be sure nobody modifies the lock list
3222 * with a brlock that prevents reading.
3223 */
3224 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003225 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003226 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04003227 NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003228 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003229 up_read(&cinode->lock_sem);
3230 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003231}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232
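/*
 * Legacy synchronous read path: loop issuing sync_read calls of at
 * most rsize bytes (capped at CIFSMaxBufSize), reopening the handle
 * and retrying on -EAGAIN, until read_size bytes have been copied or
 * an error or a zero-byte read ends the loop.
 */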
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003233static ssize_t
3234cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235{
3236 int rc = -EACCES;
3237 unsigned int bytes_read = 0;
3238 unsigned int total_read;
3239 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003240 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003242 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003243 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003244 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003245 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003247 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003248 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003249 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003251 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003252 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003254 /* FIXME: set up handlers for larger reads and/or convert to async */
3255 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3256
Linus Torvalds1da177e2005-04-16 15:20:36 -07003257 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303258 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003259 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303260 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003262 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003263 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003264 server = tcon->ses->server;
3265
3266 if (!server->ops->sync_read) {
3267 free_xid(xid);
3268 return -ENOSYS;
3269 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003271 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3272 pid = open_file->pid;
3273 else
3274 pid = current->tgid;
3275
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003277 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003279 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3280 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003281 do {
3282 current_read_size = min_t(uint, read_size - total_read,
3283 rsize);
3284 /*
3285			 * For Windows ME and 9x we do not want to request more
3286			 * than it negotiated, since it will then refuse
3287			 * the read.
3288 */
3289 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003290 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003291 current_read_size = min_t(uint,
3292 current_read_size, CIFSMaxBufSize);
3293 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003294 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003295 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 if (rc != 0)
3297 break;
3298 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003299 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003300 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003301 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003302 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003303 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003304 &bytes_read, &cur_offset,
3305 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003306 } while (rc == -EAGAIN);
3307
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308 if (rc || (bytes_read == 0)) {
3309 if (total_read) {
3310 break;
3311 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003312 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313 return rc;
3314 }
3315 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003316 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003317 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003318 }
3319 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003320 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003321 return total_read;
3322}
3323
Jeff Laytonca83ce32011-04-12 09:13:44 -04003324/*
3325 * If the page is mmap'ed into a process' page tables, then we need to make
3326 * sure that it doesn't change while being written back.
3327 */
3328static int
3329cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3330{
3331 struct page *page = vmf->page;
3332
3333 lock_page(page);
3334 return VM_FAULT_LOCKED;
3335}
3336
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003337static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003338 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003339 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003340 .page_mkwrite = cifs_page_mkwrite,
3341};
3342
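/*
 * mmap for strict cache mode: unless we hold a read oplock, zap the
 * cached pages first so that faults fetch fresh data from the server,
 * then fall through to the generic mmap with our page_mkwrite hook.
 */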
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003343int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3344{
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003345 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003346 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003347
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003348 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003349
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003350 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003351 rc = cifs_zap_mapping(inode);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003352 if (!rc)
3353 rc = generic_file_mmap(file, vma);
3354 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003355 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003356
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003357 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003358 return rc;
3359}
3360
Linus Torvalds1da177e2005-04-16 15:20:36 -07003361int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3362{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003363 int rc, xid;
3364
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003365 xid = get_xid();
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003366
Jeff Laytonabab0952010-02-12 07:44:18 -05003367 rc = cifs_revalidate_file(file);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003368 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003369 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3370 rc);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003371 if (!rc)
3372 rc = generic_file_mmap(file, vma);
3373 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003374 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003375
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003376 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377 return rc;
3378}
3379
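/*
 * Work-queue completion for readahead: add each page to the LRU; if
 * the read succeeded (or a partial read before a reconnect covered the
 * page), mark it uptodate and hand it to fscache; then unlock the
 * pages and drop the readdata reference.
 */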
Jeff Layton0471ca32012-05-16 07:13:16 -04003380static void
3381cifs_readv_complete(struct work_struct *work)
3382{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003383 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003384 struct cifs_readdata *rdata = container_of(work,
3385 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003386
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003387 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003388 for (i = 0; i < rdata->nr_pages; i++) {
3389 struct page *page = rdata->pages[i];
3390
Jeff Layton0471ca32012-05-16 07:13:16 -04003391 lru_cache_add_file(page);
3392
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003393 if (rdata->result == 0 ||
3394 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003395 flush_dcache_page(page);
3396 SetPageUptodate(page);
3397 }
3398
3399 unlock_page(page);
3400
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003401 if (rdata->result == 0 ||
3402 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003403 cifs_readpage_to_fscache(rdata->mapping->host, page);
3404
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003405 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003406
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003407 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003408 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003409 }
Jeff Layton6993f742012-05-16 07:13:17 -04003410 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003411}
3412
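/*
 * Like cifs_uncached_read_into_pages() but for readahead: pages beyond
 * the received data are zero-filled and marked uptodate when they lie
 * past the server's apparent EOF (so the VFS stops refaulting them),
 * and are simply released otherwise.
 */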
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n = PAGE_SIZE;

		if (len >= PAGE_SIZE) {
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len, PAGE_SIZE - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}

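/*
 * Illustrative only (hypothetical helper): the tail handling above,
 * reduced to one page. When the server returns fewer than PAGE_SIZE
 * bytes for a page, the remainder must be zeroed before the page can
 * be marked uptodate, or stale data would leak to userspace.
 */
static void __maybe_unused
cifs_zero_page_tail_sketch(struct page *page, unsigned int len)
{
	if (len < PAGE_SIZE)
		zero_user(page, len, PAGE_SIZE - len);	/* zero the tail */
}
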
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}

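/*
 * Sketch of the two break conditions above as a predicate
 * (hypothetical helper, for illustration): a page can be appended to
 * the in-flight request only if it is index-contiguous with the batch
 * and one more PAGE_SIZE still fits under the negotiated rsize.
 */
static bool __maybe_unused
readpages_can_append_sketch(pgoff_t index, pgoff_t expected_index,
			    unsigned int bytes, unsigned int rsize)
{
	return index == expected_index && bytes + PAGE_SIZE <= rsize;
}
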
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Read as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative.
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point, however, since we set ra_pages to 0 when
		 * the rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->credits = credits;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/*
	 * Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}

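/*
 * A minimal sketch of the credit discipline used by the loop above
 * (hypothetical helper; the -EINVAL choice is illustrative): credits
 * taken with wait_mtu_credits() must either be handed off to the
 * async request or returned with add_credits_and_wake_if() on every
 * failure path, otherwise the connection's credit pool leaks.
 */
static int __maybe_unused
cifs_claim_read_credits_sketch(struct TCP_Server_Info *server,
			       struct cifs_sb_info *cifs_sb,
			       unsigned int *rsize, unsigned int *credits)
{
	int rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
					       rsize, credits);
	if (rc)
		return rc;
	if (unlikely(*rsize < PAGE_SIZE)) {
		/* too small to read a whole page: give the credits back */
		add_credits_and_wake_if(server, *credits, 0);
		return -EINVAL;
	}
	return 0;
}
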
/*
 * cifs_readpage_worker must be called with the page pinned
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read-ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_time(file_inode(file));

	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}

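/*
 * Hypothetical sketch of the short-read handling above: anything
 * cifs_read() did not fill must be zeroed before SetPageUptodate(),
 * because the whole page becomes visible to readers once uptodate.
 */
static void __maybe_unused
cifs_zero_short_read_sketch(struct page *page, unsigned int bytes_read)
{
	char *read_data = kmap(page);

	if (bytes_read < PAGE_SIZE)
		memset(read_data + bytes_read, 0, PAGE_SIZE - bytes_read);
	kunmap(page);
}
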
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon =
		cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&tcon->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&tcon->open_file_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes
 * open for write, in order to avoid races with writepage extending
 * the file. In the future we could consider allowing a refresh of the
 * inode only on increases in the file size, but this is tricky to do
 * without racing with writebehind page caching in the current Linux
 * kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   directio we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

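/*
 * The rule implemented above, condensed into a predicate
 * (illustrative helper only; parameters are assumptions): applying a
 * server-reported size is safe unless the inode is open for write on
 * a cached (non-direct-IO) mount and the new size would not grow the
 * file beyond what the local page cache has already seen.
 */
static bool __maybe_unused
size_change_safe_sketch(bool open_for_write, bool direct_io,
			loff_t local_size, __u64 server_eof)
{
	if (!open_for_write || direct_io)
		return true;
	/* only growth beyond the locally cached size is safe */
	return local_size < server_eof;
}
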
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/*
		 * We could try using another file handle if there is one,
		 * but how would we lock it to prevent a close of that
		 * handle racing with this read? In any case, this page
		 * will be written out by write_end, so this is fine.
		 */
	}
out:
	*pagep = page;
	return rc;
}

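/*
 * The read-avoidance test above as a standalone predicate
 * (hypothetical helper; argument names mirror cifs_write_begin): no
 * read from the server is needed when the page starts at or beyond
 * EOF, or when the write begins at the page boundary and reaches at
 * least EOF, since none of the existing data would survive anyway.
 */
static bool __maybe_unused
write_begin_can_skip_read_sketch(loff_t page_start, loff_t offset,
				 loff_t pos, unsigned int len, loff_t i_size)
{
	return page_start >= i_size ||
	       (offset == 0 && pos + len >= i_size);
}
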
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now incorrect file handle, is not a data
	 * integrity issue, but do not bother sending an oplock release
	 * if the session to the server is still disconnected, since the
	 * oplock has already been released by the server in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */);
	cifs_done_oplock_break(cinode);
}

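/*
 * Minimal sketch (not the driver's actual code path) of the
 * writeback rule cifs_oplock_break() follows: dirty pages are always
 * written back, but waiting for the writes and zapping the cache is
 * only required when read caching is being lost.
 */
static void __maybe_unused
cifs_oplock_flush_sketch(struct inode *inode, bool keep_read_cache)
{
	int rc = filemap_fdatawrite(inode->i_mapping);

	if (!keep_read_cache) {
		rc = filemap_fdatawait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);
		cifs_zap_mapping(inode);
	}
}
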
/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually we need to support direct IO for non forcedirectio
	 * mounts.
	 */
	return -EINVAL;
}


const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
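
/*
 * Hypothetical illustration of the comment above (the real choice is
 * made elsewhere in cifs at mount/inode setup): select the
 * small-buffer ops when the negotiated buffer cannot hold a protocol
 * header plus one full page of data.
 */
static __maybe_unused const struct address_space_operations *
cifs_select_aops_sketch(unsigned int bufsize, unsigned int hdrsize)
{
	return bufsize >= hdrsize + PAGE_SIZE ? &cifs_addr_ops
					      : &cifs_addr_ops_smallbuf;
}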