/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

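/*
 * Map the POSIX access mode bits of the VFS open flags to the NT desired
 * access mask sent on the wire. For O_RDWR we deliberately request
 * GENERIC_READ | GENERIC_WRITE instead of GENERIC_ALL, since asking for
 * everything can cause spurious access-denied errors on create.
 */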
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request; it can
		 * cause unnecessary access-denied errors on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

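/*
 * Map VFS open flags to the SMB_O_* flags used by the CIFS POSIX
 * extensions' open call. O_EXCL is honored only together with O_CREAT,
 * and O_DSYNC is widened to the stronger SMB_O_SYNC.
 */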
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

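/*
 * Map the create-related open flags to an SMB create disposition, per the
 * table in cifs_nt_open() below; e.g. O_CREAT | O_TRUNC maps to
 * FILE_OVERWRITE_IF (create the file if missing, truncate it otherwise).
 */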
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

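/*
 * Open or create a file through the CIFS POSIX extensions. On success,
 * when the caller passed @pinode, instantiate or refresh the inode from
 * the FILE_UNIX_BASIC_INFO the server returned.
 */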
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

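/*
 * Open a file the traditional (non-POSIX-extensions) way: convert the VFS
 * flags to an NT desired access, disposition and create options, then
 * issue the open through the dialect-specific server->ops->open().
 */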
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 * open flag mapping table:
 *
 *	POSIX Flag		CIFS Disposition
 *	----------		----------------
 *	O_CREAT			FILE_OPEN_IF
 *	O_CREAT | O_EXCL	FILE_CREATE
 *	O_CREAT | O_TRUNC	FILE_OVERWRITE_IF
 *	O_TRUNC			FILE_OVERWRITE
 *	none of the above	FILE_OPEN
 *
 *	Note that there is no direct POSIX match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing file
 *	rather than recreating it with the attributes / metadata passed
 *	in on the open call, as FILE_SUPERSEDE does.
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag, and
 *	the read/write flags match reasonably. O_LARGEFILE is irrelevant
 *	because largefile support is always used by this client. The flags
 *	O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC, O_NOFOLLOW and
 *	O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}

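/*
 * Return true if any open instance of this inode holds cached mandatory
 * byte-range locks. Callers use this to drop a read oplock, since
 * client-side read caching is unsafe while mandatory brlocks are held.
 */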
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

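/*
 * Allocate the per-open cifsFileInfo and its byte-range lock list, pin the
 * dentry and superblock, and link the new handle into both the tcon's and
 * the inode's open file lists under open_file_lock.
 */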
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

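/*
 * Take an extra reference on an open file handle; released via
 * cifsFileInfo_put(), which may close the handle on the server once the
 * last reference is dropped.
 */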
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true);
}

/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock and
 * cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * Fall through to retry open the old way on errors;
		 * especially in the reconnect path it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * the file might have write-behind data that needs to be flushed and
	 * the server version of file size can be stale. If we knew for sure
	 * that the inode was not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

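/*
 * After a reconnect, walk the tree connection's open file list and try to
 * reopen every invalidated persistent handle; handles that still fail to
 * reopen leave need_reopen_files set so a later pass retries them.
 */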
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

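/*
 * Allocate and initialize a byte-range lock record, owned by the current
 * thread group, ready to be linked into a fid's lock list.
 */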
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

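/*
 * Detach every lock request queued on @lock's blocked list and wake it up
 * so it can retry taking the range.
 */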
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

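/*
 * Unconditionally add a byte-range lock to the fid's cached lock list,
 * taking the inode's lock_sem for writing.
 */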
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001095/*
1096 * Set the byte-range lock (posix style). Returns:
1097 * 1) 0, if we set the lock and don't need to request to the server;
1098 * 2) 1, if we need to request to the server;
1099 * 3) <0, if the error occurs while setting the lock.
1100 */
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001101static int
1102cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1103{
Al Viro496ad9a2013-01-23 17:07:38 -05001104 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky50792762011-10-29 17:17:57 +04001105 int rc = 1;
1106
1107 if ((flock->fl_flags & FL_POSIX) == 0)
1108 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001109
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001110try_again:
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001111 down_write(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001112 if (!cinode->can_cache_brlcks) {
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001113 up_write(&cinode->lock_sem);
Pavel Shilovsky50792762011-10-29 17:17:57 +04001114 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001115 }
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001116
1117 rc = posix_lock_file(file, flock, NULL);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001118 up_write(&cinode->lock_sem);
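	/*
	 * FILE_LOCK_DEFERRED means the VFS queued us behind a conflicting
	 * lock; fl_next is cleared once we are unblocked, so wait for that
	 * and retry.  If the wait is interrupted, drop out of the block list.
	 */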
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001119 if (rc == FILE_LOCK_DEFERRED) {
1120 rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
1121 if (!rc)
1122 goto try_again;
Jeff Layton1a9e64a2013-06-21 08:58:10 -04001123 posix_unblock_lock(flock);
Pavel Shilovsky66189be2012-03-28 21:56:19 +04001124 }
Steve French9ebb3892012-04-01 13:52:54 -05001125 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001126}
1127
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001128int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001129cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001130{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001131 unsigned int xid;
1132 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001133 struct cifsLockInfo *li, *tmp;
1134 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001135 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001136 LOCKING_ANDX_RANGE *buf, *cur;
1137 int types[] = {LOCKING_ANDX_LARGE_FILES,
1138 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1139 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001140
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001141 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001142 tcon = tlink_tcon(cfile->tlink);
1143
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001144 /*
1145 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001146 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001147 */
1148 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001149 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001150 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001151 return -EINVAL;
1152 }
1153
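	/*
	 * Cap the lock buffer at one page and work out how many
	 * LOCKING_ANDX_RANGE entries fit after the SMB header.
	 */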
Ross Lagerwall04d76802019-01-08 18:30:56 +00001154 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1155 PAGE_SIZE);
1156 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1157 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001158 max_num = (max_buf - sizeof(struct smb_hdr)) /
1159 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001160 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001161 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001162 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001163 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001164 }
1165
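	/*
	 * Two passes: exclusive ranges first, then shared ones.  All ranges
	 * in a single LOCKING_ANDX request must share one lock type, which
	 * is passed per call to cifs_lockv().
	 */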
1166 for (i = 0; i < 2; i++) {
1167 cur = buf;
1168 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001169 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001170 if (li->type != types[i])
1171 continue;
1172 cur->Pid = cpu_to_le16(li->pid);
1173 cur->LengthLow = cpu_to_le32((u32)li->length);
1174 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1175 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1176 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1177 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001178 stored_rc = cifs_lockv(xid, tcon,
1179 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001180 (__u8)li->type, 0, num,
1181 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001182 if (stored_rc)
1183 rc = stored_rc;
1184 cur = buf;
1185 num = 0;
1186 } else
1187 cur++;
1188 }
1189
1190 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001191 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001192 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001193 if (stored_rc)
1194 rc = stored_rc;
1195 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001196 }
1197
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001198 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001199 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001200 return rc;
1201}
1202
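/*
 * Mix the lock-owner pointer hash with a random per-module secret so raw
 * kernel pointers are not sent to the server in the lock owner field.
 */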
Jeff Layton3d224622016-05-24 06:27:44 -04001203static __u32
1204hash_lockowner(fl_owner_t owner)
1205{
1206 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1207}
1208
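/*
 * Snapshot of one POSIX lock: entries are preallocated, filled in under
 * flc_lock (where we cannot sleep) and sent to the server afterwards.
 */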
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001209struct lock_to_push {
1210 struct list_head llist;
1211 __u64 offset;
1212 __u64 length;
1213 __u32 pid;
1214 __u16 netfid;
1215 __u8 type;
1216};
1217
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001218static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001219cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001220{
David Howells2b0143b2015-03-17 22:25:59 +00001221 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001222 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001223 struct file_lock *flock;
1224 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001225 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001226 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001227 struct list_head locks_to_send, *el;
1228 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001229 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001230
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001231 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001232
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001233 if (!flctx)
1234 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001235
Jeff Laytone084c1b2015-02-16 14:32:03 -05001236 spin_lock(&flctx->flc_lock);
1237 list_for_each(el, &flctx->flc_posix) {
1238 count++;
1239 }
1240 spin_unlock(&flctx->flc_lock);
1241
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001242 INIT_LIST_HEAD(&locks_to_send);
1243
1244 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001245 * Allocating count locks is enough because no FL_POSIX locks can be
1246 * added to the list while we are holding cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001247 * protects locking operations on this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001248 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001249 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001250 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1251 if (!lck) {
1252 rc = -ENOMEM;
1253 goto err_out;
1254 }
1255 list_add_tail(&lck->llist, &locks_to_send);
1256 }
1257
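	/*
	 * Fill the preallocated entries while holding flc_lock; kmalloc()
	 * may sleep, so no allocations are allowed from here on.
	 */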
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001258 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001259 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001260 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001261 if (el == &locks_to_send) {
1262 /*
1263 * The list ended. We don't have enough allocated
1264 * structures - something is really wrong.
1265 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001266 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001267 break;
1268 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001269 length = 1 + flock->fl_end - flock->fl_start;
1270 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1271 type = CIFS_RDLCK;
1272 else
1273 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001274 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001275 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001276 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001277 lck->length = length;
1278 lck->type = type;
1279 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001280 }
Jeff Layton6109c852015-01-16 15:05:57 -05001281 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001282
1283 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001284 int stored_rc;
1285
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001286 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001287 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001288 lck->type, 0);
1289 if (stored_rc)
1290 rc = stored_rc;
1291 list_del(&lck->llist);
1292 kfree(lck);
1293 }
1294
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001295out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001296 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001297 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001298err_out:
1299 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1300 list_del(&lck->llist);
1301 kfree(lck);
1302 }
1303 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001304}
1305
1306static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001307cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001308{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001309 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001310 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001311 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001312 int rc = 0;
1313
1314 /* we are going to update can_cache_brlcks here - need a write access */
1315 down_write(&cinode->lock_sem);
1316 if (!cinode->can_cache_brlcks) {
1317 up_write(&cinode->lock_sem);
1318 return rc;
1319 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001320
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001321 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001322 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1323 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001324 rc = cifs_push_posix_locks(cfile);
1325 else
1326 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001327
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001328 cinode->can_cache_brlcks = false;
1329 up_write(&cinode->lock_sem);
1330 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001331}
1332
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001333static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001334cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001335 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001337 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001338 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001339 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001340 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001341 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001342 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001343 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001345 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001346 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001347 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001348 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001349 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001350 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1351 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001352 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001354 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001355 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001356 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001357 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001358 *lock = 1;
1359 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001360 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001361 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001362 *unlock = 1;
1363 /* Check if unlock includes more than one lock range */
1364 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001365 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001366 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001367 *lock = 1;
1368 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001369 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001370 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001371 *lock = 1;
1372 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001373 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001374 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001375 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001377 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001378}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001380static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001381cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001382 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001383{
1384 int rc = 0;
1385 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001386 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1387 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001388 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001389 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001391 if (posix_lck) {
1392 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001393
1394 rc = cifs_posix_lock_test(file, flock);
1395 if (!rc)
1396 return rc;
1397
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001398 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001399 posix_lock_type = CIFS_RDLCK;
1400 else
1401 posix_lock_type = CIFS_WRLCK;
Jeff Layton3d224622016-05-24 06:27:44 -04001402 rc = CIFSSMBPosixLock(xid, tcon, netfid,
1403 hash_lockowner(flock->fl_owner),
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001404 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001405 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406 return rc;
1407 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001408
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001409 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001410 if (!rc)
1411 return rc;
1412
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001413 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001414 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1415 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001416 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001417 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1418 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001419 flock->fl_type = F_UNLCK;
1420 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001421 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1422 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001423 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001424 }
1425
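	/*
	 * The probe above failed, so a conflicting lock exists.  If the
	 * caller asked about a shared lock, the conflict must be exclusive.
	 * Otherwise probe again with a shared lock to tell whether the
	 * conflicting lock is shared (F_RDLCK) or exclusive (F_WRLCK).
	 */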
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001426 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001427 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001428 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001429 }
1430
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001431 type &= ~server->vals->exclusive_lock_type;
1432
1433 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1434 type | server->vals->shared_lock_type,
1435 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001436 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001437 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1438 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001439 flock->fl_type = F_RDLCK;
1440 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001441 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1442 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001443 } else
1444 flock->fl_type = F_WRLCK;
1445
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001446 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001447}
1448
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001449void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001450cifs_move_llist(struct list_head *source, struct list_head *dest)
1451{
1452 struct list_head *li, *tmp;
1453 list_for_each_safe(li, tmp, source)
1454 list_move(li, dest);
1455}
1456
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001457void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001458cifs_free_llist(struct list_head *llist)
1459{
1460 struct cifsLockInfo *li, *tmp;
1461 list_for_each_entry_safe(li, tmp, llist, llist) {
1462 cifs_del_lock_waiters(li);
1463 list_del(&li->llist);
1464 kfree(li);
1465 }
1466}
1467
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001468int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001469cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1470 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001471{
1472 int rc = 0, stored_rc;
1473 int types[] = {LOCKING_ANDX_LARGE_FILES,
1474 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1475 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001476 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001477 LOCKING_ANDX_RANGE *buf, *cur;
1478 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001479 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001480 struct cifsLockInfo *li, *tmp;
1481 __u64 length = 1 + flock->fl_end - flock->fl_start;
1482 struct list_head tmp_llist;
1483
1484 INIT_LIST_HEAD(&tmp_llist);
1485
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001486 /*
1487 * Accessing maxBuf is racy with cifs_reconnect - need to store value
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001488 * and check it before using.
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001489 */
1490 max_buf = tcon->ses->server->maxBuf;
Ross Lagerwallbaf6b192019-01-08 18:30:57 +00001491 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001492 return -EINVAL;
1493
Ross Lagerwall04d76802019-01-08 18:30:56 +00001494 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1495 PAGE_SIZE);
1496 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1497 PAGE_SIZE);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001498 max_num = (max_buf - sizeof(struct smb_hdr)) /
1499 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001500 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001501 if (!buf)
1502 return -ENOMEM;
1503
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001504 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001505 for (i = 0; i < 2; i++) {
1506 cur = buf;
1507 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001508 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
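			/* skip locks not wholly contained in the unlock range */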
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001509 if (flock->fl_start > li->offset ||
1510 (flock->fl_start + length) <
1511 (li->offset + li->length))
1512 continue;
1513 if (current->tgid != li->pid)
1514 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001515 if (types[i] != li->type)
1516 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001517 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001518 /*
1519 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001520 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001521 */
1522 list_del(&li->llist);
1523 cifs_del_lock_waiters(li);
1524 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001525 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001526 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001527 cur->Pid = cpu_to_le16(li->pid);
1528 cur->LengthLow = cpu_to_le32((u32)li->length);
1529 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1530 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1531 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1532 /*
1533 * We need to save the lock here so we can add it back to
1534 * the file's list if the unlock range request fails on
1535 * the server.
1536 */
1537 list_move(&li->llist, &tmp_llist);
1538 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001539 stored_rc = cifs_lockv(xid, tcon,
1540 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001541 li->type, num, 0, buf);
1542 if (stored_rc) {
1543 /*
1544 * We failed on the unlock range
1545 * request - add all locks from the tmp
1546 * list to the head of the file's list.
1547 */
1548 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001549 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001550 rc = stored_rc;
1551 } else
1552 /*
1553 * The unlock range request succeeded -
1554 * free the tmp list.
1555 */
1556 cifs_free_llist(&tmp_llist);
1557 cur = buf;
1558 num = 0;
1559 } else
1560 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001561 }
1562 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001563 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001564 types[i], num, 0, buf);
1565 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001566 cifs_move_llist(&tmp_llist,
1567 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001568 rc = stored_rc;
1569 } else
1570 cifs_free_llist(&tmp_llist);
1571 }
1572 }
1573
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001574 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001575 kfree(buf);
1576 return rc;
1577}
1578
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001579static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001580cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001581 bool wait_flag, bool posix_lck, int lock, int unlock,
1582 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001583{
1584 int rc = 0;
1585 __u64 length = 1 + flock->fl_end - flock->fl_start;
1586 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1587 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001588 struct TCP_Server_Info *server = tcon->ses->server;
David Howells2b0143b2015-03-17 22:25:59 +00001589 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001590
1591 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001592 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001593
1594 rc = cifs_posix_lock_set(file, flock);
1595 if (rc <= 0)
1596 return rc;
1597
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001598 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001599 posix_lock_type = CIFS_RDLCK;
1600 else
1601 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001602
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001603 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001604 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001605
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001606 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
Jeff Layton3d224622016-05-24 06:27:44 -04001607 hash_lockowner(flock->fl_owner),
1608 flock->fl_start, length,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001609 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001610 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001611 }
1612
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001613 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001614 struct cifsLockInfo *lock;
1615
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001616 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001617 if (!lock)
1618 return -ENOMEM;
1619
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001620 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001621 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001622 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001623 return rc;
1624 }
1625 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001626 goto out;
1627
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001628 /*
1629 * A Windows 7 server can delay breaking a lease from read to None
1630 * if we set a byte-range lock on a file - break it explicitly
1631 * before sending the lock to the server to be sure the next
1632 * read won't conflict with non-overlapping locks due to
1633 * page reading.
1634 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001635 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
1636 CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04001637 cifs_zap_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001638 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1639 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04001640 CIFS_I(inode)->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001641 }
1642
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001643 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1644 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001645 if (rc) {
1646 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001647 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001648 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001649
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001650 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001651 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001652 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001653
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001654out:
Aurelien Aptel56300d32019-03-14 18:44:16 +01001655 if (flock->fl_flags & FL_POSIX) {
1656 /*
1657 * If this is a request to remove all locks because we
1658 * are closing the file, it doesn't matter if the
1659 * unlocking failed as both cifs.ko and the SMB server
1660 * remove the lock on file close
1661 */
1662 if (rc) {
1663 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
1664 if (!(flock->fl_flags & FL_CLOSE))
1665 return rc;
1666 }
Benjamin Coddington4f656362015-10-22 13:38:14 -04001667 rc = locks_lock_file_wait(file, flock);
Aurelien Aptel56300d32019-03-14 18:44:16 +01001668 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001669 return rc;
1670}
1671
1672int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1673{
1674 int rc, xid;
1675 int lock = 0, unlock = 0;
1676 bool wait_flag = false;
1677 bool posix_lck = false;
1678 struct cifs_sb_info *cifs_sb;
1679 struct cifs_tcon *tcon;
1680 struct cifsInodeInfo *cinode;
1681 struct cifsFileInfo *cfile;
1682 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001683 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001684
1685 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001686 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001687
Joe Perchesf96637b2013-05-04 22:12:25 -05001688 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1689 cmd, flock->fl_flags, flock->fl_type,
1690 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001691
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001692 cfile = (struct cifsFileInfo *)file->private_data;
1693 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001694
1695 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1696 tcon->ses->server);
1697
Al Viro7119e222014-10-22 00:25:12 -04001698 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001699 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001700 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001701
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001702 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001703 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1704 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1705 posix_lck = true;
1706 /*
1707 * BB add code here to normalize offset and length to account for
1708 * negative length which we cannot accept over the wire.
1709 */
1710 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001711 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001712 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001713 return rc;
1714 }
1715
1716 if (!lock && !unlock) {
1717 /*
1718 * if this is neither a lock nor an unlock request, there is
1719 * nothing to do since we do not know what it is
1720 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001721 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001722 return -EOPNOTSUPP;
1723 }
1724
1725 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1726 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001727 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 return rc;
1729}
1730
Jeff Layton597b0272012-03-23 14:40:56 -04001731/*
1732 * update the file size (if needed) after a write. Should be called with
1733 * the inode->i_lock held
1734 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001735void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001736cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1737 unsigned int bytes_written)
1738{
1739 loff_t end_of_write = offset + bytes_written;
1740
1741 if (end_of_write > cifsi->server_eof)
1742 cifsi->server_eof = end_of_write;
1743}
1744
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001745static ssize_t
1746cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1747 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748{
1749 int rc = 0;
1750 unsigned int bytes_written = 0;
1751 unsigned int total_written;
1752 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001753 struct cifs_tcon *tcon;
1754 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001755 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001756 struct dentry *dentry = open_file->dentry;
David Howells2b0143b2015-03-17 22:25:59 +00001757 struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001758 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
Jeff Layton7da4b492010-10-15 15:34:00 -04001760 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761
Al Viro35c265e2014-08-19 20:25:34 -04001762 cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
1763 write_size, *offset, dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001765 tcon = tlink_tcon(open_file->tlink);
1766 server = tcon->ses->server;
1767
1768 if (!server->ops->sync_write)
1769 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001770
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001771 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 for (total_written = 0; write_size > total_written;
1774 total_written += bytes_written) {
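		/*
		 * Retry while the write fails with -EAGAIN: a session
		 * reconnect invalidates the handle, which is reopened below.
		 */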
1775 rc = -EAGAIN;
1776 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001777 struct kvec iov[2];
1778 unsigned int len;
1779
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 /* we could deadlock if we called
1782 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001783 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001785 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 if (rc != 0)
1787 break;
1788 }
Steve French3e844692005-10-03 13:37:24 -07001789
David Howells2b0143b2015-03-17 22:25:59 +00001790 len = min(server->ops->wp_retry_size(d_inode(dentry)),
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04001791 (unsigned int)write_size - total_written);
Jeff Laytonca83ce32011-04-12 09:13:44 -04001792 /* iov[0] is reserved for smb header */
1793 iov[1].iov_base = (char *)write_data + total_written;
1794 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001795 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001796 io_parms.tcon = tcon;
1797 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001798 io_parms.length = len;
Steve Frenchdb8b6312014-09-22 05:13:55 -05001799 rc = server->ops->sync_write(xid, &open_file->fid,
1800 &io_parms, &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 }
1802 if (rc || (bytes_written == 0)) {
1803 if (total_written)
1804 break;
1805 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001806 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 return rc;
1808 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001809 } else {
David Howells2b0143b2015-03-17 22:25:59 +00001810 spin_lock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001811 cifs_update_eof(cifsi, *offset, bytes_written);
David Howells2b0143b2015-03-17 22:25:59 +00001812 spin_unlock(&d_inode(dentry)->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001813 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001814 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815 }
1816
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001817 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
Jeff Layton7da4b492010-10-15 15:34:00 -04001819 if (total_written > 0) {
David Howells2b0143b2015-03-17 22:25:59 +00001820 spin_lock(&d_inode(dentry)->i_lock);
1821 if (*offset > d_inode(dentry)->i_size)
1822 i_size_write(d_inode(dentry), *offset);
1823 spin_unlock(&d_inode(dentry)->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 }
David Howells2b0143b2015-03-17 22:25:59 +00001825 mark_inode_dirty_sync(d_inode(dentry));
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001826 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 return total_written;
1828}
1829
Jeff Layton6508d902010-09-29 19:51:11 -04001830struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1831 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001832{
1833 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001834 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001835 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Layton6508d902010-09-29 19:51:11 -04001836
1837 /* only filter by fsuid on multiuser mounts */
1838 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1839 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001840
Steve French3afca262016-09-22 18:58:16 -05001841 spin_lock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001842 /* we could simply get the first_list_entry since write-only entries
1843 are always at the end of the list but since the first entry might
1844 have a close pending, we go through the whole list */
1845 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001846 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001847 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001848 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001849 if (!open_file->invalidHandle) {
1850 /* found a good file */
1851 /* lock it so it will not be closed on us */
Steve French3afca262016-09-22 18:58:16 -05001852 cifsFileInfo_get(open_file);
1853 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001854 return open_file;
1855 } /* else might as well continue, and look for
1856 another, or simply have the caller reopen it
1857 again rather than trying to fix this handle */
1858 } else /* write only file */
1859 break; /* write only files are last so must be done */
1860 }
Steve French3afca262016-09-22 18:58:16 -05001861 spin_unlock(&tcon->open_file_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001862 return NULL;
1863}
Steve French630f3f0c2007-10-25 21:17:17 +00001864
Jeff Layton6508d902010-09-29 19:51:11 -04001865struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1866 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001867{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001868 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001869 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001870 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001871 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001872 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001873 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001874
Steve French60808232006-04-22 15:53:05 +00001875 /* Having a null inode here (because mapping->host was set to zero by
1876 the VFS or MM) should not happen but we had reports of an oops (due to
1877 it being zero) during stress test cases so we need to check for it */
1878
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001879 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001880 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001881 dump_stack();
1882 return NULL;
1883 }
1884
Jeff Laytond3892292010-11-02 16:22:50 -04001885 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001886 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001887
Jeff Layton6508d902010-09-29 19:51:11 -04001888 /* only filter by fsuid on multiuser mounts */
1889 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1890 fsuid_only = false;
1891
Steve French3afca262016-09-22 18:58:16 -05001892 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001893refind_writable:
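	/* give up after MAX_REOPEN_ATT failed reopen attempts */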
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001894 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001895 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001896 return NULL;
1897 }
Steve French6148a742005-10-05 12:23:19 -07001898 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001899 if (!any_available && open_file->pid != current->tgid)
1900 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001901 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001902 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001903 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001904 if (!open_file->invalidHandle) {
1905 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001906 cifsFileInfo_get(open_file);
1907 spin_unlock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001908 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001909 } else {
1910 if (!inv_file)
1911 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001912 }
Steve French6148a742005-10-05 12:23:19 -07001913 }
1914 }
Jeff Layton2846d382008-09-22 21:33:33 -04001915 /* couldn't find a usable FH with the same pid, try any available */
1916 if (!any_available) {
1917 any_available = true;
1918 goto refind_writable;
1919 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001920
1921 if (inv_file) {
1922 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001923 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001924 }
1925
Steve French3afca262016-09-22 18:58:16 -05001926 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001927
1928 if (inv_file) {
1929 rc = cifs_reopen_file(inv_file, false);
1930 if (!rc)
1931 return inv_file;
1932 else {
Steve French3afca262016-09-22 18:58:16 -05001933 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001934 list_move_tail(&inv_file->flist,
1935 &cifs_inode->openFileList);
Steve French3afca262016-09-22 18:58:16 -05001936 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001937 cifsFileInfo_put(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001938 ++refind;
David Disseldorpe1e9bda2015-03-13 14:20:29 +01001939 inv_file = NULL;
Steve French3afca262016-09-22 18:58:16 -05001940 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001941 goto refind_writable;
1942 }
1943 }
1944
Steve French6148a742005-10-05 12:23:19 -07001945 return NULL;
1946}
1947
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1949{
1950 struct address_space *mapping = page->mapping;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001951 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 char *write_data;
1953 int rc = -EFAULT;
1954 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001956 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957
1958 if (!mapping || !mapping->host)
1959 return -EFAULT;
1960
1961 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
1963 offset += (loff_t)from;
1964 write_data = kmap(page);
1965 write_data += from;
1966
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001967 if ((to > PAGE_SIZE) || (from > to)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 kunmap(page);
1969 return -EIO;
1970 }
1971
1972 /* racing with truncate? */
1973 if (offset > mapping->host->i_size) {
1974 kunmap(page);
1975 return 0; /* don't care */
1976 }
1977
1978 /* check to make sure that we are not extending the file */
1979 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001980 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
Jeff Layton6508d902010-09-29 19:51:11 -04001982 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001983 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001984 bytes_written = cifs_write(open_file, open_file->pid,
1985 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001986 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 /* Does mm or vfs already set times? */
Deepa Dinamanic2050a42016-09-14 07:48:06 -07001988 inode->i_atime = inode->i_mtime = current_time(inode);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001989 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001990 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001991 else if (bytes_written < 0)
1992 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001993 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05001994 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 rc = -EIO;
1996 }
1997
1998 kunmap(page);
1999 return rc;
2000}
2001
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002002static struct cifs_writedata *
2003wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2004 pgoff_t end, pgoff_t *index,
2005 unsigned int *found_pages)
2006{
2007 unsigned int nr_pages;
2008 struct page **pages;
2009 struct cifs_writedata *wdata;
2010
2011 wdata = cifs_writedata_alloc((unsigned int)tofind,
2012 cifs_writev_complete);
2013 if (!wdata)
2014 return NULL;
2015
2016 /*
2017 * find_get_pages_tag seems to return a max of 256 on each
2018 * iteration, so we must call it several times in order to
2019 * fill the array or the wsize is effectively limited to
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03002020 * 256 * PAGE_SIZE.
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002021 */
2022 *found_pages = 0;
2023 pages = wdata->pages;
2024 do {
2025 nr_pages = find_get_pages_tag(mapping, index,
2026 PAGECACHE_TAG_DIRTY, tofind,
2027 pages);
2028 *found_pages += nr_pages;
2029 tofind -= nr_pages;
2030 pages += nr_pages;
2031 } while (nr_pages && tofind && *index <= end);
2032
2033 return wdata;
2034}
2035
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002036static unsigned int
2037wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2038 struct address_space *mapping,
2039 struct writeback_control *wbc,
2040 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2041{
2042 unsigned int nr_pages = 0, i;
2043 struct page *page;
2044
2045 for (i = 0; i < found_pages; i++) {
2046 page = wdata->pages[i];
2047 /*
2048 * At this point we hold neither mapping->tree_lock nor
2049 * lock on the page itself: the page may be truncated or
2050 * invalidated (changing page->mapping to NULL), or even
2051 * swizzled back from swapper_space to tmpfs file
2052 * mapping
2053 */
2054
2055 if (nr_pages == 0)
2056 lock_page(page);
2057 else if (!trylock_page(page))
2058 break;
2059
2060 if (unlikely(page->mapping != mapping)) {
2061 unlock_page(page);
2062 break;
2063 }
2064
2065 if (!wbc->range_cyclic && page->index > end) {
2066 *done = true;
2067 unlock_page(page);
2068 break;
2069 }
2070
2071 if (*next && (page->index != *next)) {
2072 /* Not next consecutive page */
2073 unlock_page(page);
2074 break;
2075 }
2076
2077 if (wbc->sync_mode != WB_SYNC_NONE)
2078 wait_on_page_writeback(page);
2079
2080 if (PageWriteback(page) ||
2081 !clear_page_dirty_for_io(page)) {
2082 unlock_page(page);
2083 break;
2084 }
2085
2086 /*
2087 * This actually clears the dirty bit in the radix tree.
2088 * See cifs_writepage() for more commentary.
2089 */
2090 set_page_writeback(page);
2091 if (page_offset(page) >= i_size_read(mapping->host)) {
2092 *done = true;
2093 unlock_page(page);
2094 end_page_writeback(page);
2095 break;
2096 }
2097
2098 wdata->pages[i] = page;
2099 *next = page->index + 1;
2100 ++nr_pages;
2101 }
2102
2103 /* reset index to refind any pages skipped */
2104 if (nr_pages == 0)
2105 *index = wdata->pages[0]->index + 1;
2106
2107 /* put any pages we aren't going to use */
2108 for (i = nr_pages; i < found_pages; i++) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002109 put_page(wdata->pages[i]);
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002110 wdata->pages[i] = NULL;
2111 }
2112
2113 return nr_pages;
2114}
2115
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002116static int
2117wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2118 struct address_space *mapping, struct writeback_control *wbc)
2119{
2120 int rc = 0;
2121 struct TCP_Server_Info *server;
2122 unsigned int i;
2123
2124 wdata->sync_mode = wbc->sync_mode;
2125 wdata->nr_pages = nr_pages;
2126 wdata->offset = page_offset(wdata->pages[0]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002127 wdata->pagesz = PAGE_SIZE;
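	/* the final page may be partial: bytes = full pages + tail */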
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002128 wdata->tailsz = min(i_size_read(mapping->host) -
2129 page_offset(wdata->pages[nr_pages - 1]),
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002130 (loff_t)PAGE_SIZE);
2131 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002132
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002133 if (wdata->cfile != NULL)
2134 cifsFileInfo_put(wdata->cfile);
2135 wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
2136 if (!wdata->cfile) {
2137 cifs_dbg(VFS, "No writable handles for inode\n");
2138 rc = -EBADF;
2139 } else {
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002140 wdata->pid = wdata->cfile->pid;
2141 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2142 rc = server->ops->async_writev(wdata, cifs_writedata_release);
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002143 }
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002144
2145 for (i = 0; i < nr_pages; ++i)
2146 unlock_page(wdata->pages[i]);
2147
2148 return rc;
2149}
2150
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07002152 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002154 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002155 struct TCP_Server_Info *server;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002156 bool done = false, scanned = false, range_whole = false;
2157 pgoff_t end, index;
2158 struct cifs_writedata *wdata;
Steve French37c0eb42005-10-05 14:50:29 -07002159 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00002160
Steve French37c0eb42005-10-05 14:50:29 -07002161 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002162 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07002163 * one page at a time via cifs_writepage
2164 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002165 if (cifs_sb->wsize < PAGE_SIZE)
Steve French37c0eb42005-10-05 14:50:29 -07002166 return generic_writepages(mapping, wbc);
2167
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002168 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07002169 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002170 end = -1;
2171 } else {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002172 index = wbc->range_start >> PAGE_SHIFT;
2173 end = wbc->range_end >> PAGE_SHIFT;
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002174 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002175 range_whole = true;
2176 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002177 }
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002178 server = cifs_sb_master_tcon(cifs_sb)->ses->server;
Steve French37c0eb42005-10-05 14:50:29 -07002179retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002180 while (!done && index <= end) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002181 unsigned int i, nr_pages, found_pages, wsize, credits;
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002182 pgoff_t next = 0, tofind, saved_index = index;
Steve French37c0eb42005-10-05 14:50:29 -07002183
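		/*
		 * Reserve credits and a send size for this batch; under
		 * credit pressure the granted wsize may be smaller than the
		 * configured one.
		 */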
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002184 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2185 &wsize, &credits);
2186 if (rc)
2187 break;
Steve French37c0eb42005-10-05 14:50:29 -07002188
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002189 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07002190
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04002191 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2192 &found_pages);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002193 if (!wdata) {
2194 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002195 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002196 break;
2197 }
2198
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002199 if (found_pages == 0) {
2200 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002201 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002202 break;
2203 }
2204
Pavel Shilovsky7e48ff82014-06-19 15:01:03 +04002205 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2206 end, &index, &next, &done);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002207
2208 /* nothing to write? */
2209 if (nr_pages == 0) {
2210 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002211 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002212 continue;
2213 }
2214
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002215 wdata->credits = credits;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002216
Pavel Shilovsky619aa482014-06-19 15:28:37 +04002217 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
Jeff Layton941b8532011-01-11 07:24:01 -05002218
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002219 /* send failure -- clean up the mess */
2220 if (rc != 0) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002221 add_credits_and_wake_if(server, wdata->credits, 0);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002222 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002223 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002224 redirty_page_for_writepage(wbc,
2225 wdata->pages[i]);
2226 else
2227 SetPageError(wdata->pages[i]);
2228 end_page_writeback(wdata->pages[i]);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002229 put_page(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002230 }
Jeff Layton941b8532011-01-11 07:24:01 -05002231 if (rc != -EAGAIN)
2232 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002233 }
2234 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002235
Pavel Shilovsky66231a42014-06-19 16:15:16 +04002236 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2237 index = saved_index;
2238 continue;
2239 }
2240
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002241 wbc->nr_to_write -= nr_pages;
2242 if (wbc->nr_to_write <= 0)
2243 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002244
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002245 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002246 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002247
Steve French37c0eb42005-10-05 14:50:29 -07002248 if (!scanned && !done) {
2249 /*
2250 * We hit the last page and there is more work to be done: wrap
2251 * back to the start of the file
2252 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002253 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002254 index = 0;
2255 goto retry;
2256 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002257
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002258 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002259 mapping->writeback_index = index;
2260
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 return rc;
2262}
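
/*
 * A minimal sketch (illustrative only, never called) of the cleanup pattern
 * that every failure path in the writeback loop above follows: hand any
 * unused credits back to the server and drop the wdata reference.  The
 * helper name is hypothetical; the loop open-codes these two steps instead.
 */
static inline void
cifs_writepages_cleanup_sketch(struct TCP_Server_Info *server,
			       struct cifs_writedata *wdata)
{
	add_credits_and_wake_if(server, wdata->credits, 0);
	kref_put(&wdata->refcount, cifs_writedata_release);
}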
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002264static int
2265cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002267 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002268 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002270 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271/* BB add check for wbc flags */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002272 get_page(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002273 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002274 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002275
2276 /*
2277 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2278 *
2279 * A writepage() implementation always needs to do either this,
2280 * or re-dirty the page with "redirty_page_for_writepage()" in
2281 * the case of a failure.
2282 *
2283 * Just unlocking the page will cause the radix tree tag-bits
2284 * to fail to update with the state of the page correctly.
2285 */
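	/*
	 * Illustrative sketch of the two legal shapes this contract allows
	 * (assuming the standard ->writepage() rules described above):
	 *
	 *	set_page_writeback(page);
	 *	...do the I/O...
	 *	end_page_writeback(page);
	 *
	 * or, on a transient failure:
	 *
	 *	redirty_page_for_writepage(wbc, page);
	 *
	 * This function takes the first path, and falls back to the second
	 * when the write returns -EAGAIN outside of WB_SYNC_ALL mode.
	 */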
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002286 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002287retry_write:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002288 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002289 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2290 goto retry_write;
2291 else if (rc == -EAGAIN)
2292 redirty_page_for_writepage(wbc, page);
2293 else if (rc != 0)
2294 SetPageError(page);
2295 else
2296 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002297 end_page_writeback(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002298 put_page(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002299 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 return rc;
2301}
2302
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002303static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2304{
2305 int rc = cifs_writepage_locked(page, wbc);
2306 unlock_page(page);
2307 return rc;
2308}
2309
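/*
 * Reader's note (a summary of the logic below, with one assumption): the
 * PG_checked flag tested here is presumably set by the ->write_begin()
 * path to mark a page that was deliberately not read from the server; if
 * the copy covered the whole request, the page can now be marked uptodate.
 * Short copies into a non-uptodate page fall back to a synchronous
 * cifs_write() of just the copied bytes.
 */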
Nick Piggind9414772008-09-24 11:32:59 -04002310static int cifs_write_end(struct file *file, struct address_space *mapping,
2311 loff_t pos, unsigned len, unsigned copied,
2312 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313{
Nick Piggind9414772008-09-24 11:32:59 -04002314 int rc;
2315 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002316 struct cifsFileInfo *cfile = file->private_data;
2317 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2318 __u32 pid;
2319
2320 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2321 pid = cfile->pid;
2322 else
2323 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324
Joe Perchesf96637b2013-05-04 22:12:25 -05002325 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002326 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002327
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002328 if (PageChecked(page)) {
2329 if (copied == len)
2330 SetPageUptodate(page);
2331 ClearPageChecked(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002332 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002333 SetPageUptodate(page);
2334
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002336 char *page_data;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002337 unsigned offset = pos & (PAGE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002338 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002339
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002340 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341		/* this is probably better than directly calling
2342		   cifs_partialpagewrite() since here the file handle
2343		   is already known, which we might as well leverage */
2344		/* BB check if anything else is missing out of ppw,
2345		   such as updating the last write time */
2346 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002347 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002348 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002350
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002351 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002352 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002353 rc = copied;
2354 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002355 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 }
2357
Nick Piggind9414772008-09-24 11:32:59 -04002358 if (rc > 0) {
2359 spin_lock(&inode->i_lock);
2360 if (pos > inode->i_size)
2361 i_size_write(inode, pos);
2362 spin_unlock(&inode->i_lock);
2363 }
2364
2365 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002366 put_page(page);
Nick Piggind9414772008-09-24 11:32:59 -04002367
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 return rc;
2369}
2370
Josef Bacik02c24a82011-07-16 20:44:56 -04002371int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2372 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002374 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002376 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002377 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002378 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002379 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002380 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381
Josef Bacik02c24a82011-07-16 20:44:56 -04002382 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2383 if (rc)
2384 return rc;
Al Viro59551022016-01-22 15:40:57 -05002385 inode_lock(inode);
Josef Bacik02c24a82011-07-16 20:44:56 -04002386
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002387 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388
Al Viro35c265e2014-08-19 20:25:34 -04002389 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2390 file, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002391
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002392 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002393 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002394 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002395 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002396 rc = 0; /* don't care about it in fsync */
2397 }
2398 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002399
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002400 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002401 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2402 server = tcon->ses->server;
2403 if (server->ops->flush)
2404 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2405 else
2406 rc = -ENOSYS;
2407 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002408
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002409 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002410 inode_unlock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002411 return rc;
2412}
2413
Josef Bacik02c24a82011-07-16 20:44:56 -04002414int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002415{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002416 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002417 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002418 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002419 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002420 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002421 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002422 struct inode *inode = file->f_mapping->host;
2423
2424 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2425 if (rc)
2426 return rc;
Al Viro59551022016-01-22 15:40:57 -05002427 inode_lock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002428
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002429 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002430
Al Viro35c265e2014-08-19 20:25:34 -04002431 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2432 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002433
2434 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002435 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2436 server = tcon->ses->server;
2437 if (server->ops->flush)
2438 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2439 else
2440 rc = -ENOSYS;
2441 }
Steve Frenchb298f222009-02-21 21:17:43 +00002442
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002443 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002444 inode_unlock(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 return rc;
2446}
2447
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448/*
2449 * As the file closes, flush all cached write data for this inode, checking
2450 * for write-behind errors.
2451 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002452int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453{
Al Viro496ad9a2013-01-23 17:07:38 -05002454 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 int rc = 0;
2456
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002457 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002458 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002459
Joe Perchesf96637b2013-05-04 22:12:25 -05002460 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461
2462 return rc;
2463}
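/*
 * Reader's note: because ->flush() runs at close(2) time for writable
 * descriptors, this is where write-behind errors from earlier cached
 * writes typically surface -- filemap_write_and_wait() picks up any
 * error recorded against the mapping and close(2) reports it.
 */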
2464
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002465static int
2466cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2467{
2468 int rc = 0;
2469 unsigned long i;
2470
2471 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002472 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002473 if (!pages[i]) {
2474 /*
2475			 * save the number of pages we have already allocated
2476			 * and return with an ENOMEM error
2477 */
2478 num_pages = i;
2479 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002480 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002481 }
2482 }
2483
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002484 if (rc) {
2485 for (i = 0; i < num_pages; i++)
2486 put_page(pages[i]);
2487 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002488 return rc;
2489}
2490
2491static inline
2492size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2493{
2494 size_t num_pages;
2495 size_t clen;
2496
2497 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002498 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002499
2500 if (cur_len)
2501 *cur_len = clen;
2502
2503 return num_pages;
2504}
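/*
 * Example usage (hypothetical values; assumes PAGE_SIZE == 4096):
 *
 *	size_t cur_len;
 *	size_t npages = get_numpages(65536, 200000, &cur_len);
 *
 * yields cur_len == min(200000, 65536) == 65536 and
 * npages == DIV_ROUND_UP(65536, 4096) == 16.
 */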
2505
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002506static void
Steve French4a5c80d2014-02-07 20:45:12 -06002507cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002508{
2509 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002510 struct cifs_writedata *wdata = container_of(refcount,
2511 struct cifs_writedata, refcount);
2512
2513 for (i = 0; i < wdata->nr_pages; i++)
2514 put_page(wdata->pages[i]);
2515 cifs_writedata_release(refcount);
2516}
2517
2518static void
2519cifs_uncached_writev_complete(struct work_struct *work)
2520{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002521 struct cifs_writedata *wdata = container_of(work,
2522 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002523 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002524 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2525
2526 spin_lock(&inode->i_lock);
2527 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2528 if (cifsi->server_eof > inode->i_size)
2529 i_size_write(inode, cifsi->server_eof);
2530 spin_unlock(&inode->i_lock);
2531
2532 complete(&wdata->done);
2533
Steve French4a5c80d2014-02-07 20:45:12 -06002534 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002535}
2536
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002537static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002538wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2539 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002540{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002541 size_t save_len, copied, bytes, cur_len = *len;
2542 unsigned long i, nr_pages = *num_pages;
2543
2544 save_len = cur_len;
2545 for (i = 0; i < nr_pages; i++) {
2546 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2547 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2548 cur_len -= copied;
2549 /*
2550 * If we didn't copy as much as we expected, then that
2551 * may mean we trod into an unmapped area. Stop copying
2552 * at that point. On the next pass through the big
2553 * loop, we'll likely end up getting a zero-length
2554 * write and bailing out of it.
2555 */
2556 if (copied < bytes)
2557 break;
2558 }
2559 cur_len = save_len - cur_len;
2560 *len = cur_len;
2561
2562 /*
2563 * If we have no data to send, then that probably means that
2564 * the copy above failed altogether. That's most likely because
2565 * the address in the iovec was bogus. Return -EFAULT and let
2566 * the caller free anything we allocated and bail out.
2567 */
2568 if (!cur_len)
2569 return -EFAULT;
2570
2571 /*
2572 * i + 1 now represents the number of pages we actually used in
2573 * the copy phase above.
2574 */
2575 *num_pages = i + 1;
2576 return 0;
2577}
2578
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002579static int
2580cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2581 struct cifsFileInfo *open_file,
2582 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002583{
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002584 int rc = 0;
2585 size_t cur_len;
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002586 unsigned long nr_pages, num_pages, i;
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002587 struct cifs_writedata *wdata;
Al Virofc56b982016-09-21 18:18:23 -04002588 struct iov_iter saved_from = *from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002589 loff_t saved_offset = offset;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002590 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002591 struct TCP_Server_Info *server;
2592
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002593 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2594 pid = open_file->pid;
2595 else
2596 pid = current->tgid;
2597
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002598 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002599
2600 do {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002601 unsigned int wsize, credits;
2602
2603 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2604 &wsize, &credits);
2605 if (rc)
2606 break;
2607
2608 nr_pages = get_numpages(wsize, len, &cur_len);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002609 wdata = cifs_writedata_alloc(nr_pages,
2610 cifs_uncached_writev_complete);
2611 if (!wdata) {
2612 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002613 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002614 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002615 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002616
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002617 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2618 if (rc) {
2619 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002620 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002621 break;
2622 }
2623
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002624 num_pages = nr_pages;
2625 rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
2626 if (rc) {
Jeff Layton5d81de82014-02-14 07:20:35 -05002627 for (i = 0; i < nr_pages; i++)
2628 put_page(wdata->pages[i]);
2629 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002630 add_credits_and_wake_if(server, credits, 0);
Jeff Layton5d81de82014-02-14 07:20:35 -05002631 break;
2632 }
2633
2634 /*
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002635 * Bring nr_pages down to the number of pages we actually used,
2636 * and free any pages that we didn't use.
Jeff Layton5d81de82014-02-14 07:20:35 -05002637 */
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002638 for ( ; nr_pages > num_pages; nr_pages--)
Jeff Layton5d81de82014-02-14 07:20:35 -05002639 put_page(wdata->pages[nr_pages - 1]);
2640
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002641 wdata->sync_mode = WB_SYNC_ALL;
2642 wdata->nr_pages = nr_pages;
2643 wdata->offset = (__u64)offset;
2644 wdata->cfile = cifsFileInfo_get(open_file);
2645 wdata->pid = pid;
2646 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002647 wdata->pagesz = PAGE_SIZE;
2648 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002649 wdata->credits = credits;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002650
2651 if (!wdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01002652 !(rc = cifs_reopen_file(wdata->cfile, false)))
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002653 rc = server->ops->async_writev(wdata,
2654 cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002655 if (rc) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002656 add_credits_and_wake_if(server, wdata->credits, 0);
Steve French4a5c80d2014-02-07 20:45:12 -06002657 kref_put(&wdata->refcount,
2658 cifs_uncached_writedata_release);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002659 if (rc == -EAGAIN) {
Al Virofc56b982016-09-21 18:18:23 -04002660 *from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002661 iov_iter_advance(from, offset - saved_offset);
2662 continue;
2663 }
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002664 break;
2665 }
2666
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002667 list_add_tail(&wdata->list, wdata_list);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002668 offset += cur_len;
2669 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002670 } while (len > 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002671
2672 return rc;
2673}
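/*
 * Reader's note on the -EAGAIN path above: the iterator is restored to
 * the state it had on entry (saved_from) and then advanced by
 * offset - saved_offset, i.e. to the start of the chunk whose send
 * failed, so the retry re-reads exactly the bytes of that chunk.
 */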
2674
Al Viroe9d15932015-04-06 22:44:11 -04002675ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002676{
Al Viroe9d15932015-04-06 22:44:11 -04002677 struct file *file = iocb->ki_filp;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002678 ssize_t total_written = 0;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002679 struct cifsFileInfo *open_file;
2680 struct cifs_tcon *tcon;
2681 struct cifs_sb_info *cifs_sb;
2682 struct cifs_writedata *wdata, *tmp;
2683 struct list_head wdata_list;
Al Virofc56b982016-09-21 18:18:23 -04002684 struct iov_iter saved_from = *from;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002685 int rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002686
Al Viroe9d15932015-04-06 22:44:11 -04002687 /*
2688	 * BB - optimize the case when signing is disabled. We can drop this
2689	 * extra memory-to-memory copying and use iovec buffers for constructing
2690	 * the write request.
2691 */
2692
Al Viro3309dd02015-04-09 12:55:47 -04002693 rc = generic_write_checks(iocb, from);
2694 if (rc <= 0)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002695 return rc;
2696
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002697 INIT_LIST_HEAD(&wdata_list);
Al Viro7119e222014-10-22 00:25:12 -04002698 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002699 open_file = file->private_data;
2700 tcon = tlink_tcon(open_file->tlink);
2701
2702 if (!tcon->ses->server->ops->async_writev)
2703 return -ENOSYS;
2704
Al Viro3309dd02015-04-09 12:55:47 -04002705 rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
2706 open_file, cifs_sb, &wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002707
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002708 /*
2709	 * If at least one write was successfully sent, then discard any rc
2710	 * value from the later writes. If the later writes succeed, then
2711	 * we'll end up returning whatever was written. If one fails, then
2712	 * we'll get a new rc value from that.
2713 */
2714 if (!list_empty(&wdata_list))
2715 rc = 0;
2716
2717 /*
2718 * Wait for and collect replies for any successful sends in order of
2719 * increasing offset. Once an error is hit or we get a fatal signal
2720 * while waiting, then return without waiting for any more replies.
2721 */
2722restart_loop:
2723 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2724 if (!rc) {
2725 /* FIXME: freezable too? */
2726 rc = wait_for_completion_killable(&wdata->done);
2727 if (rc)
2728 rc = -EINTR;
2729 else if (wdata->result)
2730 rc = wdata->result;
2731 else
2732 total_written += wdata->bytes;
2733
2734 /* resend call if it's a retryable error */
2735 if (rc == -EAGAIN) {
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002736 struct list_head tmp_list;
Al Virofc56b982016-09-21 18:18:23 -04002737 struct iov_iter tmp_from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002738
2739 INIT_LIST_HEAD(&tmp_list);
2740 list_del_init(&wdata->list);
2741
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002742 iov_iter_advance(&tmp_from,
Al Viroe9d15932015-04-06 22:44:11 -04002743 wdata->offset - iocb->ki_pos);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002744
2745 rc = cifs_write_from_iter(wdata->offset,
2746 wdata->bytes, &tmp_from,
2747 open_file, cifs_sb, &tmp_list);
2748
2749 list_splice(&tmp_list, &wdata_list);
2750
2751 kref_put(&wdata->refcount,
2752 cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002753 goto restart_loop;
2754 }
2755 }
2756 list_del_init(&wdata->list);
Steve French4a5c80d2014-02-07 20:45:12 -06002757 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002758 }
2759
Al Viroe9d15932015-04-06 22:44:11 -04002760 if (unlikely(!total_written))
2761 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002762
Al Viroe9d15932015-04-06 22:44:11 -04002763 iocb->ki_pos += total_written;
2764 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002765 cifs_stats_bytes_written(tcon, total_written);
Al Viroe9d15932015-04-06 22:44:11 -04002766 return total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002767}
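/*
 * Reader's note: the collection loop above walks wdata_list in order of
 * increasing offset, so total_written only counts a contiguous prefix of
 * the requested range; a failed chunk is resent via cifs_write_from_iter()
 * and the resulting wdatas are spliced back into the list before restarting.
 */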
2768
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002769static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002770cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002771{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002772 struct file *file = iocb->ki_filp;
2773 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2774 struct inode *inode = file->f_mapping->host;
2775 struct cifsInodeInfo *cinode = CIFS_I(inode);
2776 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04002777 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002778
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002779 /*
2780 * We need to hold the sem to be sure nobody modifies lock list
2781 * with a brlock that prevents writing.
2782 */
2783 down_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05002784 inode_lock(inode);
Al Viro5f380c72015-04-07 11:28:12 -04002785
Al Viro3309dd02015-04-09 12:55:47 -04002786 rc = generic_write_checks(iocb, from);
2787 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04002788 goto out;
2789
Al Viro5f380c72015-04-07 11:28:12 -04002790 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002791 server->vals->exclusive_lock_type, NULL,
Al Viro5f380c72015-04-07 11:28:12 -04002792 CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04002793 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04002794 else
2795 rc = -EACCES;
2796out:
Al Viro59551022016-01-22 15:40:57 -05002797 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04002798
Christoph Hellwige2592212016-04-07 08:52:01 -07002799 if (rc > 0)
2800 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002801 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002802 return rc;
2803}
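/*
 * Reader's note on lock ordering in cifs_writev(): lock_sem is taken
 * (shared) before the inode lock and released after it, so the brlock
 * list cannot gain a conflicting lock anywhere between the conflict
 * check and the completion of the write, including the
 * generic_write_sync() call above.
 */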
2804
2805ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002806cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002807{
Al Viro496ad9a2013-01-23 17:07:38 -05002808 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002809 struct cifsInodeInfo *cinode = CIFS_I(inode);
2810 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2811 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2812 iocb->ki_filp->private_data;
2813 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002814 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002815
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002816 written = cifs_get_writer(cinode);
2817 if (written)
2818 return written;
2819
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002820 if (CIFS_CACHE_WRITE(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002821 if (cap_unix(tcon->ses) &&
2822 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002823 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
Al Viro3dae8752014-04-03 12:05:17 -04002824 written = generic_file_write_iter(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002825 goto out;
2826 }
Al Viro3dae8752014-04-03 12:05:17 -04002827 written = cifs_writev(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002828 goto out;
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04002829 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002830 /*
2831 * For non-oplocked files in strict cache mode we need to write the data
2832 * to the server exactly from the pos to pos+len-1 rather than flush all
2833	 * affected pages because it may cause an error with mandatory locks on
2834	 * these pages but not on the region from pos to pos+len-1.
2835 */
Al Viro3dae8752014-04-03 12:05:17 -04002836 written = cifs_user_writev(iocb, from);
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002837 if (CIFS_CACHE_READ(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002838 /*
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002839 * We have read level caching and we have just sent a write
2840 * request to the server thus making data in the cache stale.
2841 * Zap the cache and set oplock/lease level to NONE to avoid
2842 * reading stale data from the cache. All subsequent read
2843 * operations will read new data from the server.
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002844 */
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002845 cifs_zap_mapping(inode);
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002846 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05002847 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002848 cinode->oplock = 0;
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002849 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002850out:
2851 cifs_put_writer(cinode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002852 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002853}
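/*
 * Reader's summary of the decision tree above:
 *
 *	CIFS_CACHE_WRITE + POSIX brlock capable  -> generic (cached) write
 *	CIFS_CACHE_WRITE otherwise               -> cifs_writev (brlock check)
 *	no write caching                         -> cifs_user_writev, then
 *	                                            zap/drop any read cache
 *	                                            so readers see fresh data
 */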
2854
Jeff Layton0471ca32012-05-16 07:13:16 -04002855static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002856cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002857{
2858 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002859
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002860 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2861 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002862 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002863 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002864 INIT_LIST_HEAD(&rdata->list);
2865 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002866 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002867 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002868
Jeff Layton0471ca32012-05-16 07:13:16 -04002869 return rdata;
2870}
2871
Jeff Layton6993f742012-05-16 07:13:17 -04002872void
2873cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002874{
Jeff Layton6993f742012-05-16 07:13:17 -04002875 struct cifs_readdata *rdata = container_of(refcount,
2876 struct cifs_readdata, refcount);
2877
2878 if (rdata->cfile)
2879 cifsFileInfo_put(rdata->cfile);
2880
Jeff Layton0471ca32012-05-16 07:13:16 -04002881 kfree(rdata);
2882}
2883
Jeff Layton2a1bb132012-05-16 07:13:17 -04002884static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002885cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002886{
2887 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002888 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002889 unsigned int i;
2890
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002891 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002892 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2893 if (!page) {
2894 rc = -ENOMEM;
2895 break;
2896 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002897 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002898 }
2899
2900 if (rc) {
Roberto Bergantinos Corpasdf2b6af2019-05-28 09:38:14 +02002901 unsigned int nr_page_failed = i;
2902
2903 for (i = 0; i < nr_page_failed; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002904 put_page(rdata->pages[i]);
2905 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002906 }
2907 }
2908 return rc;
2909}
2910
2911static void
2912cifs_uncached_readdata_release(struct kref *refcount)
2913{
Jeff Layton1c892542012-05-16 07:13:17 -04002914 struct cifs_readdata *rdata = container_of(refcount,
2915 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002916 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002917
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002918 for (i = 0; i < rdata->nr_pages; i++) {
2919 put_page(rdata->pages[i]);
2920 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002921 }
2922 cifs_readdata_release(refcount);
2923}
2924
Jeff Layton1c892542012-05-16 07:13:17 -04002925/**
2926 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2927 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05002928 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04002929 *
2930 * This function copies data from a list of pages in a readdata response into
2931 * an array of iovecs. It will first calculate where the data should go
2932 * based on the info in the readdata and then copy the data into that spot.
2933 */
Al Viro7f25bba2014-02-04 14:07:43 -05002934static int
2935cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04002936{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04002937 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002938 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002939
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002940 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002941 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02002942 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovskyc06d74d2017-01-19 13:53:15 -08002943 size_t written;
2944
2945 if (unlikely(iter->type & ITER_PIPE)) {
2946 void *addr = kmap_atomic(page);
2947
2948 written = copy_to_iter(addr, copy, iter);
2949 kunmap_atomic(addr);
2950 } else
2951 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05002952 remaining -= written;
2953 if (written < copy && iov_iter_count(iter) > 0)
2954 break;
Jeff Layton1c892542012-05-16 07:13:17 -04002955 }
Al Viro7f25bba2014-02-04 14:07:43 -05002956 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002957}
2958
2959static void
2960cifs_uncached_readv_complete(struct work_struct *work)
2961{
2962 struct cifs_readdata *rdata = container_of(work,
2963 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002964
2965 complete(&rdata->done);
2966 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2967}
2968
2969static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002970cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2971 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002972{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002973 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002974 unsigned int i;
2975 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002976
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002977 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07002978 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002979 for (i = 0; i < nr_pages; i++) {
2980 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05002981 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002982
Al Viro71335662016-01-09 19:54:50 -05002983 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002984 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002985 rdata->pages[i] = NULL;
2986 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002987 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002988 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002989 }
Al Viro71335662016-01-09 19:54:50 -05002990 n = len;
2991 if (len >= PAGE_SIZE) {
2992 /* enough data to fill the page */
2993 n = PAGE_SIZE;
2994 len -= n;
2995 } else {
2996 zero_user(page, len, PAGE_SIZE - len);
2997 rdata->tailsz = len;
2998 len = 0;
2999 }
3000 result = cifs_read_page_from_socket(server, page, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003001 if (result < 0)
3002 break;
3003
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003004 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04003005 }
3006
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003007 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3008 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04003009}
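/*
 * Reader's note: when the final page is only partly filled, the remainder
 * is zeroed and rdata->tailsz records how many of its bytes are real data;
 * the return value is the byte count received so far unless the connection
 * was aborted, in which case the error is propagated instead.
 */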
3010
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003011static int
3012cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3013 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003015 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003016 unsigned int npages, rsize, credits;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003017 size_t cur_len;
3018 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04003019 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003020 struct TCP_Server_Info *server;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003021
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003022 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07003023
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003024 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3025 pid = open_file->pid;
3026 else
3027 pid = current->tgid;
3028
Jeff Layton1c892542012-05-16 07:13:17 -04003029 do {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003030 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3031 &rsize, &credits);
3032 if (rc)
3033 break;
3034
3035 cur_len = min_t(const size_t, len, rsize);
Jeff Layton1c892542012-05-16 07:13:17 -04003036 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003037
Jeff Layton1c892542012-05-16 07:13:17 -04003038 /* allocate a readdata struct */
3039 rdata = cifs_readdata_alloc(npages,
3040 cifs_uncached_readv_complete);
3041 if (!rdata) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003042 add_credits_and_wake_if(server, credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003043 rc = -ENOMEM;
Jeff Laytonbae9f742014-04-15 12:48:49 -04003044 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003045 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003046
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003047 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04003048 if (rc)
3049 goto error;
3050
3051 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003052 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003053 rdata->offset = offset;
3054 rdata->bytes = cur_len;
3055 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003056 rdata->pagesz = PAGE_SIZE;
3057 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003058 rdata->credits = credits;
Jeff Layton1c892542012-05-16 07:13:17 -04003059
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003060 if (!rdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01003061 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003062 rc = server->ops->async_readv(rdata);
Jeff Layton1c892542012-05-16 07:13:17 -04003063error:
3064 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003065 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003066 kref_put(&rdata->refcount,
3067 cifs_uncached_readdata_release);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003068 if (rc == -EAGAIN)
3069 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003070 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071 }
Jeff Layton1c892542012-05-16 07:13:17 -04003072
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003073 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003074 offset += cur_len;
3075 len -= cur_len;
3076 } while (len > 0);
3077
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003078 return rc;
3079}
3080
3081ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3082{
3083 struct file *file = iocb->ki_filp;
3084 ssize_t rc;
3085 size_t len;
3086 ssize_t total_read = 0;
3087 loff_t offset = iocb->ki_pos;
3088 struct cifs_sb_info *cifs_sb;
3089 struct cifs_tcon *tcon;
3090 struct cifsFileInfo *open_file;
3091 struct cifs_readdata *rdata, *tmp;
3092 struct list_head rdata_list;
3093
3094 len = iov_iter_count(to);
3095 if (!len)
3096 return 0;
3097
3098 INIT_LIST_HEAD(&rdata_list);
Al Viro7119e222014-10-22 00:25:12 -04003099 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003100 open_file = file->private_data;
3101 tcon = tlink_tcon(open_file->tlink);
3102
3103 if (!tcon->ses->server->ops->async_readv)
3104 return -ENOSYS;
3105
3106 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3107 cifs_dbg(FYI, "attempting read on write only file instance\n");
3108
3109 rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
3110
Jeff Layton1c892542012-05-16 07:13:17 -04003111	/* if at least one read request was sent successfully, then reset rc */
3112 if (!list_empty(&rdata_list))
3113 rc = 0;
3114
Al Viroe6a7bcb2014-04-02 19:53:36 -04003115 len = iov_iter_count(to);
Jeff Layton1c892542012-05-16 07:13:17 -04003116 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003117again:
Jeff Layton1c892542012-05-16 07:13:17 -04003118 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
3119 if (!rc) {
Jeff Layton1c892542012-05-16 07:13:17 -04003120 /* FIXME: freezable sleep too? */
3121 rc = wait_for_completion_killable(&rdata->done);
3122 if (rc)
3123 rc = -EINTR;
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003124 else if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003125 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003126 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003127 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003128
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003129 list_del_init(&rdata->list);
3130 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003131
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003132 /*
3133			 * Got part of the data and then a reconnect
3134			 * happened -- fill the buffer and continue
3135			 * reading.
3136 */
3137 if (got_bytes && got_bytes < rdata->bytes) {
3138 rc = cifs_readdata_to_iov(rdata, to);
3139 if (rc) {
3140 kref_put(&rdata->refcount,
3141 cifs_uncached_readdata_release);
3142 continue;
3143 }
3144 }
3145
3146 rc = cifs_send_async_read(
3147 rdata->offset + got_bytes,
3148 rdata->bytes - got_bytes,
3149 rdata->cfile, cifs_sb,
3150 &tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003151
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003152 list_splice(&tmp_list, &rdata_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003153
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003154 kref_put(&rdata->refcount,
3155 cifs_uncached_readdata_release);
3156 goto again;
3157 } else if (rdata->result)
3158 rc = rdata->result;
3159 else
Jeff Layton1c892542012-05-16 07:13:17 -04003160 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003161
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003162 /* if there was a short read -- discard anything left */
3163 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3164 rc = -ENODATA;
Jeff Layton1c892542012-05-16 07:13:17 -04003165 }
3166 list_del_init(&rdata->list);
3167 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003169
Al Viroe6a7bcb2014-04-02 19:53:36 -04003170 total_read = len - iov_iter_count(to);
Al Viro7f25bba2014-02-04 14:07:43 -05003171
Jeff Layton1c892542012-05-16 07:13:17 -04003172 cifs_stats_bytes_read(tcon, total_read);
Jeff Layton1c892542012-05-16 07:13:17 -04003173
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003174 /* mask nodata case */
3175 if (rc == -ENODATA)
3176 rc = 0;
3177
Al Viro0165e812014-02-04 14:19:48 -05003178 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003179 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003180 return total_read;
3181 }
3182 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003183}
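/*
 * Reader's note: a partially-filled rdata that hit -EAGAIN is drained
 * into the caller's iterator first, and only the missing byte range
 * (rdata->bytes - got_bytes, starting at rdata->offset + got_bytes) is
 * resent, so data already received is not re-requested after a reconnect.
 */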
3184
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003185ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003186cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003187{
Al Viro496ad9a2013-01-23 17:07:38 -05003188 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003189 struct cifsInodeInfo *cinode = CIFS_I(inode);
3190 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3191 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3192 iocb->ki_filp->private_data;
3193 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3194 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003195
3196 /*
3197 * In strict cache mode we need to read from the server all the time
3198	 * if we don't have a level II oplock, because the server can delay an
3199	 * mtime change - so we can't make a decision about invalidating the inode.
3200	 * And we can also fail with page reading if there are mandatory locks
3201 * on pages affected by this read but not on the region from pos to
3202 * pos+len-1.
3203 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003204 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003205 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003206
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003207 if (cap_unix(tcon->ses) &&
3208 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3209 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003210 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003211
3212 /*
3213 * We need to hold the sem to be sure nobody modifies lock list
3214 * with a brlock that prevents reading.
3215 */
3216 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003217 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003218 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04003219 NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003220 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003221 up_read(&cinode->lock_sem);
3222 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003223}
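/*
 * Reader's summary: strict reads bypass the page cache entirely without a
 * level II oplock, go through the generic cached path when POSIX byte-range
 * locks are handled by the server, and otherwise use the cache only after
 * checking the local brlock list for a conflicting mandatory lock.
 */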
Linus Torvalds1da177e2005-04-16 15:20:36 -07003224
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003225static ssize_t
3226cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227{
3228 int rc = -EACCES;
3229 unsigned int bytes_read = 0;
3230 unsigned int total_read;
3231 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003232 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003234 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003235 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003236 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003237 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003238 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003239 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003240 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003241 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003243 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003244 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003246 /* FIXME: set up handlers for larger reads and/or convert to async */
3247 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3248
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303250 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003251 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303252 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003254 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003255 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003256 server = tcon->ses->server;
3257
3258 if (!server->ops->sync_read) {
3259 free_xid(xid);
3260 return -ENOSYS;
3261 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003263 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3264 pid = open_file->pid;
3265 else
3266 pid = current->tgid;
3267
Linus Torvalds1da177e2005-04-16 15:20:36 -07003268 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003269 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003271 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3272 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003273 do {
3274 current_read_size = min_t(uint, read_size - total_read,
3275 rsize);
3276 /*
3277			 * For Windows ME and 9x we do not want to request
3278			 * more than it negotiated, since it will refuse the
3279			 * read then.
3280 */
3281 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003282 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003283 current_read_size = min_t(uint,
3284 current_read_size, CIFSMaxBufSize);
3285 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003286 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003287 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288 if (rc != 0)
3289 break;
3290 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003291 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003292 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003293 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003294 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003295 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003296 &bytes_read, &cur_offset,
3297 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003298 } while (rc == -EAGAIN);
3299
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300 if (rc || (bytes_read == 0)) {
3301 if (total_read) {
3302 break;
3303 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003304 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305 return rc;
3306 }
3307 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003308 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003309 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310 }
3311 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003312 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003313 return total_read;
3314}
3315
Jeff Laytonca83ce32011-04-12 09:13:44 -04003316/*
3317 * If the page is mmap'ed into a process' page tables, then we need to make
3318 * sure that it doesn't change while being written back.
3319 */
3320static int
3321cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3322{
3323 struct page *page = vmf->page;
3324
3325 lock_page(page);
3326 return VM_FAULT_LOCKED;
3327}
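/*
 * Reader's note: returning VM_FAULT_LOCKED with the page locked is the
 * minimal way to satisfy the contract above -- the fault completes with
 * the page lock held, so writeback cannot observe the page mid-update.
 */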
3328
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003329static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003330 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003331 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003332 .page_mkwrite = cifs_page_mkwrite,
3333};
3334
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003335int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3336{
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003337 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003338 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003339
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003340 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003341
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003342 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003343 rc = cifs_zap_mapping(inode);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003344 if (!rc)
3345 rc = generic_file_mmap(file, vma);
3346 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003347 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003348
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003349 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003350 return rc;
3351}
3352
Linus Torvalds1da177e2005-04-16 15:20:36 -07003353int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3354{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355 int rc, xid;
3356
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003357 xid = get_xid();
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003358
Jeff Laytonabab0952010-02-12 07:44:18 -05003359 rc = cifs_revalidate_file(file);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003360 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003361 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3362 rc);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003363 if (!rc)
3364 rc = generic_file_mmap(file, vma);
3365 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003366 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003367
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003368 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 return rc;
3370}
3371
Jeff Layton0471ca32012-05-16 07:13:16 -04003372static void
3373cifs_readv_complete(struct work_struct *work)
3374{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003375 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003376 struct cifs_readdata *rdata = container_of(work,
3377 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003378
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003379 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003380 for (i = 0; i < rdata->nr_pages; i++) {
3381 struct page *page = rdata->pages[i];
3382
Jeff Layton0471ca32012-05-16 07:13:16 -04003383 lru_cache_add_file(page);
3384
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003385 if (rdata->result == 0 ||
3386 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003387 flush_dcache_page(page);
3388 SetPageUptodate(page);
3389 }
3390
3391 unlock_page(page);
3392
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003393 if (rdata->result == 0 ||
3394 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003395 cifs_readpage_to_fscache(rdata->mapping->host, page);
3396
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003397 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003398
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003399 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003400 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003401 }
Jeff Layton6993f742012-05-16 07:13:17 -04003402 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003403}

static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n = PAGE_SIZE;

		if (len >= PAGE_SIZE) {
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len, PAGE_SIZE - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
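
/*
 * Worked example for the loop above (illustrative numbers): with
 * PAGE_SIZE = 4096 and len = 10000 on a three-page rdata, the first two
 * iterations consume a full page each (len becomes 5904, then 1808), and
 * the third is a partial page: bytes 0-1807 are read from the socket,
 * bytes 1808-4095 are zeroed, and tailsz is set to 1808 for the
 * transport layer.
 */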

static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
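
/*
 * Example of the batching above (illustrative numbers): with
 * rsize = 16384 and PAGE_SIZE = 4096, at most four contiguous pages fit
 * in one request; a fifth page would make *bytes (16384) + PAGE_SIZE
 * exceed rsize, so the loop stops and the page stays on page_list for
 * the next pass of the caller's loop.
 */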

static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->credits = credits;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}
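
/*
 * A note on reference counting in the loop above: cifs_readdata_alloc()
 * returns an rdata holding one reference, and the async_readv submission
 * path is expected to take its own, so the kref_put() at the bottom of
 * the loop only frees the rdata once cifs_readv_complete() has run (or
 * immediately, on submission failure).
 */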

/*
 * cifs_readpage_worker must be called with the page pinned
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_time(file_inode(file));

	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
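
/*
 * Note the label layout above: both the success path and the io_error
 * path fall through to kunmap()/unlock_page(), while a page satisfied
 * from fscache jumps straight to read_complete since it was never
 * kmapped here.
 */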

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	free_xid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon =
		cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&tcon->open_file_lock);
			return 1;
		}
	}
	spin_unlock(&tcon->open_file_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open
 * for write, to avoid races with writepage extending the file. In the
 * future we could consider allowing a refresh of the inode only on
 * increases in the file size, but this is tricky to do without racing
 * with writebehind page caching in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on directio
			   we can change size safely */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}
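
/*
 * In short: a server-reported size is only applied to a write-open,
 * cached inode when it grows the file; shrinking it could discard dirty
 * pagecache that writeback has not yet pushed to the server.
 */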

static int cifs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
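
/*
 * Worked example for the fast paths above (illustrative numbers, with
 * PAGE_SIZE = 4096): a write of pos = 8192, len = 4096 covers a whole
 * page and skips the read entirely; pos = 8192, len = 100 on a page that
 * straddles EOF under a read oplock gets the untouched ranges zeroed and
 * PageChecked set; the same short write without an oplock (on a file
 * open for reading as well) falls through to cifs_readpage_worker() to
 * fill the page first.
 */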

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}
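
/*
 * cifs_launder_page() is typically invoked when a dirty page must be
 * cleaned before its mapping can be invalidated; the wbc above restricts
 * synchronous writeback to exactly this one page.
 */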

void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	_cifsFileInfo_put(cfile, false /* do not wait for ourself */);
	cifs_done_oplock_break(cinode);
}
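
/*
 * Ordering in the break handler above: pending writers are drained
 * first, the cached oplock level is downgraded, dirty data is flushed
 * (and the mapping zapped if read caching was lost), byte-range locks
 * are pushed back to the server, and only then is the break
 * acknowledged, so the server never sees the ack before the client has
 * given up the corresponding caching rights.
 */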

/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests, so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};