/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request;
		   it can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

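/*
 * Worked example of the mapping above: an open(2) with O_RDWR | O_CREAT
 * requests GENERIC_READ | GENERIC_WRITE here (O_CREAT does not affect the
 * access mask), while any access mode the checks above do not recognize
 * falls through to the explicit list of FILE_ and READ_CONTROL bits.
 */
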
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

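/*
 * For example, O_WRONLY | O_CREAT | O_TRUNC converts to SMB_O_WRONLY |
 * SMB_O_CREAT | SMB_O_TRUNC above, while O_EXCL without O_CREAT is
 * dropped (with the FYI message) instead of being sent to the server.
 */
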
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

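/*
 * Note the ordering of the checks above: O_CREAT | O_EXCL is tested
 * first, so e.g. O_CREAT | O_EXCL | O_TRUNC still maps to FILE_CREATE
 * rather than FILE_OVERWRITE_IF.
 */
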
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

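/*
 * cifs_posix_open() is called both from cifs_open() for an initial open
 * and from cifs_reopen_file() after a reconnect; in the reopen path the
 * caller passes pinode == NULL since the inode is already instantiated.
 */
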
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

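/*
 * cifs_has_mand_locks() reports whether any open fid on the inode holds
 * at least one cached byte-range lock. It takes lock_sem for read, so it
 * must not be called with lock_sem already held for write.
 */
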
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}

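/*
 * Note the locking in cifs_new_fileinfo() above: the new instance is
 * published on both tcon->openFileList and cinode->openFileList under a
 * single hold of tcon->open_file_lock, and its initial reference
 * (count == 1) is dropped later through cifsFileInfo_put().
 */
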
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fall through to retry the open the old way on network
		 * i/o or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

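/*
 * The cifs_add_pending_open()/cifs_del_pending_open() pairing in
 * cifs_open() above covers the window between issuing the open to the
 * server and attaching the handle via cifs_new_fileinfo(), so a lease
 * break arriving in between is not missed.
 */
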
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called, and if the server was down that means we
	 * end up here, and we can never tell if the caller already has the
	 * rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * in the reconnect path in particular it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh the inode by passing in a file_info buf to be
	 * returned by ops->open and then calling get_inode_info with the
	 * returned buf, since the file might have write-behind data that
	 * needs to be flushed and the server version of the file size can be
	 * stale. If we knew for sure that the inode was not dirty locally we
	 * could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout has expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to the server already and could
	 * deadlock if we tried to flush data; and since we do not know if we
	 * have data that would invalidate the current end of file on the
	 * server, we can not go to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

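/*
 * cifs_lock_init() only allocates and fills in the lock; linking it into
 * the per-fid list is left to cifs_lock_add() or cifs_lock_add_if()
 * below.
 */
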
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

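/*
 * In other words, a cached lock conflicts only if the byte ranges overlap
 * and neither exception above applies: e.g. two shared (read) locks of
 * the same type never conflict, and a lock held by the same tgid through
 * the same fid is skipped except where a shared lock must block a write
 * (rw_check == CIFS_WRITE_OP).
 */
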
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

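/*
 * The wait above uses the waiter's own blist entry as its condition:
 * cifs_del_lock_waiters() does list_del_init() on each waiter, so the
 * woken task sees a self-pointing (empty) blist and retries from
 * try_again.
 */
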
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001070/*
1071 * Set the byte-range lock (posix style). Returns:
1072 * 1) 0, if we set the lock and don't need to request to the server;
1073 * 2) 1, if we need to request to the server;
1074 * 3) <0, if the error occurs while setting the lock.
1075 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}

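/*
 * Push every cached byte-range lock on this open file out to the server as
 * LOCKING_ANDX requests. Ranges are batched so that as many as fit in the
 * negotiated buffer go out per call; exclusive and shared locks are sent in
 * separate passes because each cifs_lockv() call carries a single lock type.
 */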
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - we need to store the
	 * value and check it before use.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
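	/*
	 * Illustration only (not normative - the real sizes come from
	 * cifspdu.h): assuming a ~32-byte smb_hdr and the 20-byte large-file
	 * LOCKING_ANDX_RANGE, a common 16KB negotiated buffer yields a
	 * max_num on the order of 800 ranges per request.
	 */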
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

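/*
 * Fold the opaque lock owner pointer into the 32-bit pid we put on the
 * wire, mixed with the cifs_lock_secret value so raw kernel pointers are
 * not exposed to the server.
 */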
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

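/*
 * Push the cached POSIX locks on this inode out to the server. The
 * lock_to_push structures are preallocated because CIFSSMBPosixLock()
 * sleeps and therefore cannot be called while walking the flc_posix list
 * under flc_lock.
 */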
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem, which
	 * protects the locking operations on this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->fl_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		/* step to the next preallocated structure */
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

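/*
 * Flush all cached byte-range locks for this file to the server, via the
 * posix or the mandatory path as the mount dictates, then mark the inode
 * as no longer caching brlocks. can_cache_brlcks is updated under a
 * write-locked lock_sem so readers see a consistent state.
 */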
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - we need write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

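/*
 * Decode the VFS file_lock into the server lock type plus the
 * lock/unlock/wait flags used by the rest of this file. Unrecognized
 * fl_flags are only logged, not rejected.
 */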
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}

static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
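	/*
	 * There is no direct lock-test operation on this path, so probe for
	 * conflicts by taking a non-blocking lock of the requested type and,
	 * if that succeeds, releasing it again straight away.
	 */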
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start,
					    length, type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start,
					    length,
					    type | server->vals->shared_lock_type,
					    0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}

void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

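/*
 * Unlock every cached lock that falls inside the flock range. Locks are
 * first moved to tmp_llist so they can be restored to the file's list if
 * the server rejects the batched unlock request.
 */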
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - we need to store the
	 * value and check it before use.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * the lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save the lock here so we can add it again
			 * to the file's list if the unlock range request fails
			 * on the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}

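/*
 * Set or release a byte-range lock. Posix-style requests go through the
 * local posix lock cache first; mandatory-style locks are recorded in the
 * file's lock list and, when they cannot simply be cached, sent to the
 * server via server->ops->mand_lock()/mand_unlock_range().
 */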
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * A Windows 7 server can delay breaking a lease from read to
		 * None if we set a byte-range lock on a file - break it
		 * explicitly before sending the lock to the server to be sure
		 * the next read won't conflict with non-overlapping locks due
		 * to page reading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
		    CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX && !rc)
		rc = locks_lock_file_wait(file, flock);
	return rc;
}

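/*
 * VFS ->lock entry point. Sketch of the flow: cifs_read_flock() decodes
 * the request, F_GETLK is handled by cifs_getlk(), and everything else
 * goes through cifs_setlk().
 */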
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_FILE_SB(file);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length, which we cannot accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * If there is no lock and no unlock request, there is nothing
		 * to do since we do not know what the request is.
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

/*
 * Update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held.
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

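/*
 * Synchronously write write_size bytes at *offset through the given open
 * handle, splitting the buffer into wire-sized chunks, reopening an
 * invalidated handle on the retry path, and updating the cached EOF and
 * i_size as data goes out.
 */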
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/*
				 * We could deadlock if we called
				 * filemap_fdatawait from here, so tell
				 * reopen_file not to flush data to the
				 * server now.
				 */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written,
						     iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}

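/*
 * Find an open handle suitable for reading from this inode and take a
 * reference on it; the caller is expected to drop the reference with
 * cifsFileInfo_put() when done.
 */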
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/*
	 * We could simply take the first list entry since write-only entries
	 * are always at the end of the list, but since the first entry might
	 * have a close pending, we go through the whole list.
	 */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}

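/*
 * Like find_readable_file() but for writing: prefer a valid handle owned
 * by the calling process, fall back to any usable handle, and as a last
 * resort try to reopen an invalidated one (up to MAX_REOPEN_ATT times).
 */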
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen, but we had reports of an oops
	 * (due to it being zero) during stress testcases, so we need to
	 * check for it.
	 */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

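/*
 * Allocate a writedata descriptor sized for up to tofind pages and fill
 * its page array with dirty pages from the mapping, starting at *index.
 */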
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	unsigned int nr_pages;
	struct page **pages;
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	/*
	 * find_get_pages_tag seems to return a max of 256 on each
	 * iteration, so we must call it several times in order to
	 * fill the array or the wsize is effectively limited to
	 * 256 * PAGE_SIZE.
	 */
	*found_pages = 0;
	pages = wdata->pages;
	do {
		nr_pages = find_get_pages_tag(mapping, index,
					      PAGECACHE_TAG_DIRTY, tofind,
					      pages);
		*found_pages += nr_pages;
		tofind -= nr_pages;
		pages += nr_pages;
	} while (nr_pages && tofind && *index <= end);

	return wdata;
}

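/*
 * Lock and vet the found pages, keeping only a run of consecutive dirty
 * pages that are still part of this mapping and inside the writeback
 * range; pages that do not qualify are unlocked and released.
 */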
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping.
		 */

		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}

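/*
 * Attach a writable handle and issue the async write for the prepared
 * pages; the pages are unlocked here whether or not the send succeeded.
 */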
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}

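/*
 * ->writepages: repeatedly reserve send credits, gather a batch of dirty
 * pages, and hand them to wdata_send_pages() until the requested range has
 * been written, or wrap back to the start of the file in the range_cyclic
 * case.
 */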
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage.
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file.
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002227static int
2228cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002230 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002231 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002233 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234/* BB add check for wbc flags */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002235 get_page(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002236 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002237 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002238
2239 /*
2240 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2241 *
2242 * A writepage() implementation always needs to do either this,
2243 * or re-dirty the page with "redirty_page_for_writepage()" in
2244 * the case of a failure.
2245 *
2246 * Just unlocking the page would leave the radix tree tag bits
2247 * out of sync with the actual state of the page.
2248 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002249 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002250retry_write:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002251 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002252 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2253 goto retry_write;
2254 else if (rc == -EAGAIN)
2255 redirty_page_for_writepage(wbc, page);
2256 else if (rc != 0)
2257 SetPageError(page);
2258 else
2259 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002260 end_page_writeback(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002261 put_page(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002262 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 return rc;
2264}
2265
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002266static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2267{
2268 int rc = cifs_writepage_locked(page, wbc);
2269 unlock_page(page);
2270 return rc;
2271}
2272
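/*
 * Illustrative sketch, not part of the original file: the contract that
 * cifs_writepage_locked() follows above. A ->writepage() instance must
 * either push the page through writeback or explicitly re-dirty it;
 * doing neither leaves the radix tree tags stale. A minimal skeleton,
 * assuming a hypothetical do_write() helper:
 */
#if 0	/* example only */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	set_page_writeback(page);	/* page now owned by writeback */
	rc = do_write(page);		/* hypothetical I/O */
	if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);	/* retry later */
	else if (rc)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	return rc;
}
#endif
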
Nick Piggind9414772008-09-24 11:32:59 -04002273static int cifs_write_end(struct file *file, struct address_space *mapping,
2274 loff_t pos, unsigned len, unsigned copied,
2275 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276{
Nick Piggind9414772008-09-24 11:32:59 -04002277 int rc;
2278 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002279 struct cifsFileInfo *cfile = file->private_data;
2280 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2281 __u32 pid;
2282
2283 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2284 pid = cfile->pid;
2285 else
2286 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287
Joe Perchesf96637b2013-05-04 22:12:25 -05002288 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002289 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002290
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002291 if (PageChecked(page)) {
2292 if (copied == len)
2293 SetPageUptodate(page);
2294 ClearPageChecked(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002295 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002296 SetPageUptodate(page);
2297
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002299 char *page_data;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002300 unsigned offset = pos & (PAGE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002301 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002302
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002303 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304	/* this is probably better than directly calling
2305	   partialpage_write, since in this function the file
2306	   handle is known, which we might as well leverage */
2307	/* BB check if anything else is missing from ppw,
2308	   such as updating the last write time */
2309 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002310 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002311 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002313
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002314 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002315 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002316 rc = copied;
2317 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002318 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 }
2320
Nick Piggind9414772008-09-24 11:32:59 -04002321 if (rc > 0) {
2322 spin_lock(&inode->i_lock);
2323 if (pos > inode->i_size)
2324 i_size_write(inode, pos);
2325 spin_unlock(&inode->i_lock);
2326 }
2327
2328 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002329 put_page(page);
Nick Piggind9414772008-09-24 11:32:59 -04002330
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 return rc;
2332}
2333
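/*
 * Illustrative sketch, not part of the original file: the uptodate
 * decision cifs_write_end() makes above, assuming PageChecked marks a
 * page whose old contents write_begin never read back. For such a page
 * only a copy of the full requested range counts; otherwise a full
 * PAGE_SIZE copy is enough to newly mark the page uptodate.
 */
#if 0	/* example only */
static bool write_end_makes_uptodate(struct page *page,
				     unsigned int len, unsigned int copied)
{
	if (PageChecked(page))		/* old contents never read */
		return copied == len;
	return !PageUptodate(page) && copied == PAGE_SIZE;
}
#endif
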
Josef Bacik02c24a82011-07-16 20:44:56 -04002334int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2335 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002337 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002339 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002340 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002341 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002342 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002343 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344
Josef Bacik02c24a82011-07-16 20:44:56 -04002345 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2346 if (rc)
2347 return rc;
Al Viro59551022016-01-22 15:40:57 -05002348 inode_lock(inode);
Josef Bacik02c24a82011-07-16 20:44:56 -04002349
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002350 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351
Al Viro35c265e2014-08-19 20:25:34 -04002352 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2353 file, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002354
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002355 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002356 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002357 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002358 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002359 rc = 0; /* don't care about it in fsync */
2360 }
2361 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002362
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002363 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002364 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2365 server = tcon->ses->server;
2366 if (server->ops->flush)
2367 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2368 else
2369 rc = -ENOSYS;
2370 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002371
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002372 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002373 inode_unlock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002374 return rc;
2375}
2376
Josef Bacik02c24a82011-07-16 20:44:56 -04002377int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002378{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002379 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002380 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002381 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002382 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002383 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002384 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002385 struct inode *inode = file->f_mapping->host;
2386
2387 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2388 if (rc)
2389 return rc;
Al Viro59551022016-01-22 15:40:57 -05002390 inode_lock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002391
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002392 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002393
Al Viro35c265e2014-08-19 20:25:34 -04002394 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2395 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002396
2397 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002398 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2399 server = tcon->ses->server;
2400 if (server->ops->flush)
2401 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2402 else
2403 rc = -ENOSYS;
2404 }
Steve Frenchb298f222009-02-21 21:17:43 +00002405
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002406 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002407 inode_unlock(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408 return rc;
2409}
2410
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411/*
2412 * As the file closes, flush all cached write data for this inode,
2413 * checking for write-behind errors.
2414 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002415int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416{
Al Viro496ad9a2013-01-23 17:07:38 -05002417 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 int rc = 0;
2419
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002420 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002421 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002422
Joe Perchesf96637b2013-05-04 22:12:25 -05002423 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424
2425 return rc;
2426}
2427
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002428static int
2429cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2430{
2431 int rc = 0;
2432 unsigned long i;
2433
2434 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002435 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002436 if (!pages[i]) {
2437 /*
2438 * save the number of pages we have already allocated
2439 * and return with an ENOMEM error
2440 */
2441 num_pages = i;
2442 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002443 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002444 }
2445 }
2446
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002447 if (rc) {
2448 for (i = 0; i < num_pages; i++)
2449 put_page(pages[i]);
2450 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002451 return rc;
2452}
2453
2454static inline
2455size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2456{
2457 size_t num_pages;
2458 size_t clen;
2459
2460 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002461 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002462
2463 if (cur_len)
2464 *cur_len = clen;
2465
2466 return num_pages;
2467}
2468
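/*
 * Worked example, not part of the original file: get_numpages() above
 * with 4K pages, wsize = 65536 and len = 100000 gives
 * clen = min(100000, 65536) = 65536 and
 * num_pages = DIV_ROUND_UP(65536, 4096) = 16;
 * a trailing chunk of len = 1 gives clen = 1 and num_pages = 1.
 */
#if 0	/* example only */
	size_t clen;
	size_t n = get_numpages(65536, 100000, &clen);
	/* n == 16, clen == 65536 on systems with 4K pages */
#endif
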
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002469static void
Steve French4a5c80d2014-02-07 20:45:12 -06002470cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002471{
2472 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002473 struct cifs_writedata *wdata = container_of(refcount,
2474 struct cifs_writedata, refcount);
2475
2476 for (i = 0; i < wdata->nr_pages; i++)
2477 put_page(wdata->pages[i]);
2478 cifs_writedata_release(refcount);
2479}
2480
2481static void
2482cifs_uncached_writev_complete(struct work_struct *work)
2483{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002484 struct cifs_writedata *wdata = container_of(work,
2485 struct cifs_writedata, work);
David Howells2b0143b2015-03-17 22:25:59 +00002486 struct inode *inode = d_inode(wdata->cfile->dentry);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002487 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2488
2489 spin_lock(&inode->i_lock);
2490 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2491 if (cifsi->server_eof > inode->i_size)
2492 i_size_write(inode, cifsi->server_eof);
2493 spin_unlock(&inode->i_lock);
2494
2495 complete(&wdata->done);
2496
Steve French4a5c80d2014-02-07 20:45:12 -06002497 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002498}
2499
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002500static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002501wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2502 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002503{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002504 size_t save_len, copied, bytes, cur_len = *len;
2505 unsigned long i, nr_pages = *num_pages;
2506
2507 save_len = cur_len;
2508 for (i = 0; i < nr_pages; i++) {
2509 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2510 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2511 cur_len -= copied;
2512 /*
2513 * If we didn't copy as much as we expected, then that
2514 * may mean we trod into an unmapped area. Stop copying
2515 * at that point. On the next pass through the big
2516 * loop, we'll likely end up getting a zero-length
2517 * write and bailing out of it.
2518 */
2519 if (copied < bytes)
2520 break;
2521 }
2522 cur_len = save_len - cur_len;
2523 *len = cur_len;
2524
2525 /*
2526 * If we have no data to send, then that probably means that
2527 * the copy above failed altogether. That's most likely because
2528 * the address in the iovec was bogus. Return -EFAULT and let
2529 * the caller free anything we allocated and bail out.
2530 */
2531 if (!cur_len)
2532 return -EFAULT;
2533
2534 /*
2535 * i + 1 now represents the number of pages we actually used in
2536 * the copy phase above.
2537 */
2538 *num_pages = i + 1;
2539 return 0;
2540}
2541
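/*
 * Illustrative sketch, not part of the original file:
 * copy_page_from_iter() can copy fewer bytes than requested when the
 * source iovec points at an unmapped address. wdata_fill_from_iovec()
 * above turns a short copy into "stop filling here" and reports only
 * the bytes actually captured; a copy of nothing at all becomes
 * -EFAULT. In miniature (stop_filling is an assumed flag):
 */
#if 0	/* example only */
	size_t want = min_t(size_t, cur_len, PAGE_SIZE);
	size_t got = copy_page_from_iter(wdata->pages[i], 0, want, from);
	if (got < want)
		stop_filling = true;	/* strayed into an unmapped area */
#endif
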
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002542static int
2543cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2544 struct cifsFileInfo *open_file,
2545 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002546{
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002547 int rc = 0;
2548 size_t cur_len;
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002549 unsigned long nr_pages, num_pages, i;
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002550 struct cifs_writedata *wdata;
Al Virofc56b982016-09-21 18:18:23 -04002551 struct iov_iter saved_from = *from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002552 loff_t saved_offset = offset;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002553 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002554 struct TCP_Server_Info *server;
2555
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002556 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2557 pid = open_file->pid;
2558 else
2559 pid = current->tgid;
2560
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002561 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002562
2563 do {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002564 unsigned int wsize, credits;
2565
2566 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2567 &wsize, &credits);
2568 if (rc)
2569 break;
2570
2571 nr_pages = get_numpages(wsize, len, &cur_len);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002572 wdata = cifs_writedata_alloc(nr_pages,
2573 cifs_uncached_writev_complete);
2574 if (!wdata) {
2575 rc = -ENOMEM;
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002576 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002577 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002578 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002579
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002580 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2581 if (rc) {
2582 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002583 add_credits_and_wake_if(server, credits, 0);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002584 break;
2585 }
2586
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002587 num_pages = nr_pages;
2588 rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
2589 if (rc) {
Jeff Layton5d81de82014-02-14 07:20:35 -05002590 for (i = 0; i < nr_pages; i++)
2591 put_page(wdata->pages[i]);
2592 kfree(wdata);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002593 add_credits_and_wake_if(server, credits, 0);
Jeff Layton5d81de82014-02-14 07:20:35 -05002594 break;
2595 }
2596
2597 /*
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002598 * Bring nr_pages down to the number of pages we actually used,
2599 * and free any pages that we didn't use.
Jeff Layton5d81de82014-02-14 07:20:35 -05002600 */
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002601 for ( ; nr_pages > num_pages; nr_pages--)
Jeff Layton5d81de82014-02-14 07:20:35 -05002602 put_page(wdata->pages[nr_pages - 1]);
2603
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002604 wdata->sync_mode = WB_SYNC_ALL;
2605 wdata->nr_pages = nr_pages;
2606 wdata->offset = (__u64)offset;
2607 wdata->cfile = cifsFileInfo_get(open_file);
2608 wdata->pid = pid;
2609 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002610 wdata->pagesz = PAGE_SIZE;
2611 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002612 wdata->credits = credits;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002613
2614 if (!wdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01002615 !(rc = cifs_reopen_file(wdata->cfile, false)))
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002616 rc = server->ops->async_writev(wdata,
2617 cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002618 if (rc) {
Pavel Shilovskycb7e9ea2014-06-05 19:03:27 +04002619 add_credits_and_wake_if(server, wdata->credits, 0);
Steve French4a5c80d2014-02-07 20:45:12 -06002620 kref_put(&wdata->refcount,
2621 cifs_uncached_writedata_release);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002622 if (rc == -EAGAIN) {
Al Virofc56b982016-09-21 18:18:23 -04002623 *from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002624 iov_iter_advance(from, offset - saved_offset);
2625 continue;
2626 }
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002627 break;
2628 }
2629
Pavel Shilovsky43de94e2014-06-20 16:10:52 +04002630 list_add_tail(&wdata->list, wdata_list);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002631 offset += cur_len;
2632 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002633 } while (len > 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002634
2635 return rc;
2636}
2637
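/*
 * Illustrative sketch, not part of the original file: a retryable send
 * failure cannot simply re-read the iterator -- the failed pass already
 * consumed it. cifs_write_from_iter() above therefore restores a copy
 * saved before the loop and advances it back to the resend offset:
 */
#if 0	/* example only */
	if (rc == -EAGAIN) {
		*from = saved_from;			/* struct copy */
		iov_iter_advance(from, offset - saved_offset);
		continue;	/* rebuild the wdata from this offset */
	}
#endif
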
Al Viroe9d15932015-04-06 22:44:11 -04002638ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002639{
Al Viroe9d15932015-04-06 22:44:11 -04002640 struct file *file = iocb->ki_filp;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002641 ssize_t total_written = 0;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002642 struct cifsFileInfo *open_file;
2643 struct cifs_tcon *tcon;
2644 struct cifs_sb_info *cifs_sb;
2645 struct cifs_writedata *wdata, *tmp;
2646 struct list_head wdata_list;
Al Virofc56b982016-09-21 18:18:23 -04002647 struct iov_iter saved_from = *from;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002648 int rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002649
Al Viroe9d15932015-04-06 22:44:11 -04002650 /*
2651 * BB - optimize the case when signing is disabled: we could drop this
2652 * extra memory-to-memory copying and use the iovec buffers to construct
2653 * the write request.
2654 */
2655
Al Viro3309dd02015-04-09 12:55:47 -04002656 rc = generic_write_checks(iocb, from);
2657 if (rc <= 0)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002658 return rc;
2659
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002660 INIT_LIST_HEAD(&wdata_list);
Al Viro7119e222014-10-22 00:25:12 -04002661 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002662 open_file = file->private_data;
2663 tcon = tlink_tcon(open_file->tlink);
2664
2665 if (!tcon->ses->server->ops->async_writev)
2666 return -ENOSYS;
2667
Al Viro3309dd02015-04-09 12:55:47 -04002668 rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
2669 open_file, cifs_sb, &wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002670
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002671 /*
2672 * If at least one write was successfully sent, then discard any rc
2673 * value from the later writes. If a later write succeeds, then
2674 * we'll end up returning whatever was written. If it fails, then
2675 * we'll get a new rc value from that.
2676 */
2677 if (!list_empty(&wdata_list))
2678 rc = 0;
2679
2680 /*
2681 * Wait for and collect replies for any successful sends in order of
2682 * increasing offset. Once an error is hit or we get a fatal signal
2683 * while waiting, then return without waiting for any more replies.
2684 */
2685restart_loop:
2686 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2687 if (!rc) {
2688 /* FIXME: freezable too? */
2689 rc = wait_for_completion_killable(&wdata->done);
2690 if (rc)
2691 rc = -EINTR;
2692 else if (wdata->result)
2693 rc = wdata->result;
2694 else
2695 total_written += wdata->bytes;
2696
2697 /* resend call if it's a retryable error */
2698 if (rc == -EAGAIN) {
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002699 struct list_head tmp_list;
Al Virofc56b982016-09-21 18:18:23 -04002700 struct iov_iter tmp_from = saved_from;
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002701
2702 INIT_LIST_HEAD(&tmp_list);
2703 list_del_init(&wdata->list);
2704
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002705 iov_iter_advance(&tmp_from,
Al Viroe9d15932015-04-06 22:44:11 -04002706 wdata->offset - iocb->ki_pos);
Pavel Shilovsky6ec0b012014-06-20 16:30:46 +04002707
2708 rc = cifs_write_from_iter(wdata->offset,
2709 wdata->bytes, &tmp_from,
2710 open_file, cifs_sb, &tmp_list);
2711
2712 list_splice(&tmp_list, &wdata_list);
2713
2714 kref_put(&wdata->refcount,
2715 cifs_uncached_writedata_release);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002716 goto restart_loop;
2717 }
2718 }
2719 list_del_init(&wdata->list);
Steve French4a5c80d2014-02-07 20:45:12 -06002720 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002721 }
2722
Al Viroe9d15932015-04-06 22:44:11 -04002723 if (unlikely(!total_written))
2724 return rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002725
Al Viroe9d15932015-04-06 22:44:11 -04002726 iocb->ki_pos += total_written;
2727 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002728 cifs_stats_bytes_written(tcon, total_written);
Al Viroe9d15932015-04-06 22:44:11 -04002729 return total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002730}
2731
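/*
 * Worked example, not part of the original file, of the short-write
 * semantics above: suppose three 64K wdatas are sent, the first
 * completes, the second finishes with -EIO, and the third is therefore
 * never waited on. The loop leaves rc == -EIO but total_written ==
 * 65536, so cifs_user_writev() reports a 64K short write instead of
 * the error -- exactly what a caller of write(2) expects.
 */
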
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002732static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002733cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002734{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002735 struct file *file = iocb->ki_filp;
2736 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2737 struct inode *inode = file->f_mapping->host;
2738 struct cifsInodeInfo *cinode = CIFS_I(inode);
2739 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04002740 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002741
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002742 /*
2743 * We need to hold the sem to be sure nobody modifies lock list
2744 * with a brlock that prevents writing.
2745 */
2746 down_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05002747 inode_lock(inode);
Al Viro5f380c72015-04-07 11:28:12 -04002748
Al Viro3309dd02015-04-09 12:55:47 -04002749 rc = generic_write_checks(iocb, from);
2750 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04002751 goto out;
2752
Al Viro5f380c72015-04-07 11:28:12 -04002753 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002754 server->vals->exclusive_lock_type, NULL,
Al Viro5f380c72015-04-07 11:28:12 -04002755 CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04002756 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04002757 else
2758 rc = -EACCES;
2759out:
Al Viro59551022016-01-22 15:40:57 -05002760 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04002761
Christoph Hellwige2592212016-04-07 08:52:01 -07002762 if (rc > 0)
2763 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002764 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002765 return rc;
2766}
2767
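/*
 * Illustrative sketch, not part of the original file: cifs_writev()
 * holds lock_sem across the whole write so no byte-range lock can
 * appear between the conflict check and the data landing in the page
 * cache. The check asks whether any mandatory brlock overlaps
 * [pos, pos+len), and only a clean answer lets the write proceed:
 */
#if 0	/* example only */
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos,
				     iov_iter_count(from),
				     server->vals->exclusive_lock_type,
				     NULL, CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;	/* conflicting mandatory lock */
#endif
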
2768ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002769cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002770{
Al Viro496ad9a2013-01-23 17:07:38 -05002771 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002772 struct cifsInodeInfo *cinode = CIFS_I(inode);
2773 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2774 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2775 iocb->ki_filp->private_data;
2776 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002777 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002778
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002779 written = cifs_get_writer(cinode);
2780 if (written)
2781 return written;
2782
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002783 if (CIFS_CACHE_WRITE(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002784 if (cap_unix(tcon->ses) &&
2785 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002786 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
Al Viro3dae8752014-04-03 12:05:17 -04002787 written = generic_file_write_iter(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002788 goto out;
2789 }
Al Viro3dae8752014-04-03 12:05:17 -04002790 written = cifs_writev(iocb, from);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002791 goto out;
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04002792 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002793 /*
2794 * For non-oplocked files in strict cache mode we need to write the data
2795 * to the server exactly from pos to pos+len-1 rather than flush all
2796 * affected pages, because flushing may cause an error with mandatory
2797 * locks on these pages but not on the region from pos to pos+len-1.
2798 */
Al Viro3dae8752014-04-03 12:05:17 -04002799 written = cifs_user_writev(iocb, from);
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002800 if (CIFS_CACHE_READ(cinode)) {
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002801 /*
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002802 * We have read level caching and we have just sent a write
2803 * request to the server thus making data in the cache stale.
2804 * Zap the cache and set oplock/lease level to NONE to avoid
2805 * reading stale data from the cache. All subsequent read
2806 * operations will read new data from the server.
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002807 */
Jeff Layton4f73c7d2014-04-30 09:31:47 -04002808 cifs_zap_mapping(inode);
Pavel Shilovsky776f01e2019-03-04 17:48:01 -08002809 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
Joe Perchesf96637b2013-05-04 22:12:25 -05002810 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04002811 cinode->oplock = 0;
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002812 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00002813out:
2814 cifs_put_writer(cinode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002815 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002816}
2817
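/*
 * Illustrative summary, not part of the original file: the dispatch in
 * cifs_strict_writev() above, reduced to a table.
 *
 *   CIFS_CACHE_WRITE + POSIX brlock capable -> generic_file_write_iter()
 *   CIFS_CACHE_WRITE otherwise              -> cifs_writev() (brlock check)
 *   no write caching                        -> cifs_user_writev(), then zap
 *                                              the cache and drop the oplock
 *                                              if read caching was held
 */
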
Jeff Layton0471ca32012-05-16 07:13:16 -04002818static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002819cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002820{
2821 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002822
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002823 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2824 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002825 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002826 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002827 INIT_LIST_HEAD(&rdata->list);
2828 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002829 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002830 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002831
Jeff Layton0471ca32012-05-16 07:13:16 -04002832 return rdata;
2833}
2834
Jeff Layton6993f742012-05-16 07:13:17 -04002835void
2836cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002837{
Jeff Layton6993f742012-05-16 07:13:17 -04002838 struct cifs_readdata *rdata = container_of(refcount,
2839 struct cifs_readdata, refcount);
2840
2841 if (rdata->cfile)
2842 cifsFileInfo_put(rdata->cfile);
2843
Jeff Layton0471ca32012-05-16 07:13:16 -04002844 kfree(rdata);
2845}
2846
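/*
 * Illustrative sketch, not part of the original file: rdata lifetime is
 * reference counted. cifs_readdata_alloc() takes the first reference
 * via kref_init(), the async send path holds its own for the duration
 * of the I/O, and every kref_put() names the release matching the
 * allocation flavour so the uncached variant also drops its page
 * references. Typical shape (submit() is an assumed placeholder that
 * takes its own reference on success):
 */
#if 0	/* example only */
	struct cifs_readdata *rdata;
	int rc;

	rdata = cifs_readdata_alloc(npages, complete_fn); /* refcount = 1 */
	rc = submit(rdata);		/* hypothetical async send */
	if (rc)
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	/* otherwise the collector drops this reference after waiting */
#endif
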
Jeff Layton2a1bb132012-05-16 07:13:17 -04002847static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002848cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002849{
2850 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002851 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002852 unsigned int i;
2853
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002854 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002855 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2856 if (!page) {
2857 rc = -ENOMEM;
2858 break;
2859 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002860 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002861 }
2862
2863 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002864 for (i = 0; i < nr_pages; i++) {
2865 put_page(rdata->pages[i]);
2866 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002867 }
2868 }
2869 return rc;
2870}
2871
2872static void
2873cifs_uncached_readdata_release(struct kref *refcount)
2874{
Jeff Layton1c892542012-05-16 07:13:17 -04002875 struct cifs_readdata *rdata = container_of(refcount,
2876 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002877 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002878
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002879 for (i = 0; i < rdata->nr_pages; i++) {
2880 put_page(rdata->pages[i]);
2881 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002882 }
2883 cifs_readdata_release(refcount);
2884}
2885
Jeff Layton1c892542012-05-16 07:13:17 -04002886/**
2887 * cifs_readdata_to_iov - copy data from pages in a response into an iterator
2888 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05002889 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04002890 *
2891 * This function copies data from a list of pages in a readdata response into
2892 * the destination iterator. It will first calculate where the data should go
2893 * based on the info in the readdata and then copy the data into that spot.
2894 */
Al Viro7f25bba2014-02-04 14:07:43 -05002895static int
2896cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04002897{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04002898 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002899 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002900
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002901 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002902 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02002903 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Pavel Shilovskyc06d74d2017-01-19 13:53:15 -08002904 size_t written;
2905
2906 if (unlikely(iter->type & ITER_PIPE)) {
2907 void *addr = kmap_atomic(page);
2908
2909 written = copy_to_iter(addr, copy, iter);
2910 kunmap_atomic(addr);
2911 } else
2912 written = copy_page_to_iter(page, 0, copy, iter);
Al Viro7f25bba2014-02-04 14:07:43 -05002913 remaining -= written;
2914 if (written < copy && iov_iter_count(iter) > 0)
2915 break;
Jeff Layton1c892542012-05-16 07:13:17 -04002916 }
Al Viro7f25bba2014-02-04 14:07:43 -05002917 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002918}
2919
2920static void
2921cifs_uncached_readv_complete(struct work_struct *work)
2922{
2923 struct cifs_readdata *rdata = container_of(work,
2924 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002925
2926 complete(&rdata->done);
2927 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2928}
2929
2930static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002931cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2932 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002933{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002934 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002935 unsigned int i;
2936 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002937
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002938 rdata->got_bytes = 0;
Jeff Layton8321fec2012-09-19 06:22:32 -07002939 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002940 for (i = 0; i < nr_pages; i++) {
2941 struct page *page = rdata->pages[i];
Al Viro71335662016-01-09 19:54:50 -05002942 size_t n;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002943
Al Viro71335662016-01-09 19:54:50 -05002944 if (len <= 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002945 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002946 rdata->pages[i] = NULL;
2947 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002948 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002949 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002950 }
Al Viro71335662016-01-09 19:54:50 -05002951 n = len;
2952 if (len >= PAGE_SIZE) {
2953 /* enough data to fill the page */
2954 n = PAGE_SIZE;
2955 len -= n;
2956 } else {
2957 zero_user(page, len, PAGE_SIZE - len);
2958 rdata->tailsz = len;
2959 len = 0;
2960 }
2961 result = cifs_read_page_from_socket(server, page, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07002962 if (result < 0)
2963 break;
2964
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002965 rdata->got_bytes += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002966 }
2967
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04002968 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
2969 rdata->got_bytes : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002970}
2971
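/*
 * Worked example, not part of the original file, for the tail handling
 * above, assuming 4K pages: a 5000-byte read fills page 0 completely,
 * reads 904 bytes into page 1, zeroes bytes 904..4095 of that page with
 * zero_user(), and records tailsz = 904 so later copies never hand the
 * zeroed slack out as data; any page past the end of the data is
 * released immediately instead of being held hostage.
 */
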
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04002972static int
2973cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
2974 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975{
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04002976 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04002977 unsigned int npages, rsize, credits;
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04002978 size_t cur_len;
2979 int rc;
Jeff Layton1c892542012-05-16 07:13:17 -04002980 pid_t pid;
Pavel Shilovsky25f40252014-06-25 10:45:07 +04002981 struct TCP_Server_Info *server;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002982
Pavel Shilovsky25f40252014-06-25 10:45:07 +04002983 server = tlink_tcon(open_file->tlink)->ses->server;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002984
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002985 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2986 pid = open_file->pid;
2987 else
2988 pid = current->tgid;
2989
Jeff Layton1c892542012-05-16 07:13:17 -04002990 do {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04002991 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
2992 &rsize, &credits);
2993 if (rc)
2994 break;
2995
2996 cur_len = min_t(const size_t, len, rsize);
Jeff Layton1c892542012-05-16 07:13:17 -04002997 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002998
Jeff Layton1c892542012-05-16 07:13:17 -04002999 /* allocate a readdata struct */
3000 rdata = cifs_readdata_alloc(npages,
3001 cifs_uncached_readv_complete);
3002 if (!rdata) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003003 add_credits_and_wake_if(server, credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003004 rc = -ENOMEM;
Jeff Laytonbae9f742014-04-15 12:48:49 -04003005 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003006 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003007
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003008 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04003009 if (rc)
3010 goto error;
3011
3012 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003013 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04003014 rdata->offset = offset;
3015 rdata->bytes = cur_len;
3016 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003017 rdata->pagesz = PAGE_SIZE;
3018 rdata->read_into_pages = cifs_uncached_read_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003019 rdata->credits = credits;
Jeff Layton1c892542012-05-16 07:13:17 -04003020
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003021 if (!rdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01003022 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003023 rc = server->ops->async_readv(rdata);
Jeff Layton1c892542012-05-16 07:13:17 -04003024error:
3025 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003026 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Layton1c892542012-05-16 07:13:17 -04003027 kref_put(&rdata->refcount,
3028 cifs_uncached_readdata_release);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003029 if (rc == -EAGAIN)
3030 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04003031 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032 }
Jeff Layton1c892542012-05-16 07:13:17 -04003033
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003034 list_add_tail(&rdata->list, rdata_list);
Jeff Layton1c892542012-05-16 07:13:17 -04003035 offset += cur_len;
3036 len -= cur_len;
3037 } while (len > 0);
3038
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003039 return rc;
3040}
3041
3042ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3043{
3044 struct file *file = iocb->ki_filp;
3045 ssize_t rc;
3046 size_t len;
3047 ssize_t total_read = 0;
3048 loff_t offset = iocb->ki_pos;
3049 struct cifs_sb_info *cifs_sb;
3050 struct cifs_tcon *tcon;
3051 struct cifsFileInfo *open_file;
3052 struct cifs_readdata *rdata, *tmp;
3053 struct list_head rdata_list;
3054
3055 len = iov_iter_count(to);
3056 if (!len)
3057 return 0;
3058
3059 INIT_LIST_HEAD(&rdata_list);
Al Viro7119e222014-10-22 00:25:12 -04003060 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky0ada36b2014-06-25 10:42:28 +04003061 open_file = file->private_data;
3062 tcon = tlink_tcon(open_file->tlink);
3063
3064 if (!tcon->ses->server->ops->async_readv)
3065 return -ENOSYS;
3066
3067 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3068 cifs_dbg(FYI, "attempting read on write only file instance\n");
3069
3070 rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
3071
Jeff Layton1c892542012-05-16 07:13:17 -04003072 /* if at least one read request was sent successfully, then reset rc */
3073 if (!list_empty(&rdata_list))
3074 rc = 0;
3075
Al Viroe6a7bcb2014-04-02 19:53:36 -04003076 len = iov_iter_count(to);
Jeff Layton1c892542012-05-16 07:13:17 -04003077 /* the loop below should proceed in the order of increasing offsets */
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003078again:
Jeff Layton1c892542012-05-16 07:13:17 -04003079 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
3080 if (!rc) {
Jeff Layton1c892542012-05-16 07:13:17 -04003081 /* FIXME: freezable sleep too? */
3082 rc = wait_for_completion_killable(&rdata->done);
3083 if (rc)
3084 rc = -EINTR;
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003085 else if (rdata->result == -EAGAIN) {
Al Viro74027f42014-02-04 13:47:26 -05003086 /* resend call if it's a retryable error */
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003087 struct list_head tmp_list;
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003088 unsigned int got_bytes = rdata->got_bytes;
Jeff Layton1c892542012-05-16 07:13:17 -04003089
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003090 list_del_init(&rdata->list);
3091 INIT_LIST_HEAD(&tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003092
Pavel Shilovskyd913ed12014-07-10 11:31:48 +04003093 /*
3094 * Got part of the data and then a reconnect
3095 * happened -- fill the buffer and continue
3096 * reading.
3097 */
3098 if (got_bytes && got_bytes < rdata->bytes) {
3099 rc = cifs_readdata_to_iov(rdata, to);
3100 if (rc) {
3101 kref_put(&rdata->refcount,
3102 cifs_uncached_readdata_release);
3103 continue;
3104 }
3105 }
3106
3107 rc = cifs_send_async_read(
3108 rdata->offset + got_bytes,
3109 rdata->bytes - got_bytes,
3110 rdata->cfile, cifs_sb,
3111 &tmp_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003112
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003113 list_splice(&tmp_list, &rdata_list);
Pavel Shilovsky25f40252014-06-25 10:45:07 +04003114
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003115 kref_put(&rdata->refcount,
3116 cifs_uncached_readdata_release);
3117 goto again;
3118 } else if (rdata->result)
3119 rc = rdata->result;
3120 else
Jeff Layton1c892542012-05-16 07:13:17 -04003121 rc = cifs_readdata_to_iov(rdata, to);
Pavel Shilovskyfb8a3e52014-07-10 11:50:39 +04003122
Pavel Shilovsky2e8a05d2014-07-10 10:21:15 +04003123 /* if there was a short read -- discard anything left */
3124 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3125 rc = -ENODATA;
Jeff Layton1c892542012-05-16 07:13:17 -04003126 }
3127 list_del_init(&rdata->list);
3128 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003129 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003130
Al Viroe6a7bcb2014-04-02 19:53:36 -04003131 total_read = len - iov_iter_count(to);
Al Viro7f25bba2014-02-04 14:07:43 -05003132
Jeff Layton1c892542012-05-16 07:13:17 -04003133 cifs_stats_bytes_read(tcon, total_read);
Jeff Layton1c892542012-05-16 07:13:17 -04003134
Pavel Shilovsky09a47072012-09-18 16:20:29 -07003135 /* mask nodata case */
3136 if (rc == -ENODATA)
3137 rc = 0;
3138
Al Viro0165e812014-02-04 14:19:48 -05003139 if (total_read) {
Al Viroe6a7bcb2014-04-02 19:53:36 -04003140 iocb->ki_pos += total_read;
Al Viro0165e812014-02-04 14:19:48 -05003141 return total_read;
3142 }
3143 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003144}
3145
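/*
 * Illustrative sketch, not part of the original file: unlike the write
 * side, a reconnect can leave a read partially satisfied. The
 * collection loop above first drains rdata->got_bytes into the
 * caller's iterator and resends only the remainder:
 */
#if 0	/* example only */
	if (got_bytes && got_bytes < rdata->bytes) {
		rc = cifs_readdata_to_iov(rdata, to);	/* keep what arrived */
		if (rc)
			continue;	/* drop this rdata, surface rc */
	}
	rc = cifs_send_async_read(rdata->offset + got_bytes,
				  rdata->bytes - got_bytes,
				  rdata->cfile, cifs_sb, &tmp_list);
#endif
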
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003146ssize_t
Al Viroe6a7bcb2014-04-02 19:53:36 -04003147cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003148{
Al Viro496ad9a2013-01-23 17:07:38 -05003149 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003150 struct cifsInodeInfo *cinode = CIFS_I(inode);
3151 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3152 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3153 iocb->ki_filp->private_data;
3154 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3155 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003156
3157 /*
3158 * In strict cache mode we need to read from the server all the time
3159 * if we don't have a level II oplock, because the server can delay the
3160 * mtime change - so we can't make a decision about invalidating the
3161 * inode. Page reading can also fail if there are mandatory locks on
3162 * pages affected by this read but not on the region from pos to
3163 * pos+len-1.
3164 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003165 if (!CIFS_CACHE_READ(cinode))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003166 return cifs_user_readv(iocb, to);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003167
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003168 if (cap_unix(tcon->ses) &&
3169 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3170 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003171 return generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003172
3173 /*
3174 * We need to hold the sem to be sure nobody modifies lock list
3175 * with a brlock that prevents reading.
3176 */
3177 down_read(&cinode->lock_sem);
Al Viroe6a7bcb2014-04-02 19:53:36 -04003178 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003179 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04003180 NULL, CIFS_READ_OP))
Al Viroe6a7bcb2014-04-02 19:53:36 -04003181 rc = generic_file_read_iter(iocb, to);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07003182 up_read(&cinode->lock_sem);
3183 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03003184}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003186static ssize_t
3187cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003188{
3189 int rc = -EACCES;
3190 unsigned int bytes_read = 0;
3191 unsigned int total_read;
3192 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003193 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003194 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003195 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003196 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003197 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003198 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003199 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003200 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003201 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003202 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003204 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003205 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003207 /* FIXME: set up handlers for larger reads and/or convert to async */
3208 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3209
Linus Torvalds1da177e2005-04-16 15:20:36 -07003210 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303211 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003212 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303213 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003215 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003216 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003217 server = tcon->ses->server;
3218
3219 if (!server->ops->sync_read) {
3220 free_xid(xid);
3221 return -ENOSYS;
3222 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003223
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003224 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3225 pid = open_file->pid;
3226 else
3227 pid = current->tgid;
3228
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003230 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003232 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3233 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003234 do {
3235 current_read_size = min_t(uint, read_size - total_read,
3236 rsize);
3237 /*
3238 * For Windows ME and 9x we do not want to request more
3239 * than was negotiated, since the server will refuse the
3240 * read then.
3241 */
3242 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003243 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003244 current_read_size = min_t(uint,
3245 current_read_size, CIFSMaxBufSize);
3246 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003247 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003248 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003249 if (rc != 0)
3250 break;
3251 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003252 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003253 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003254 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003255 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003256 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003257 &bytes_read, &cur_offset,
3258 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003259 } while (rc == -EAGAIN);
3260
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261 if (rc || (bytes_read == 0)) {
3262 if (total_read) {
3263 break;
3264 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003265 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266 return rc;
3267 }
3268 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003269 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003270 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271 }
3272 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003273 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274 return total_read;
3275}
3276
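/*
 * Worked example, not part of the original file, for the legacy
 * synchronous path above: with cifs_sb->rsize = 1 MiB and
 * CIFSMaxBufSize = 16384, rsize clamps to 16384, so a 102400-byte
 * cifs_read() issues seven sync_read calls (six of 16384 bytes plus
 * one of 4096), advancing *offset by bytes_read after each round and
 * retrying any -EAGAIN round against a reopened handle.
 */
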
Jeff Laytonca83ce32011-04-12 09:13:44 -04003277/*
3278 * If the page is mmap'ed into a process' page tables, then we need to make
3279 * sure that it doesn't change while being written back.
3280 */
3281static int
3282cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3283{
3284 struct page *page = vmf->page;
3285
3286 lock_page(page);
3287 return VM_FAULT_LOCKED;
3288}
3289
Kirill A. Shutemov7cbea8d2015-09-09 15:39:26 -07003290static const struct vm_operations_struct cifs_file_vm_ops = {
Jeff Laytonca83ce32011-04-12 09:13:44 -04003291 .fault = filemap_fault,
Kirill A. Shutemovf1820362014-04-07 15:37:19 -07003292 .map_pages = filemap_map_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003293 .page_mkwrite = cifs_page_mkwrite,
3294};
3295
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003296int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3297{
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003298 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003299 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003300
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003301 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003302
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003303 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003304 rc = cifs_zap_mapping(inode);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003305 if (!rc)
3306 rc = generic_file_mmap(file, vma);
3307 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003308 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003309
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003310 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003311 return rc;
3312}
3313
Linus Torvalds1da177e2005-04-16 15:20:36 -07003314int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3315{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316 int rc, xid;
3317
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003318 xid = get_xid();
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003319
Jeff Laytonabab0952010-02-12 07:44:18 -05003320 rc = cifs_revalidate_file(file);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003321 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003322 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3323 rc);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003324 if (!rc)
3325 rc = generic_file_mmap(file, vma);
3326 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003327 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003328
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003329 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 return rc;
3331}
3332
Jeff Layton0471ca32012-05-16 07:13:16 -04003333static void
3334cifs_readv_complete(struct work_struct *work)
3335{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003336 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003337 struct cifs_readdata *rdata = container_of(work,
3338 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003339
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003340 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003341 for (i = 0; i < rdata->nr_pages; i++) {
3342 struct page *page = rdata->pages[i];
3343
Jeff Layton0471ca32012-05-16 07:13:16 -04003344 lru_cache_add_file(page);
3345
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003346 if (rdata->result == 0 ||
3347 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003348 flush_dcache_page(page);
3349 SetPageUptodate(page);
3350 }
3351
3352 unlock_page(page);
3353
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003354 if (rdata->result == 0 ||
3355 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003356 cifs_readpage_to_fscache(rdata->mapping->host, page);
3357
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003358 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003359
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003360 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003361 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003362 }
Jeff Layton6993f742012-05-16 07:13:17 -04003363 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003364}
3365
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003366static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003367cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3368 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003369{
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003370 int result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003371 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003372 u64 eof;
3373 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003374 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003375
3376 /* determine the eof that the server (probably) has */
3377 eof = CIFS_I(rdata->mapping->host)->server_eof;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003378 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05003379 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003380
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003381 rdata->got_bytes = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003382 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003383 for (i = 0; i < nr_pages; i++) {
3384 struct page *page = rdata->pages[i];
Linus Torvalds442c9ac2016-05-18 10:17:56 -07003385 size_t n = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003386
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003387 if (len >= PAGE_SIZE) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003388 len -= PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07003389 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003390 /* enough for partial page, fill and zero the rest */
Linus Torvalds442c9ac2016-05-18 10:17:56 -07003391 zero_user(page, len, PAGE_SIZE - len);
Al Viro71335662016-01-09 19:54:50 -05003392 n = rdata->tailsz = len;
Jeff Layton8321fec2012-09-19 06:22:32 -07003393 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003394 } else if (page->index > eof_index) {
3395 /*
3396 * The VFS will not try to do readahead past the
3397 * i_size, but it's possible that we have outstanding
3398 * writes with gaps in the middle and the i_size hasn't
3399 * caught up yet. Populate those with zeroed out pages
3400 * to prevent the VFS from repeatedly attempting to
3401 * fill them until the writes are flushed.
3402 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003403 zero_user(page, 0, PAGE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003404 lru_cache_add_file(page);
3405 flush_dcache_page(page);
3406 SetPageUptodate(page);
3407 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003408 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003409 rdata->pages[i] = NULL;
3410 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003411 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003412 } else {
3413 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003414 lru_cache_add_file(page);
3415 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003416 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003417 rdata->pages[i] = NULL;
3418 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003419 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003420 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003421
Al Viro71335662016-01-09 19:54:50 -05003422 result = cifs_read_page_from_socket(server, page, n);
Jeff Layton8321fec2012-09-19 06:22:32 -07003423 if (result < 0)
3424 break;
3425
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003426 rdata->got_bytes += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003427 }
3428
Pavel Shilovskyb3160ae2014-07-10 10:16:25 +04003429 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3430 rdata->got_bytes : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003431}
3432
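/*
 * Peel pages off the tail of @page_list (kept in descending index order)
 * onto @tmplist until we would exceed @rsize, hit an index
 * discontinuity, or fail to add a page to the page cache. On return,
 * @offset, @bytes and @nr_pages describe one contiguous read request;
 * e.g., assuming 4K pages and an rsize of 16384, at most four pages go
 * into a single request.
 */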
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003433static int
3434readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3435 unsigned int rsize, struct list_head *tmplist,
3436 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3437{
3438 struct page *page, *tpage;
3439 unsigned int expected_index;
3440 int rc;
Michal Hocko8a5c7432016-07-26 15:24:53 -07003441 gfp_t gfp = readahead_gfp_mask(mapping);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003442
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003443 INIT_LIST_HEAD(tmplist);
3444
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003445 page = list_entry(page_list->prev, struct page, lru);
3446
3447 /*
3448 * Lock the page and put it in the cache. Since no one else
3449 * should have access to this page, we're safe to simply set
3450 * PG_locked without checking it first.
3451 */
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003452 __SetPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003453 rc = add_to_page_cache_locked(page, mapping,
Michal Hocko063d99b2015-10-15 15:28:24 -07003454 page->index, gfp);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003455
3456 /* give up if we can't stick it in the cache */
3457 if (rc) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003458 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003459 return rc;
3460 }
3461
3462 /* move first page to the tmplist */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003463 *offset = (loff_t)page->index << PAGE_SHIFT;
3464 *bytes = PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003465 *nr_pages = 1;
3466 list_move_tail(&page->lru, tmplist);
3467
3468	/* now try to add more pages onto the request */
3469 expected_index = page->index + 1;
3470 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3471		/* discontinuity? */
3472 if (page->index != expected_index)
3473 break;
3474
3475 /* would this page push the read over the rsize? */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003476 if (*bytes + PAGE_SIZE > rsize)
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003477 break;
3478
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003479 __SetPageLocked(page);
Michal Hocko063d99b2015-10-15 15:28:24 -07003480 if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
Kirill A. Shutemov48c935a2016-01-15 16:51:24 -08003481 __ClearPageLocked(page);
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003482 break;
3483 }
3484 list_move_tail(&page->lru, tmplist);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003485 (*bytes) += PAGE_SIZE;
Pavel Shilovsky387eb922014-06-24 13:08:54 +04003486 expected_index++;
3487 (*nr_pages)++;
3488 }
3489 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490}
3491
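/*
 * ->readpages() for cifs address spaces. The VFS hands us a list of
 * pages in descending index order; fscache gets the first shot at them,
 * and whatever remains is batched into rsize-limited async reads below.
 */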
Linus Torvalds1da177e2005-04-16 15:20:36 -07003492static int cifs_readpages(struct file *file, struct address_space *mapping,
3493 struct list_head *page_list, unsigned num_pages)
3494{
Jeff Layton690c5e32011-10-19 15:30:16 -04003495 int rc;
3496 struct list_head tmplist;
3497 struct cifsFileInfo *open_file = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04003498 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003499 struct TCP_Server_Info *server;
Jeff Layton690c5e32011-10-19 15:30:16 -04003500 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501
Jeff Layton690c5e32011-10-19 15:30:16 -04003502 /*
Suresh Jayaraman56698232010-07-05 18:13:25 +05303503 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3504	 * immediately if the cookie is negative.
David Howells54afa992013-09-04 17:10:39 +00003505	 *
3506	 * After this point, every page in the list might have PG_fscache set,
3507	 * so we will need to clear it from every page we don't use.
Suresh Jayaraman56698232010-07-05 18:13:25 +05303508 */
3509 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3510 &num_pages);
3511 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003512 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303513
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003514 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3515 pid = open_file->pid;
3516 else
3517 pid = current->tgid;
3518
Jeff Layton690c5e32011-10-19 15:30:16 -04003519 rc = 0;
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003520 server = tlink_tcon(open_file->tlink)->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521
Joe Perchesf96637b2013-05-04 22:12:25 -05003522 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3523 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04003524
3525 /*
3526	 * Start with the page at the end of the list and move it to a
3527	 * private list. Do the same with any following pages until we hit
3528	 * the rsize limit, hit an index discontinuity, or run out of
3529	 * pages. Issue the async read and then start the loop again
3530	 * until the list is empty.
3531	 *
3532	 * Note that list order is important. The page_list is in
3533	 * the order of declining indexes. When we put the pages into
3534	 * rdata->pages, we want them in increasing order.
3535 */
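	/*
	 * Each pass through the loop reserves send credits sized to the
	 * negotiated rsize; every error path inside the loop must hand
	 * the credits back via add_credits_and_wake_if(), or the
	 * server's credit accounting would leak.
	 */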
3536 while (!list_empty(page_list)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003537 unsigned int i, nr_pages, bytes, rsize;
Jeff Layton690c5e32011-10-19 15:30:16 -04003538 loff_t offset;
3539 struct page *page, *tpage;
3540 struct cifs_readdata *rdata;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003541 unsigned credits;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003543 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3544 &rsize, &credits);
3545 if (rc)
3546 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547
Jeff Layton690c5e32011-10-19 15:30:16 -04003548 /*
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003549 * Give up immediately if rsize is too small to read an entire
3550		 * page. The VFS will fall back to readpage. However, we should
3551		 * never reach this point, since we set ra_pages to 0 when the
3552 * rsize is smaller than a cache page.
Jeff Layton690c5e32011-10-19 15:30:16 -04003553 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003554 if (unlikely(rsize < PAGE_SIZE)) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003555 add_credits_and_wake_if(server, credits, 0);
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003556 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003559 rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3560 &nr_pages, &offset, &bytes);
3561 if (rc) {
3562 add_credits_and_wake_if(server, credits, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 break;
Jeff Layton690c5e32011-10-19 15:30:16 -04003564 }
3565
Jeff Layton0471ca32012-05-16 07:13:16 -04003566 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003567 if (!rdata) {
3568 /* best to give up if we're out of mem */
3569 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3570 list_del(&page->lru);
3571 lru_cache_add_file(page);
3572 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003573 put_page(page);
Jeff Layton690c5e32011-10-19 15:30:16 -04003574 }
3575 rc = -ENOMEM;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003576 add_credits_and_wake_if(server, credits, 0);
Jeff Layton690c5e32011-10-19 15:30:16 -04003577 break;
3578 }
3579
Jeff Layton6993f742012-05-16 07:13:17 -04003580 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003581 rdata->mapping = mapping;
3582 rdata->offset = offset;
3583 rdata->bytes = bytes;
3584 rdata->pid = pid;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003585 rdata->pagesz = PAGE_SIZE;
Jeff Layton8321fec2012-09-19 06:22:32 -07003586 rdata->read_into_pages = cifs_readpages_read_into_pages;
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003587 rdata->credits = credits;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003588
3589 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3590 list_del(&page->lru);
3591 rdata->pages[rdata->nr_pages++] = page;
3592 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003593
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003594 if (!rdata->cfile->invalidHandle ||
Germano Percossi730fecb2017-04-07 12:29:38 +01003595 !(rc = cifs_reopen_file(rdata->cfile, true)))
Pavel Shilovsky69cebd72014-06-24 13:42:03 +04003596 rc = server->ops->async_readv(rdata);
3597 if (rc) {
Pavel Shilovskybed9da02014-06-25 11:28:57 +04003598 add_credits_and_wake_if(server, rdata->credits, 0);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003599 for (i = 0; i < rdata->nr_pages; i++) {
3600 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003601 lru_cache_add_file(page);
3602 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003603 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604 }
Pavel Shilovsky1209bbd2014-10-02 20:13:35 +04003605			/* Fall back to readpage in error/reconnect cases */
Jeff Layton6993f742012-05-16 07:13:17 -04003606 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607 break;
3608 }
Jeff Layton6993f742012-05-16 07:13:17 -04003609
3610 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003611 }
3612
David Howells54afa992013-09-04 17:10:39 +00003613 /* Any pages that have been shown to fscache but didn't get added to
3614 * the pagecache must be uncached before they get returned to the
3615 * allocator.
3616 */
3617 cifs_fscache_readpages_cancel(mapping->host, page_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003618 return rc;
3619}
3620
Sachin Prabhua9e9b7b2013-09-13 14:11:56 +01003621/*
3622 * cifs_readpage_worker must be called with the page pinned
3623 */
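/*
 * Try fscache first; on a miss, issue a synchronous cifs_read() of one
 * page, zero-fill whatever the server did not return, mark the page
 * uptodate, and then offer the page to fscache.
 */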
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624static int cifs_readpage_worker(struct file *file, struct page *page,
3625 loff_t *poffset)
3626{
3627 char *read_data;
3628 int rc;
3629
Suresh Jayaraman56698232010-07-05 18:13:25 +05303630 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05003631 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303632 if (rc == 0)
3633 goto read_complete;
3634
Linus Torvalds1da177e2005-04-16 15:20:36 -07003635 read_data = kmap(page);
3636	/* for reads over a certain size we could initiate async read ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003637
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003638 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003639
Linus Torvalds1da177e2005-04-16 15:20:36 -07003640 if (rc < 0)
3641 goto io_error;
3642 else
Joe Perchesf96637b2013-05-04 22:12:25 -05003643 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003644
Al Viro496ad9a2013-01-23 17:07:38 -05003645 file_inode(file)->i_atime =
Deepa Dinamanic2050a42016-09-14 07:48:06 -07003646 current_time(file_inode(file));
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003647
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003648 if (PAGE_SIZE > rc)
3649 memset(read_data + rc, 0, PAGE_SIZE - rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650
3651 flush_dcache_page(page);
3652 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303653
3654 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05003655 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303656
Linus Torvalds1da177e2005-04-16 15:20:36 -07003657 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003658
Linus Torvalds1da177e2005-04-16 15:20:36 -07003659io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003660 kunmap(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01003661 unlock_page(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303662
3663read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 return rc;
3665}
3666
3667static int cifs_readpage(struct file *file, struct page *page)
3668{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003669 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003670 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003671 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003672
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003673 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674
3675 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303676 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003677 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303678 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679 }
3680
Joe Perchesf96637b2013-05-04 22:12:25 -05003681 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003682 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003683
3684 rc = cifs_readpage_worker(file, page, &offset);
3685
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003686 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687 return rc;
3688}
3689
Steve Frencha403a0a2007-07-26 15:54:16 +00003690static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3691{
3692 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05003693 struct cifs_tcon *tcon =
3694 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00003695
Steve French3afca262016-09-22 18:58:16 -05003696 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003697 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003698 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05003699 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003700 return 1;
3701 }
3702 }
Steve French3afca262016-09-22 18:58:16 -05003703 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003704 return 0;
3705}
3706
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707/* We do not want to update the file size from the server for inodes
3708   open for write, to avoid races with writepage extending
3709   the file. In the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003710   refreshing the inode only on increases in the file size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003711   but this is tricky to do without racing with writebehind
3712   page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003713bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714{
Steve Frencha403a0a2007-07-26 15:54:16 +00003715 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003716 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003717
Steve Frencha403a0a2007-07-26 15:54:16 +00003718 if (is_inode_writable(cifsInode)) {
3719 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003720 struct cifs_sb_info *cifs_sb;
3721
Steve Frenchc32a0b62006-01-12 14:41:28 -08003722 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003723 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003724		/* since there is no page cache to corrupt on directio,
Steve Frenchc32a0b62006-01-12 14:41:28 -08003725		   we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003726 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003727 }
3728
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003729 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003730 return true;
Steve French7ba52632007-02-08 18:14:13 +00003731
Steve French4b18f2a2008-04-29 00:06:05 +00003732 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003733 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003734 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735}
3736
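/*
 * ->write_begin() has four outcomes here: the page is already uptodate
 * and can be used as is; the write covers a whole page, so no read is
 * needed; read caching is granted (oplock/lease) and the write lies
 * beyond EOF or covers everything up to it, so the uncovered ranges are
 * zeroed and PageChecked is set; otherwise the page is read in, at most
 * once (oncethru guards the single retry through the start label).
 */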
Nick Piggind9414772008-09-24 11:32:59 -04003737static int cifs_write_begin(struct file *file, struct address_space *mapping,
3738 loff_t pos, unsigned len, unsigned flags,
3739 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740{
Sachin Prabhu466bd312013-09-13 14:11:57 +01003741 int oncethru = 0;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003742 pgoff_t index = pos >> PAGE_SHIFT;
3743 loff_t offset = pos & (PAGE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003744 loff_t page_start = pos & PAGE_MASK;
3745 loff_t i_size;
3746 struct page *page;
3747 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748
Joe Perchesf96637b2013-05-04 22:12:25 -05003749 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003750
Sachin Prabhu466bd312013-09-13 14:11:57 +01003751start:
Nick Piggin54566b22009-01-04 12:00:53 -08003752 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003753 if (!page) {
3754 rc = -ENOMEM;
3755 goto out;
3756 }
Nick Piggind9414772008-09-24 11:32:59 -04003757
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003758 if (PageUptodate(page))
3759 goto out;
Steve French8a236262007-03-06 00:31:00 +00003760
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003761 /*
3762 * If we write a full page it will be up to date, no need to read from
3763 * the server. If the write is short, we'll end up doing a sync write
3764 * instead.
3765 */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003766 if (len == PAGE_SIZE)
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003767 goto out;
3768
3769 /*
3770 * optimize away the read when we have an oplock, and we're not
3771 * expecting to use any of the data we'd be reading in. That
3772 * is, when the page lies beyond the EOF, or straddles the EOF
3773 * and the write will cover all of the existing data.
3774 */
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003775 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003776 i_size = i_size_read(mapping->host);
3777 if (page_start >= i_size ||
3778 (offset == 0 && (pos + len) >= i_size)) {
3779 zero_user_segments(page, 0, offset,
3780 offset + len,
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003781 PAGE_SIZE);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003782 /*
3783 * PageChecked means that the parts of the page
3784 * to which we're not writing are considered up
3785 * to date. Once the data is copied to the
3786 * page, it can be set uptodate.
3787 */
3788 SetPageChecked(page);
3789 goto out;
3790 }
3791 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792
Sachin Prabhu466bd312013-09-13 14:11:57 +01003793 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003794 /*
3795 * might as well read a page, it is fast enough. If we get
3796 * an error, we don't need to return it. cifs_write_end will
3797 * do a sync write instead since PG_uptodate isn't set.
3798 */
3799 cifs_readpage_worker(file, page, &page_start);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003800 put_page(page);
Sachin Prabhu466bd312013-09-13 14:11:57 +01003801 oncethru = 1;
3802 goto start;
Steve French8a236262007-03-06 00:31:00 +00003803 } else {
3804		/* we could try using another file handle if there is one,
3805		   but how would we lock it to prevent a close of that handle
3806		   from racing with this read? In any case,
Nick Piggind9414772008-09-24 11:32:59 -04003807		   this will be written out by write_end, so it is fine */
Steve French8a236262007-03-06 00:31:00 +00003808 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003809out:
3810 *pagep = page;
3811 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003812}
3813
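/*
 * Refuse to release a page that still has private state attached;
 * otherwise let fscache decide whether its reference can be dropped.
 */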
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303814static int cifs_release_page(struct page *page, gfp_t gfp)
3815{
3816 if (PagePrivate(page))
3817 return 0;
3818
3819 return cifs_fscache_release_page(page, gfp);
3820}
3821
Lukas Czernerd47992f2013-05-21 23:17:23 -04003822static void cifs_invalidate_page(struct page *page, unsigned int offset,
3823 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303824{
3825 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3826
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003827 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303828 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3829}
3830
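/*
 * launder_page is called before invalidating a dirty page: write the
 * single page back synchronously (WB_SYNC_ALL over just this page's
 * byte range), then drop it from fscache.
 */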
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003831static int cifs_launder_page(struct page *page)
3832{
3833 int rc = 0;
3834 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003835 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003836 struct writeback_control wbc = {
3837 .sync_mode = WB_SYNC_ALL,
3838 .nr_to_write = 0,
3839 .range_start = range_start,
3840 .range_end = range_end,
3841 };
3842
Joe Perchesf96637b2013-05-04 22:12:25 -05003843 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003844
3845 if (clear_page_dirty_for_io(page))
3846 rc = cifs_writepage_locked(page, &wbc);
3847
3848 cifs_fscache_invalidate_page(page, page->mapping->host);
3849 return rc;
3850}
3851
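/*
 * Oplock break handler, run from a workqueue. The sequence below is:
 * wait for in-flight writers, downgrade the cached oplock state, flush
 * dirty data and drop now-invalid page cache contents, push cached
 * byte-range locks back to the server, and finally acknowledge the
 * break unless it was cancelled.
 */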
Tejun Heo9b646972010-07-20 22:09:02 +02003852void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003853{
3854 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3855 oplock_break);
David Howells2b0143b2015-03-17 22:25:59 +00003856 struct inode *inode = d_inode(cfile->dentry);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003857 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003858 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003859 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003860 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003861
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003862 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
NeilBrown74316202014-07-07 15:16:04 +10003863 TASK_UNINTERRUPTIBLE);
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003864
3865 server->ops->downgrade_oplock(server, cinode,
3866 test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
3867
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003868 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003869 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003870 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3871 inode);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003872 cinode->oplock = 0;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04003873 }
3874
Jeff Layton3bc303c2009-09-21 06:47:50 -04003875 if (inode && S_ISREG(inode->i_mode)) {
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003876 if (CIFS_CACHE_READ(cinode))
Al Viro8737c932009-12-24 06:47:55 -05003877 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003878 else
Al Viro8737c932009-12-24 06:47:55 -05003879 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003880 rc = filemap_fdatawrite(inode->i_mapping);
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003881 if (!CIFS_CACHE_READ(cinode)) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003882 rc = filemap_fdatawait(inode->i_mapping);
3883 mapping_set_error(inode->i_mapping, rc);
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003884 cifs_zap_mapping(inode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003885 }
Joe Perchesf96637b2013-05-04 22:12:25 -05003886 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003887 }
3888
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003889 rc = cifs_push_locks(cfile);
3890 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003891 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003892
Jeff Layton3bc303c2009-09-21 06:47:50 -04003893 /*
3894	 * Releasing a stale oplock after a recent reconnect of the SMB
3895	 * session, using a now incorrect file handle, is not a data
3896	 * integrity issue, but do not bother sending an oplock release if
3897	 * the session is still disconnected: the server already released it.
3898 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003899 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003900 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3901 cinode);
Joe Perchesf96637b2013-05-04 22:12:25 -05003902 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003903 }
Sachin Prabhuc11f1df2014-03-11 16:11:47 +00003904 cifs_done_oplock_break(cinode);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003905}
3906
Steve Frenchdca69282013-11-11 16:42:37 -06003907/*
3908 * The presence of cifs_direct_io() in the address space ops vector
3908 * allows open() with O_DIRECT, which would have failed otherwise.
3909 *
3910 * In the non-cached mode (mount with cache=none), we shunt off direct
3911 * read and write requests, so this method should never be called.
3913 *
3914 * Direct IO is not yet supported in the cached mode.
3915 */
3916static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07003917cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06003918{
3919 /*
3920 * FIXME
3921	 * Eventually we need to support direct IO for non-forcedirectio mounts.
3922 */
3923 return -EINVAL;
3924}
3925
3926
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003927const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 .readpage = cifs_readpage,
3929 .readpages = cifs_readpages,
3930 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003931 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003932 .write_begin = cifs_write_begin,
3933 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303935 .releasepage = cifs_release_page,
Steve Frenchdca69282013-11-11 16:42:37 -06003936 .direct_IO = cifs_direct_io,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303937 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003938 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003940
3941/*
3942 * cifs_readpages requires the server to support a buffer large enough to
3943 * contain the header plus one complete page of data. Otherwise, we need
3944 * to leave cifs_readpages out of the address space operations.
3945 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003946const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003947 .readpage = cifs_readpage,
3948 .writepage = cifs_writepage,
3949 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003950 .write_begin = cifs_write_begin,
3951 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003952 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303953 .releasepage = cifs_release_page,
3954 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003955 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003956};