blob: 49eeed25f200942cf43ba1c8c48501b8de58e18c [file] [log] [blame]
/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
Jeff Layton608712f2010-10-15 15:33:56 -0400113int cifs_posix_open(char *full_path, struct inode **pinode,
114 struct super_block *sb, int mode, unsigned int f_flags,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400115 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
Jeff Layton608712f2010-10-15 15:33:56 -0400116{
117 int rc;
118 FILE_UNIX_BASIC_INFO *presp_data;
119 __u32 posix_flags = 0;
120 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
121 struct cifs_fattr fattr;
122 struct tcon_link *tlink;
Steve French96daf2b2011-05-27 04:34:02 +0000123 struct cifs_tcon *tcon;
Jeff Layton608712f2010-10-15 15:33:56 -0400124
Joe Perchesf96637b2013-05-04 22:12:25 -0500125 cifs_dbg(FYI, "posix open %s\n", full_path);
Jeff Layton608712f2010-10-15 15:33:56 -0400126
127 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
128 if (presp_data == NULL)
129 return -ENOMEM;
130
131 tlink = cifs_sb_tlink(cifs_sb);
132 if (IS_ERR(tlink)) {
133 rc = PTR_ERR(tlink);
134 goto posix_open_ret;
135 }
136
137 tcon = tlink_tcon(tlink);
138 mode &= ~current_umask();
139
140 posix_flags = cifs_posix_convert_flags(f_flags);
141 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
142 poplock, full_path, cifs_sb->local_nls,
Nakajima Akirabc8ebdc42015-02-13 15:35:58 +0900143 cifs_remap(cifs_sb));
Jeff Layton608712f2010-10-15 15:33:56 -0400144 cifs_put_tlink(tlink);
145
146 if (rc)
147 goto posix_open_ret;
148
149 if (presp_data->Type == cpu_to_le32(-1))
150 goto posix_open_ret; /* open ok, caller does qpathinfo */
151
152 if (!pinode)
153 goto posix_open_ret; /* caller does not need info */
154
155 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
156
157 /* get new inode and set it up */
158 if (*pinode == NULL) {
159 cifs_fill_uniqueid(sb, &fattr);
160 *pinode = cifs_iget(sb, &fattr);
161 if (!*pinode) {
162 rc = -ENOMEM;
163 goto posix_open_ret;
164 }
165 } else {
166 cifs_fattr_to_inode(*pinode, &fattr);
167 }
168
169posix_open_ret:
170 kfree(presp_data);
171 return rc;
172}
173
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300174static int
175cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700176 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
177 struct cifs_fid *fid, unsigned int xid)
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300178{
179 int rc;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700180 int desired_access;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300181 int disposition;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500182 int create_options = CREATE_NOT_DIR;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300183 FILE_ALL_INFO *buf;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700184 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400185 struct cifs_open_parms oparms;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300186
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700187 if (!server->ops->open)
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700188 return -ENOSYS;
189
190 desired_access = cifs_convert_flags(f_flags);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300191
192/*********************************************************************
193 * open flag mapping table:
194 *
195 * POSIX Flag CIFS Disposition
196 * ---------- ----------------
197 * O_CREAT FILE_OPEN_IF
198 * O_CREAT | O_EXCL FILE_CREATE
199 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
200 * O_TRUNC FILE_OVERWRITE
201 * none of the above FILE_OPEN
202 *
203 * Note that there is not a direct match between disposition
204 * FILE_SUPERSEDE (ie create whether or not file exists although
205 * O_CREAT | O_TRUNC is similar but truncates the existing
206 * file rather than creating a new file as FILE_SUPERSEDE does
207 * (which uses the attributes / metadata passed in on open call)
208 *?
209 *? O_SYNC is a reasonable match to CIFS writethrough flag
210 *? and the read write flags match reasonably. O_LARGEFILE
211 *? is irrelevant because largefile support is always used
212 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
213 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
214 *********************************************************************/
215
216 disposition = cifs_get_disposition(f_flags);
217
218 /* BB pass O_SYNC flag through on file attributes .. BB */
219
220 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
221 if (!buf)
222 return -ENOMEM;
223
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500224 if (backup_cred(cifs_sb))
225 create_options |= CREATE_OPEN_BACKUP_INTENT;
226
Steve French18a89a12017-09-22 01:40:27 -0500227 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
228 if (f_flags & O_SYNC)
229 create_options |= CREATE_WRITE_THROUGH;
230
231 if (f_flags & O_DIRECT)
232 create_options |= CREATE_NO_BUFFER;
233
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400234 oparms.tcon = tcon;
235 oparms.cifs_sb = cifs_sb;
236 oparms.desired_access = desired_access;
237 oparms.create_options = create_options;
238 oparms.disposition = disposition;
239 oparms.path = full_path;
240 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400241 oparms.reconnect = false;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400242
243 rc = server->ops->open(xid, &oparms, oplock, buf);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300244
245 if (rc)
246 goto out;
247
248 if (tcon->unix_ext)
249 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
250 xid);
251 else
252 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
Steve French42eacf92014-02-10 14:08:16 -0600253 xid, fid);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300254
255out:
256 kfree(buf);
257 return rc;
258}
259
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400260static bool
261cifs_has_mand_locks(struct cifsInodeInfo *cinode)
262{
263 struct cifs_fid_locks *cur;
264 bool has_locks = false;
265
266 down_read(&cinode->lock_sem);
267 list_for_each_entry(cur, &cinode->llist, llist) {
268 if (!list_empty(&cur->locks)) {
269 has_locks = true;
270 break;
271 }
272 }
273 up_read(&cinode->lock_sem);
274 return has_locks;
275}
276
/*
 * Allocate and initialize the per-open-file private data (cifsFileInfo)
 * for @file, link it onto the tcon and inode open-file lists, and hand
 * the server-assigned fid/oplock to the protocol layer via set_fid.
 *
 * Returns the new cifsFileInfo (also stored in file->private_data), or
 * NULL on allocation failure.  The returned structure starts with a
 * refcount of 1; released via cifsFileInfo_put().
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-fid byte-range-lock list, attached to the inode's llist */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	/* initial reference; dropped by cifsFileInfo_put() */
	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/* pin the superblock while this open file exists */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	/* a lease break may have arrived while the open was in flight */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	/*
	 * Cleared here, then re-checked after the unlock below; presumably
	 * set_fid may set it under the lock — confirm against the protocol
	 * ops implementations.
	 */
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
351
/*
 * Take an additional reference on the file private data, acquiring the
 * per-file spinlock around the locked variant.  Returns its argument.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
360
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	/* drop one reference; bail out if others remain */
	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	/* stop any queued oplock-break work before closing the handle */
	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		/* NOTE(review): shadows the outer 'server' local above */
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	/* a cancelled break still needs its completion accounted for */
	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	/* drop the references taken in cifs_new_fileinfo() */
	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
450
/*
 * VFS ->open() for regular files.  Tries the POSIX-extension open first
 * (when the server advertises it), falls back to the regular NT open,
 * then builds the per-open cifsFileInfo.  A pending-open record is kept
 * registered across the open so a concurrent lease break is not missed.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* O_DIRECT in strict-cache mode uses the uncached file operations */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims the capability but rejects the call:
			   disable posix opens for this tcon from now on */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* register before the open so a racing lease break is not lost */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server open and the pending-open registration */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
577
/* Forward declaration; defined later in this file. */
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* nested annotation avoids a false-positive lockdep report here */
	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	/* POSIX (unix extension) locks if supported, else mandatory locks */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
609
/*
 * Reopen @cfile's server handle after it has been marked invalid (typically
 * after a reconnect).  If @can_flush is true, write back dirty pages and
 * refresh the inode metadata from the server once the reopen succeeds.
 * Returns 0 on success (including when the handle is already valid again)
 * or a negative error code.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* another thread already revalidated the handle - nothing to do */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
767
768int cifs_close(struct inode *inode, struct file *file)
769{
Jeff Layton77970692011-04-05 16:23:47 -0700770 if (file->private_data != NULL) {
771 cifsFileInfo_put(file->private_data);
772 file->private_data = NULL;
773 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774
Steve Frenchcdff08e2010-10-21 22:46:14 +0000775 /* return code from the ->release op is always ignored */
776 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777}
778
Steve French52ace1e2016-09-22 19:23:56 -0500779void
780cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
781{
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700782 struct cifsFileInfo *open_file;
Steve French52ace1e2016-09-22 19:23:56 -0500783 struct list_head *tmp;
784 struct list_head *tmp1;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700785 struct list_head tmp_list;
786
Pavel Shilovsky46890ff2016-11-29 11:31:23 -0800787 if (!tcon->use_persistent || !tcon->need_reopen_files)
788 return;
789
790 tcon->need_reopen_files = false;
791
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700792 cifs_dbg(FYI, "Reopen persistent handles");
793 INIT_LIST_HEAD(&tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500794
795 /* list all files open on tree connection, reopen resilient handles */
796 spin_lock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700797 list_for_each(tmp, &tcon->openFileList) {
Steve French52ace1e2016-09-22 19:23:56 -0500798 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700799 if (!open_file->invalidHandle)
800 continue;
801 cifsFileInfo_get(open_file);
802 list_add_tail(&open_file->rlist, &tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500803 }
804 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700805
806 list_for_each_safe(tmp, tmp1, &tmp_list) {
807 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
Pavel Shilovsky46890ff2016-11-29 11:31:23 -0800808 if (cifs_reopen_file(open_file, false /* do not flush */))
809 tcon->need_reopen_files = true;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700810 list_del_init(&open_file->rlist);
811 cifsFileInfo_put(open_file);
812 }
Steve French52ace1e2016-09-22 19:23:56 -0500813}
814
/*
 * ->release for directories: close the server-side search handle if it is
 * still live, free the cached search response buffer, drop the tlink
 * reference and free the private data.  Always returns 0; a failure to
 * close the search handle on the server is logged and ignored.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	/* file_info_lock guards invalidHandle; mark it before the server call */
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		/* small and regular SMB buffers come from different pools */
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
865
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400866static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300867cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000868{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400869 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000870 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400871 if (!lock)
872 return lock;
873 lock->offset = offset;
874 lock->length = length;
875 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400876 lock->pid = current->tgid;
877 INIT_LIST_HEAD(&lock->blist);
878 init_waitqueue_head(&lock->block_q);
879 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400880}
881
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700882void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400883cifs_del_lock_waiters(struct cifsLockInfo *lock)
884{
885 struct cifsLockInfo *li, *tmp;
886 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
887 list_del_init(&li->blist);
888 wake_up(&li->block_q);
889 }
890}
891
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400892#define CIFS_LOCK_OP 0
893#define CIFS_READ_OP 1
894#define CIFS_WRITE_OP 2
895
896/* @rw_check : 0 - no op, 1 - read, 2 - write */
/*
 * Scan the brlocks held through one fid (@fdlocks) for a lock that overlaps
 * [@offset, @offset + @length) and conflicts with a lock of @type requested
 * through @cfile.  Locks held by the same process through the same fid are
 * normally not conflicts, except that an existing shared lock still blocks
 * a write (CIFS_WRITE_OP) through that fid; two shared locks never conflict
 * with each other.  On conflict, store the culprit in *@conf_lock (when
 * non-NULL) and return true.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip locks that do not overlap the requested range */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
927
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700928bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300929cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700930 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400931 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400932{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300933 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700934 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +0000935 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300936
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700937 list_for_each_entry(cur, &cinode->llist, llist) {
938 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700939 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300940 if (rc)
941 break;
942 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300943
944 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400945}
946
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock back through @flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;		/* not cached locally - must ask the server */
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
984
/* Unconditionally attach @lock to the list of brlocks held through @cfile. */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
993
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching is allowed - take the lock locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's waiter list and
		 * sleep until that lock is released (our blist entry becomes
		 * empty again), then retry from the top.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - unhook ourselves from the waiter list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1040
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	/* posix_test_lock() may overwrite fl_type - remember what was asked */
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		/* no cached conflict but locks are not cached - ask server */
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
1069
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/* a conflicting lock blocks us - wait for it, then retry */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		/* interrupted - remove ourselves from the blocked list */
		posix_unblock_lock(flock);
	}
	return rc;
}
1102
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001103int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001104cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001105{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001106 unsigned int xid;
1107 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001108 struct cifsLockInfo *li, *tmp;
1109 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001110 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001111 LOCKING_ANDX_RANGE *buf, *cur;
1112 int types[] = {LOCKING_ANDX_LARGE_FILES,
1113 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1114 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001115
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001116 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001117 tcon = tlink_tcon(cfile->tlink);
1118
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001119 /*
1120 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1121 * and check it for zero before using.
1122 */
1123 max_buf = tcon->ses->server->maxBuf;
1124 if (!max_buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001125 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001126 return -EINVAL;
1127 }
1128
1129 max_num = (max_buf - sizeof(struct smb_hdr)) /
1130 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001131 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001132 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001133 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001134 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001135 }
1136
1137 for (i = 0; i < 2; i++) {
1138 cur = buf;
1139 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001140 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001141 if (li->type != types[i])
1142 continue;
1143 cur->Pid = cpu_to_le16(li->pid);
1144 cur->LengthLow = cpu_to_le32((u32)li->length);
1145 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1146 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1147 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1148 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001149 stored_rc = cifs_lockv(xid, tcon,
1150 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001151 (__u8)li->type, 0, num,
1152 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001153 if (stored_rc)
1154 rc = stored_rc;
1155 cur = buf;
1156 num = 0;
1157 } else
1158 cur++;
1159 }
1160
1161 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001162 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001163 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001164 if (stored_rc)
1165 rc = stored_rc;
1166 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001167 }
1168
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001169 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001170 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001171 return rc;
1172}
1173
/*
 * Derive the lock-owner value sent to the server by mixing the VFS owner
 * pointer with cifs_lock_secret (presumably a random per-module secret so
 * kernel pointers are not exposed on the wire - defined elsewhere in cifs).
 */
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
1179
/*
 * Snapshot of one POSIX (fcntl) lock, detached from the VFS file_lock so it
 * can be sent to the server without holding the flc_lock spinlock.
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the caller's local list */
	__u64 offset;		/* start of the locked byte range */
	__u64 length;		/* length of the locked byte range */
	__u32 pid;		/* hashed lock owner, sent as the pid */
	__u16 netfid;		/* SMB file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1188
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001189static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001190cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001191{
David Howells2b0143b2015-03-17 22:25:59 +00001192 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001193 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001194 struct file_lock *flock;
1195 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001196 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001197 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001198 struct list_head locks_to_send, *el;
1199 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001200 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001201
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001202 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001203
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001204 if (!flctx)
1205 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001206
Jeff Laytone084c1b2015-02-16 14:32:03 -05001207 spin_lock(&flctx->flc_lock);
1208 list_for_each(el, &flctx->flc_posix) {
1209 count++;
1210 }
1211 spin_unlock(&flctx->flc_lock);
1212
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001213 INIT_LIST_HEAD(&locks_to_send);
1214
1215 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001216 * Allocating count locks is enough because no FL_POSIX locks can be
1217 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001218 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001219 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001220 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001221 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1222 if (!lck) {
1223 rc = -ENOMEM;
1224 goto err_out;
1225 }
1226 list_add_tail(&lck->llist, &locks_to_send);
1227 }
1228
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001229 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001230 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001231 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001232 if (el == &locks_to_send) {
1233 /*
1234 * The list ended. We don't have enough allocated
1235 * structures - something is really wrong.
1236 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001237 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001238 break;
1239 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001240 length = 1 + flock->fl_end - flock->fl_start;
1241 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1242 type = CIFS_RDLCK;
1243 else
1244 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001245 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001246 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001247 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001248 lck->length = length;
1249 lck->type = type;
1250 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001251 }
Jeff Layton6109c852015-01-16 15:05:57 -05001252 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001253
1254 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001255 int stored_rc;
1256
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001257 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001258 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001259 lck->type, 0);
1260 if (stored_rc)
1261 rc = stored_rc;
1262 list_del(&lck->llist);
1263 kfree(lck);
1264 }
1265
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001266out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001267 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001268 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001269err_out:
1270 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1271 list_del(&lck->llist);
1272 kfree(lck);
1273 }
1274 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001275}
1276
1277static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001278cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001279{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001280 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001281 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001282 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001283 int rc = 0;
1284
1285 /* we are going to update can_cache_brlcks here - need a write access */
1286 down_write(&cinode->lock_sem);
1287 if (!cinode->can_cache_brlcks) {
1288 up_write(&cinode->lock_sem);
1289 return rc;
1290 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001291
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001292 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001293 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1294 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001295 rc = cifs_push_posix_locks(cfile);
1296 else
1297 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001298
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001299 cinode->can_cache_brlcks = false;
1300 up_write(&cinode->lock_sem);
1301 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001302}
1303
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001304static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001305cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001306 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001308 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001309 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001310 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001311 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001312 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001313 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001314 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001316 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001317 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001318 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001319 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001320 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001321 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1322 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001323 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001325 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001326 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001327 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001328 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001329 *lock = 1;
1330 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001331 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001332 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001333 *unlock = 1;
1334 /* Check if unlock includes more than one lock range */
1335 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001336 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001337 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001338 *lock = 1;
1339 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001340 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001341 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001342 *lock = 1;
1343 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001344 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001345 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001346 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001348 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001349}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350
/*
 * Handle F_GETLK: determine whether the lock described by @flock could be
 * set, and if not, return the conflicting lock's info back in @flock.
 *
 * POSIX path (unix extensions): answer from the local lock cache when
 * possible, otherwise query the server with a posix lock request.
 * Mandatory path: there is no wire "test" operation, so we probe by
 * actually requesting the lock and, if granted, unlocking it again.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* try to satisfy the query locally first */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* check against locks we already cache for this file */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe: try to acquire the requested lock on the server */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* lock was granted, so no conflict - release it again */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	/* a shared probe failed: only an exclusive lock can block it */
	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed - retry as shared to classify the holder */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		/* shared probe succeeded, so the conflict is a read lock */
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1419
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001420void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001421cifs_move_llist(struct list_head *source, struct list_head *dest)
1422{
1423 struct list_head *li, *tmp;
1424 list_for_each_safe(li, tmp, source)
1425 list_move(li, dest);
1426}
1427
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001428void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001429cifs_free_llist(struct list_head *llist)
1430{
1431 struct cifsLockInfo *li, *tmp;
1432 list_for_each_entry_safe(li, tmp, llist, llist) {
1433 cifs_del_lock_waiters(li);
1434 list_del(&li->llist);
1435 kfree(li);
1436 }
1437}
1438
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001439int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001440cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1441 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001442{
1443 int rc = 0, stored_rc;
1444 int types[] = {LOCKING_ANDX_LARGE_FILES,
1445 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1446 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001447 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001448 LOCKING_ANDX_RANGE *buf, *cur;
1449 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
David Howells2b0143b2015-03-17 22:25:59 +00001450 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001451 struct cifsLockInfo *li, *tmp;
1452 __u64 length = 1 + flock->fl_end - flock->fl_start;
1453 struct list_head tmp_llist;
1454
1455 INIT_LIST_HEAD(&tmp_llist);
1456
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001457 /*
1458 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1459 * and check it for zero before using.
1460 */
1461 max_buf = tcon->ses->server->maxBuf;
1462 if (!max_buf)
1463 return -EINVAL;
1464
1465 max_num = (max_buf - sizeof(struct smb_hdr)) /
1466 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001467 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001468 if (!buf)
1469 return -ENOMEM;
1470
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001471 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001472 for (i = 0; i < 2; i++) {
1473 cur = buf;
1474 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001475 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001476 if (flock->fl_start > li->offset ||
1477 (flock->fl_start + length) <
1478 (li->offset + li->length))
1479 continue;
1480 if (current->tgid != li->pid)
1481 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001482 if (types[i] != li->type)
1483 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001484 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001485 /*
1486 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001487 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001488 */
1489 list_del(&li->llist);
1490 cifs_del_lock_waiters(li);
1491 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001492 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001493 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001494 cur->Pid = cpu_to_le16(li->pid);
1495 cur->LengthLow = cpu_to_le32((u32)li->length);
1496 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1497 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1498 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1499 /*
1500 * We need to save a lock here to let us add it again to
1501 * the file's list if the unlock range request fails on
1502 * the server.
1503 */
1504 list_move(&li->llist, &tmp_llist);
1505 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001506 stored_rc = cifs_lockv(xid, tcon,
1507 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001508 li->type, num, 0, buf);
1509 if (stored_rc) {
1510 /*
1511 * We failed on the unlock range
1512 * request - add all locks from the tmp
1513 * list to the head of the file's list.
1514 */
1515 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001516 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001517 rc = stored_rc;
1518 } else
1519 /*
1520 * The unlock range request succeed -
1521 * free the tmp list.
1522 */
1523 cifs_free_llist(&tmp_llist);
1524 cur = buf;
1525 num = 0;
1526 } else
1527 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001528 }
1529 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001530 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001531 types[i], num, 0, buf);
1532 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001533 cifs_move_llist(&tmp_llist,
1534 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001535 rc = stored_rc;
1536 } else
1537 cifs_free_llist(&tmp_llist);
1538 }
1539 }
1540
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001541 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001542 kfree(buf);
1543 return rc;
1544}
1545
/*
 * Handle F_SETLK/F_SETLKW: set or clear a byte-range lock on the server
 * and, on success, record the result with the VFS lock machinery.
 *
 * POSIX path (unix extensions): try cifs_posix_lock_set() locally first,
 * then send the request over the wire.  Mandatory path: allocate a
 * cifsLockInfo, check/queue it against cached locks, then ask the server.
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		/* an unlock request overrides the read/write lock type */
		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		/* NOTE: shadows the int 'lock' parameter from here on */
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/*
		 * rc < 0: error, we still own 'lock' and must free it;
		 * rc == 0: handled from the local cache, skip the server
		 * (presumably cifs_lock_add_if consumed 'lock' - it is not
		 * freed here; TODO confirm against its definition);
		 * rc > 0: lock must be sent to the server.
		 */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			/* server refused - discard the unused lock struct */
			kfree(lock);
			return rc;
		}

		/* server granted the lock - record it in the local cache */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	/* mirror a successful server operation into the VFS lock state */
	if (flock->fl_flags & FL_POSIX && !rc)
		rc = locks_lock_file_wait(file, flock);
	return rc;
}
1626
1627int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1628{
1629 int rc, xid;
1630 int lock = 0, unlock = 0;
1631 bool wait_flag = false;
1632 bool posix_lck = false;
1633 struct cifs_sb_info *cifs_sb;
1634 struct cifs_tcon *tcon;
1635 struct cifsInodeInfo *cinode;
1636 struct cifsFileInfo *cfile;
1637 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001638 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001639
1640 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001641 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001642
Joe Perchesf96637b2013-05-04 22:12:25 -05001643 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1644 cmd, flock->fl_flags, flock->fl_type,
1645 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001646
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001647 cfile = (struct cifsFileInfo *)file->private_data;
1648 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001649
1650 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1651 tcon->ses->server);
1652
Al Viro7119e222014-10-22 00:25:12 -04001653 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001654 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001655 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001656
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001657 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001658 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1659 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1660 posix_lck = true;
1661 /*
1662 * BB add code here to normalize offset and length to account for
1663 * negative length which we can not accept over the wire.
1664 */
1665 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001666 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001667 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001668 return rc;
1669 }
1670
1671 if (!lock && !unlock) {
1672 /*
1673 * if no lock or unlock then nothing to do since we do not
1674 * know what it is
1675 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001676 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001677 return -EOPNOTSUPP;
1678 }
1679
1680 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1681 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001682 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 return rc;
1684}
1685
Jeff Layton597b0272012-03-23 14:40:56 -04001686/*
1687 * update the file size (if needed) after a write. Should be called with
1688 * the inode->i_lock held
1689 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001690void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001691cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1692 unsigned int bytes_written)
1693{
1694 loff_t end_of_write = offset + bytes_written;
1695
1696 if (end_of_write > cifsi->server_eof)
1697 cifsi->server_eof = end_of_write;
1698}
1699
/*
 * Synchronously write @write_size bytes from @write_data to the server
 * at file offset *@offset using the handle in @open_file.
 *
 * Loops until every byte is written, retrying each chunk on -EAGAIN and
 * reopening an invalidated handle as needed.  On success advances
 * *@offset, updates the cached server EOF and the inode size (under
 * i_lock), and returns the number of bytes written; if nothing could be
 * written, returns the error code.  @pid is recorded in the io request.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	/* dialect-specific sync write op is mandatory for this path */
	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* chunk size is capped by the server's retry size */
			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
					&io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* partial success: report bytes written so far;
			   nothing written at all: report the error */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* chunk written - advance EOF cache and offset */
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* extend the in-core inode size if the write went past it */
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1784
/*
 * Find an open handle on @cifs_inode that is usable for reading and take
 * a reference on it, or return NULL if none exists.  When @fsuid_only is
 * set (honored only on multiuser mounts), only handles opened by the
 * current fsuid are considered.  The caller is expected to drop the
 * acquired reference when done.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001819
/*
 * Find an open file handle on @cifs_inode usable for writing.
 *
 * Search order:
 *   1. valid writable handles opened by the current task (same tgid);
 *   2. any valid writable handle (second pass, any_available == true);
 *   3. an invalidated writable handle, which we then try to reopen.
 *
 * @fsuid_only: when true, only match handles whose opener's fsuid equals
 *		current_fsuid(); this filter is applied only on multiuser
 *		mounts (cleared below otherwise).
 *
 * Returns a referenced cifsFileInfo — caller must drop it with
 * cifsFileInfo_put() — or NULL if none is found. Reopen attempts are
 * bounded by MAX_REOPEN_ATT via the refind counter.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	the VFS or MM) should not happen but we had reports of on oops (due to
	it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	/* open_file_lock guards the inode's openFileList while we walk it */
	spin_lock(&tcon->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* first pass: only consider handles opened by this task */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				/* remember first invalid handle as fallback */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		/* pin the handle before dropping the lock to reopen it */
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		/* reopen may block; must not hold the spinlock here */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			/*
			 * Reopen failed: push this handle to the list tail so
			 * the retry pass considers other handles first, drop
			 * our reference, and search again (bounded by refind).
			 */
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}
1902
/*
 * Write the byte range [from, to) of @page back to the server using any
 * available writable handle for the inode (helper for cifs_writepage_locked).
 *
 * Returns 0 on success (or when racing with truncate makes the write moot),
 * a negative error from cifs_write(), -EIO on bad range or when no writable
 * handle exists, or -EFAULT on a NULL mapping.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	/* file offset of the first byte to write; kmap to address the data */
	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* sanity check the range; every exit below must kunmap the page */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1956
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001957static struct cifs_writedata *
1958wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1959 pgoff_t end, pgoff_t *index,
1960 unsigned int *found_pages)
1961{
1962 unsigned int nr_pages;
1963 struct page **pages;
1964 struct cifs_writedata *wdata;
1965
1966 wdata = cifs_writedata_alloc((unsigned int)tofind,
1967 cifs_writev_complete);
1968 if (!wdata)
1969 return NULL;
1970
1971 /*
1972 * find_get_pages_tag seems to return a max of 256 on each
1973 * iteration, so we must call it several times in order to
1974 * fill the array or the wsize is effectively limited to
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03001975 * 256 * PAGE_SIZE.
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001976 */
1977 *found_pages = 0;
1978 pages = wdata->pages;
1979 do {
1980 nr_pages = find_get_pages_tag(mapping, index,
1981 PAGECACHE_TAG_DIRTY, tofind,
1982 pages);
1983 *found_pages += nr_pages;
1984 tofind -= nr_pages;
1985 pages += nr_pages;
1986 } while (nr_pages && tofind && *index <= end);
1987
1988 return wdata;
1989}
1990
/*
 * From the @found_pages candidate pages in @wdata, lock and claim a
 * consecutive run of pages suitable for a single write request: clear
 * their dirty bits, set writeback, and stop at the first page that is
 * non-consecutive, beyond @end/EOF, already under writeback, or cannot be
 * locked. Unused candidates are released. Returns the number of pages
 * actually prepared (may be 0); updates *@index, *@next and *@done for
 * the caller's scan loop.
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		/* block for the first page; never block mid-run */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		/* page fully past EOF: nothing to send for it */
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2070
/*
 * Fill in the remaining cifs_writedata fields (offset/length/handle) and
 * submit the async write for @nr_pages already-prepared pages. The last
 * page may be partial: tailsz is clamped to the bytes below EOF. All
 * pages are unlocked before returning, whether or not the send succeeded
 * (they remain under writeback; completion or the caller's error path
 * ends that state). Returns 0 or a negative error (-EBADF if no writable
 * handle could be found).
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* bytes of the final page that are below EOF (may be < PAGE_SIZE) */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	/* drop any handle from a previous attempt before picking a new one */
	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
2105
/*
 * Write back dirty pages of @mapping per @wbc, batching consecutive dirty
 * pages into large async write requests sized by the negotiated wsize and
 * the server's MTU credits. Falls back to generic_writepages() (one page
 * at a time) when wsize is smaller than a page. On -EAGAIN pages are
 * redirtied and, for WB_SYNC_ALL, the batch is retried from saved_index;
 * other errors mark the pages and the mapping with the error.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		/* reserve server credits; wsize may be trimmed to match */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		/* wdata now owns the credits; released on completion/error */
		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* data-integrity sync must not skip an -EAGAIN batch */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
/*
 * Write a single locked page back to the server via cifs_partialpagewrite.
 * The page stays locked on return (caller unlocks). -EAGAIN is retried
 * forever for WB_SYNC_ALL, otherwise the page is redirtied for a later
 * pass; other errors set PageError.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	/* extra reference so the page survives until we finish below */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2257
/* ->writepage: do the locked writeout, then release the VFS's page lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}
2264
/*
 * ->write_end: commit @copied bytes written into @page at @pos. An
 * up-to-date page is simply marked dirty for later writeback; otherwise
 * the bytes are written through synchronously with cifs_write(). Extends
 * i_size under i_lock when the write passed the old EOF, then unlocks and
 * releases the page reference taken by write_begin. Returns bytes
 * committed or a negative error.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* which pid the server sees: forwarded opener pid, or this task */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	/* PageChecked was set by write_begin: page was not read first */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2325
/*
 * fsync for strict cache mode: flush dirty pagecache in [start, end],
 * drop the cached pages when we no longer hold a read oplock/lease
 * (CIFS_CACHE_READ false) so later reads refetch from the server, and
 * send an SMB flush unless the mount disables it (NOSSYNC). Errors from
 * the invalidate phase are logged but deliberately ignored.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2368
Josef Bacik02c24a82011-07-16 20:44:56 -04002369int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002370{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002371 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002372 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002373 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002374 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002375 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002376 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002377 struct inode *inode = file->f_mapping->host;
2378
2379 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2380 if (rc)
2381 return rc;
Al Viro59551022016-01-22 15:40:57 -05002382 inode_lock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002383
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002384 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002385
Al Viro35c265e2014-08-19 20:25:34 -04002386 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2387 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002388
2389 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002390 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2391 server = tcon->ses->server;
2392 if (server->ops->flush)
2393 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2394 else
2395 rc = -ENOSYS;
2396 }
Steve Frenchb298f222009-02-21 21:17:43 +00002397
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002398 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002399 inode_unlock(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 return rc;
2401}
2402
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403/*
2404 * As file closes, flush all cached write data for this inode checking
2405 * for write behind errors.
2406 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002407int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408{
Al Viro496ad9a2013-01-23 17:07:38 -05002409 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 int rc = 0;
2411
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002412 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002413 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002414
Joe Perchesf96637b2013-05-04 22:12:25 -05002415 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416
2417 return rc;
2418}
2419
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002420static int
2421cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2422{
2423 int rc = 0;
2424 unsigned long i;
2425
2426 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002427 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002428 if (!pages[i]) {
2429 /*
2430 * save number of pages we have already allocated and
2431 * return with ENOMEM error
2432 */
2433 num_pages = i;
2434 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002435 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002436 }
2437 }
2438
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002439 if (rc) {
2440 for (i = 0; i < num_pages; i++)
2441 put_page(pages[i]);
2442 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002443 return rc;
2444}
2445
2446static inline
2447size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2448{
2449 size_t num_pages;
2450 size_t clen;
2451
2452 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002453 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002454
2455 if (cur_len)
2456 *cur_len = clen;
2457
2458 return num_pages;
2459}
2460
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002461static void
Steve French4a5c80d2014-02-07 20:45:12 -06002462cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002463{
2464 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002465 struct cifs_writedata *wdata = container_of(refcount,
2466 struct cifs_writedata, refcount);
2467
2468 for (i = 0; i < wdata->nr_pages; i++)
2469 put_page(wdata->pages[i]);
2470 cifs_writedata_release(refcount);
2471}
2472
/*
 * Work item run when an uncached async write finishes: advance the
 * cached server EOF (and i_size if it grew) under i_lock, wake anyone
 * waiting on wdata->done, then drop the submission's reference on the
 * writedata (completion must be signalled before the kref may free it).
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2491
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002492static int
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002493wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2494 size_t *len, unsigned long *num_pages)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002495{
Pavel Shilovsky66386c02014-06-20 15:48:40 +04002496 size_t save_len, copied, bytes, cur_len = *len;
2497 unsigned long i, nr_pages = *num_pages;
2498
2499 save_len = cur_len;
2500 for (i = 0; i < nr_pages; i++) {
2501 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2502 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2503 cur_len -= copied;
2504 /*
2505 * If we didn't copy as much as we expected, then that
2506 * may mean we trod into an unmapped area. Stop copying
2507 * at that point. On the next pass through the big
2508 * loop, we'll likely end up getting a zero-length
2509 * write and bailing out of it.
2510 */
2511 if (copied < bytes)
2512 break;
2513 }
2514 cur_len = save_len - cur_len;
2515 *len = cur_len;
2516
2517 /*
2518 * If we have no data to send, then that probably means that
2519 * the copy above failed altogether. That's most likely because
2520 * the address in the iovec was bogus. Return -EFAULT and let
2521 * the caller free anything we allocated and bail out.
2522 */
2523 if (!cur_len)
2524 return -EFAULT;
2525
2526 /*
2527 * i + 1 now represents the number of pages we actually used in
2528 * the copy phase above.
2529 */
2530 *num_pages = i + 1;
2531 return 0;
2532}
2533
/*
 * cifs_write_from_iter - split an uncached write into async requests
 * @offset: file offset to start writing at
 * @len: number of bytes to write
 * @from: source data iterator (advanced as data is consumed)
 * @open_file: open file handle to write through
 * @cifs_sb: superblock info (supplies wsize and mount flags)
 * @wdata_list: list that successfully sent cifs_writedata requests are
 *		added to; the caller waits on their completions
 *
 * Each loop iteration obtains send credits, copies up to wsize bytes from
 * @from into freshly allocated pages and issues an async write.  On a
 * retryable (-EAGAIN) send failure the iterator is rewound to the failed
 * chunk and the chunk is resent.  Returns 0 or a negative errno.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	/* snapshots used to rewind the iterator on -EAGAIN resends */
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	/* forward the opener's pid if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		/* may block until the server grants enough credits */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		/* copy user data into the pages; may shorten cur_len */
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		/* last page may be partially filled */
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		/* reopen a stale handle before sending, if necessary */
		if (!wdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(wdata->cfile, false)))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/* rewind iterator to this chunk and retry */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2629
/*
 * cifs_user_writev - uncached (O_DIRECT-style) write entry point
 * @iocb: kiocb describing the target file and starting position
 * @from: source data iterator
 *
 * Splits the range into wsize-limited async writes via
 * cifs_write_from_iter(), then waits for the replies in order of
 * increasing offset, resending chunks that fail with -EAGAIN.  Returns
 * the number of bytes written, or a negative errno if nothing was.
 */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	/* pristine copy of the iterator, used to rebuild failed chunks */
	struct iov_iter saved_from = *from;
	int rc;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
				  open_file, cifs_sb, &wdata_list);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = saved_from;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				/* reposition iterator at the failed chunk */
				iov_iter_advance(&tmp_from,
						 wdata->offset - iocb->ki_pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						open_file, cifs_sb, &tmp_list);

				list_splice(&tmp_list, &wdata_list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	/* cached pages are now stale; force a reread from the server */
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
	cifs_stats_bytes_written(tcon, total_written);
	return total_written;
}
2723
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002724static ssize_t
Al Viro3dae8752014-04-03 12:05:17 -04002725cifs_writev(struct kiocb *iocb, struct iov_iter *from)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002726{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002727 struct file *file = iocb->ki_filp;
2728 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2729 struct inode *inode = file->f_mapping->host;
2730 struct cifsInodeInfo *cinode = CIFS_I(inode);
2731 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
Al Viro5f380c72015-04-07 11:28:12 -04002732 ssize_t rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002733
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002734 /*
2735 * We need to hold the sem to be sure nobody modifies lock list
2736 * with a brlock that prevents writing.
2737 */
2738 down_read(&cinode->lock_sem);
Al Viro59551022016-01-22 15:40:57 -05002739 inode_lock(inode);
Al Viro5f380c72015-04-07 11:28:12 -04002740
Al Viro3309dd02015-04-09 12:55:47 -04002741 rc = generic_write_checks(iocb, from);
2742 if (rc <= 0)
Al Viro5f380c72015-04-07 11:28:12 -04002743 goto out;
2744
Al Viro5f380c72015-04-07 11:28:12 -04002745 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002746 server->vals->exclusive_lock_type, NULL,
Al Viro5f380c72015-04-07 11:28:12 -04002747 CIFS_WRITE_OP))
Al Viro3dae8752014-04-03 12:05:17 -04002748 rc = __generic_file_write_iter(iocb, from);
Al Viro5f380c72015-04-07 11:28:12 -04002749 else
2750 rc = -EACCES;
2751out:
Al Viro59551022016-01-22 15:40:57 -05002752 inode_unlock(inode);
Al Viro19dfc1f2014-04-03 10:27:17 -04002753
Christoph Hellwige2592212016-04-07 08:52:01 -07002754 if (rc > 0)
2755 rc = generic_write_sync(iocb, rc);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002756 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002757 return rc;
2758}
2759
/*
 * cifs_strict_writev - strict-cache write entry point
 * @iocb: kiocb describing the target file and position
 * @from: source data iterator
 *
 * When we hold a write oplock/lease the data can go through the page
 * cache; otherwise it is written directly to the server.  Returns bytes
 * written or a negative errno.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* blocks while an oplock break is in progress; nonzero = error */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			/* server does POSIX locks: plain cached write is OK */
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		/* otherwise check mandatory brlocks before cached write */
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2807
Jeff Layton0471ca32012-05-16 07:13:16 -04002808static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002809cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002810{
2811 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002812
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002813 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2814 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002815 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002816 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002817 INIT_LIST_HEAD(&rdata->list);
2818 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002819 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002820 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002821
Jeff Layton0471ca32012-05-16 07:13:16 -04002822 return rdata;
2823}
2824
/*
 * Final kref release for a cifs_readdata: drop the file-handle reference
 * taken when the request was built, then free the structure.  Any page
 * references must already have been dropped by the caller's path (see
 * cifs_uncached_readdata_release()).
 */
void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}
2836
Jeff Layton2a1bb132012-05-16 07:13:17 -04002837static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002838cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002839{
2840 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002841 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002842 unsigned int i;
2843
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002844 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002845 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2846 if (!page) {
2847 rc = -ENOMEM;
2848 break;
2849 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002850 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002851 }
2852
2853 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002854 for (i = 0; i < nr_pages; i++) {
2855 put_page(rdata->pages[i]);
2856 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002857 }
2858 }
2859 return rc;
2860}
2861
/*
 * Final kref release for an uncached read request: drop every remaining
 * page reference, then defer to cifs_readdata_release() which puts the
 * file handle and frees the structure.
 */
static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}
2875
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 * Returns 0 if all of rdata->got_bytes was copied, -EFAULT otherwise.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written;

		if (unlikely(iter->type & ITER_PIPE)) {
			/*
			 * NOTE(review): pipes are special-cased through a
			 * kernel mapping rather than copy_page_to_iter() —
			 * presumably to avoid the pipe page-stealing path;
			 * confirm against iov_iter pipe semantics.
			 */
			void *addr = kmap_atomic(page);

			written = copy_to_iter(addr, copy, iter);
			kunmap_atomic(addr);
		} else
			written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		/* short copy with room left in iter: give up */
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	return remaining ? -EFAULT : 0;
}
2909
/*
 * Work handler run when an uncached read request finishes: wake the
 * waiter in cifs_user_readv() and drop the work item's reference.
 */
static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}
2919
/*
 * Receive the payload of an uncached read response from the socket into
 * rdata's pages.  @len is the number of payload bytes the server sent;
 * pages beyond the data are released and a partial tail page is
 * zero-filled past the data.  Returns the number of bytes received, or
 * a negative errno if nothing useful was received.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;

		/* len is unsigned, so this really tests len == 0 */
		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}
		n = len;
		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			n = PAGE_SIZE;
			len -= n;
		} else {
			/* partial tail page: zero everything past the data */
			zero_user(page, len, PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		}
		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* partial data before a connection drop still counts as progress */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
2961
/*
 * cifs_send_async_read - split an uncached read into async requests
 * @offset: file offset to start reading at
 * @len: total number of bytes to read
 * @open_file: open file handle to read through
 * @cifs_sb: superblock info (supplies rsize and mount flags)
 * @rdata_list: list that successfully sent cifs_readdata requests are
 *		added to; the caller waits on their completions
 *
 * Each iteration obtains credits for up to rsize bytes, allocates pages
 * to receive into and issues an async read, retrying on -EAGAIN.
 * Returns 0 or a negative errno from the first hard failure.
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	/* forward the opener's pid if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		/* may block until the server grants enough credits */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->credits = credits;

		/* reopen a stale handle before sending, if necessary */
		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3031
/*
 * cifs_user_readv - uncached (O_DIRECT-style) read entry point
 * @iocb: kiocb describing the source file and starting position
 * @to: destination iterator
 *
 * Sends the whole range as async reads via cifs_send_async_read(), then
 * collects the replies in order of increasing offset, copying each
 * reply's pages into @to and resending the remainder of chunks that
 * fail with -EAGAIN.  Returns bytes read, or a negative errno if
 * nothing was read.
 */
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	len = iov_iter_count(to);
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				/* resend only the part we didn't consume */
				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list);

				list_splice(&tmp_list, &rdata_list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	/* bytes actually copied into the caller's iterator */
	total_read = len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, total_read);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3135
/*
 * cifs_strict_readv - strict-cache read entry point
 * @iocb: kiocb describing the source file and position
 * @to: destination iterator
 *
 * Reads through the page cache only when a read oplock/lease makes the
 * cache trustworthy and no mandatory brlock conflicts with the range;
 * otherwise reads directly from the server.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	/* server does POSIX locking: a plain cached read is always safe */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175
/*
 * Synchronous read helper: pull up to @read_size bytes starting at
 * *@offset from the server into @read_data, looping in rsize-sized
 * chunks.  Advances *@offset past the data read.  Returns the total
 * number of bytes read, or a negative error code if nothing was read.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	/* this path needs a protocol-level synchronous read handler */
	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	/* forward the originating pid to the server when the mount asks */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		/* retry the chunk for as long as the session needs reconnect */
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			/* partial success: report what was already copied */
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): total_read here is the running total
			 * *before* this chunk is added (the loop header adds
			 * bytes_read afterwards), so the per-iteration stats
			 * argument lags by one chunk — confirm this is the
			 * intended accounting.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3266
Jeff Laytonca83ce32011-04-12 09:13:44 -04003267/*
3268 * If the page is mmap'ed into a process' page tables, then we need to make
3269 * sure that it doesn't change while being written back.
3270 */
3271static int
3272cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3273{
3274 struct page *page = vmf->page;
3275
3276 lock_page(page);
3277 return VM_FAULT_LOCKED;
3278}
3279
/*
 * mmap vm_ops for cifs files: faults are served by the generic page
 * cache helpers; write faults go through cifs_page_mkwrite so the page
 * stays locked while it is made writable.
 */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3285
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003286int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3287{
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003288 int xid, rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05003289 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003290
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003291 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003292
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003293 if (!CIFS_CACHE_READ(CIFS_I(inode)))
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003294 rc = cifs_zap_mapping(inode);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003295 if (!rc)
3296 rc = generic_file_mmap(file, vma);
3297 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003298 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003299
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003300 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003301 return rc;
3302}
3303
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3305{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306 int rc, xid;
3307
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003308 xid = get_xid();
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003309
Jeff Laytonabab0952010-02-12 07:44:18 -05003310 rc = cifs_revalidate_file(file);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003311 if (rc)
Joe Perchesf96637b2013-05-04 22:12:25 -05003312 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3313 rc);
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003314 if (!rc)
3315 rc = generic_file_mmap(file, vma);
3316 if (!rc)
Jeff Laytonca83ce32011-04-12 09:13:44 -04003317 vma->vm_ops = &cifs_file_vm_ops;
Matthew Wilcoxee6858f2017-12-15 12:48:32 -08003318
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003319 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320 return rc;
3321}
3322
Jeff Layton0471ca32012-05-16 07:13:16 -04003323static void
3324cifs_readv_complete(struct work_struct *work)
3325{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003326 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003327 struct cifs_readdata *rdata = container_of(work,
3328 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003329
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003330 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003331 for (i = 0; i < rdata->nr_pages; i++) {
3332 struct page *page = rdata->pages[i];
3333
Jeff Layton0471ca32012-05-16 07:13:16 -04003334 lru_cache_add_file(page);
3335
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003336 if (rdata->result == 0 ||
3337 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003338 flush_dcache_page(page);
3339 SetPageUptodate(page);
3340 }
3341
3342 unlock_page(page);
3343
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003344 if (rdata->result == 0 ||
3345 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003346 cifs_readpage_to_fscache(rdata->mapping->host, page);
3347
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003348 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003349
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003350 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003351 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003352 }
Jeff Layton6993f742012-05-16 07:13:17 -04003353 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003354}
3355
/*
 * Receive @len bytes of read data from the server socket into the pages
 * attached to @rdata.  Pages beyond the data are either zero-filled and
 * marked uptodate (when past the server EOF we know about, to paper over
 * i_size lag from in-flight writes) or released from the request.
 * Returns bytes received, or a negative error if nothing useful arrived.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n = PAGE_SIZE;

		if (len >= PAGE_SIZE) {
			/* a full page of data is available for this page */
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len, PAGE_SIZE - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* report partial success unless the connection itself was aborted */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3422
/*
 * Peel a contiguous run of pages off the tail of @page_list, insert them
 * into the page cache locked, and collect them on @tmplist for a single
 * async read.  On success *@offset, *@bytes and *@nr_pages describe the
 * byte range covered.  The run stops at the first index discontinuity,
 * when it would exceed @rsize, or when a page cannot enter the cache.
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	/* page_list is ordered by declining index; start from the tail */
	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		/* cache-insert failure just ends the run; not a hard error */
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
3481
/*
 * ->readpages() implementation: serve what we can from fscache, then
 * batch runs of contiguous pages into rsize-sized async read requests.
 * Pages that fail here are left on @page_list so the VFS falls back to
 * the per-page ->readpage() path.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the originating pid to the server when the mount asks */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		/* may block until the server grants credits for this read */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->credits = credits;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		/* reopen a stale handle first, then issue the async read */
		if (!rdata->cfile->invalidHandle ||
		    !(rc = cifs_reopen_file(rdata->cfile, true)))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our ref; the completion work holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}
3610
/*
 * cifs_readpage_worker must be called with the page pinned
 *
 * Fill @page with data at *@poffset: try fscache first, else do a sync
 * cifs_read and zero any tail beyond what was read.  Unlocks the page
 * on the read path (fscache hit returns with it still handled by the
 * fscache helper).  Returns 0 on success or a negative error.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_time(file_inode(file));

	/* zero the tail when the read came up short of a full page */
	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
3656
3657static int cifs_readpage(struct file *file, struct page *page)
3658{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003659 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003660 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003661 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003663 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664
3665 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303666 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003667 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303668 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669 }
3670
Joe Perchesf96637b2013-05-04 22:12:25 -05003671 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003672 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673
3674 rc = cifs_readpage_worker(file, page, &offset);
3675
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003676 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677 return rc;
3678}
3679
Steve Frencha403a0a2007-07-26 15:54:16 +00003680static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3681{
3682 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05003683 struct cifs_tcon *tcon =
3684 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00003685
Steve French3afca262016-09-22 18:58:16 -05003686 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003687 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003688 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05003689 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003690 return 1;
3691 }
3692 }
Steve French3afca262016-09-22 18:58:16 -05003693 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003694 return 0;
3695}
3696
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697/* We do not want to update the file size from server for inodes
3698 open for write - to avoid races with writepage extending
3699 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003700 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003701 but this is tricky to do without racing with writebehind
3702 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003703bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003704{
Steve Frencha403a0a2007-07-26 15:54:16 +00003705 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003706 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003707
Steve Frencha403a0a2007-07-26 15:54:16 +00003708 if (is_inode_writable(cifsInode)) {
3709 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003710 struct cifs_sb_info *cifs_sb;
3711
Steve Frenchc32a0b62006-01-12 14:41:28 -08003712 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003713 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003714 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003715 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003716 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003717 }
3718
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003719 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003720 return true;
Steve French7ba52632007-02-08 18:14:13 +00003721
Steve French4b18f2a2008-04-29 00:06:05 +00003722 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003723 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003724 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725}
3726
/*
 * ->write_begin(): return a locked, pinned page ready to receive @len
 * bytes at @pos.  The page is pre-read (once) unless the write covers
 * the whole page or a read-caching oplock lets us zero-fill instead.
 * A short write to a page that is still not uptodate is handled later
 * by cifs_write_end falling back to a sync write.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3803
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303804static int cifs_release_page(struct page *page, gfp_t gfp)
3805{
3806 if (PagePrivate(page))
3807 return 0;
3808
3809 return cifs_fscache_release_page(page, gfp);
3810}
3811
Lukas Czernerd47992f2013-05-21 23:17:23 -04003812static void cifs_invalidate_page(struct page *page, unsigned int offset,
3813 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303814{
3815 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3816
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003817 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303818 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3819}
3820
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003821static int cifs_launder_page(struct page *page)
3822{
3823 int rc = 0;
3824 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003825 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003826 struct writeback_control wbc = {
3827 .sync_mode = WB_SYNC_ALL,
3828 .nr_to_write = 0,
3829 .range_start = range_start,
3830 .range_end = range_end,
3831 };
3832
Joe Perchesf96637b2013-05-04 22:12:25 -05003833 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003834
3835 if (clear_page_dirty_for_io(page))
3836 rc = cifs_writepage_locked(page, &wbc);
3837
3838 cifs_fscache_invalidate_page(page, page->mapping->host);
3839 return rc;
3840}
3841
/*
 * Worker that processes an oplock break request from the server: wait for
 * in-flight writers, downgrade the locally cached oplock, flush and/or
 * invalidate cached pages as needed, push cached byte-range locks to the
 * server, and finally acknowledge the break (unless it was cancelled).
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* Don't downgrade until pending writers on this inode have drained. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/*
	 * Mandatory byte-range locks are cached locally only while we hold a
	 * write oplock; once write caching is lost, drop read caching too so
	 * reads go to the server where the locks are enforced.
	 */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* Propagate the break to any local leases on this inode. */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		/* Losing read caching means cached pages may go stale: wait
		   for the flush and then zap the page cache. */
		if (!CIFS_CACHE_READ(cinode)) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	/* Hand cached byte-range locks back to the server before the ack. */
	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	cifs_done_oplock_break(cinode);
}
3896
/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests before they reach the page cache, so this
 * method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	/*
	 * FIXME
	 * Eventually need to support direct IO for non forcedirectio mounts
	 */
	return -EINVAL;
}
3915
3916
/*
 * Address space operations for the common case: the server buffer is
 * large enough for multi-page reads, so readpages is available.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003930
3931/*
3932 * cifs_readpages requires the server to support a buffer large enough to
3933 * contain the header plus one complete page of data. Otherwise, we need
3934 * to leave cifs_readpages out of the address space operations.
3935 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003936const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003937 .readpage = cifs_readpage,
3938 .writepage = cifs_writepage,
3939 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003940 .write_begin = cifs_write_begin,
3941 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003942 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303943 .releasepage = cifs_release_page,
3944 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003945 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003946};