blob: 07c14f9217cb873c40d30ac7516a3fc5babb5081 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
/*
 * Open (or create) a file using the CIFS POSIX extensions.
 *
 * @full_path: server-relative path of the file
 * @pinode:    in/out inode pointer; may be NULL if the caller does not
 *             need inode info. If *pinode is NULL a new inode is
 *             instantiated from the returned attributes.
 * @sb:        superblock of the mount
 * @mode:      create mode (umask is applied here)
 * @f_flags:   open(2) flags, converted to SMB_O_* for the wire
 * @poplock:   out: oplock granted by the server
 * @pnetfid:   out: network file id of the opened handle
 * @xid:       transaction id for debugging/accounting
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	/* tlink reference no longer needed once the wire call is done */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server sent no usable attribute data */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* existing inode: refresh its attributes in place */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
173
/*
 * Open a file the "NT" (non-POSIX-extensions) way via the protocol's
 * ->open server operation, then refresh the inode from the returned
 * metadata.
 *
 * Returns 0 on success or a negative errno (-ENOSYS if the dialect
 * provides no open operation).
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer receives FILE_ALL_INFO metadata from the open response */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* refresh inode metadata; unix path queries the server again */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
252
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400253static bool
254cifs_has_mand_locks(struct cifsInodeInfo *cinode)
255{
256 struct cifs_fid_locks *cur;
257 bool has_locks = false;
258
259 down_read(&cinode->lock_sem);
260 list_for_each_entry(cur, &cinode->llist, llist) {
261 if (!list_empty(&cur->locks)) {
262 has_locks = true;
263 break;
264 }
265 }
266 up_read(&cinode->lock_sem);
267 return has_locks;
268}
269
/*
 * Allocate and initialize the cifsFileInfo (per-open-handle private
 * data) for a freshly opened file, link it onto the tcon and inode
 * open-file lists, and store it in file->private_data.
 *
 * Takes its own references on the dentry, the tlink and the superblock;
 * all are dropped again in cifsFileInfo_put(). Returns NULL on
 * allocation failure (the caller must close the server handle).
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	/* attach the per-handle brlock list to the inode's list */
	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;	/* initial reference, dropped by cifsFileInfo_put */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	/* a lease break may have arrived while the open was in flight */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	/* set_fid may have asked us to drop cached pages for this inode */
	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
344
/*
 * Take a reference on the file's private data under file_info_lock.
 * Paired with cifsFileInfo_put(). Returns the same pointer for
 * call-chaining convenience.
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
353
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * tcon->open_file_lock and cifs_file->file_info_lock.
 *
 * When the last reference is dropped this unlinks the handle from the
 * tcon/inode lists, cancels any queued oplock-break work, closes the
 * handle on the server, frees outstanding brlock records, and drops the
 * tlink, dentry and superblock references taken in cifs_new_fileinfo().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	/* fast path: just drop the reference if others remain */
	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	/* cannot hold open_file_lock while synchronously cancelling work */
	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
443
/*
 * VFS ->open handler for regular files on a CIFS mount.
 *
 * Tries the POSIX-extensions open first when the server advertises it;
 * otherwise (or on fallthrough-worthy errors) performs a regular NT
 * open. On success a cifsFileInfo is allocated and attached to the
 * struct file. Returns 0 or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* strict-IO mounts route O_DIRECT opens through direct file ops */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* prefer the POSIX-extensions open when the server supports it */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			/* remember the failure so we never retry posix open */
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* register the open so a concurrent lease break is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server-side open and pending-open bookkeeping */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
570
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 *
 * Called with the handle reopened after a reconnect; pushes either
 * POSIX or mandatory locks back to the server depending on the mount
 * and server capabilities. Returns 0 or a negative errno.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* read lock is enough: we only walk the lists, we don't modify them */
	down_read(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
602
/*
 * Reopen a file handle that has been marked invalid (typically after the
 * transport was reconnected).  Serialized against other users of the handle
 * by cfile->fh_mutex.  When @can_flush is true, dirty pages are written back
 * and inode metadata is refreshed from the server after the reopen; callers
 * already on the writeback path pass false (see the comment below the
 * can_flush block).  Returns 0 on success or a negative error code.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened the handle - nothing to do */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			/* only .reconnect is consumed on this path, so the
			 * otherwise-uninitialized oparms is safe here */
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
751
752int cifs_close(struct inode *inode, struct file *file)
753{
Jeff Layton77970692011-04-05 16:23:47 -0700754 if (file->private_data != NULL) {
755 cifsFileInfo_put(file->private_data);
756 file->private_data = NULL;
757 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700758
Steve Frenchcdff08e2010-10-21 22:46:14 +0000759 /* return code from the ->release op is always ignored */
760 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761}
762
/*
 * Walk every file open on @tcon and reopen the ones whose handles were
 * invalidated.  Candidates are first collected on a private list (with an
 * extra reference each) under tcon->open_file_lock, because
 * cifs_reopen_file() issues network requests and must not be called while
 * that spinlock is held.
 */
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	cifs_dbg(FYI, "Reopen persistent handles");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		/* hold a reference so the entry survives until we reopen it */
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	/* now that the spinlock is dropped, do the actual reopens */
	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		cifs_reopen_file(open_file, false /* do not flush */);
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
792
/*
 * ->release handler for directories: close the server-side search handle if
 * it is still live, free any buffered FIND response data held in the search
 * state, and drop the tlink and private data.  Errors from closing the
 * handle are logged and discarded, so this effectively always returns 0.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		/* mark invalid under the lock, then drop it before the
		 * (potentially sleeping) network close */
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
843
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400844static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300845cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000846{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400847 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000848 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400849 if (!lock)
850 return lock;
851 lock->offset = offset;
852 lock->length = length;
853 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400854 lock->pid = current->tgid;
855 INIT_LIST_HEAD(&lock->blist);
856 init_waitqueue_head(&lock->block_q);
857 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400858}
859
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700860void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400861cifs_del_lock_waiters(struct cifsLockInfo *lock)
862{
863 struct cifsLockInfo *li, *tmp;
864 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
865 list_del_init(&li->blist);
866 wake_up(&li->block_q);
867 }
868}
869
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/*
 * Scan the locks held through one fid (@fdlocks) for a byte-range conflict
 * with [offset, offset + length).  @rw_check is one of the CIFS_*_OP values
 * above: for CIFS_READ_OP/CIFS_WRITE_OP, locks owned by the current thread
 * group through the same fid are skipped, except that a shared lock still
 * conflicts with a CIFS_WRITE_OP through the same fid.  On conflict,
 * *@conf_lock (if non-NULL) is set to the conflicting lock and true is
 * returned.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* skip ranges that do not overlap the one being checked */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* a shared lock does not conflict with another shared lock,
		 * nor with our own lock on the same fid */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
905
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700906bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300907cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700908 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400909 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400910{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300911 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700912 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +0000913 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300914
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700915 list_for_each_entry(cur, &cinode->llist, llist) {
916 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700917 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300918 if (rc)
919 break;
920 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300921
922 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400923}
924
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read access is enough - the cached lock lists are only scanned */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock back through flock */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
962
/*
 * Unconditionally append @lock to the list of locks cached for @cfile's fid.
 * Takes lock_sem for write since the per-inode lock lists are modified.
 */
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
971
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed - cache it locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blist and sleep
		 * until cifs_del_lock_waiters() unlinks us (the condition
		 * below is "our blist entry is self-linked again"), then
		 * rescan from the top in case a new conflict appeared.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - take ourselves back off the waiter list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1018
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001019/*
1020 * Check if there is another lock that prevents us to set the lock (posix
1021 * style). If such a lock exists, update the flock structure with its
1022 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1023 * or leave it the same if we can't. Returns 0 if we don't need to request to
1024 * the server or 1 otherwise.
1025 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001026static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001027cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1028{
1029 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001030 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001031 unsigned char saved_type = flock->fl_type;
1032
Pavel Shilovsky50792762011-10-29 17:17:57 +04001033 if ((flock->fl_flags & FL_POSIX) == 0)
1034 return 1;
1035
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001036 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001037 posix_test_lock(file, flock);
1038
1039 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1040 flock->fl_type = saved_type;
1041 rc = 1;
1042 }
1043
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001044 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001045 return rc;
1046}
1047
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* cannot cache locally anymore - caller must ask the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * A conflicting lock is held: wait until it is released and
		 * retry, or unhook ourselves from the blocked-lock queue if
		 * the wait was interrupted.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}
1080
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001081int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001082cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001083{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001084 unsigned int xid;
1085 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001086 struct cifsLockInfo *li, *tmp;
1087 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001088 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001089 LOCKING_ANDX_RANGE *buf, *cur;
1090 int types[] = {LOCKING_ANDX_LARGE_FILES,
1091 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1092 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001093
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001094 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001095 tcon = tlink_tcon(cfile->tlink);
1096
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001097 /*
1098 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1099 * and check it for zero before using.
1100 */
1101 max_buf = tcon->ses->server->maxBuf;
1102 if (!max_buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001103 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001104 return -EINVAL;
1105 }
1106
1107 max_num = (max_buf - sizeof(struct smb_hdr)) /
1108 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001109 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001110 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001111 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001112 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001113 }
1114
1115 for (i = 0; i < 2; i++) {
1116 cur = buf;
1117 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001118 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001119 if (li->type != types[i])
1120 continue;
1121 cur->Pid = cpu_to_le16(li->pid);
1122 cur->LengthLow = cpu_to_le32((u32)li->length);
1123 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1124 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1125 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1126 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001127 stored_rc = cifs_lockv(xid, tcon,
1128 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001129 (__u8)li->type, 0, num,
1130 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001131 if (stored_rc)
1132 rc = stored_rc;
1133 cur = buf;
1134 num = 0;
1135 } else
1136 cur++;
1137 }
1138
1139 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001140 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001141 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001142 if (stored_rc)
1143 rc = stored_rc;
1144 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001145 }
1146
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001147 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001148 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001149 return rc;
1150}
1151
Jeff Layton3d224622016-05-24 06:27:44 -04001152static __u32
1153hash_lockowner(fl_owner_t owner)
1154{
1155 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1156}
1157
/*
 * Snapshot of one cached POSIX lock, built by cifs_push_posix_locks() while
 * holding the flc_lock spinlock so the actual server requests can be issued
 * afterwards, outside the spinlock.
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the local locks_to_send list */
	__u64 offset;		/* start of the locked byte range (fl_start) */
	__u64 length;		/* 1 + fl_end - fl_start */
	__u32 pid;		/* hashed lock owner (see hash_lockowner) */
	__u16 netfid;		/* SMB file handle the lock is set through */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1166
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001167static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001168cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001169{
David Howells2b0143b2015-03-17 22:25:59 +00001170 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001171 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001172 struct file_lock *flock;
1173 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001174 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001175 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001176 struct list_head locks_to_send, *el;
1177 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001178 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001179
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001180 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001181
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001182 if (!flctx)
1183 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001184
Jeff Laytone084c1b2015-02-16 14:32:03 -05001185 spin_lock(&flctx->flc_lock);
1186 list_for_each(el, &flctx->flc_posix) {
1187 count++;
1188 }
1189 spin_unlock(&flctx->flc_lock);
1190
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001191 INIT_LIST_HEAD(&locks_to_send);
1192
1193 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001194 * Allocating count locks is enough because no FL_POSIX locks can be
1195 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001196 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001197 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001198 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001199 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1200 if (!lck) {
1201 rc = -ENOMEM;
1202 goto err_out;
1203 }
1204 list_add_tail(&lck->llist, &locks_to_send);
1205 }
1206
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001207 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001208 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001209 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001210 if (el == &locks_to_send) {
1211 /*
1212 * The list ended. We don't have enough allocated
1213 * structures - something is really wrong.
1214 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001215 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001216 break;
1217 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001218 length = 1 + flock->fl_end - flock->fl_start;
1219 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1220 type = CIFS_RDLCK;
1221 else
1222 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001223 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001224 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001225 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001226 lck->length = length;
1227 lck->type = type;
1228 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001229 }
Jeff Layton6109c852015-01-16 15:05:57 -05001230 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001231
1232 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001233 int stored_rc;
1234
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001235 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001236 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001237 lck->type, 0);
1238 if (stored_rc)
1239 rc = stored_rc;
1240 list_del(&lck->llist);
1241 kfree(lck);
1242 }
1243
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001244out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001245 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001246 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001247err_out:
1248 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1249 list_del(&lck->llist);
1250 kfree(lck);
1251 }
1252 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001253}
1254
1255static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001256cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001257{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001258 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001259 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001260 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001261 int rc = 0;
1262
1263 /* we are going to update can_cache_brlcks here - need a write access */
1264 down_write(&cinode->lock_sem);
1265 if (!cinode->can_cache_brlcks) {
1266 up_write(&cinode->lock_sem);
1267 return rc;
1268 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001269
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001270 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001271 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1272 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001273 rc = cifs_push_posix_locks(cfile);
1274 else
1275 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001276
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001277 cinode->can_cache_brlcks = false;
1278 up_write(&cinode->lock_sem);
1279 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001280}
1281
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001282static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001283cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001284 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001286 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001287 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001288 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001289 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001290 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001291 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001292 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001294 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001295 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001296 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001297 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001298 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001299 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1300 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001301 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001303 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001304 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001305 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001306 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001307 *lock = 1;
1308 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001309 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001310 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001311 *unlock = 1;
1312 /* Check if unlock includes more than one lock range */
1313 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001314 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001315 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001316 *lock = 1;
1317 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001318 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001319 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001320 *lock = 1;
1321 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001322 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001323 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001324 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001326 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001327}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328
/*
 * Service an F_GETLK-style request: find out whether the byte range in
 * @flock could be locked with @type.  On the mandatory-lock path
 * flock->fl_type is rewritten to report the result (F_UNLCK when the range
 * is free, F_WRLCK/F_RDLCK when a conflicting lock is inferred).
 * Returns 0 or a negative error.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	/* fl_start/fl_end are inclusive offsets, hence the +1. */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/*
		 * Consult the local state first; a zero return is treated as
		 * conclusive and no request is sent to the server.
		 */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		/* Ask the server; @flock is passed so it can carry the answer. */
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* Check our cached byte-range locks before bothering the server. */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* Probe by actually taking the lock; success means the range is free. */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		/* Range was free - drop the probe lock and report F_UNLCK. */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* Even a shared probe failed - report an exclusive holder. */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	/*
	 * The exclusive probe failed; retry as shared to tell a read-locked
	 * range (shared probe succeeds) from a write-locked one.
	 */
	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		/* Shared probe took - release it and report a read lock. */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1397
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001398void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001399cifs_move_llist(struct list_head *source, struct list_head *dest)
1400{
1401 struct list_head *li, *tmp;
1402 list_for_each_safe(li, tmp, source)
1403 list_move(li, dest);
1404}
1405
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001406void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001407cifs_free_llist(struct list_head *llist)
1408{
1409 struct cifsLockInfo *li, *tmp;
1410 list_for_each_entry_safe(li, tmp, llist, llist) {
1411 cifs_del_lock_waiters(li);
1412 list_del(&li->llist);
1413 kfree(li);
1414 }
1415}
1416
/*
 * Release the part of the file covered by @flock.  Locks still cached
 * locally (can_cache_brlcks) are simply dropped from the file's list;
 * locks already pushed to the server are batched into LOCKING_ANDX_RANGE
 * arrays and released via cifs_lockv(), one pass per lock type.  On a
 * failed server request the affected locks are restored to the file's
 * list from @tmp_llist.  Returns 0 or the last server error.
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	/* One unlock pass per type: exclusive first, then shared. */
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	/* fl_start/fl_end are inclusive offsets, hence the +1. */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	/* Holding pen for locks in-flight to the server (for rollback). */
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	/* How many ranges fit in one request after the SMB header. */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* Only locks wholly inside the request are released. */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			/* Only the owner thread group's locks. */
			if (current->tgid != li->pid)
				continue;
			/* Only the type handled by this pass. */
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* Batch is full - flush it to the server. */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		/* Flush any partial final batch for this type. */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1523
/*
 * Apply a lock or unlock request described by @flock.  POSIX requests go
 * straight to CIFSSMBPosixLock(); mandatory lock requests are first
 * queued locally via cifs_lock_add_if() and only sent to the server when
 * they cannot be cached.  On success of a FL_POSIX request the lock is
 * also recorded with the local VFS via locks_lock_file_wait().
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	/* fl_start/fl_end are inclusive offsets, hence the +1. */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		/* Let the local POSIX layer decide/record first. */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		/* NOTE(review): shadows the 'lock' int parameter above. */
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/*
		 * Try to queue the lock locally; rc < 0 is an error, rc == 0
		 * means it was handled without a server round trip.
		 */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		/* Send the mandatory lock to the server. */
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* Server accepted - record it on the file's lock list. */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX && !rc)
		rc = locks_lock_file_wait(file, flock);
	return rc;
}
1604
1605int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1606{
1607 int rc, xid;
1608 int lock = 0, unlock = 0;
1609 bool wait_flag = false;
1610 bool posix_lck = false;
1611 struct cifs_sb_info *cifs_sb;
1612 struct cifs_tcon *tcon;
1613 struct cifsInodeInfo *cinode;
1614 struct cifsFileInfo *cfile;
1615 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001616 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001617
1618 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001619 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001620
Joe Perchesf96637b2013-05-04 22:12:25 -05001621 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1622 cmd, flock->fl_flags, flock->fl_type,
1623 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001624
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001625 cfile = (struct cifsFileInfo *)file->private_data;
1626 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001627
1628 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1629 tcon->ses->server);
1630
Al Viro7119e222014-10-22 00:25:12 -04001631 cifs_sb = CIFS_FILE_SB(file);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001632 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001633 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001634
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001635 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001636 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1637 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1638 posix_lck = true;
1639 /*
1640 * BB add code here to normalize offset and length to account for
1641 * negative length which we can not accept over the wire.
1642 */
1643 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001644 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001645 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001646 return rc;
1647 }
1648
1649 if (!lock && !unlock) {
1650 /*
1651 * if no lock or unlock then nothing to do since we do not
1652 * know what it is
1653 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001654 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001655 return -EOPNOTSUPP;
1656 }
1657
1658 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1659 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001660 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 return rc;
1662}
1663
Jeff Layton597b0272012-03-23 14:40:56 -04001664/*
1665 * update the file size (if needed) after a write. Should be called with
1666 * the inode->i_lock held
1667 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001668void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001669cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1670 unsigned int bytes_written)
1671{
1672 loff_t end_of_write = offset + bytes_written;
1673
1674 if (end_of_write > cifsi->server_eof)
1675 cifsi->server_eof = end_of_write;
1676}
1677
/*
 * Write @write_size bytes from @write_data at *@offset through the
 * server's sync_write op.  Retries on -EAGAIN (reopening an invalidated
 * handle first), advances *@offset and the cached server EOF as data is
 * accepted, and bumps i_size if the file grew.  Returns the number of
 * bytes written (possibly short), or a negative error if nothing was
 * written at all.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	/* Outer loop: one successful chunk per iteration until all is sent. */
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* Cap each chunk at the server's retry write size. */
			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* Report a partial write; error out only if nothing
			   was written at all. */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* Grow the cached server EOF under i_lock. */
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* Extend i_size if the write went past the old end. */
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1762
/*
 * Return an open handle on @cifs_inode usable for reading, with a
 * reference taken via cifsFileInfo_get() (caller must put it), or NULL if
 * none is found.  On multiuser mounts @fsuid_only restricts the search to
 * handles opened by the current fsuid.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001797
/*
 * Find a writable open handle for @cifs_inode and return it with an extra
 * reference held (caller drops it with cifsFileInfo_put), or NULL.
 * Preference order: a valid handle owned by the current thread group,
 * then any valid handle, then an invalidated handle which is reopened
 * (retried at most MAX_REOPEN_ATT times).  On multiuser mounts
 * @fsuid_only restricts the search to handles opened by the current
 * fsuid.
 */
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of on oops (due to
	   it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	/* Give up after too many reopen attempts of invalidated handles. */
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		/* First pass only considers the current thread group's files. */
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				/* Remember the first invalidated candidate. */
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		/* Pin the candidate before dropping the lock for reopen. */
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		/* Reopen outside the spinlock; it may block on the network. */
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			/* Reopen failed: demote it to the list tail, drop our
			   reference, and retry the search. */
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
					&cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}
1880
/*
 * Write the byte range [from, to) of @page back to the server using any
 * writable filehandle found for the owning inode.
 *
 * Returns 0 on success (or when losing a race with truncate), a negative
 * error otherwise.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	/* map the page and point at the first byte we were asked to write */
	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* reject nonsensical ranges before touching the server */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1934
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001935static struct cifs_writedata *
1936wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1937 pgoff_t end, pgoff_t *index,
1938 unsigned int *found_pages)
1939{
1940 unsigned int nr_pages;
1941 struct page **pages;
1942 struct cifs_writedata *wdata;
1943
1944 wdata = cifs_writedata_alloc((unsigned int)tofind,
1945 cifs_writev_complete);
1946 if (!wdata)
1947 return NULL;
1948
1949 /*
1950 * find_get_pages_tag seems to return a max of 256 on each
1951 * iteration, so we must call it several times in order to
1952 * fill the array or the wsize is effectively limited to
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03001953 * 256 * PAGE_SIZE.
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001954 */
1955 *found_pages = 0;
1956 pages = wdata->pages;
1957 do {
1958 nr_pages = find_get_pages_tag(mapping, index,
1959 PAGECACHE_TAG_DIRTY, tofind,
1960 pages);
1961 *found_pages += nr_pages;
1962 tofind -= nr_pages;
1963 pages += nr_pages;
1964 } while (nr_pages && tofind && *index <= end);
1965
1966 return wdata;
1967}
1968
/*
 * Lock and prepare up to @found_pages pages (already referenced in
 * wdata->pages) for one contiguous writeback request.
 *
 * A page is accepted only if it still belongs to @mapping, is still
 * dirty, is consecutive with the previous one, and lies within both the
 * writeback range and i_size; the first rejection stops the scan.
 * Accepted pages are left locked with their writeback bit set.
 * References on the unused tail of the array are dropped.
 *
 * Returns the number of pages ready to send and updates *index, *next
 * and *done for the caller's scan loop.
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		/* block for the first page; don't wait for later ones */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		/* page was truncated or migrated away from this mapping */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2048
/*
 * Finish filling in @wdata for @nr_pages prepared pages and hand it to
 * the transport via ->async_writev().  The final page may be partial:
 * tailsz is clamped so the request never extends past i_size.  All pages
 * are unlocked before returning, whether or not the send was issued.
 *
 * Returns 0 on successful dispatch, -EBADF if no writable handle could
 * be found for the inode, or the error from ->async_writev().
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* bytes in the last page: everything up to i_size, at most a page */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	/* drop any handle from a previous attempt and find a fresh one */
	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
2083
/*
 * Write back dirty pages of @mapping in chunks of up to wsize bytes.
 *
 * Each iteration reserves send credits from the server, gathers up to a
 * wsize worth of consecutive dirty pages, and dispatches them as one
 * asynchronous write.  Credits are handed back on every path that does
 * not actually send.  Falls back to generic_writepages() (one page at a
 * time) when wsize is smaller than a page.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		/* explicit range: no need to wrap back to the start */
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		/* blocks until enough credits for a wsize write are free */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				/* -EAGAIN is retryable: keep pages dirty */
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* data-integrity writeback must retry a failed chunk */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196
/*
 * Write a single locked page back to the server.  Retries forever on
 * -EAGAIN for data-integrity (WB_SYNC_ALL) writeback, otherwise
 * redirties the page and lets a later pass pick it up.  The caller
 * still owns the page lock on return.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
2235
/* ->writepage: do the work with the page locked, then release the lock */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = cifs_writepage_locked(page, wbc);

	unlock_page(page);
	return ret;
}
2242
/*
 * ->write_end: commit @copied bytes written at @pos into @page.
 *
 * If the page is not uptodate the data is pushed straight to the server
 * with cifs_write(); otherwise the page is simply marked dirty and left
 * for writeback.  Extends i_size under i_lock when the write went past
 * EOF.  Drops the page lock and reference taken by ->write_begin.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* with rwpidforward, writes go out under the opener's pid */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	/* PageChecked is set by ->write_begin when it skipped reading */
	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2303
/*
 * Strict fsync: flush dirty pages in [start, end], and additionally zap
 * the page cache when we do not hold read caching rights (so a later
 * read refetches from the server), then ask the server to flush the
 * handle unless the nostrictsync mount option suppresses it.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	/* no read caching rights: cached pages may be stale, drop them */
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2346
/*
 * Non-strict fsync: flush dirty pages in [start, end] and ask the server
 * to flush the handle (unless suppressed by the nostrictsync mount
 * option).  Unlike cifs_strict_fsync(), the page cache is left intact.
 */
int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2380
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381/*
2382 * As file closes, flush all cached write data for this inode checking
2383 * for write behind errors.
2384 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002385int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386{
Al Viro496ad9a2013-01-23 17:07:38 -05002387 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 int rc = 0;
2389
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002390 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002391 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002392
Joe Perchesf96637b2013-05-04 22:12:25 -05002393 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394
2395 return rc;
2396}
2397
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002398static int
2399cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2400{
2401 int rc = 0;
2402 unsigned long i;
2403
2404 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002405 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002406 if (!pages[i]) {
2407 /*
2408 * save number of pages we have already allocated and
2409 * return with ENOMEM error
2410 */
2411 num_pages = i;
2412 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002413 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002414 }
2415 }
2416
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002417 if (rc) {
2418 for (i = 0; i < num_pages; i++)
2419 put_page(pages[i]);
2420 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002421 return rc;
2422}
2423
2424static inline
2425size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2426{
2427 size_t num_pages;
2428 size_t clen;
2429
2430 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002431 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002432
2433 if (cur_len)
2434 *cur_len = clen;
2435
2436 return num_pages;
2437}
2438
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002439static void
Steve French4a5c80d2014-02-07 20:45:12 -06002440cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002441{
2442 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002443 struct cifs_writedata *wdata = container_of(refcount,
2444 struct cifs_writedata, refcount);
2445
2446 for (i = 0; i < wdata->nr_pages; i++)
2447 put_page(wdata->pages[i]);
2448 cifs_writedata_release(refcount);
2449}
2450
/*
 * Work-queue completion for an uncached write: record the new server EOF
 * (growing i_size if the write extended the file), wake the waiter, and
 * drop the work item's reference on the writedata.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2469
/*
 * Copy up to *len bytes from the user iterator @from into the first
 * *num_pages pages of @wdata.  On return *len holds the number of bytes
 * actually copied and *num_pages the number of pages touched.
 *
 * NOTE(review): when every copy succeeds in full the loop falls through
 * with i == nr_pages, so *num_pages is reported as nr_pages + 1; the
 * caller only uses it to trim unused trailing pages, so this appears
 * harmless — confirm before relying on the value elsewhere.
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2511
/*
 * Split an uncached write into wsize-sized chunks, send each chunk as an
 * async write and queue the resulting cifs_writedata structures on
 * @wdata_list for the caller to wait on.
 *
 * @offset:     file offset of the first byte to write
 * @len:        total number of bytes to write
 * @from:       source iterator; consumed as pages are filled
 * @open_file:  open handle to write through
 * @cifs_sb:    superblock info (mount flags, negotiated wsize)
 * @wdata_list: successfully-submitted requests are appended here
 *
 * Returns 0 on success or the error from the first failed submission.
 * Already-queued requests remain on @wdata_list for the caller to reap.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	/* snapshots used to rewind the iterator if a send must be retried */
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		/* blocks until the server grants credits for a wsize send */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			/* return the unused credits to the server pool */
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		/* last page may be partially filled */
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		/* reopen a stale handle before sending, if needed */
		if (!wdata->cfile->invalidHandle ||
		    !cifs_reopen_file(wdata->cfile, false))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/*
				 * Retryable: rewind the iterator to where this
				 * chunk started and resubmit the same range.
				 */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2607
/*
 * Uncached (O_DIRECT-style) write entry point: split the write into async
 * requests, then wait for and collect the replies in offset order,
 * resending any chunk that fails with a retryable error.
 *
 * Returns the number of bytes written, or a negative error if nothing
 * was written at all.
 */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	/* snapshot so a resend can re-derive the data for any offset */
	struct iov_iter saved_from = *from;
	int rc;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
				  open_file, cifs_sb, &wdata_list);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = saved_from;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				/* position the fresh iterator at this chunk */
				iov_iter_advance(&tmp_from,
						 wdata->offset - iocb->ki_pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						open_file, cifs_sb, &tmp_list);

				list_splice(&tmp_list, &wdata_list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				/* list changed under us; restart traversal */
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	/* cached pages are now stale relative to the server copy */
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
	cifs_stats_bytes_written(tcon, total_written);
	return total_written;
}
2701
/*
 * Cached write used when we hold a write oplock: go through the page
 * cache via __generic_file_write_iter(), but only after verifying that
 * no mandatory brlock conflicts with the target range.
 *
 * Lock ordering: lock_sem (read) is taken before and released after the
 * inode lock, and is held across generic_write_sync() as well.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	inode_lock(inode);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	inode_unlock(inode);

	/* flush/sync for O_SYNC etc.; done after dropping the inode lock */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	up_read(&cinode->lock_sem);
	return rc;
}
2737
/*
 * Strict-cache write dispatcher. Depending on the oplock state and mount
 * options, route the write through the page cache (generic path), the
 * brlock-checked cached path (cifs_writev), or the uncached path
 * (cifs_user_writev).
 *
 * cifs_get_writer()/cifs_put_writer() bracket the whole operation so an
 * oplock break cannot race with it.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		/*
		 * POSIX (unix extension) locks don't mandate checking
		 * mandatory brlocks, so the plain generic path is safe.
		 */
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the data
	 * to the server exactly from the pos to pos+len-1 rather than flush all
	 * affected pages because it may cause a error with mandatory locks on
	 * these pages but not on the region from pos to ppos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2785
Jeff Layton0471ca32012-05-16 07:13:16 -04002786static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002787cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002788{
2789 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002790
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002791 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2792 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002793 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002794 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002795 INIT_LIST_HEAD(&rdata->list);
2796 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002797 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002798 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002799
Jeff Layton0471ca32012-05-16 07:13:16 -04002800 return rdata;
2801}
2802
Jeff Layton6993f742012-05-16 07:13:17 -04002803void
2804cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002805{
Jeff Layton6993f742012-05-16 07:13:17 -04002806 struct cifs_readdata *rdata = container_of(refcount,
2807 struct cifs_readdata, refcount);
2808
2809 if (rdata->cfile)
2810 cifsFileInfo_put(rdata->cfile);
2811
Jeff Layton0471ca32012-05-16 07:13:16 -04002812 kfree(rdata);
2813}
2814
Jeff Layton2a1bb132012-05-16 07:13:17 -04002815static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002816cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002817{
2818 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002819 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002820 unsigned int i;
2821
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002822 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002823 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2824 if (!page) {
2825 rc = -ENOMEM;
2826 break;
2827 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002828 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002829 }
2830
2831 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002832 for (i = 0; i < nr_pages; i++) {
2833 put_page(rdata->pages[i]);
2834 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002835 }
2836 }
2837 return rc;
2838}
2839
2840static void
2841cifs_uncached_readdata_release(struct kref *refcount)
2842{
Jeff Layton1c892542012-05-16 07:13:17 -04002843 struct cifs_readdata *rdata = container_of(refcount,
2844 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002845 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002846
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002847 for (i = 0; i < rdata->nr_pages; i++) {
2848 put_page(rdata->pages[i]);
2849 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002850 }
2851 cifs_readdata_release(refcount);
2852}
2853
/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iter:	destination for our data
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static int
cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
{
	/* only got_bytes are valid; the rest of the pages carry no data */
	size_t remaining = rdata->got_bytes;
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
		size_t written = copy_page_to_iter(page, 0, copy, iter);
		remaining -= written;
		/*
		 * A short copy with room left in the iterator means a fault
		 * on the destination; give up rather than loop forever.
		 */
		if (written < copy && iov_iter_count(iter) > 0)
			break;
	}
	/* anything left uncopied indicates a destination fault */
	return remaining ? -EFAULT : 0;
}
2879
2880static void
2881cifs_uncached_readv_complete(struct work_struct *work)
2882{
2883 struct cifs_readdata *rdata = container_of(work,
2884 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002885
2886 complete(&rdata->done);
2887 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2888}
2889
/*
 * Receive the payload of an uncached read response from the server
 * socket into @rdata's pages.
 *
 * @server: connection the response is arriving on
 * @rdata:  the read request being filled; got_bytes is reset here and
 *          accumulates what was actually received
 * @len:    number of payload bytes remaining on the wire
 *
 * Pages beyond @len are released and removed from rdata->nr_pages; the
 * final partial page is zero-filled past @len and its valid length is
 * recorded in rdata->tailsz.
 *
 * Returns the number of bytes received when any data arrived (except on
 * -ECONNABORTED), otherwise the socket error.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;

		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}
		n = len;
		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			n = PAGE_SIZE;
			len -= n;
		} else {
			/* last, partial page: zero the unused tail */
			zero_user(page, len, PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		}
		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
2931
/*
 * Split an uncached read of [offset, offset+len) into rsize-sized async
 * read requests and queue the submitted cifs_readdata structures on
 * @rdata_list for the caller to wait on.
 *
 * Returns 0 on success, or the error from the first failed submission;
 * -EAGAIN submissions are retried in place.
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		/* blocks until credits for an rsize read are available */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/* give the unused credits back */
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->credits = credits;

		/* reopen a stale handle before sending, if needed */
		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			/* kref_put frees pages and the struct via release */
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				continue;
			break;
		}

		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3001
/*
 * Uncached (O_DIRECT-style) read entry point: fan the read out into
 * async requests, then wait for each reply, copy its data into @to,
 * and resend the unread remainder of any request that came back with
 * a retryable error.
 *
 * Returns the number of bytes read, or a negative error if no data
 * was transferred.
 */
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	len = iov_iter_count(to);
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				/* resend only the part that never arrived */
				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list);

				list_splice(&tmp_list, &rdata_list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				/* list changed under us; restart traversal */
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	/* whatever the iterator consumed is what the caller got */
	total_read = len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, total_read);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3105
/*
 * Strict-cache read dispatcher: read through the page cache only when a
 * read oplock (or POSIX lock semantics) makes the cache trustworthy,
 * otherwise fall back to the uncached path. When serving from cache
 * under mandatory lock semantics, first check for a conflicting brlock.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	/* POSIX (unix extension) locks: no mandatory brlock check needed */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145
/*
 * Legacy synchronous read: loop issuing rsize-bounded sync_read calls
 * until @read_size bytes have been copied to @read_data or an error /
 * EOF (zero-byte read) occurs. Advances *@offset by the bytes read.
 *
 * Returns the total bytes read, or a negative error when nothing at
 * all was read.
 */
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_FILE_SB(file);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		/* inner loop retries the single chunk on -EAGAIN */
		do {
			current_read_size = min_t(uint, read_size - total_read,
						  rsize);
			/*
			 * For windows me and 9x we do not want to request more
			 * than it negotiated since it will refuse the read
			 * then.
			 */
			if ((tcon->ses) && !(tcon->ses->capabilities &
				tcon->ses->server->vals->cap_large_files)) {
				current_read_size = min_t(uint,
					current_read_size, CIFSMaxBufSize);
			}
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		} while (rc == -EAGAIN);

		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			/*
			 * NOTE(review): stats are bumped with the running
			 * total each pass, not with bytes_read -- looks like
			 * it over-counts on multi-chunk reads; confirm intent.
			 */
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}
3236
Jeff Laytonca83ce32011-04-12 09:13:44 -04003237/*
3238 * If the page is mmap'ed into a process' page tables, then we need to make
3239 * sure that it doesn't change while being written back.
3240 */
3241static int
3242cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3243{
3244 struct page *page = vmf->page;
3245
3246 lock_page(page);
3247 return VM_FAULT_LOCKED;
3248}
3249
/*
 * vm operations used for cifs mmap'ed files: generic fault handling
 * from the page cache, plus a page_mkwrite hook that locks the page
 * so writeback cannot race with a write fault.
 */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3255
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003256int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3257{
3258 int rc, xid;
Al Viro496ad9a2013-01-23 17:07:38 -05003259 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003260
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003261 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003262
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003263 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003264 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003265 if (rc)
3266 return rc;
3267 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003268
3269 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003270 if (rc == 0)
3271 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003272 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003273 return rc;
3274}
3275
Linus Torvalds1da177e2005-04-16 15:20:36 -07003276int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3277{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003278 int rc, xid;
3279
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003280 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003281 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003283 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3284 rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003285 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286 return rc;
3287 }
3288 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003289 if (rc == 0)
3290 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003291 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292 return rc;
3293}
3294
/*
 * Work handler run when an async read (issued by cifs_readpages) finishes.
 * Walks every page attached to the rdata, marks the ones that received
 * data uptodate, pushes them to fscache, and releases the page and rdata
 * references taken when the request was built.
 */
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i, got_bytes;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	got_bytes = rdata->got_bytes;
	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		/*
		 * A page is good if the whole request succeeded, or if the
		 * request must be retried (-EAGAIN) but this page already
		 * received its data (got_bytes still covers it).
		 */
		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes)) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		/* readers blocked in lock_page() may proceed from here */
		unlock_page(page);

		if (rdata->result == 0 ||
		    (rdata->result == -EAGAIN && got_bytes))
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		/* consume up to one page worth of received bytes per pass */
		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);

		/* drop the reference held by the read request */
		put_page(page);
		rdata->pages[i] = NULL;
	}
	/* may free the rdata (and drop its cfile reference) */
	kref_put(&rdata->refcount, cifs_readdata_release);
}
3327
/*
 * Receive "len" bytes of read response data from the server socket into
 * the pages attached to rdata. Pages wholly beyond the data are either
 * zero-filled (when past the server's EOF, to stop the VFS re-requesting
 * them) or dropped from the request. Returns the number of bytes
 * received, or a negative error (except that partial progress is
 * preferred over any error other than -ECONNABORTED).
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n = PAGE_SIZE;	/* bytes to read into this page */

		if (len >= PAGE_SIZE) {
			/* full page of data remains */
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len, PAGE_SIZE - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* report partial progress unless the connection was aborted */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
		rdata->got_bytes : result;
}
3394
/*
 * Peel a run of contiguous pages off the tail of the readahead page_list
 * and insert them (locked) into the page cache, collecting them on
 * tmplist for a single read request. Stops at an index discontinuity,
 * at the rsize byte limit, or when a page cannot be added to the cache.
 * On success returns 0 with *offset/*bytes/*nr_pages describing the
 * request; at least one page is always claimed.
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	/* page_list is in declining index order; lowest index is at tail */
	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			/* leave the page on page_list for a later attempt */
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
3453
/*
 * ->readpages address_space operation: satisfy readahead by carving the
 * VFS-supplied page list into contiguous chunks (bounded by the server
 * rsize / available credits) and issuing one async read per chunk.
 * Completion is handled by cifs_readv_complete. Pages served from
 * fscache are consumed up front; pages we never touch stay on page_list
 * for the VFS to release.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the opener's pid to the server if the mount requests it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		/* may block until the server grants credits for this read */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/* rdata holds a reference on the open file until released */
		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->credits = credits;

		/* tmplist is in increasing index order; preserve that */
		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		/* reopen a stale handle before dispatch; rc stays 0 on the
		   reopen-failure path so the cleanup below runs */
		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our ref; the async completion holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}
3582
/*
 * cifs_readpage_worker must be called with the page pinned
 *
 * Fill one locked page: try fscache first, otherwise do a synchronous
 * cifs_read() into the kmapped page, zero the tail, and mark it
 * uptodate. On the fscache hit path the page is NOT unlocked here
 * (rc == 0 jumps straight to read_complete); on the read path the page
 * is always unlocked before returning. Returns 0 or a negative error.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_time(file_inode(file));

	/* zero the part of the page past what was read (short read / EOF) */
	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
3628
3629static int cifs_readpage(struct file *file, struct page *page)
3630{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003631 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003633 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003635 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003636
3637 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303638 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003639 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303640 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 }
3642
Joe Perchesf96637b2013-05-04 22:12:25 -05003643 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003644 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645
3646 rc = cifs_readpage_worker(file, page, &offset);
3647
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003648 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003649 return rc;
3650}
3651
Steve Frencha403a0a2007-07-26 15:54:16 +00003652static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3653{
3654 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05003655 struct cifs_tcon *tcon =
3656 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00003657
Steve French3afca262016-09-22 18:58:16 -05003658 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003659 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003660 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05003661 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003662 return 1;
3663 }
3664 }
Steve French3afca262016-09-22 18:58:16 -05003665 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003666 return 0;
3667}
3668
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669/* We do not want to update the file size from server for inodes
3670 open for write - to avoid races with writepage extending
3671 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003672 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003673 but this is tricky to do without racing with writebehind
3674 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003675bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003676{
Steve Frencha403a0a2007-07-26 15:54:16 +00003677 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003678 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003679
Steve Frencha403a0a2007-07-26 15:54:16 +00003680 if (is_inode_writable(cifsInode)) {
3681 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003682 struct cifs_sb_info *cifs_sb;
3683
Steve Frenchc32a0b62006-01-12 14:41:28 -08003684 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003685 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003686 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003687 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003688 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003689 }
3690
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003691 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003692 return true;
Steve French7ba52632007-02-08 18:14:13 +00003693
Steve French4b18f2a2008-04-29 00:06:05 +00003694 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003695 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003696 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697}
3698
/*
 * ->write_begin address_space operation: locate/lock the page for a
 * pending write and decide whether its existing contents must be read
 * in first. A full-page write, an already-uptodate page, or (with a
 * read oplock) a write at/past EOF needs no read. Otherwise read the
 * page once via cifs_readpage_worker and retry; if the read fails,
 * cifs_write_end falls back to a sync write since PG_uptodate is unset.
 * Returns 0 with *pagep set (page locked), or -ENOMEM.
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;	/* limit the read-then-retry path to one pass */
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3775
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303776static int cifs_release_page(struct page *page, gfp_t gfp)
3777{
3778 if (PagePrivate(page))
3779 return 0;
3780
3781 return cifs_fscache_release_page(page, gfp);
3782}
3783
Lukas Czernerd47992f2013-05-21 23:17:23 -04003784static void cifs_invalidate_page(struct page *page, unsigned int offset,
3785 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303786{
3787 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3788
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003789 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303790 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3791}
3792
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003793static int cifs_launder_page(struct page *page)
3794{
3795 int rc = 0;
3796 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003797 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003798 struct writeback_control wbc = {
3799 .sync_mode = WB_SYNC_ALL,
3800 .nr_to_write = 0,
3801 .range_start = range_start,
3802 .range_end = range_end,
3803 };
3804
Joe Perchesf96637b2013-05-04 22:12:25 -05003805 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003806
3807 if (clear_page_dirty_for_io(page))
3808 rc = cifs_writepage_locked(page, &wbc);
3809
3810 cifs_fscache_invalidate_page(page, page->mapping->host);
3811 return rc;
3812}
3813
/*
 * Worker that handles an oplock break notification from the server.
 *
 * Ordering here matters: we must wait out any in-flight writers, then
 * downgrade our cached oplock state, flush (and possibly zap) the page
 * cache, push byte-range locks to the server, and finally acknowledge
 * the break — in that sequence.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* Let any writers still holding the oplock-protected path finish
	 * before we downgrade the cached state underneath them. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	/* Protocol-specific downgrade; the flag says whether the server is
	 * letting us keep a level II (read) oplock or none at all. */
	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/* Mandatory byte-range locks cannot be cached without a write
	 * oplock, so a read-only oplock must be dropped entirely. */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
					cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* Propagate the break to any local leases on this inode. */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			/* Lost read caching too: wait for writeback and
			 * discard the now-untrusted page cache. */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	/* Cached byte-range locks must now live on the server. */
	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	/* Clear the pending-break state and wake any waiters. */
	cifs_done_oplock_break(cinode);
}
3868
Steve Frenchdca69282013-11-11 16:42:37 -06003869/*
3870 * The presence of cifs_direct_io() in the address space ops vector
3871 * allowes open() O_DIRECT flags which would have failed otherwise.
3872 *
3873 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
3874 * so this method should never be called.
3875 *
3876 * Direct IO is not yet supported in the cached mode.
3877 */
3878static ssize_t
Christoph Hellwigc8b8e322016-04-07 08:51:58 -07003879cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
Steve Frenchdca69282013-11-11 16:42:37 -06003880{
3881 /*
3882 * FIXME
3883 * Eventually need to support direct IO for non forcedirectio mounts
3884 */
3885 return -EINVAL;
3886}
3887
3888
/*
 * Address space operations for the common case, where the server's
 * buffer size is large enough for multi-page reads (readpages).
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	/* Stub that only exists to let open(O_DIRECT) succeed; see
	 * cifs_direct_io() above. */
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003902
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	/* Note: no .readpages and no .direct_IO in the small-buffer case */
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};