blob: 7f5f6176c6f15caff307e078320122141c119ab9 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
/*
 * Open (and possibly create) a file via the SMB unix-extensions POSIX
 * create call.
 *
 * @full_path: server-relative path of the file
 * @pinode:    in/out inode pointer; may be NULL if the caller does not
 *             need the inode. If *pinode is NULL a new inode is fetched,
 *             otherwise the existing inode's attributes are refreshed.
 * @sb:        superblock of the mount
 * @mode:      create mode (current umask is applied here)
 * @f_flags:   POSIX open flags, converted to SMB_O_* for the wire
 * @poplock:   out: oplock granted by the server
 * @pnetfid:   out: network file handle from the server
 * @xid:       transaction id for request tracking
 *
 * Returns 0 on success or a negative errno. On success with no inode
 * info returned by the server (Type == -1), the caller is expected to
 * issue a qpathinfo itself.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	/* response buffer for the server's file attributes */
	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	/* tlink reference only needed for the duration of the call */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the existing inode from the returned attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
173
/*
 * Open a file the "NT way" (non-POSIX path), via the protocol-specific
 * server->ops->open() hook, then refresh the inode metadata from the
 * server.
 *
 * @full_path: server-relative path
 * @inode:     inode being opened (metadata is refreshed on success)
 * @cifs_sb:   per-superblock CIFS info (used for backup-intent check)
 * @tcon:      tree connection to open on
 * @f_flags:   POSIX open flags; mapped to desired access + disposition
 * @oplock:    in/out oplock state
 * @fid:       out: file handle information filled by the open op
 * @xid:       transaction id
 *
 * Returns 0 on success or a negative errno (-ENOSYS if the dialect has
 * no open op, -ENOMEM on allocation failure).
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* buffer for the FILE_ALL_INFO the open op returns */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* refresh inode metadata; unix extensions do their own query */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

out:
	kfree(buf);
	return rc;
}
252
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400253static bool
254cifs_has_mand_locks(struct cifsInodeInfo *cinode)
255{
256 struct cifs_fid_locks *cur;
257 bool has_locks = false;
258
259 down_read(&cinode->lock_sem);
260 list_for_each_entry(cur, &cinode->llist, llist) {
261 if (!list_empty(&cur->locks)) {
262 has_locks = true;
263 break;
264 }
265 }
266 up_read(&cinode->lock_sem);
267 return has_locks;
268}
269
/*
 * Allocate and initialize a cifsFileInfo for a freshly opened handle,
 * link it into the per-tcon and per-inode open file lists, and attach
 * it to file->private_data.
 *
 * @fid:    handle info from the server open; fid->pending_open is
 *          consumed (removed from the pending list) here
 * @file:   the VFS file being opened
 * @tlink:  tree-connection link (an extra reference is taken)
 * @oplock: oplock level granted by the server
 *
 * Returns the new cifsFileInfo with an initial refcount of 1, or NULL
 * on allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-handle byte-range lock list */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	/* publish the lock list on the inode under lock_sem */
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;		/* initial reference, dropped by cifsFileInfo_put */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	/* pin the superblock while this handle exists */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&tcon->open_file_lock);
	/* a lease break may have updated the pending open's oplock level */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	/* set_fid may raise purge_cache; checked after the lock is dropped */
	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
344
Jeff Layton764a1b12012-07-25 14:59:54 -0400345struct cifsFileInfo *
346cifsFileInfo_get(struct cifsFileInfo *cifs_file)
347{
Steve French3afca262016-09-22 18:58:16 -0500348 spin_lock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400349 cifsFileInfo_get_locked(cifs_file);
Steve French3afca262016-09-22 18:58:16 -0500350 spin_unlock(&cifs_file->file_info_lock);
Jeff Layton764a1b12012-07-25 14:59:54 -0400351 return cifs_file;
352}
353
Steve Frenchcdff08e2010-10-21 22:46:14 +0000354/*
355 * Release a reference on the file private data. This may involve closing
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400356 * the filehandle out on the server. Must be called without holding
Steve French3afca262016-09-22 18:58:16 -0500357 * tcon->open_file_lock and cifs_file->file_info_lock.
Steve Frenchcdff08e2010-10-21 22:46:14 +0000358 */
/*
 * Drop a reference on the file private data. When the last reference
 * goes away this closes the handle on the server, tears down lock
 * records, and releases all resources taken in cifs_new_fileinfo().
 * Must be called without holding tcon->open_file_lock or
 * cifs_file->file_info_lock (both are taken here).
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	/* lock order: tcon->open_file_lock, then file_info_lock */
	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		/* not the last reference; nothing else to do */
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	/* stop any in-flight oplock break work before closing the handle */
	oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	/* a cancelled break still needs its completion acknowledged */
	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	/* release references taken in cifs_new_fileinfo() */
	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
443
/*
 * VFS ->open() for CIFS regular files.
 *
 * Tries the SMB unix-extensions POSIX open first when the server
 * advertises it, falling back to the NT-style open path on unsupported
 * or path-related errors. On success a cifsFileInfo is attached to
 * file->private_data. Returns 0 or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	/* O_DIRECT under strict cache mode switches to the direct file ops */
	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* try the POSIX open path if the server supports unix extensions */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server rejected the call outright: stop trying it */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* record the open so a concurrent lease break is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server open and the pending-open record */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
570
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400571static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
572
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700573/*
574 * Try to reacquire byte range locks that were released when session
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400575 * to server was lost.
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700576 */
/*
 * Re-acquire this file's byte-range locks on the server after a session
 * reconnect. If the client can still cache brlocks locally nothing
 * needs to be resent. Pushes POSIX locks when the server/mount allow
 * them, otherwise pushes mandatory locks via the dialect op.
 * Returns 0 on success or a negative errno from the push op.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* lock_sem held for read across the push to stabilize lock lists */
	down_read(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
602
/*
 * Reopen a file whose server handle was invalidated (e.g. by a reconnect
 * or an expired durable handle). On success the new fid is stored into
 * @cfile via ops->set_fid and, if this is a reconnect-style reopen,
 * cached byte-range locks are pushed back to the server.
 *
 * @cfile:	open file whose invalidHandle flag is set
 * @can_flush:	true when it is safe to flush dirty pages and refresh the
 *		inode from the server (i.e. we are not inside writeback)
 *
 * Returns 0 on success or a negative error code from the open attempt.
 */
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	/* fh_mutex serializes handle invalidation/reopen for this file */
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		/* someone else already reopened it - nothing to do */
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			/* mark that byte-range locks must be re-sent below */
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		/* push dirty pages so the server's view of size is current */
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
760
761int cifs_close(struct inode *inode, struct file *file)
762{
Jeff Layton77970692011-04-05 16:23:47 -0700763 if (file->private_data != NULL) {
764 cifsFileInfo_put(file->private_data);
765 file->private_data = NULL;
766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767
Steve Frenchcdff08e2010-10-21 22:46:14 +0000768 /* return code from the ->release op is always ignored */
769 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770}
771
Steve French52ace1e2016-09-22 19:23:56 -0500772void
773cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
774{
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700775 struct cifsFileInfo *open_file;
Steve French52ace1e2016-09-22 19:23:56 -0500776 struct list_head *tmp;
777 struct list_head *tmp1;
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700778 struct list_head tmp_list;
779
780 cifs_dbg(FYI, "Reopen persistent handles");
781 INIT_LIST_HEAD(&tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500782
783 /* list all files open on tree connection, reopen resilient handles */
784 spin_lock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700785 list_for_each(tmp, &tcon->openFileList) {
Steve French52ace1e2016-09-22 19:23:56 -0500786 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700787 if (!open_file->invalidHandle)
788 continue;
789 cifsFileInfo_get(open_file);
790 list_add_tail(&open_file->rlist, &tmp_list);
Steve French52ace1e2016-09-22 19:23:56 -0500791 }
792 spin_unlock(&tcon->open_file_lock);
Pavel Shilovskyf2cca6a2016-10-07 17:26:36 -0700793
794 list_for_each_safe(tmp, tmp1, &tmp_list) {
795 open_file = list_entry(tmp, struct cifsFileInfo, rlist);
796 cifs_reopen_file(open_file, false /* do not flush */);
797 list_del_init(&open_file->rlist);
798 cifsFileInfo_put(open_file);
799 }
Steve French52ace1e2016-09-22 19:23:56 -0500800}
801
/*
 * ->release for directories: close the server-side find handle if the
 * enumeration was not run to completion, free the cached search buffer,
 * drop the tlink reference and free the private data. The VFS ignores
 * the return value.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	/* mark the handle invalid under the lock, then talk to the server */
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	/* free the network buffer cached by a partially consumed search */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
852
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400853static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300854cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000855{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400856 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000857 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400858 if (!lock)
859 return lock;
860 lock->offset = offset;
861 lock->length = length;
862 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400863 lock->pid = current->tgid;
864 INIT_LIST_HEAD(&lock->blist);
865 init_waitqueue_head(&lock->block_q);
866 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400867}
868
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700869void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400870cifs_del_lock_waiters(struct cifsLockInfo *lock)
871{
872 struct cifsLockInfo *li, *tmp;
873 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
874 list_del_init(&li->blist);
875 wake_up(&li->block_q);
876 }
877}
878
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400879#define CIFS_LOCK_OP 0
880#define CIFS_READ_OP 1
881#define CIFS_WRITE_OP 2
882
/* @rw_check : CIFS_LOCK_OP - lock, CIFS_READ_OP - read, CIFS_WRITE_OP - write op */
/*
 * Check whether a requested lock of @type over [offset, offset+length)
 * by @cfile conflicts with any cached lock on the single open handle
 * described by @fdlocks. On conflict, optionally return the blocking
 * lock through @conf_lock.
 *
 * @rw_check selects the semantics: for read/write checks a lock held by
 * the same thread group through the same fid is generally not a
 * conflict, except that a shared (read) lock still blocks a write
 * through the same fid.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* non-overlapping ranges can never conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/*
		 * A shared request coexists with a lock of the same type,
		 * or with any lock held by the same owner on the same fid.
		 */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
914
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700915bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300916cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700917 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400918 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400919{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300920 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700921 struct cifs_fid_locks *cur;
David Howells2b0143b2015-03-17 22:25:59 +0000922 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300923
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700924 list_for_each_entry(cur, &cinode->llist, llist) {
925 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700926 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300927 if (rc)
928 break;
929 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300930
931 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400932}
933
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read side is enough - we only scan the cached lock lists */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's properties back to the caller */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* no cached conflict, but the server may still have one */
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
971
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400972static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300973cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400974{
David Howells2b0143b2015-03-17 22:25:59 +0000975 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700976 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700977 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700978 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000979}
980
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and caching allowed - take the lock locally */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the blocker's blist and sleep until
		 * cifs_del_lock_waiters() unlinks us (blist points back at
		 * itself) when the conflicting lock goes away, then retry
		 * the whole conflict check.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - remove ourselves from the blocker's list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
1027
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +03001028/*
1029 * Check if there is another lock that prevents us to set the lock (posix
1030 * style). If such a lock exists, update the flock structure with its
1031 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1032 * or leave it the same if we can't. Returns 0 if we don't need to request to
1033 * the server or 1 otherwise.
1034 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001035static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001036cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1037{
1038 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -05001039 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001040 unsigned char saved_type = flock->fl_type;
1041
Pavel Shilovsky50792762011-10-29 17:17:57 +04001042 if ((flock->fl_flags & FL_POSIX) == 0)
1043 return 1;
1044
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001045 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001046 posix_test_lock(file, flock);
1047
1048 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
1049 flock->fl_type = saved_type;
1050 rc = 1;
1051 }
1052
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001053 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001054 return rc;
1055}
1056
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	/* non-posix requests always go to the server */
	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * Blocked by another posix lock - wait until it is released
		 * (fl_next cleared) and retry; if interrupted by a signal,
		 * unlink ourselves from the blocked list.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}
1089
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001090int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001091cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001092{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001093 unsigned int xid;
1094 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001095 struct cifsLockInfo *li, *tmp;
1096 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001097 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001098 LOCKING_ANDX_RANGE *buf, *cur;
1099 int types[] = {LOCKING_ANDX_LARGE_FILES,
1100 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1101 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001102
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001103 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001104 tcon = tlink_tcon(cfile->tlink);
1105
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001106 /*
1107 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1108 * and check it for zero before using.
1109 */
1110 max_buf = tcon->ses->server->maxBuf;
1111 if (!max_buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001112 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001113 return -EINVAL;
1114 }
1115
1116 max_num = (max_buf - sizeof(struct smb_hdr)) /
1117 sizeof(LOCKING_ANDX_RANGE);
Fabian Frederick4b99d392014-12-10 15:41:17 -08001118 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001119 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001120 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001121 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001122 }
1123
1124 for (i = 0; i < 2; i++) {
1125 cur = buf;
1126 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001127 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001128 if (li->type != types[i])
1129 continue;
1130 cur->Pid = cpu_to_le16(li->pid);
1131 cur->LengthLow = cpu_to_le32((u32)li->length);
1132 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1133 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1134 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1135 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001136 stored_rc = cifs_lockv(xid, tcon,
1137 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001138 (__u8)li->type, 0, num,
1139 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001140 if (stored_rc)
1141 rc = stored_rc;
1142 cur = buf;
1143 num = 0;
1144 } else
1145 cur++;
1146 }
1147
1148 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001149 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001150 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001151 if (stored_rc)
1152 rc = stored_rc;
1153 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001154 }
1155
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001156 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001157 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001158 return rc;
1159}
1160
Jeff Layton3d224622016-05-24 06:27:44 -04001161static __u32
1162hash_lockowner(fl_owner_t owner)
1163{
1164 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1165}
1166
/*
 * Snapshot of a posix (VFS) lock, copied under flc_lock so it can be
 * sent to the server after the spinlock has been dropped.
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the caller's locks_to_send list */
	__u64 offset;		/* start of the byte range */
	__u64 length;		/* length of the byte range */
	__u32 pid;		/* hashed lock owner, sent as the lock pid */
	__u16 netfid;		/* SMB1 file handle the lock applies to */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1175
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001176static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001177cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001178{
David Howells2b0143b2015-03-17 22:25:59 +00001179 struct inode *inode = d_inode(cfile->dentry);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001180 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001181 struct file_lock *flock;
1182 struct file_lock_context *flctx = inode->i_flctx;
Jeff Laytone084c1b2015-02-16 14:32:03 -05001183 unsigned int count = 0, i;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001184 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001185 struct list_head locks_to_send, *el;
1186 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001187 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001188
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001189 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001190
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001191 if (!flctx)
1192 goto out;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001193
Jeff Laytone084c1b2015-02-16 14:32:03 -05001194 spin_lock(&flctx->flc_lock);
1195 list_for_each(el, &flctx->flc_posix) {
1196 count++;
1197 }
1198 spin_unlock(&flctx->flc_lock);
1199
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001200 INIT_LIST_HEAD(&locks_to_send);
1201
1202 /*
Jeff Laytone084c1b2015-02-16 14:32:03 -05001203 * Allocating count locks is enough because no FL_POSIX locks can be
1204 * added to the list while we are holding cinode->lock_sem that
Pavel Shilovskyce858522012-03-17 09:46:55 +03001205 * protects locking operations of this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001206 */
Jeff Laytone084c1b2015-02-16 14:32:03 -05001207 for (i = 0; i < count; i++) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001208 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1209 if (!lck) {
1210 rc = -ENOMEM;
1211 goto err_out;
1212 }
1213 list_add_tail(&lck->llist, &locks_to_send);
1214 }
1215
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001216 el = locks_to_send.next;
Jeff Layton6109c852015-01-16 15:05:57 -05001217 spin_lock(&flctx->flc_lock);
Jeff Laytonbd61e0a2015-01-16 15:05:55 -05001218 list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
Pavel Shilovskyce858522012-03-17 09:46:55 +03001219 if (el == &locks_to_send) {
1220 /*
1221 * The list ended. We don't have enough allocated
1222 * structures - something is really wrong.
1223 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001224 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001225 break;
1226 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001227 length = 1 + flock->fl_end - flock->fl_start;
1228 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1229 type = CIFS_RDLCK;
1230 else
1231 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001232 lck = list_entry(el, struct lock_to_push, llist);
Jeff Layton3d224622016-05-24 06:27:44 -04001233 lck->pid = hash_lockowner(flock->fl_owner);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001234 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001235 lck->length = length;
1236 lck->type = type;
1237 lck->offset = flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001238 }
Jeff Layton6109c852015-01-16 15:05:57 -05001239 spin_unlock(&flctx->flc_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001240
1241 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001242 int stored_rc;
1243
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001244 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001245 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001246 lck->type, 0);
1247 if (stored_rc)
1248 rc = stored_rc;
1249 list_del(&lck->llist);
1250 kfree(lck);
1251 }
1252
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001253out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001254 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001255 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001256err_out:
1257 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1258 list_del(&lck->llist);
1259 kfree(lck);
1260 }
1261 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001262}
1263
1264static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001265cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001266{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001267 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
David Howells2b0143b2015-03-17 22:25:59 +00001268 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001269 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001270 int rc = 0;
1271
1272 /* we are going to update can_cache_brlcks here - need a write access */
1273 down_write(&cinode->lock_sem);
1274 if (!cinode->can_cache_brlcks) {
1275 up_write(&cinode->lock_sem);
1276 return rc;
1277 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001278
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001279 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001280 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1281 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001282 rc = cifs_push_posix_locks(cfile);
1283 else
1284 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001285
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001286 cinode->can_cache_brlcks = false;
1287 up_write(&cinode->lock_sem);
1288 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001289}
1290
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001291static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001292cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001293 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001295 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001296 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001297 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001298 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001299 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001300 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001301 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001303 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001304 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001305 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001306 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001307 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001308 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1309 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001310 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001312 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001313 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001314 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001315 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001316 *lock = 1;
1317 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001318 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001319 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001320 *unlock = 1;
1321 /* Check if unlock includes more than one lock range */
1322 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001323 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001324 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001325 *lock = 1;
1326 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001327 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001328 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001329 *lock = 1;
1330 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001331 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001332 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001333 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001335 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001336}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
/*
 * Handle F_GETLK: report whether the range in @flock could be locked.
 *
 * For POSIX-capable servers the query is answered locally/via
 * CIFSSMBPosixLock.  For mandatory-lock servers there is no test-lock
 * operation, so we probe by actually taking the lock and immediately
 * releasing it, updating flock->fl_type with the result.
 * Returns 0 with the answer encoded in flock->fl_type, or a negative
 * error from the local conflict check.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* local VFS-level conflict check first */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		/* ask the server; it fills @flock with any conflicting lock */
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* check against our own cached mandatory locks */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe: try to take the exact lock requested, then undo it */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		/* even a shared lock conflicts - report an exclusive holder */
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	/* exclusive probe failed - retry as shared to distinguish R from W */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1406
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001407void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001408cifs_move_llist(struct list_head *source, struct list_head *dest)
1409{
1410 struct list_head *li, *tmp;
1411 list_for_each_safe(li, tmp, source)
1412 list_move(li, dest);
1413}
1414
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001415void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001416cifs_free_llist(struct list_head *llist)
1417{
1418 struct cifsLockInfo *li, *tmp;
1419 list_for_each_entry_safe(li, tmp, llist, llist) {
1420 cifs_del_lock_waiters(li);
1421 list_del(&li->llist);
1422 kfree(li);
1423 }
1424}
1425
/*
 * Release all cached mandatory locks fully inside the range described by
 * @flock, batching LOCKING_ANDX unlock requests up to the server's buffer
 * size.  Locks are staged on a temporary list so a failed server request
 * can restore them to the file's list.  Two passes: non-shared types
 * first, then shared (the types[] array).
 */
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	/* how many lock ranges fit in one SMB after the header */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* we modify the file's lock list below - need write access */
	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			/* skip locks not fully contained in the unlock range */
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			/* only unlock ranges owned by the calling process */
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				/* request buffer full - flush it now */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeed -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			/* flush the final, partially-filled batch */
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
1532
/*
 * Handle F_SETLK/F_SETLKW: set or clear the byte-range lock described by
 * @flock on the server (POSIX path or mandatory-lock path), then mirror
 * a successful result into the local VFS lock table for FL_POSIX locks.
 * @lock / @unlock were decoded from the request by cifs_read_flock().
 */
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		/* try to satisfy/cache the request locally first */
		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		/* rc > 0 here means the lock was handled without a server
		   round trip (cached); rc < 0 is an error */
		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapted locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		/* server accepted it - remember the lock locally */
		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	/* record a successful POSIX lock/unlock with the local VFS too */
	if (flock->fl_flags & FL_POSIX && !rc)
		rc = locks_lock_file_wait(file, flock);
	return rc;
}
1613
/*
 * VFS ->lock entry point for cifs files.  Decodes the request, decides
 * between the POSIX (Unix extensions) and mandatory locking paths, and
 * dispatches to cifs_getlk() for F_GETLK or cifs_setlk() otherwise.
 * Every return path must release the xid taken here.
 */
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	/* translate the VFS lock request into CIFS lock type + intent */
	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_FILE_SB(file);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	/* POSIX byte-range locks need Unix extensions and no-posixbrl unset */
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
1672
Jeff Layton597b0272012-03-23 14:40:56 -04001673/*
1674 * update the file size (if needed) after a write. Should be called with
1675 * the inode->i_lock held
1676 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001677void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001678cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1679 unsigned int bytes_written)
1680{
1681 loff_t end_of_write = offset + bytes_written;
1682
1683 if (end_of_write > cifsi->server_eof)
1684 cifsi->server_eof = end_of_write;
1685}
1686
/*
 * Synchronously write @write_size bytes from @write_data to the server at
 * *@offset through @open_file, retrying on -EAGAIN (reopening an
 * invalidated handle as needed) and chunking by the server's retry size.
 * Advances *@offset and updates the cached file size as data lands.
 * Returns the number of bytes written, or a negative error if nothing
 * was written.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			/* cap each request at the server's per-write size */
			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
					&io_parms, &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* report partial progress if any, else the error */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* i_lock protects both the size check and update */
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* extend the in-core inode size if the write grew the file */
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
1771
/*
 * Find an open file handle on @cifs_inode that was opened with read
 * access and is not pending reopen.  When @fsuid_only is set (only
 * honored on multiuser mounts) the handle must belong to the current
 * fsuid.  On success returns the handle with an extra reference taken
 * under the tcon's open_file_lock, so it cannot be closed under us;
 * the caller must drop that reference.  Returns NULL if none found.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001806
Jeff Layton6508d902010-09-29 19:51:11 -04001807struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1808 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001809{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001810 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001811 struct cifs_sb_info *cifs_sb;
Steve French3afca262016-09-22 18:58:16 -05001812 struct cifs_tcon *tcon;
Jeff Layton2846d382008-09-22 21:33:33 -04001813 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001814 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001815 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001816
Steve French60808232006-04-22 15:53:05 +00001817 /* Having a null inode here (because mapping->host was set to zero by
1818 the VFS or MM) should not happen but we had reports of on oops (due to
1819 it being zero) during stress testcases so we need to check for it */
1820
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001821 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001822 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001823 dump_stack();
1824 return NULL;
1825 }
1826
Jeff Laytond3892292010-11-02 16:22:50 -04001827 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
Steve French3afca262016-09-22 18:58:16 -05001828 tcon = cifs_sb_master_tcon(cifs_sb);
Jeff Laytond3892292010-11-02 16:22:50 -04001829
Jeff Layton6508d902010-09-29 19:51:11 -04001830 /* only filter by fsuid on multiuser mounts */
1831 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1832 fsuid_only = false;
1833
Steve French3afca262016-09-22 18:58:16 -05001834 spin_lock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001835refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001836 if (refind > MAX_REOPEN_ATT) {
Steve French3afca262016-09-22 18:58:16 -05001837 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001838 return NULL;
1839 }
Steve French6148a742005-10-05 12:23:19 -07001840 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001841 if (!any_available && open_file->pid != current->tgid)
1842 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001843 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001844 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001845 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001846 if (!open_file->invalidHandle) {
1847 /* found a good writable file */
Steve French3afca262016-09-22 18:58:16 -05001848 cifsFileInfo_get(open_file);
1849 spin_unlock(&tcon->open_file_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001850 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001851 } else {
1852 if (!inv_file)
1853 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001854 }
Steve French6148a742005-10-05 12:23:19 -07001855 }
1856 }
Jeff Layton2846d382008-09-22 21:33:33 -04001857 /* couldn't find useable FH with same pid, try any available */
1858 if (!any_available) {
1859 any_available = true;
1860 goto refind_writable;
1861 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001862
1863 if (inv_file) {
1864 any_available = false;
Steve French3afca262016-09-22 18:58:16 -05001865 cifsFileInfo_get(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001866 }
1867
Steve French3afca262016-09-22 18:58:16 -05001868 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001869
1870 if (inv_file) {
1871 rc = cifs_reopen_file(inv_file, false);
1872 if (!rc)
1873 return inv_file;
1874 else {
Steve French3afca262016-09-22 18:58:16 -05001875 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001876 list_move_tail(&inv_file->flist,
1877 &cifs_inode->openFileList);
Steve French3afca262016-09-22 18:58:16 -05001878 spin_unlock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001879 cifsFileInfo_put(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001880 ++refind;
David Disseldorpe1e9bda2015-03-13 14:20:29 +01001881 inv_file = NULL;
Steve French3afca262016-09-22 18:58:16 -05001882 spin_lock(&tcon->open_file_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001883 goto refind_writable;
1884 }
1885 }
1886
Steve French6148a742005-10-05 12:23:19 -07001887 return NULL;
1888}
1889
/*
 * Write the byte range [from, to) of a pagecache page back to the server
 * using any available writable handle for the inode (the caller has no
 * file handle of its own -- this is the writepage path).
 *
 * Returns 0 on success, 0 ("don't care") when the write raced with a
 * truncate that moved EOF before this page, -EIO on a bogus range, and
 * -EFAULT/-EIO or a negative write error otherwise.
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	/* absolute file offset of the first byte to send */
	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* sanity-check the requested range against the page bounds */
	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		/*
		 * NOTE(review): rc stays -EFAULT when bytes_written == 0 or
		 * the advanced offset is 0 -- presumably intentional as a
		 * "nothing happened" failure; confirm before changing.
		 */
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
1943
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001944static struct cifs_writedata *
1945wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
1946 pgoff_t end, pgoff_t *index,
1947 unsigned int *found_pages)
1948{
1949 unsigned int nr_pages;
1950 struct page **pages;
1951 struct cifs_writedata *wdata;
1952
1953 wdata = cifs_writedata_alloc((unsigned int)tofind,
1954 cifs_writev_complete);
1955 if (!wdata)
1956 return NULL;
1957
1958 /*
1959 * find_get_pages_tag seems to return a max of 256 on each
1960 * iteration, so we must call it several times in order to
1961 * fill the array or the wsize is effectively limited to
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +03001962 * 256 * PAGE_SIZE.
Pavel Shilovsky90ac1382014-06-19 16:11:00 +04001963 */
1964 *found_pages = 0;
1965 pages = wdata->pages;
1966 do {
1967 nr_pages = find_get_pages_tag(mapping, index,
1968 PAGECACHE_TAG_DIRTY, tofind,
1969 pages);
1970 *found_pages += nr_pages;
1971 tofind -= nr_pages;
1972 pages += nr_pages;
1973 } while (nr_pages && tofind && *index <= end);
1974
1975 return wdata;
1976}
1977
/*
 * From the @found_pages candidates in @wdata, lock and keep a run of
 * contiguous dirty pages suitable for a single write request. Stops at
 * the first page that fails a check (wrong mapping, beyond @end, not
 * consecutive, still under writeback, past EOF). Accepted pages are
 * left locked and tagged for writeback; every unused page reference is
 * dropped. Returns the number of pages kept.
 */
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		/* block for the first page; never block mid-run */
		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
2057
/*
 * Fill in the remaining bookkeeping for a prepared run of pages (offset,
 * per-page size, tail size capped at EOF, total byte count), attach a
 * writable file handle for the inode, and submit the asynchronous write.
 * The page locks taken by wdata_prepare_pages() are always released before
 * returning; on error the caller is responsible for ending writeback on
 * the pages (see cifs_writepages()). Returns 0 on successful submission,
 * -EBADF when no writable handle exists, or the async send error.
 */
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	/* last page may be short if EOF falls inside it */
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	/* drop any stale handle from a previous attempt before relookup */
	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	/* unlock every page in the run whether or not the send worked */
	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
2092
/*
 * ->writepages() for CIFS: repeatedly gather runs of contiguous dirty
 * pages and push them to the server as asynchronous writes, sized by
 * the credits/wsize granted by the server. Falls back to
 * generic_writepages() (one page at a time via ->writepage()) when the
 * negotiated wsize is smaller than a page.
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		/* block until the server grants credits for a write */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		/* how many pages one wsize-limited request can carry */
		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				/* -EAGAIN is retryable: keep pages dirty */
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			/* record hard errors so fsync/close can report them */
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		/* data-integrity sync must retry a transient failure */
		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	/* remember where to resume on the next cyclic pass */
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002206static int
2207cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002209 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002210 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002212 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213/* BB add check for wbc flags */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002214 get_page(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002215 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002216 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002217
2218 /*
2219 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2220 *
2221 * A writepage() implementation always needs to do either this,
2222 * or re-dirty the page with "redirty_page_for_writepage()" in
2223 * the case of a failure.
2224 *
2225 * Just unlocking the page will cause the radix tree tag-bits
2226 * to fail to update with the state of the page correctly.
2227 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002228 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002229retry_write:
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002230 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002231 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2232 goto retry_write;
2233 else if (rc == -EAGAIN)
2234 redirty_page_for_writepage(wbc, page);
2235 else if (rc != 0)
2236 SetPageError(page);
2237 else
2238 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002239 end_page_writeback(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002240 put_page(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002241 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 return rc;
2243}
2244
/*
 * ->writepage() entry point: write the locked page, then release the
 * page lock (cifs_writepage_locked() intentionally leaves it held).
 */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc;

	rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);

	return rc;
}
2251
/*
 * ->write_end() for CIFS: called after ->write_begin() with a locked page
 * into which @copied bytes at @pos have been copied. If the page is not
 * fully up to date, the new bytes are written synchronously through the
 * open handle via cifs_write(); otherwise the page is simply dirtied for
 * later writeback. Extends i_size if the write grew the file. Returns the
 * number of bytes accepted or a negative error, and drops the page lock
 * and reference taken by ->write_begin().
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* forward the opener's pid for servers that track locks by pid */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		/* page was marked by write_begin; a full copy makes it valid */
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* i_lock serializes i_size updates against other writers */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	put_page(page);

	return rc;
}
2312
/*
 * fsync for strict cache semantics: write back and wait on the dirty
 * range, invalidate the pagecache if we no longer hold read caching
 * rights on the inode, then ask the server to flush the handle unless
 * the CIFS_MOUNT_NOSSYNC mount flag suppresses the server round trip.
 * Returns 0 on success or a negative error from the page flush or the
 * server-side flush call.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	inode_lock(inode);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
		 file, datasync);

	/*
	 * Without read caching rights our cached pages may be stale;
	 * zap them so subsequent reads refetch from the server.
	 */
	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
		rc = cifs_zap_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	inode_unlock(inode);
	return rc;
}
2355
Josef Bacik02c24a82011-07-16 20:44:56 -04002356int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002357{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002358 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002359 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002360 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002361 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002362 struct cifsFileInfo *smbfile = file->private_data;
Al Viro7119e222014-10-22 00:25:12 -04002363 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
Josef Bacik02c24a82011-07-16 20:44:56 -04002364 struct inode *inode = file->f_mapping->host;
2365
2366 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2367 if (rc)
2368 return rc;
Al Viro59551022016-01-22 15:40:57 -05002369 inode_lock(inode);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002370
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002371 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002372
Al Viro35c265e2014-08-19 20:25:34 -04002373 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2374 file, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002375
2376 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002377 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2378 server = tcon->ses->server;
2379 if (server->ops->flush)
2380 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2381 else
2382 rc = -ENOSYS;
2383 }
Steve Frenchb298f222009-02-21 21:17:43 +00002384
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002385 free_xid(xid);
Al Viro59551022016-01-22 15:40:57 -05002386 inode_unlock(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 return rc;
2388}
2389
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390/*
2391 * As file closes, flush all cached write data for this inode checking
2392 * for write behind errors.
2393 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002394int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395{
Al Viro496ad9a2013-01-23 17:07:38 -05002396 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 int rc = 0;
2398
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002399 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002400 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002401
Joe Perchesf96637b2013-05-04 22:12:25 -05002402 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403
2404 return rc;
2405}
2406
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002407static int
2408cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2409{
2410 int rc = 0;
2411 unsigned long i;
2412
2413 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002414 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002415 if (!pages[i]) {
2416 /*
2417 * save number of pages we have already allocated and
2418 * return with ENOMEM error
2419 */
2420 num_pages = i;
2421 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002422 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002423 }
2424 }
2425
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002426 if (rc) {
2427 for (i = 0; i < num_pages; i++)
2428 put_page(pages[i]);
2429 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002430 return rc;
2431}
2432
2433static inline
2434size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2435{
2436 size_t num_pages;
2437 size_t clen;
2438
2439 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002440 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002441
2442 if (cur_len)
2443 *cur_len = clen;
2444
2445 return num_pages;
2446}
2447
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002448static void
Steve French4a5c80d2014-02-07 20:45:12 -06002449cifs_uncached_writedata_release(struct kref *refcount)
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002450{
2451 int i;
Steve French4a5c80d2014-02-07 20:45:12 -06002452 struct cifs_writedata *wdata = container_of(refcount,
2453 struct cifs_writedata, refcount);
2454
2455 for (i = 0; i < wdata->nr_pages; i++)
2456 put_page(wdata->pages[i]);
2457 cifs_writedata_release(refcount);
2458}
2459
/*
 * Work handler run when an uncached async write finishes: advance the
 * cached server EOF (and i_size, if the write extended the file) to
 * cover the written range, wake any waiter on wdata->done, and drop
 * this work item's reference to the writedata.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = d_inode(wdata->cfile->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	/* i_lock serializes the EOF/i_size update against other writers */
	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
2478
/*
 * Copy at most *len bytes of caller data from @from into the pages
 * already allocated in @wdata (up to *num_pages pages, PAGE_SIZE each).
 * On return *len holds the number of bytes actually copied and
 * *num_pages the count of pages that now contain data. Returns 0 on
 * success or -EFAULT if nothing at all could be copied.
 */
static int
wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
		      size_t *len, unsigned long *num_pages)
{
	size_t save_len, copied, bytes, cur_len = *len;
	unsigned long i, nr_pages = *num_pages;

	save_len = cur_len;
	for (i = 0; i < nr_pages; i++) {
		bytes = min_t(const size_t, cur_len, PAGE_SIZE);
		copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
		cur_len -= copied;
		/*
		 * If we didn't copy as much as we expected, then that
		 * may mean we trod into an unmapped area. Stop copying
		 * at that point. On the next pass through the big
		 * loop, we'll likely end up getting a zero-length
		 * write and bailing out of it.
		 */
		if (copied < bytes)
			break;
	}
	cur_len = save_len - cur_len;
	*len = cur_len;

	/*
	 * If we have no data to send, then that probably means that
	 * the copy above failed altogether. That's most likely because
	 * the address in the iovec was bogus. Return -EFAULT and let
	 * the caller free anything we allocated and bail out.
	 */
	if (!cur_len)
		return -EFAULT;

	/*
	 * i + 1 now represents the number of pages we actually used in
	 * the copy phase above.
	 */
	*num_pages = i + 1;
	return 0;
}
2520
/*
 * Split an uncached write into wsize-limited chunks, copy the caller's
 * data into freshly allocated pages and issue one async write request
 * per chunk. Every successfully issued request is appended to
 * @wdata_list for the caller to wait on; each request holds its own
 * reference and is completed by cifs_uncached_writev_complete.
 *
 * Returns 0 if all chunks were sent, otherwise the error from the first
 * chunk that could not be issued. A chunk that fails to send with
 * -EAGAIN is retried after rewinding the iterator to the failed offset.
 */
static int
cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
		     struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
{
	int rc = 0;
	size_t cur_len;
	unsigned long nr_pages, num_pages, i;
	struct cifs_writedata *wdata;
	/* snapshots used to rewind the iterator on an -EAGAIN resend */
	struct iov_iter saved_from = *from;
	loff_t saved_offset = offset;
	pid_t pid;
	struct TCP_Server_Info *server;

	/* with RWPIDFORWARD, writes carry the pid that opened the file */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	server = tlink_tcon(open_file->tlink)->ses->server;

	do {
		unsigned int wsize, credits;

		/*
		 * Reserve send credits; wsize comes back clamped to what
		 * was actually granted, and sizes this chunk.
		 */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		nr_pages = get_numpages(wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			/* return unused credits to the server pool */
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		num_pages = nr_pages;
		rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
		if (rc) {
			for (i = 0; i < nr_pages; i++)
				put_page(wdata->pages[i]);
			kfree(wdata);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		/*
		 * Bring nr_pages down to the number of pages we actually used,
		 * and free any pages that we didn't use.
		 */
		for ( ; nr_pages > num_pages; nr_pages--)
			put_page(wdata->pages[nr_pages - 1]);

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		/* last page may be partial; record its valid length */
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		wdata->credits = credits;

		/* reopen a stale handle before sending, if needed */
		if (!wdata->cfile->invalidHandle ||
		    !cifs_reopen_file(wdata->cfile, false))
			rc = server->ops->async_writev(wdata,
					cifs_uncached_writedata_release);
		if (rc) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			kref_put(&wdata->refcount,
				 cifs_uncached_writedata_release);
			if (rc == -EAGAIN) {
				/*
				 * Rewind the iterator to this chunk's start
				 * and retry the same range.
				 */
				*from = saved_from;
				iov_iter_advance(from, offset - saved_offset);
				continue;
			}
			break;
		}

		/* request is in flight; caller collects it from wdata_list */
		list_add_tail(&wdata->list, wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
2616
/*
 * Uncached write entry point: fan the iovec out into async write
 * requests via cifs_write_from_iter(), then wait for the replies in
 * order of increasing offset. Requests that complete with -EAGAIN are
 * resent from a saved copy of the iterator. Returns the number of
 * bytes written on any (even partial) success, otherwise an error;
 * on success the cached pages for the file are marked invalid since
 * the data bypassed the page cache.
 */
ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t total_written = 0;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	/* kept pristine so resends can recompute their position from it */
	struct iov_iter saved_from = *from;
	int rc;

	/*
	 * BB - optimize the way when signing is disabled. We can drop this
	 * extra memory-to-memory copying and use iovec buffers for constructing
	 * write request.
	 */

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
				  open_file, cifs_sb, &wdata_list);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				struct list_head tmp_list;
				struct iov_iter tmp_from = saved_from;

				INIT_LIST_HEAD(&tmp_list);
				list_del_init(&wdata->list);

				/* reposition the copy at the failed chunk */
				iov_iter_advance(&tmp_from,
						 wdata->offset - iocb->ki_pos);

				rc = cifs_write_from_iter(wdata->offset,
						wdata->bytes, &tmp_from,
						open_file, cifs_sb, &tmp_list);

				/* new requests join the list being walked */
				list_splice(&tmp_list, &wdata_list);

				kref_put(&wdata->refcount,
					 cifs_uncached_writedata_release);
				/* list changed underneath us; start over */
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
	}

	if (unlikely(!total_written))
		return rc;

	iocb->ki_pos += total_written;
	/* data bypassed the page cache; force re-read of cached pages */
	set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
	cifs_stats_bytes_written(tcon, total_written);
	return total_written;
}
2710
/*
 * Cached (page-cache) write used when the client holds a write oplock
 * but POSIX byte-range lock semantics are not available: refuse the
 * write with -EACCES if it overlaps a mandatory brlock held by another
 * handle, otherwise go through the generic buffered write path.
 *
 * Lock ordering here is lock_sem (read) before the inode lock, and
 * lock_sem stays held across generic_write_sync(). NOTE(review):
 * verify this ordering against the other lock_sem users before
 * rearranging anything in this function.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc;

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	inode_lock(inode);

	rc = generic_write_checks(iocb, from);
	if (rc <= 0)
		goto out;

	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP))
		rc = __generic_file_write_iter(iocb, from);
	else
		rc = -EACCES;
out:
	inode_unlock(inode);

	/* O_SYNC/O_DSYNC flush happens outside the inode lock */
	if (rc > 0)
		rc = generic_write_sync(iocb, rc);
	up_read(&cinode->lock_sem);
	return rc;
}
2746
/*
 * Strict-cache-mode write dispatcher. Picks the write path based on the
 * oplock state: with a write oplock, use the page cache (the generic
 * path when POSIX brlocks apply, cifs_writev() otherwise); without one,
 * write uncached straight to the server. Serializes against oplock
 * break handling via cifs_get_writer()/cifs_put_writer().
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	/* returns non-zero if an oplock break is in progress */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	if (CIFS_CACHE_WRITE(cinode)) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
			written = generic_file_write_iter(iocb, from);
			goto out;
		}
		written = cifs_writev(iocb, from);
		goto out;
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from the pos to pos+len-1 rather than
	 * flush all affected pages because it may cause an error with
	 * mandatory locks on these pages but not on the region from pos to
	 * pos+len-1.
	 */
	written = cifs_user_writev(iocb, from);
	if (written > 0 && CIFS_CACHE_READ(cinode)) {
		/*
		 * Windows 7 server can delay breaking level2 oplock if a write
		 * request comes - break it on the client to prevent reading
		 * an old data.
		 */
		cifs_zap_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->oplock = 0;
	}
out:
	cifs_put_writer(cinode);
	return written;
}
2794
Jeff Layton0471ca32012-05-16 07:13:16 -04002795static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002796cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002797{
2798 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002799
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002800 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2801 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002802 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002803 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002804 INIT_LIST_HEAD(&rdata->list);
2805 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002806 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002807 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002808
Jeff Layton0471ca32012-05-16 07:13:16 -04002809 return rdata;
2810}
2811
Jeff Layton6993f742012-05-16 07:13:17 -04002812void
2813cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002814{
Jeff Layton6993f742012-05-16 07:13:17 -04002815 struct cifs_readdata *rdata = container_of(refcount,
2816 struct cifs_readdata, refcount);
2817
2818 if (rdata->cfile)
2819 cifsFileInfo_put(rdata->cfile);
2820
Jeff Layton0471ca32012-05-16 07:13:16 -04002821 kfree(rdata);
2822}
2823
Jeff Layton2a1bb132012-05-16 07:13:17 -04002824static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002825cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002826{
2827 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002828 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002829 unsigned int i;
2830
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002831 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002832 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2833 if (!page) {
2834 rc = -ENOMEM;
2835 break;
2836 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002837 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002838 }
2839
2840 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002841 for (i = 0; i < nr_pages; i++) {
2842 put_page(rdata->pages[i]);
2843 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002844 }
2845 }
2846 return rc;
2847}
2848
2849static void
2850cifs_uncached_readdata_release(struct kref *refcount)
2851{
Jeff Layton1c892542012-05-16 07:13:17 -04002852 struct cifs_readdata *rdata = container_of(refcount,
2853 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002854 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002855
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002856 for (i = 0; i < rdata->nr_pages; i++) {
2857 put_page(rdata->pages[i]);
2858 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002859 }
2860 cifs_readdata_release(refcount);
2861}
2862
Jeff Layton1c892542012-05-16 07:13:17 -04002863/**
2864 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2865 * @rdata: the readdata response with list of pages holding data
Al Viro7f25bba2014-02-04 14:07:43 -05002866 * @iter: destination for our data
Jeff Layton1c892542012-05-16 07:13:17 -04002867 *
2868 * This function copies data from a list of pages in a readdata response into
2869 * an array of iovecs. It will first calculate where the data should go
2870 * based on the info in the readdata and then copy the data into that spot.
2871 */
Al Viro7f25bba2014-02-04 14:07:43 -05002872static int
2873cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
Jeff Layton1c892542012-05-16 07:13:17 -04002874{
Pavel Shilovsky34a54d62014-07-10 10:03:29 +04002875 size_t remaining = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002876 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002877
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002878 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002879 struct page *page = rdata->pages[i];
Geert Uytterhoevene686bd82014-04-13 20:46:21 +02002880 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
Al Viro7f25bba2014-02-04 14:07:43 -05002881 size_t written = copy_page_to_iter(page, 0, copy, iter);
2882 remaining -= written;
2883 if (written < copy && iov_iter_count(iter) > 0)
2884 break;
Jeff Layton1c892542012-05-16 07:13:17 -04002885 }
Al Viro7f25bba2014-02-04 14:07:43 -05002886 return remaining ? -EFAULT : 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002887}
2888
2889static void
2890cifs_uncached_readv_complete(struct work_struct *work)
2891{
2892 struct cifs_readdata *rdata = container_of(work,
2893 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002894
2895 complete(&rdata->done);
2896 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2897}
2898
/*
 * Receive @len bytes of read response data from the server socket into
 * rdata's pages. Pages past the received length are released from the
 * array; a final partial page is zero-filled beyond @len and its valid
 * length recorded in ->tailsz.
 *
 * Returns the number of bytes received so far when any data arrived
 * and the connection was not aborted, otherwise the socket error.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n;

		/* len is unsigned, so this only triggers at exactly 0 */
		if (len <= 0) {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}
		n = len;
		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			n = PAGE_SIZE;
			len -= n;
		} else {
			/* partial tail page: zero the rest, note its size */
			zero_user(page, len, PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		}
		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* partial data survives anything except a dead connection */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
2940
/*
 * Split an uncached read of @len bytes at @offset into rsize-limited
 * chunks and issue one async read request per chunk. Each successfully
 * issued request is appended to @rdata_list for the caller to wait on;
 * requests hold their own references and complete through
 * cifs_uncached_readv_complete.
 *
 * Returns 0 if every chunk was sent, otherwise the error from the first
 * chunk that could not be issued. A send that fails with -EAGAIN is
 * retried for the same range.
 */
static int
cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
		     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
{
	struct cifs_readdata *rdata;
	unsigned int npages, rsize, credits;
	size_t cur_len;
	int rc;
	pid_t pid;
	struct TCP_Server_Info *server;

	server = tlink_tcon(open_file->tlink)->ses->server;

	/* with RWPIDFORWARD, reads carry the pid that opened the file */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	do {
		/*
		 * Reserve receive credits; rsize comes back clamped to
		 * what was granted and sizes this chunk.
		 */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		cur_len = min_t(const size_t, len, rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			/* return unused credits to the server pool */
			add_credits_and_wake_if(server, credits, 0);
			rc = -ENOMEM;
			break;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;
		rdata->credits = credits;

		/* reopen a stale handle before sending, if needed */
		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
error:
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			if (rc == -EAGAIN)
				/* retry the same range */
				continue;
			break;
		}

		/* request is in flight; caller collects it from rdata_list */
		list_add_tail(&rdata->list, rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	return rc;
}
3010
/*
 * Uncached read entry point: fan the request out into async reads via
 * cifs_send_async_read(), then wait for replies in order of increasing
 * offset and copy each one out to the caller's iterator. A reply that
 * comes back -EAGAIN (reconnect) delivers whatever partial data it got
 * and is resent for the remainder. Returns the number of bytes read on
 * any (even partial) success, otherwise an error; -ENODATA (short read)
 * is masked to 0.
 */
ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	ssize_t rc;
	size_t len;
	ssize_t total_read = 0;
	loff_t offset = iocb->ki_pos;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;

	len = iov_iter_count(to);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_FILE_SB(file);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);

	/* if at least one read request send succeeded, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	len = iov_iter_count(to);
	/* the loop below should proceed in the order of increasing offsets */
again:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result == -EAGAIN) {
				/* resend call if it's a retryable error */
				struct list_head tmp_list;
				unsigned int got_bytes = rdata->got_bytes;

				list_del_init(&rdata->list);
				INIT_LIST_HEAD(&tmp_list);

				/*
				 * Got a part of data and then reconnect has
				 * happened -- fill the buffer and continue
				 * reading.
				 */
				if (got_bytes && got_bytes < rdata->bytes) {
					rc = cifs_readdata_to_iov(rdata, to);
					if (rc) {
						kref_put(&rdata->refcount,
						cifs_uncached_readdata_release);
						continue;
					}
				}

				/* resend only the part we have not read */
				rc = cifs_send_async_read(
						rdata->offset + got_bytes,
						rdata->bytes - got_bytes,
						rdata->cfile, cifs_sb,
						&tmp_list);

				/* new requests join the list being walked */
				list_splice(&tmp_list, &rdata_list);

				kref_put(&rdata->refcount,
					 cifs_uncached_readdata_release);
				/* list changed underneath us; start over */
				goto again;
			} else if (rdata->result)
				rc = rdata->result;
			else
				rc = cifs_readdata_to_iov(rdata, to);

			/* if there was a short read -- discard anything left */
			if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
				rc = -ENODATA;
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	/* bytes delivered = what the iterator consumed */
	total_read = len - iov_iter_count(to);

	cifs_stats_bytes_read(tcon, total_read);

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	if (total_read) {
		iocb->ki_pos += total_read;
		return total_read;
	}
	return rc;
}
3114
/*
 * Strict-cache-mode read dispatcher. Without a read oplock, always read
 * uncached from the server; with one, use the generic cached path --
 * directly when POSIX brlock semantics apply, otherwise only after
 * verifying no mandatory brlock conflicts with the requested range.
 *
 * NOTE(review): rc is an int while generic_file_read_iter() returns
 * ssize_t -- confirm read sizes here cannot exceed INT_MAX before
 * relying on the return value for very large reads.
 */
ssize_t
cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
						iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have level II oplock because the server can delay mtime
	 * change - so we can't make a decision about inode invalidating.
	 * And we can also fail with pagereading if there are mandatory locks
	 * on pages affected by this read but not on the region from pos to
	 * pos+len-1.
	 */
	if (!CIFS_CACHE_READ(cinode))
		return cifs_user_readv(iocb, to);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_read_iter(iocb, to);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_read_iter(iocb, to);
	up_read(&cinode->lock_sem);
	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003155static ssize_t
3156cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157{
3158 int rc = -EACCES;
3159 unsigned int bytes_read = 0;
3160 unsigned int total_read;
3161 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003162 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003164 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003165 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003166 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003167 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003169 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08003170 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003171 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003173 xid = get_xid();
Al Viro7119e222014-10-22 00:25:12 -04003174 cifs_sb = CIFS_FILE_SB(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003176 /* FIXME: set up handlers for larger reads and/or convert to async */
3177 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3178
Linus Torvalds1da177e2005-04-16 15:20:36 -07003179 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303180 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003181 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303182 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003184 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003185 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003186 server = tcon->ses->server;
3187
3188 if (!server->ops->sync_read) {
3189 free_xid(xid);
3190 return -ENOSYS;
3191 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003193 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3194 pid = open_file->pid;
3195 else
3196 pid = current->tgid;
3197
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003199 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003201 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3202 total_read += bytes_read, cur_offset += bytes_read) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003203 do {
3204 current_read_size = min_t(uint, read_size - total_read,
3205 rsize);
3206 /*
3207 * For windows me and 9x we do not want to request more
3208 * than it negotiated since it will refuse the read
3209 * then.
3210 */
3211 if ((tcon->ses) && !(tcon->ses->capabilities &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003212 tcon->ses->server->vals->cap_large_files)) {
Pavel Shilovskye374d902014-06-25 16:19:02 +04003213 current_read_size = min_t(uint,
3214 current_read_size, CIFSMaxBufSize);
3215 }
Steve Frenchcdff08e2010-10-21 22:46:14 +00003216 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003217 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218 if (rc != 0)
3219 break;
3220 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003221 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003222 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003223 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003224 io_parms.length = current_read_size;
Steve Frenchdb8b6312014-09-22 05:13:55 -05003225 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003226 &bytes_read, &cur_offset,
3227 &buf_type);
Pavel Shilovskye374d902014-06-25 16:19:02 +04003228 } while (rc == -EAGAIN);
3229
Linus Torvalds1da177e2005-04-16 15:20:36 -07003230 if (rc || (bytes_read == 0)) {
3231 if (total_read) {
3232 break;
3233 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003234 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235 return rc;
3236 }
3237 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003238 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003239 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240 }
3241 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003242 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003243 return total_read;
3244}
3245
Jeff Laytonca83ce32011-04-12 09:13:44 -04003246/*
3247 * If the page is mmap'ed into a process' page tables, then we need to make
3248 * sure that it doesn't change while being written back.
3249 */
3250static int
3251cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3252{
3253 struct page *page = vmf->page;
3254
3255 lock_page(page);
3256 return VM_FAULT_LOCKED;
3257}
3258
/*
 * vm operations installed on CIFS mmaps: generic fault paths from the
 * page cache, plus our own page_mkwrite to lock pages against change
 * during writeback.
 */
static const struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = cifs_page_mkwrite,
};
3264
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003265int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3266{
3267 int rc, xid;
Al Viro496ad9a2013-01-23 17:07:38 -05003268 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003269
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003270 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003271
Pavel Shilovsky18cceb62013-09-05 13:01:06 +04003272 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
Jeff Layton4f73c7d2014-04-30 09:31:47 -04003273 rc = cifs_zap_mapping(inode);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003274 if (rc)
3275 return rc;
3276 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003277
3278 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003279 if (rc == 0)
3280 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003281 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003282 return rc;
3283}
3284
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3286{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287 int rc, xid;
3288
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003289 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003290 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003292 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3293 rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003294 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003295 return rc;
3296 }
3297 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003298 if (rc == 0)
3299 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003300 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301 return rc;
3302}
3303
Jeff Layton0471ca32012-05-16 07:13:16 -04003304static void
3305cifs_readv_complete(struct work_struct *work)
3306{
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003307 unsigned int i, got_bytes;
Jeff Layton0471ca32012-05-16 07:13:16 -04003308 struct cifs_readdata *rdata = container_of(work,
3309 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003310
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003311 got_bytes = rdata->got_bytes;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003312 for (i = 0; i < rdata->nr_pages; i++) {
3313 struct page *page = rdata->pages[i];
3314
Jeff Layton0471ca32012-05-16 07:13:16 -04003315 lru_cache_add_file(page);
3316
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003317 if (rdata->result == 0 ||
3318 (rdata->result == -EAGAIN && got_bytes)) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003319 flush_dcache_page(page);
3320 SetPageUptodate(page);
3321 }
3322
3323 unlock_page(page);
3324
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003325 if (rdata->result == 0 ||
3326 (rdata->result == -EAGAIN && got_bytes))
Jeff Layton0471ca32012-05-16 07:13:16 -04003327 cifs_readpage_to_fscache(rdata->mapping->host, page);
3328
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003329 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
Pavel Shilovskyb770ddf2014-07-10 11:31:53 +04003330
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003331 put_page(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003332 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003333 }
Jeff Layton6993f742012-05-16 07:13:17 -04003334 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003335}
3336
/*
 * Pull up to @len bytes off the socket into the pages of @rdata.
 *
 * For each page: a full page's worth of remaining data is read as-is; a
 * final partial page is read and the rest zeroed (tailsz records the
 * short length); pages past the server's likely EOF are zero-filled and
 * marked uptodate; any other surplus page is simply released.  Released
 * pages are removed from rdata->pages and nr_pages is trimmed.
 *
 * Returns the number of bytes received when any data arrived and the
 * connection wasn't aborted, otherwise the (negative) socket result.
 */
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			       struct cifs_readdata *rdata, unsigned int len)
{
	int result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->got_bytes = 0;
	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];
		size_t n = PAGE_SIZE;	/* bytes to read into this page */

		if (len >= PAGE_SIZE) {
			/* full page of data still pending on the wire */
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			zero_user(page, len, PAGE_SIZE - len);
			n = rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			put_page(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_read_page_from_socket(server, page, n);
		if (result < 0)
			break;

		rdata->got_bytes += result;
	}

	/* partial success still counts, unless the connection was aborted */
	return rdata->got_bytes > 0 && result != -ECONNABORTED ?
						rdata->got_bytes : result;
}
3403
/*
 * Peel a run of contiguous pages off the tail of @page_list into
 * @tmplist, locking each and inserting it into the page cache.
 *
 * Starts from the last list entry (page_list is in declining index
 * order, so this is the lowest index) and keeps taking pages while the
 * indexes stay consecutive and the accumulated byte count fits in
 * @rsize.  On return *offset/*bytes/*nr_pages describe the resulting
 * contiguous read request.  Returns non-zero only if even the first
 * page could not be added to the page cache.
 */
static int
readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
		    unsigned int rsize, struct list_head *tmplist,
		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
{
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = readahead_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

	page = list_entry(page_list->prev, struct page, lru);

	/*
	 * Lock the page and put it in the cache. Since no one else
	 * should have access to this page, we're safe to simply set
	 * PG_locked without checking it first.
	 */
	__SetPageLocked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
		__ClearPageLocked(page);
		return rc;
	}

	/* move first page to the tmplist */
	*offset = (loff_t)page->index << PAGE_SHIFT;
	*bytes = PAGE_SIZE;
	*nr_pages = 1;
	list_move_tail(&page->lru, tmplist);

	/* now try and add more pages onto the request */
	expected_index = page->index + 1;
	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
		/* discontinuity ? */
		if (page->index != expected_index)
			break;

		/* would this page push the read over the rsize? */
		if (*bytes + PAGE_SIZE > rsize)
			break;

		__SetPageLocked(page);
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__ClearPageLocked(page);
			break;
		}
		list_move_tail(&page->lru, tmplist);
		(*bytes) += PAGE_SIZE;
		expected_index++;
		(*nr_pages)++;
	}
	return rc;
}
3462
/*
 * ->readpages: satisfy what we can from fscache, then repeatedly carve
 * contiguous runs of pages off page_list (bounded by the server's
 * negotiated rsize / MTU credits) and issue one async read per run.
 * Page unlocking/release for issued reads happens in
 * cifs_readv_complete() when the response arrives.
 */
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct TCP_Server_Info *server;
	pid_t pid;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 *
	 * After this point, every page in the list might have PG_fscache set,
	 * so we will need to clean that up off of every page we don't use.
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	/* forward the originating pid to the server if mounted with rwpidforward */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	server = tlink_tcon(open_file->tlink)->ses->server;

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i, nr_pages, bytes, rsize;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;
		unsigned credits;

		/* blocks until the server grants credits for an rsize-byte read */
		rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
						   &rsize, &credits);
		if (rc)
			break;

		/*
		 * Give up immediately if rsize is too small to read an entire
		 * page. The VFS will fall back to readpage. We should never
		 * reach this point however since we set ra_pages to 0 when the
		 * rsize is smaller than a cache page.
		 */
		if (unlikely(rsize < PAGE_SIZE)) {
			add_credits_and_wake_if(server, credits, 0);
			return 0;
		}

		rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
					 &nr_pages, &offset, &bytes);
		if (rc) {
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;
		rdata->credits = credits;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		/*
		 * NOTE(review): if the handle is invalid AND the reopen fails,
		 * rc stays 0 here and the pages in rdata are never read or
		 * unlocked before the kref_put below — looks like a latent
		 * error-path gap; confirm against later upstream fixes.
		 */
		if (!rdata->cfile->invalidHandle ||
		    !cifs_reopen_file(rdata->cfile, true))
			rc = server->ops->async_readv(rdata);
		if (rc) {
			add_credits_and_wake_if(server, rdata->credits, 0);
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				put_page(page);
			}
			/* Fallback to the readpage in error/reconnect cases */
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		/* drop our ref; the response handler holds its own */
		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	/* Any pages that have been shown to fscache but didn't get added to
	 * the pagecache must be uncached before they get returned to the
	 * allocator.
	 */
	cifs_fscache_readpages_cancel(mapping->host, page_list);
	return rc;
}
3591
/*
 * cifs_readpage_worker must be called with the page pinned
 *
 * Fill one locked page: try fscache first; on a miss do a synchronous
 * cifs_read() into the kmap'ed page, zero any tail, and mark the page
 * uptodate and push it to fscache.
 *
 * Unlock contract is asymmetric: on the read path (hit or error) the
 * page is unlocked here before returning; on an fscache hit (rc == 0
 * from cifs_readpage_from_fscache) we return with the page still
 * locked — presumably fscache completion unlocks it; callers rely on
 * this split.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	/* a successful read counts as an access for atime purposes */
	file_inode(file)->i_atime =
		current_time(file_inode(file));

	/* zero the remainder of a short read so stale data never leaks */
	if (PAGE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	unlock_page(page);

read_complete:
	return rc;
}
3637
3638static int cifs_readpage(struct file *file, struct page *page)
3639{
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003640 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003641 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003642 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003644 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645
3646 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303647 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003648 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303649 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003650 }
3651
Joe Perchesf96637b2013-05-04 22:12:25 -05003652 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003653 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654
3655 rc = cifs_readpage_worker(file, page, &offset);
3656
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003657 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003658 return rc;
3659}
3660
Steve Frencha403a0a2007-07-26 15:54:16 +00003661static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3662{
3663 struct cifsFileInfo *open_file;
Steve French3afca262016-09-22 18:58:16 -05003664 struct cifs_tcon *tcon =
3665 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
Steve Frencha403a0a2007-07-26 15:54:16 +00003666
Steve French3afca262016-09-22 18:58:16 -05003667 spin_lock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003668 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003669 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French3afca262016-09-22 18:58:16 -05003670 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003671 return 1;
3672 }
3673 }
Steve French3afca262016-09-22 18:58:16 -05003674 spin_unlock(&tcon->open_file_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003675 return 0;
3676}
3677
Linus Torvalds1da177e2005-04-16 15:20:36 -07003678/* We do not want to update the file size from server for inodes
3679 open for write - to avoid races with writepage extending
3680 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003681 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682 but this is tricky to do without racing with writebehind
3683 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003684bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003685{
Steve Frencha403a0a2007-07-26 15:54:16 +00003686 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003687 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003688
Steve Frencha403a0a2007-07-26 15:54:16 +00003689 if (is_inode_writable(cifsInode)) {
3690 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003691 struct cifs_sb_info *cifs_sb;
3692
Steve Frenchc32a0b62006-01-12 14:41:28 -08003693 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003694 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003695 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003696 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003697 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003698 }
3699
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003700 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003701 return true;
Steve French7ba52632007-02-08 18:14:13 +00003702
Steve French4b18f2a2008-04-29 00:06:05 +00003703 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003704 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003705 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706}
3707
/*
 * ->write_begin: grab (or create) the locked page cache page that the
 * caller will copy @len bytes into at @pos.
 *
 * Avoids reading the page from the server when possible: the page is
 * already uptodate, the write covers the full page, or (with a read
 * oplock) the page lies beyond EOF / the write covers all existing
 * data — in which case the untouched parts are zeroed and PageChecked
 * is set.  Otherwise, for readable opens, the page is read in once via
 * cifs_readpage_worker() and the grab is retried (the "oncethru" flag
 * prevents looping if that read fails).
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int oncethru = 0;	/* set after the one allowed read-in attempt */
	pgoff_t index = pos >> PAGE_SHIFT;
	loff_t offset = pos & (PAGE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
		put_page(page);
		oncethru = 1;
		goto start;
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3784
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303785static int cifs_release_page(struct page *page, gfp_t gfp)
3786{
3787 if (PagePrivate(page))
3788 return 0;
3789
3790 return cifs_fscache_release_page(page, gfp);
3791}
3792
Lukas Czernerd47992f2013-05-21 23:17:23 -04003793static void cifs_invalidate_page(struct page *page, unsigned int offset,
3794 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303795{
3796 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3797
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003798 if (offset == 0 && length == PAGE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303799 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3800}
3801
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003802static int cifs_launder_page(struct page *page)
3803{
3804 int rc = 0;
3805 loff_t range_start = page_offset(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03003806 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003807 struct writeback_control wbc = {
3808 .sync_mode = WB_SYNC_ALL,
3809 .nr_to_write = 0,
3810 .range_start = range_start,
3811 .range_end = range_end,
3812 };
3813
Joe Perchesf96637b2013-05-04 22:12:25 -05003814 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003815
3816 if (clear_page_dirty_for_io(page))
3817 rc = cifs_writepage_locked(page, &wbc);
3818
3819 cifs_fscache_invalidate_page(page, page->mapping->host);
3820 return rc;
3821}
3822
/*
 * Work handler run when the server revokes (breaks) an oplock/lease on
 * an open file.  Downgrades the local cache state, flushes or zaps any
 * now-unprotected cached data, re-pushes byte-range locks, and finally
 * acknowledges the break to the server.  Step order matters throughout.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = d_inode(cfile->dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;

	/* Let in-flight writers that still rely on the old oplock drain
	 * before the cache level is changed underneath them. */
	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
			TASK_UNINTERRUPTIBLE);

	/* Protocol-specific downgrade; the flag selects break-to-level-II
	 * (read caching retained) versus break-to-none. */
	server->ops->downgrade_oplock(server, cinode,
		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));

	/* Mandatory byte-range locks cannot be honoured from pagecache
	 * reads alone, so drop caching entirely in that case. */
	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->oplock = 0;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* Propagate the break to any local leases held via the VFS. */
		if (CIFS_CACHE_READ(cinode))
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!CIFS_CACHE_READ(cinode)) {
			/* Read caching lost: wait for writeback and discard
			 * the pagecache so future reads go to the server. */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_zap_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	/* Re-send cached byte-range locks now that caching was downgraded. */
	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
	/* Clear the pending-break state and wake any waiters. */
	cifs_done_oplock_break(cinode);
}
3877
/*
 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() O_DIRECT flags which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
 * so this method should never be called.
 *
 * Direct IO is not yet supported in the cached mode.
 */
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
        /*
         * FIXME
         * Eventually need to support direct IO for non forcedirectio mounts
         */
        return -EINVAL;
}
3896
3897
/*
 * Address space operations for regular (cached) mounts.  direct_IO is a
 * stub whose presence merely allows open(O_DIRECT) to succeed; see the
 * comment above cifs_direct_io().
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.direct_IO = cifs_direct_io,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003911
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
/* Identical to cifs_addr_ops except that .readpages (and .direct_IO)
 * are omitted, per the constraint described above. */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};