/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /*
                 * GENERIC_ALL is too much permission to request; it can
                 * cause unnecessary access-denied errors on create.
                 */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
                FILE_READ_DATA);
}

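/*
 * Illustrative note (example values, not code used by the driver): with the
 * mapping above, an open(2) with O_RDWR requests GENERIC_READ |
 * GENERIC_WRITE from the server, while O_RDONLY requests only GENERIC_READ.
 */
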
static u32 cifs_posix_convert_flags(unsigned int flags)
{
        u32 posix_flags = 0;

        if ((flags & O_ACCMODE) == O_RDONLY)
                posix_flags = SMB_O_RDONLY;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                posix_flags = SMB_O_WRONLY;
        else if ((flags & O_ACCMODE) == O_RDWR)
                posix_flags = SMB_O_RDWR;

        if (flags & O_CREAT) {
                posix_flags |= SMB_O_CREAT;
                if (flags & O_EXCL)
                        posix_flags |= SMB_O_EXCL;
        } else if (flags & O_EXCL)
                cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
                         current->comm, current->tgid);

        if (flags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
        /* be safe and imply O_SYNC for O_DSYNC */
        if (flags & O_DSYNC)
                posix_flags |= SMB_O_SYNC;
        if (flags & O_DIRECTORY)
                posix_flags |= SMB_O_DIRECTORY;
        if (flags & O_NOFOLLOW)
                posix_flags |= SMB_O_NOFOLLOW;
        if (flags & O_DIRECT)
                posix_flags |= SMB_O_DIRECT;

        return posix_flags;
}

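/*
 * Illustrative note: an exclusive create such as
 * open(path, O_WRONLY | O_CREAT | O_EXCL, mode) maps to
 * SMB_O_WRONLY | SMB_O_CREAT | SMB_O_EXCL here; O_DSYNC is deliberately
 * widened to SMB_O_SYNC, as the comment above notes.
 */
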
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

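/*
 * Illustrative note: open(path, O_RDWR | O_CREAT | O_TRUNC, 0644) yields
 * disposition FILE_OVERWRITE_IF (truncate if the file exists, create it
 * otherwise), while a plain O_RDWR open yields FILE_OPEN.
 */
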
int cifs_posix_open(char *full_path, struct inode **pinode,
                    struct super_block *sb, int mode, unsigned int f_flags,
                    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
        int rc;
        FILE_UNIX_BASIC_INFO *presp_data;
        __u32 posix_flags = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
        struct cifs_tcon *tcon;

        cifs_dbg(FYI, "posix open %s\n", full_path);

        presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
        if (presp_data == NULL)
                return -ENOMEM;

        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                rc = PTR_ERR(tlink);
                goto posix_open_ret;
        }

        tcon = tlink_tcon(tlink);
        mode &= ~current_umask();

        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
                             cifs_sb->mnt_cifs_flags &
                             CIFS_MOUNT_MAP_SPECIAL_CHR);
        cifs_put_tlink(tlink);

        if (rc)
                goto posix_open_ret;

        if (presp_data->Type == cpu_to_le32(-1))
                goto posix_open_ret; /* open ok, caller does qpathinfo */

        if (!pinode)
                goto posix_open_ret; /* caller does not need info */

        cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

        /* get new inode and set it up */
        if (*pinode == NULL) {
                cifs_fill_uniqueid(sb, &fattr);
                *pinode = cifs_iget(sb, &fattr);
                if (!*pinode) {
                        rc = -ENOMEM;
                        goto posix_open_ret;
                }
        } else {
                cifs_fattr_to_inode(*pinode, &fattr);
        }

posix_open_ret:
        kfree(presp_data);
        return rc;
}

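/*
 * Illustrative call sketch (mirrors the use in cifs_open() below; the
 * variable names are hypothetical):
 *
 *      __u32 oplock = 0;
 *      __u16 netfid;
 *      rc = cifs_posix_open(full_path, &inode, inode->i_sb,
 *                           cifs_sb->mnt_file_mode, file->f_flags,
 *                           &oplock, &netfid, xid);
 *
 * On success the caller either receives a filled-in inode via *pinode or,
 * if the reply carried a Type of -1, must query the attributes itself.
 */
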
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
             struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
             struct cifs_fid *fid, unsigned int xid)
{
        int rc;
        int desired_access;
        int disposition;
        int create_options = CREATE_NOT_DIR;
        FILE_ALL_INFO *buf;
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifs_open_parms oparms;

        if (!server->ops->open)
                return -ENOSYS;

        desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag              CIFS Disposition
 *      ----------              ----------------
 *      O_CREAT                 FILE_OPEN_IF
 *      O_CREAT | O_EXCL        FILE_CREATE
 *      O_CREAT | O_TRUNC       FILE_OVERWRITE_IF
 *      O_TRUNC                 FILE_OVERWRITE
 *      none of the above       FILE_OPEN
 *
 *      Note that there is no direct match for the disposition
 *      FILE_SUPERSEDE (i.e. create whether or not the file exists).
 *      O_CREAT | O_TRUNC is similar, but it truncates an existing
 *      file rather than recreating it as FILE_SUPERSEDE does (which
 *      uses the attributes / metadata passed in on the open call).
 *
 *      O_SYNC is a reasonable match to the CIFS writethrough flag,
 *      and the read/write flags match reasonably. O_LARGEFILE is
 *      irrelevant because largefile support is always used by this
 *      client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *      O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

        disposition = cifs_get_disposition(f_flags);

        /* BB pass O_SYNC flag through on file attributes .. BB */

        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = fid;
        oparms.reconnect = false;

        rc = server->ops->open(xid, &oparms, oplock, buf);

        if (rc)
                goto out;

        if (tcon->unix_ext)
                rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
                                              xid);
        else
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, &fid->netfid);

out:
        kfree(buf);
        return rc;
}

static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
        struct cifs_fid_locks *cur;
        bool has_locks = false;

        down_read(&cinode->lock_sem);
        list_for_each_entry(cur, &cinode->llist, llist) {
                if (!list_empty(&cur->locks)) {
                        has_locks = true;
                        break;
                }
        }
        up_read(&cinode->lock_sem);
        return has_locks;
}

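/*
 * Rationale (our summary, not taken from a protocol document): while a
 * mandatory byte-range lock is outstanding, reads must be enforced by the
 * server rather than served from the client cache, which is why
 * cifs_new_fileinfo() below downgrades a read oplock to None whenever
 * cifs_has_mand_locks() returns true.
 */
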
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = dentry->d_inode;
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct cifsFileInfo *cfile;
        struct cifs_fid_locks *fdlocks;
        struct cifs_tcon *tcon = tlink_tcon(tlink);
        struct TCP_Server_Info *server = tcon->ses->server;

        cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (cfile == NULL)
                return cfile;

        fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
        if (!fdlocks) {
                kfree(cfile);
                return NULL;
        }

        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
        down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);

        cfile->count = 1;
        cfile->pid = current->tgid;
        cfile->uid = current_fsuid();
        cfile->dentry = dget(dentry);
        cfile->f_flags = file->f_flags;
        cfile->invalidHandle = false;
        cfile->tlink = cifs_get_tlink(tlink);
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);

        cifs_sb_active(inode->i_sb);

        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
         */
        if (oplock == server->vals->oplock_read &&
            cifs_has_mand_locks(cinode)) {
                cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
                oplock = 0;
        }

        spin_lock(&cifs_file_list_lock);
        if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
                oplock = fid->pending_open->oplock;
        list_del(&fid->pending_open->olist);

        server->ops->set_fid(cfile, fid, oplock);

        list_add(&cfile->tlist, &tcon->openFileList);
        /* if a readable file instance, put it first in the list */
        if (file->f_mode & FMODE_READ)
                list_add(&cfile->flist, &cinode->openFileList);
        else
                list_add_tail(&cfile->flist, &cinode->openFileList);
        spin_unlock(&cifs_file_list_lock);

        file->private_data = cfile;
        return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
        spin_lock(&cifs_file_list_lock);
        cifsFileInfo_get_locked(cifs_file);
        spin_unlock(&cifs_file_list_lock);
        return cifs_file;
}

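/*
 * Reference-counting sketch (usage pattern, not new driver code): a handle
 * starts at count == 1 in cifs_new_fileinfo(), and every user that keeps
 * the pointer must pair the calls:
 *
 *      struct cifsFileInfo *cfile = cifsFileInfo_get(file->private_data);
 *      ...
 *      cifsFileInfo_put(cfile);
 *
 * The final put closes the handle on the server, as described below.
 */
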
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
        struct inode *inode = cifs_file->dentry->d_inode;
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct super_block *sb = inode->i_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file_list_lock);
                return;
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        /* store open in pending opens to make sure we don't miss lease break */
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

        /* remove it from the lists */
        list_del(&cifs_file->flist);
        list_del(&cifs_file->tlist);

        if (list_empty(&cifsi->openFileList)) {
                cifs_dbg(FYI, "closing last open instance for inode %p\n",
                         cifs_file->dentry->d_inode);
                /*
                 * In strict cache mode we need to invalidate the mapping on
                 * the last close because otherwise we may get an error when
                 * we open this file again and are granted at least a level II
                 * oplock.
                 */
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
                        CIFS_I(inode)->invalid_mapping = true;
                cifs_set_oplock_level(cifsi, 0);
        }
        spin_unlock(&cifs_file_list_lock);

        cancel_work_sync(&cifs_file->oplock_break);

        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
                unsigned int xid;

                xid = get_xid();
                if (server->ops->close)
                        server->ops->close(xid, tcon, &cifs_file->fid);
                _free_xid(xid);
        }

        cifs_del_pending_open(&open);

        /*
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
        down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
                kfree(li);
        }
        list_del(&cifs_file->llist->llist);
        kfree(cifs_file->llist);
        up_write(&cifsi->lock_sem);

        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
        cifs_sb_deactive(sb);
        kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct TCP_Server_Info *server;
        struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *cfile = NULL;
        char *full_path = NULL;
        bool posix_open_ok = false;
        struct cifs_fid fid;
        struct cifs_pending_open open;

        xid = get_xid();

        cifs_sb = CIFS_SB(inode->i_sb);
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
                free_xid(xid);
                return PTR_ERR(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
                 inode, file->f_flags, full_path);

        if (server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (!tcon->broken_posix_open && tcon->unix_ext &&
            cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
            le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /* cannot refresh inode info since size could be stale */
                rc = cifs_posix_open(full_path, &inode, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     file->f_flags, &oplock, &fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix open succeeded\n");
                        posix_open_ok = true;
                } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        if (tcon->ses->serverNOS)
                                cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
                                         tcon->ses->serverName,
                                         tcon->ses->serverNOS);
                        tcon->broken_posix_open = true;
                } else if ((rc != -EIO) && (rc != -EREMOTE) &&
                           (rc != -EOPNOTSUPP)) /* path not found or net err */
                        goto out;
                /*
                 * Else fall through to retry the open the old way on network
                 * i/o or DFS errors.
                 */
        }

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &fid);

        cifs_add_pending_open(&fid, tlink, &open);

        if (!posix_open_ok) {
                if (server->ops->get_lease_key)
                        server->ops->get_lease_key(inode, &fid);

                rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
                                  file->f_flags, &oplock, &fid, xid);
                if (rc) {
                        cifs_del_pending_open(&open);
                        goto out;
                }
        }

        cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
        if (cfile == NULL) {
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
                rc = -ENOMEM;
                goto out;
        }

        cifs_fscache_set_inode_cookie(inode, file);

        if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
                /*
                 * Time to set the mode which we cannot set earlier due to
                 * problems creating new read-only files.
                 */
                struct cifs_unix_set_info_args args = {
                        .mode   = inode->i_mode,
                        .uid    = INVALID_UID, /* no change */
                        .gid    = INVALID_GID, /* no change */
                        .ctime  = NO_CHANGE_64,
                        .atime  = NO_CHANGE_64,
                        .mtime  = NO_CHANGE_64,
                        .device = 0,
                };
                CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
                                       cfile->pid);
        }

out:
        kfree(full_path);
        free_xid(xid);
        cifs_put_tlink(tlink);
        return rc;
}

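/*
 * Ordering note (a summary of the function above, not a requirement quoted
 * from a spec): the fid is registered via cifs_add_pending_open() before
 * the open request is issued so that a lease break arriving before
 * cifs_new_fileinfo() attaches the fid is not missed; the pending entry is
 * removed on failure with cifs_del_pending_open() or consumed by
 * cifs_new_fileinfo() on success.
 */
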
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte-range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        int rc = 0;

        down_read(&cinode->lock_sem);
        if (cinode->can_cache_brlcks) {
                /* can cache locks - no need to relock */
                up_read(&cinode->lock_sem);
                return rc;
        }

        if (cap_unix(tcon->ses) &&
            (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
            ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
                rc = cifs_push_posix_locks(cfile);
        else
                rc = tcon->ses->server->ops->push_mand_locks(cfile);

        up_read(&cinode->lock_sem);
        return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
        int rc = -EACCES;
        unsigned int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        struct cifsInodeInfo *cinode;
        struct inode *inode;
        char *full_path = NULL;
        int desired_access;
        int disposition = FILE_OPEN;
        int create_options = CREATE_NOT_DIR;
        struct cifs_open_parms oparms;

        xid = get_xid();
        mutex_lock(&cfile->fh_mutex);
        if (!cfile->invalidHandle) {
                mutex_unlock(&cfile->fh_mutex);
                rc = 0;
                free_xid(xid);
                return rc;
        }

        inode = cfile->dentry->d_inode;
        cifs_sb = CIFS_SB(inode->i_sb);
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        /*
         * Cannot grab the rename sem here because various ops, including
         * those that already hold it, can end up causing writepage to get
         * called, and if the server was down that means we end up here, and
         * we can never tell if the caller already has the rename_sem.
         */
        full_path = build_path_from_dentry(cfile->dentry);
        if (full_path == NULL) {
                rc = -ENOMEM;
                mutex_unlock(&cfile->fh_mutex);
                free_xid(xid);
                return rc;
        }

        cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
                 inode, cfile->f_flags, full_path);

        if (tcon->ses->server->oplocks)
                oplock = REQ_OPLOCK;
        else
                oplock = 0;

        if (tcon->unix_ext && cap_unix(tcon->ses) &&
            (CIFS_UNIX_POSIX_PATH_OPS_CAP &
             le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                /*
                 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
                 * original open. Must mask them off for a reopen.
                 */
                unsigned int oflags = cfile->f_flags &
                                                ~(O_CREAT | O_EXCL | O_TRUNC);

                rc = cifs_posix_open(full_path, NULL, inode->i_sb,
                                     cifs_sb->mnt_file_mode /* ignored */,
                                     oflags, &oplock, &cfile->fid.netfid, xid);
                if (rc == 0) {
                        cifs_dbg(FYI, "posix reopen succeeded\n");
                        goto reopen_success;
                }
                /*
                 * Fall through to retry the open the old way on errors;
                 * retrying hard is especially important in the reconnect
                 * path.
                 */
        }

        desired_access = cifs_convert_flags(cfile->f_flags);

        if (backup_cred(cifs_sb))
                create_options |= CREATE_OPEN_BACKUP_INTENT;

        if (server->ops->get_lease_key)
                server->ops->get_lease_key(inode, &cfile->fid);

        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = desired_access;
        oparms.create_options = create_options;
        oparms.disposition = disposition;
        oparms.path = full_path;
        oparms.fid = &cfile->fid;
        oparms.reconnect = true;

        /*
         * Cannot refresh the inode by passing in a file_info buf to be
         * returned by CIFSSMBOpen and then calling get_inode_info with the
         * returned buf, since the file might have write-behind data that
         * needs to be flushed and the server's version of the file size can
         * be stale. If we knew for sure that the inode was not dirty locally
         * we could do this.
         */
        rc = server->ops->open(xid, &oparms, &oplock, NULL);
        if (rc == -ENOENT && oparms.reconnect == false) {
                /* durable handle timeout is expired - open the file again */
                rc = server->ops->open(xid, &oparms, &oplock, NULL);
                /* indicate that we need to relock the file */
                oparms.reconnect = true;
        }

        if (rc) {
                mutex_unlock(&cfile->fh_mutex);
                cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
                cifs_dbg(FYI, "oplock: %d\n", oplock);
                goto reopen_error_exit;
        }

reopen_success:
        cfile->invalidHandle = false;
        mutex_unlock(&cfile->fh_mutex);
        cinode = CIFS_I(inode);

        if (can_flush) {
                rc = filemap_write_and_wait(inode->i_mapping);
                mapping_set_error(inode->i_mapping, rc);

                if (tcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&inode, full_path,
                                                      inode->i_sb, xid);
                else
                        rc = cifs_get_inode_info(&inode, full_path, NULL,
                                                 inode->i_sb, xid, NULL);
        }
        /*
         * Else we are writing out data to the server already and could
         * deadlock if we tried to flush data, and since we do not know if
         * we have data that would invalidate the current end of file on the
         * server we cannot go to the server to get the new inode info.
         */

        server->ops->set_fid(cfile, &cfile->fid, oplock);
        if (oparms.reconnect)
                cifs_relock_file(cfile);

reopen_error_exit:
        kfree(full_path);
        free_xid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        if (file->private_data != NULL) {
                cifsFileInfo_put(file->private_data);
                file->private_data = NULL;
        }

        /* return code from the ->release op is always ignored */
        return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct cifs_tcon *tcon;
        struct TCP_Server_Info *server;
        char *buf;

        cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

        if (cfile == NULL)
                return rc;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);
        server = tcon->ses->server;

        cifs_dbg(FYI, "Freeing private data in close dir\n");
        spin_lock(&cifs_file_list_lock);
        if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
                cfile->invalidHandle = true;
                spin_unlock(&cifs_file_list_lock);
                if (server->ops->close_dir)
                        rc = server->ops->close_dir(xid, tcon, &cfile->fid);
                else
                        rc = -ENOSYS;
                cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
                /* not much we can do if it fails anyway, ignore rc */
                rc = 0;
        } else
                spin_unlock(&cifs_file_list_lock);

        buf = cfile->srch_inf.ntwrk_buf_start;
        if (buf) {
                cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
                cfile->srch_inf.ntwrk_buf_start = NULL;
                if (cfile->srch_inf.smallBuf)
                        cifs_small_buf_release(buf);
                else
                        cifs_buf_release(buf);
        }

        cifs_put_tlink(cfile->tlink);
        kfree(file->private_data);
        file->private_data = NULL;
        /* BB can we lock the filestruct while this is going on? */
        free_xid(xid);
        return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
        struct cifsLockInfo *lock =
                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (!lock)
                return lock;
        lock->offset = offset;
        lock->length = length;
        lock->type = type;
        lock->pid = current->tgid;
        INIT_LIST_HEAD(&lock->blist);
        init_waitqueue_head(&lock->block_q);
        return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
        struct cifsLockInfo *li, *tmp;
        list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
                list_del_init(&li->blist);
                wake_up(&li->block_q);
        }
}

#define CIFS_LOCK_OP    0
#define CIFS_READ_OP    1
#define CIFS_WRITE_OP   2

/* @rw_check: CIFS_LOCK_OP, CIFS_READ_OP or CIFS_WRITE_OP (see above) */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
                            __u64 length, __u8 type, struct cifsFileInfo *cfile,
                            struct cifsLockInfo **conf_lock, int rw_check)
{
        struct cifsLockInfo *li;
        struct cifsFileInfo *cur_cfile = fdlocks->cfile;
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

        list_for_each_entry(li, &fdlocks->locks, llist) {
                if (offset + length <= li->offset ||
                    offset >= li->offset + li->length)
                        continue;
                if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
                    server->ops->compare_fids(cfile, cur_cfile)) {
                        /* shared lock prevents write op through the same fid */
                        if (!(li->type & server->vals->shared_lock_type) ||
                            rw_check != CIFS_WRITE_OP)
                                continue;
                }
                if ((type & server->vals->shared_lock_type) &&
                    ((server->ops->compare_fids(cfile, cur_cfile) &&
                     current->tgid == li->pid) || type == li->type))
                        continue;
                if (conf_lock)
                        *conf_lock = li;
                return true;
        }
        return false;
}

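/*
 * Worked example (hypothetical numbers): an existing lock at offset 0 with
 * length 10 covers bytes [0, 10). A request for offset 5, length 10 fails
 * both non-overlap tests above (5 + 10 <= 0 is false, 5 >= 0 + 10 is
 * false), so the ranges overlap and the remaining checks decide whether
 * the overlap is a real conflict (e.g. two shared locks are compatible).
 */
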
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
                        __u8 type, struct cifsLockInfo **conf_lock,
                        int rw_check)
{
        bool rc = false;
        struct cifs_fid_locks *cur;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

        list_for_each_entry(cur, &cinode->llist, llist) {
                rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
                                                 cfile, conf_lock, rw_check);
                if (rc)
                        break;
        }

        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
               __u8 type, struct file_lock *flock)
{
        int rc = 0;
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        bool exist;

        down_read(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, offset, length, type,
                                        &conf_lock, CIFS_LOCK_OP);
        if (exist) {
                flock->fl_start = conf_lock->offset;
                flock->fl_end = conf_lock->offset + conf_lock->length - 1;
                flock->fl_pid = conf_lock->pid;
                if (conf_lock->type & server->vals->shared_lock_type)
                        flock->fl_type = F_RDLCK;
                else
                        flock->fl_type = F_WRLCK;
        } else if (!cinode->can_cache_brlcks)
                rc = 1;
        else
                flock->fl_type = F_UNLCK;

        up_read(&cinode->lock_sem);
        return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
                 bool wait)
{
        struct cifsLockInfo *conf_lock;
        struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
        bool exist;
        int rc = 0;

try_again:
        exist = false;
        down_write(&cinode->lock_sem);

        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, &conf_lock, CIFS_LOCK_OP);
        if (!exist && cinode->can_cache_brlcks) {
                list_add_tail(&lock->llist, &cfile->llist->locks);
                up_write(&cinode->lock_sem);
                return rc;
        }

        if (!exist)
                rc = 1;
        else if (!wait)
                rc = -EACCES;
        else {
                list_add_tail(&lock->blist, &conf_lock->blist);
                up_write(&cinode->lock_sem);
                rc = wait_event_interruptible(lock->block_q,
                                        (lock->blist.prev == &lock->blist) &&
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
                down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }

        up_write(&cinode->lock_sem);
        return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
        int rc = 0;
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        unsigned char saved_type = flock->fl_type;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return 1;

        down_read(&cinode->lock_sem);
        posix_test_lock(file, flock);

        if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
                flock->fl_type = saved_type;
                rc = 1;
        }

        up_read(&cinode->lock_sem);
        return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
        int rc = 1;

        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;

try_again:
        down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
        }

        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
        if (rc == FILE_LOCK_DEFERRED) {
                rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
                if (!rc)
                        goto try_again;
                posix_unblock_lock(flock);
        }
        return rc;
}

int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
        unsigned int xid;
        int rc = 0, stored_rc;
        struct cifsLockInfo *li, *tmp;
        struct cifs_tcon *tcon;
        unsigned int num, max_num, max_buf;
        LOCKING_ANDX_RANGE *buf, *cur;
        int types[] = {LOCKING_ANDX_LARGE_FILES,
                       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
        int i;

        xid = get_xid();
        tcon = tlink_tcon(cfile->tlink);

        /*
         * Accessing maxBuf is racy with cifs_reconnect - need to store value
         * and check it for zero before using.
         */
        max_buf = tcon->ses->server->maxBuf;
        if (!max_buf) {
                free_xid(xid);
                return -EINVAL;
        }

        max_num = (max_buf - sizeof(struct smb_hdr)) /
                                                sizeof(LOCKING_ANDX_RANGE);
        buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
        if (!buf) {
                free_xid(xid);
                return -ENOMEM;
        }

        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
                list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                        if (li->type != types[i])
                                continue;
                        cur->Pid = cpu_to_le16(li->pid);
                        cur->LengthLow = cpu_to_le32((u32)li->length);
                        cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
                        cur->OffsetLow = cpu_to_le32((u32)li->offset);
                        cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
                        if (++num == max_num) {
                                stored_rc = cifs_lockv(xid, tcon,
                                                       cfile->fid.netfid,
                                                       (__u8)li->type, 0, num,
                                                       buf);
                                if (stored_rc)
                                        rc = stored_rc;
                                cur = buf;
                                num = 0;
                        } else
                                cur++;
                }

                if (num) {
                        stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
                                               (__u8)types[i], 0, num, buf);
                        if (stored_rc)
                                rc = stored_rc;
                }
        }

        kfree(buf);
        free_xid(xid);
        return rc;
}

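/*
 * Batching note (derived from the arithmetic above): at most
 *
 *      max_num = (max_buf - sizeof(struct smb_hdr)) /
 *                                      sizeof(LOCKING_ANDX_RANGE)
 *
 * ranges fit into a single LOCKING_ANDX request, so a full batch is
 * flushed whenever num reaches max_num and the remainder is sent after the
 * loop; exclusive and shared locks go in separate passes because each
 * request carries a single lock type.
 */
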
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
        for (lockp = &inode->i_flock; *lockp != NULL; \
             lockp = &(*lockp)->fl_next)

struct lock_to_push {
        struct list_head llist;
        __u64 offset;
        __u64 length;
        __u32 pid;
        __u16 netfid;
        __u8 type;
};

Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001114static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001115cifs_push_posix_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001116{
Jeff Layton1c8c6012013-06-21 08:58:15 -04001117 struct inode *inode = cfile->dentry->d_inode;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001118 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1119 struct file_lock *flock, **before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001120 unsigned int count = 0, i = 0;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001121 int rc = 0, xid, type;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001122 struct list_head locks_to_send, *el;
1123 struct lock_to_push *lck, *tmp;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001124 __u64 length;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001125
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001126 xid = get_xid();
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001127
Jeff Layton1c8c6012013-06-21 08:58:15 -04001128 spin_lock(&inode->i_lock);
1129 cifs_for_each_lock(inode, before) {
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001130 if ((*before)->fl_flags & FL_POSIX)
1131 count++;
1132 }
Jeff Layton1c8c6012013-06-21 08:58:15 -04001133 spin_unlock(&inode->i_lock);
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001134
1135 INIT_LIST_HEAD(&locks_to_send);
1136
1137 /*
Pavel Shilovskyce858522012-03-17 09:46:55 +03001138 * Allocating count locks is enough because no FL_POSIX locks can be
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001139	 * added to the list while we are holding cinode->lock_sem, which
Pavel Shilovskyce858522012-03-17 09:46:55 +03001140	 * protects the locking operations on this inode.
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001141 */
1142 for (; i < count; i++) {
1143 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1144 if (!lck) {
1145 rc = -ENOMEM;
1146 goto err_out;
1147 }
1148 list_add_tail(&lck->llist, &locks_to_send);
1149 }
1150
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001151 el = locks_to_send.next;
Jeff Layton1c8c6012013-06-21 08:58:15 -04001152 spin_lock(&inode->i_lock);
1153 cifs_for_each_lock(inode, before) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001154 flock = *before;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001155 if ((flock->fl_flags & FL_POSIX) == 0)
1156 continue;
Pavel Shilovskyce858522012-03-17 09:46:55 +03001157 if (el == &locks_to_send) {
1158 /*
1159 * The list ended. We don't have enough allocated
1160 * structures - something is really wrong.
1161 */
Joe Perchesf96637b2013-05-04 22:12:25 -05001162 cifs_dbg(VFS, "Can't push all brlocks!\n");
Pavel Shilovskyce858522012-03-17 09:46:55 +03001163 break;
1164 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001165 length = 1 + flock->fl_end - flock->fl_start;
1166 if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
1167 type = CIFS_RDLCK;
1168 else
1169 type = CIFS_WRLCK;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001170 lck = list_entry(el, struct lock_to_push, llist);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001171 lck->pid = flock->fl_pid;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001172 lck->netfid = cfile->fid.netfid;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001173 lck->length = length;
1174 lck->type = type;
1175 lck->offset = flock->fl_start;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001176 el = el->next;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001177 }
Jeff Layton1c8c6012013-06-21 08:58:15 -04001178 spin_unlock(&inode->i_lock);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001179
1180 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001181 int stored_rc;
1182
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001183 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001184 lck->offset, lck->length, NULL,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001185 lck->type, 0);
1186 if (stored_rc)
1187 rc = stored_rc;
1188 list_del(&lck->llist);
1189 kfree(lck);
1190 }
1191
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001192out:
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001193 free_xid(xid);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001194 return rc;
Pavel Shilovskyd5751462012-03-05 09:39:20 +03001195err_out:
1196 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1197 list_del(&lck->llist);
1198 kfree(lck);
1199 }
1200 goto out;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001201}
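/*
 * Sketch of the pattern above (comment added for clarity): because
 * CIFSSMBPosixLock() can sleep, no SMB is ever sent under
 * inode->i_lock.  The function instead (1) counts FL_POSIX locks under
 * the spinlock, (2) preallocates that many lock_to_push entries with
 * the spinlock dropped, (3) re-takes the spinlock and copies the lock
 * data into the preallocated entries, and (4) issues the requests
 * lock-free.  The caller holds cinode->lock_sem for write, so no new
 * POSIX locks can appear between steps (1) and (3).
 */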
1202
1203static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001204cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001205{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001206 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001207 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001208 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001209 int rc = 0;
1210
1211	/* we are going to update can_cache_brlcks here - we need write access */
1212 down_write(&cinode->lock_sem);
1213 if (!cinode->can_cache_brlcks) {
1214 up_write(&cinode->lock_sem);
1215 return rc;
1216 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001217
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001218 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001219 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1220 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001221 rc = cifs_push_posix_locks(cfile);
1222 else
1223 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001224
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001225 cinode->can_cache_brlcks = false;
1226 up_write(&cinode->lock_sem);
1227 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001228}
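/*
 * Usage note (added for illustration): cifs_push_locks() is the point
 * where byte-range locks that were cached client-side, e.g. while an
 * oplock allowed local caching, get flushed to the server.  A caller
 * that has just lost that caching guarantee would simply do:
 *
 *	rc = cifs_push_locks(cfile);
 *
 * and rely on the function itself to test can_cache_brlcks under
 * lock_sem and to pick the POSIX or mandatory-lock path.
 */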
1229
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001230static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001231cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001232 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001234 if (flock->fl_flags & FL_POSIX)
Joe Perchesf96637b2013-05-04 22:12:25 -05001235 cifs_dbg(FYI, "Posix\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001236 if (flock->fl_flags & FL_FLOCK)
Joe Perchesf96637b2013-05-04 22:12:25 -05001237 cifs_dbg(FYI, "Flock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001238 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001239 cifs_dbg(FYI, "Blocking lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001240 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001242 if (flock->fl_flags & FL_ACCESS)
Joe Perchesf96637b2013-05-04 22:12:25 -05001243 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001244 if (flock->fl_flags & FL_LEASE)
Joe Perchesf96637b2013-05-04 22:12:25 -05001245 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001246 if (flock->fl_flags &
Jeff Layton3d6d8542012-09-19 06:22:46 -07001247 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
1248 FL_ACCESS | FL_LEASE | FL_CLOSE)))
Joe Perchesf96637b2013-05-04 22:12:25 -05001249 cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001251 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001252 if (flock->fl_type == F_WRLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001253 cifs_dbg(FYI, "F_WRLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001254 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001255 *lock = 1;
1256 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001257 cifs_dbg(FYI, "F_UNLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001258 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001259 *unlock = 1;
1260 /* Check if unlock includes more than one lock range */
1261 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001262 cifs_dbg(FYI, "F_RDLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001263 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001264 *lock = 1;
1265 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001266 cifs_dbg(FYI, "F_EXLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001267 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001268 *lock = 1;
1269 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001270 cifs_dbg(FYI, "F_SHLCK\n");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001271 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001272 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273 } else
Joe Perchesf96637b2013-05-04 22:12:25 -05001274 cifs_dbg(FYI, "Unknown type of lock\n");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001275}
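/*
 * Worked example (added for illustration): a blocking write lock taken
 * with fcntl(fd, F_SETLKW, ...) and l_type == F_WRLCK reaches
 * cifs_read_flock() with FL_POSIX and FL_SLEEP set, so it returns
 * *type = large_lock_type | exclusive_lock_type, *lock = 1,
 * *unlock = 0 and *wait_flag = true.
 */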
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001277static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001278cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001279 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001280{
1281 int rc = 0;
1282 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001283 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1284 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001285 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001286 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001288 if (posix_lck) {
1289 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001290
1291 rc = cifs_posix_lock_test(file, flock);
1292 if (!rc)
1293 return rc;
1294
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001295 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001296 posix_lock_type = CIFS_RDLCK;
1297 else
1298 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001299 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001300 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001301 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 return rc;
1303 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001304
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001305 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001306 if (!rc)
1307 return rc;
1308
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001309 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001310 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1311 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001312 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001313 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1314 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001315 flock->fl_type = F_UNLCK;
1316 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001317 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1318 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001319 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001320 }
1321
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001322 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001323 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001324 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001325 }
1326
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001327 type &= ~server->vals->exclusive_lock_type;
1328
1329 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1330 type | server->vals->shared_lock_type,
1331 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001332 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001333 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1334 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001335 flock->fl_type = F_RDLCK;
1336 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001337 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1338 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001339 } else
1340 flock->fl_type = F_WRLCK;
1341
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001342 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001343}
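/*
 * Note (added for clarity): with no mandatory-lock "test" operation in
 * the protocol, cifs_getlk() probes by actually taking the lock and
 * immediately unlocking it.  If the probe of the requested type
 * succeeds, F_GETLK reports F_UNLCK (no conflict).  Otherwise, for an
 * exclusive request a second, shared probe distinguishes a read-locked
 * region (report F_RDLCK) from a write-locked one (report F_WRLCK).
 */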
1344
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001345void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001346cifs_move_llist(struct list_head *source, struct list_head *dest)
1347{
1348 struct list_head *li, *tmp;
1349 list_for_each_safe(li, tmp, source)
1350 list_move(li, dest);
1351}
1352
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001353void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001354cifs_free_llist(struct list_head *llist)
1355{
1356 struct cifsLockInfo *li, *tmp;
1357 list_for_each_entry_safe(li, tmp, llist, llist) {
1358 cifs_del_lock_waiters(li);
1359 list_del(&li->llist);
1360 kfree(li);
1361 }
1362}
1363
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001364int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001365cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1366 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001367{
1368 int rc = 0, stored_rc;
1369 int types[] = {LOCKING_ANDX_LARGE_FILES,
1370 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1371 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001372 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001373 LOCKING_ANDX_RANGE *buf, *cur;
1374 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1375 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1376 struct cifsLockInfo *li, *tmp;
1377 __u64 length = 1 + flock->fl_end - flock->fl_start;
1378 struct list_head tmp_llist;
1379
1380 INIT_LIST_HEAD(&tmp_llist);
1381
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001382 /*
1383	 * Accessing maxBuf is racy with cifs_reconnect - need to store the
1384	 * value and check it for zero before using it.
1385 */
1386 max_buf = tcon->ses->server->maxBuf;
1387 if (!max_buf)
1388 return -EINVAL;
1389
1390 max_num = (max_buf - sizeof(struct smb_hdr)) /
1391 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001392 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1393 if (!buf)
1394 return -ENOMEM;
1395
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001396 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001397 for (i = 0; i < 2; i++) {
1398 cur = buf;
1399 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001400 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001401 if (flock->fl_start > li->offset ||
1402 (flock->fl_start + length) <
1403 (li->offset + li->length))
1404 continue;
1405 if (current->tgid != li->pid)
1406 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001407 if (types[i] != li->type)
1408 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001409 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001410 /*
1411			 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001412			 * the lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001413 */
1414 list_del(&li->llist);
1415 cifs_del_lock_waiters(li);
1416 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001417 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001418 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001419 cur->Pid = cpu_to_le16(li->pid);
1420 cur->LengthLow = cpu_to_le32((u32)li->length);
1421 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1422 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1423 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1424 /*
1425			 * We need to save the lock here so we can add it back to
1426			 * the file's list if the unlock range request fails on
1427 * the server.
1428 */
1429 list_move(&li->llist, &tmp_llist);
1430 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001431 stored_rc = cifs_lockv(xid, tcon,
1432 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001433 li->type, num, 0, buf);
1434 if (stored_rc) {
1435 /*
1436 * We failed on the unlock range
1437 * request - add all locks from the tmp
1438 * list to the head of the file's list.
1439 */
1440 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001441 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001442 rc = stored_rc;
1443 } else
1444 /*
1445				 * The unlock range request succeeded -
1446 * free the tmp list.
1447 */
1448 cifs_free_llist(&tmp_llist);
1449 cur = buf;
1450 num = 0;
1451 } else
1452 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001453 }
1454 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001455 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001456 types[i], num, 0, buf);
1457 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001458 cifs_move_llist(&tmp_llist,
1459 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001460 rc = stored_rc;
1461 } else
1462 cifs_free_llist(&tmp_llist);
1463 }
1464 }
1465
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001466 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001467 kfree(buf);
1468 return rc;
1469}
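/*
 * Sketch of the save/restore pattern above (comment added for
 * clarity): locks being unlocked are first moved to tmp_llist with
 * list_move().  If the wire request fails, cifs_move_llist() splices
 * them back onto cfile->llist->locks so the client's view still
 * matches the server's; on success cifs_free_llist() frees them for
 * good.
 */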
1470
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001471static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001472cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001473 bool wait_flag, bool posix_lck, int lock, int unlock,
1474 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001475{
1476 int rc = 0;
1477 __u64 length = 1 + flock->fl_end - flock->fl_start;
1478 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1479 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001480 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001481 struct inode *inode = cfile->dentry->d_inode;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001482
1483 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001484 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001485
1486 rc = cifs_posix_lock_set(file, flock);
1487 if (!rc || rc < 0)
1488 return rc;
1489
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001490 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001491 posix_lock_type = CIFS_RDLCK;
1492 else
1493 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001494
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001495 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001496 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001497
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001498 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1499 current->tgid, flock->fl_start, length,
1500 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001501 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001502 }
1503
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001504 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001505 struct cifsLockInfo *lock;
1506
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001507 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001508 if (!lock)
1509 return -ENOMEM;
1510
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001511 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001512 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001513 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001514 return rc;
1515 }
1516 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001517 goto out;
1518
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001519 /*
1520		 * A Windows 7 server can delay breaking a lease from read to None
1521		 * if we set a byte-range lock on a file - break it explicitly
1522		 * before sending the lock to the server to be sure the next
1523		 * read won't conflict with non-overlapping locks due to
1524		 * page reading.
1525 */
1526 if (!CIFS_I(inode)->clientCanCacheAll &&
1527 CIFS_I(inode)->clientCanCacheRead) {
1528 cifs_invalidate_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001529 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1530 inode);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001531 CIFS_I(inode)->clientCanCacheRead = false;
1532 }
1533
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001534 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1535 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001536 if (rc) {
1537 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001538 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001539 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001540
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001541 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001542 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001543 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001544
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001545out:
1546 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001547 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001548 return rc;
1549}
1550
1551int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1552{
1553 int rc, xid;
1554 int lock = 0, unlock = 0;
1555 bool wait_flag = false;
1556 bool posix_lck = false;
1557 struct cifs_sb_info *cifs_sb;
1558 struct cifs_tcon *tcon;
1559 struct cifsInodeInfo *cinode;
1560 struct cifsFileInfo *cfile;
1561 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001562 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001563
1564 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001565 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001566
Joe Perchesf96637b2013-05-04 22:12:25 -05001567 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1568 cmd, flock->fl_flags, flock->fl_type,
1569 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001570
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001571 cfile = (struct cifsFileInfo *)file->private_data;
1572 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001573
1574 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1575 tcon->ses->server);
1576
1577 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001578 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001579 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001580
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001581 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001582 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1583 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1584 posix_lck = true;
1585 /*
1586 * BB add code here to normalize offset and length to account for
1587	 * negative length, which we cannot accept over the wire.
1588 */
1589 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001590 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001591 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001592 return rc;
1593 }
1594
1595 if (!lock && !unlock) {
1596 /*
1597		 * if neither lock nor unlock is set, there is nothing to do
1598		 * since we do not know what the request is
1599 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001600 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001601 return -EOPNOTSUPP;
1602 }
1603
1604 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1605 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001606 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 return rc;
1608}
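/*
 * Hypothetical userspace trigger (illustration only): an application
 * locking a range on a CIFS mount ends up here through the VFS ->lock
 * hook, e.g.:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 4096 };
 *	if (fcntl(fd, F_SETLKW, &fl) == -1)
 *		perror("fcntl");
 *
 * IS_GETLK(cmd) requests take the cifs_getlk() path; lock and unlock
 * requests go through cifs_setlk() above.
 */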
1609
Jeff Layton597b0272012-03-23 14:40:56 -04001610/*
1611 * update the file size (if needed) after a write. Should be called with
1612 * the inode->i_lock held
1613 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001614void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001615cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1616 unsigned int bytes_written)
1617{
1618 loff_t end_of_write = offset + bytes_written;
1619
1620 if (end_of_write > cifsi->server_eof)
1621 cifsi->server_eof = end_of_write;
1622}
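/*
 * Worked example (added for illustration): after writing 512 bytes at
 * offset 8192, end_of_write is 8704; a server_eof of 8192 is advanced
 * to 8704, while a larger cached server_eof is left untouched.
 */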
1623
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001624static ssize_t
1625cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1626 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627{
1628 int rc = 0;
1629 unsigned int bytes_written = 0;
1630 unsigned int total_written;
1631 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001632 struct cifs_tcon *tcon;
1633 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001634 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001635 struct dentry *dentry = open_file->dentry;
1636 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001637 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638
Jeff Layton7da4b492010-10-15 15:34:00 -04001639 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640
Joe Perchesf96637b2013-05-04 22:12:25 -05001641 cifs_dbg(FYI, "write %zd bytes to offset %lld of %s\n",
1642 write_size, *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001644 tcon = tlink_tcon(open_file->tlink);
1645 server = tcon->ses->server;
1646
1647 if (!server->ops->sync_write)
1648 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001649
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001650 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 for (total_written = 0; write_size > total_written;
1653 total_written += bytes_written) {
1654 rc = -EAGAIN;
1655 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001656 struct kvec iov[2];
1657 unsigned int len;
1658
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 /* we could deadlock if we called
1661 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001662 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663				   the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001664 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 if (rc != 0)
1666 break;
1667 }
Steve French3e844692005-10-03 13:37:24 -07001668
Jeff Laytonca83ce32011-04-12 09:13:44 -04001669 len = min((size_t)cifs_sb->wsize,
1670 write_size - total_written);
1671 /* iov[0] is reserved for smb header */
1672 iov[1].iov_base = (char *)write_data + total_written;
1673 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001674 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001675 io_parms.tcon = tcon;
1676 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001677 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001678 rc = server->ops->sync_write(xid, open_file, &io_parms,
1679 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 }
1681 if (rc || (bytes_written == 0)) {
1682 if (total_written)
1683 break;
1684 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001685 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 return rc;
1687 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001688 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001689 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001690 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001691 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001692 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001693 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 }
1695
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001696 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
Jeff Layton7da4b492010-10-15 15:34:00 -04001698 if (total_written > 0) {
1699 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001700 if (*offset > dentry->d_inode->i_size)
1701 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001702 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001704 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001705 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 return total_written;
1707}
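/*
 * Note (added for clarity): the nested loops above split write_size
 * into wsize-limited chunks and retry each chunk on -EAGAIN, reopening
 * an invalidated handle first.  With a hypothetical wsize of 57344, a
 * 128 KiB buffer would go out as chunks of 57344, 57344 and 16384
 * bytes, with *offset and the cached file size advanced as each chunk
 * completes.
 */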
1708
Jeff Layton6508d902010-09-29 19:51:11 -04001709struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1710 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001711{
1712 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001713 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1714
1715 /* only filter by fsuid on multiuser mounts */
1716 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1717 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001718
Jeff Layton44772882010-10-15 15:34:03 -04001719 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001720	/* we could simply take the first list entry, since write-only entries
1721	   are always at the end of the list, but the first entry might have
1722	   a close pending, so we go through the whole list */
1723 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001724 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001725 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001726 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001727 if (!open_file->invalidHandle) {
1728 /* found a good file */
1729 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001730 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001731 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001732 return open_file;
1733		} /* else we might as well continue and look for
1734		     another, or simply have the caller reopen it
1735		     again rather than trying to fix this handle */
1736 } else /* write only file */
1737 break; /* write only files are last so must be done */
1738 }
Jeff Layton44772882010-10-15 15:34:03 -04001739 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001740 return NULL;
1741}
Steve French630f3f0c2007-10-25 21:17:17 +00001742
Jeff Layton6508d902010-09-29 19:51:11 -04001743struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1744 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001745{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001746 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001747 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001748 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001749 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001750 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001751
Steve French60808232006-04-22 15:53:05 +00001752 /* Having a null inode here (because mapping->host was set to zero by
1753	   the VFS or MM) should not happen, but we had reports of an oops (due to
1754	   it being zero) during stress test cases, so we need to check for it */
1755
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001756 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001757 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001758 dump_stack();
1759 return NULL;
1760 }
1761
Jeff Laytond3892292010-11-02 16:22:50 -04001762 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1763
Jeff Layton6508d902010-09-29 19:51:11 -04001764 /* only filter by fsuid on multiuser mounts */
1765 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1766 fsuid_only = false;
1767
Jeff Layton44772882010-10-15 15:34:03 -04001768 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001769refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001770 if (refind > MAX_REOPEN_ATT) {
1771 spin_unlock(&cifs_file_list_lock);
1772 return NULL;
1773 }
Steve French6148a742005-10-05 12:23:19 -07001774 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001775 if (!any_available && open_file->pid != current->tgid)
1776 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001777 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001778 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001779 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001780 if (!open_file->invalidHandle) {
1781 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001782 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001783 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001784 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001785 } else {
1786 if (!inv_file)
1787 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001788 }
Steve French6148a742005-10-05 12:23:19 -07001789 }
1790 }
Jeff Layton2846d382008-09-22 21:33:33 -04001791	/* couldn't find a usable FH with the same pid, try any available */
1792 if (!any_available) {
1793 any_available = true;
1794 goto refind_writable;
1795 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001796
1797 if (inv_file) {
1798 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001799 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001800 }
1801
Jeff Layton44772882010-10-15 15:34:03 -04001802 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001803
1804 if (inv_file) {
1805 rc = cifs_reopen_file(inv_file, false);
1806 if (!rc)
1807 return inv_file;
1808 else {
1809 spin_lock(&cifs_file_list_lock);
1810 list_move_tail(&inv_file->flist,
1811 &cifs_inode->openFileList);
1812 spin_unlock(&cifs_file_list_lock);
1813 cifsFileInfo_put(inv_file);
1814 spin_lock(&cifs_file_list_lock);
1815 ++refind;
1816 goto refind_writable;
1817 }
1818 }
1819
Steve French6148a742005-10-05 12:23:19 -07001820 return NULL;
1821}
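/*
 * Note (added for clarity): invalid handles are not skipped outright
 * here.  The first one seen is remembered in inv_file; if no valid
 * writable handle exists, the list lock is dropped,
 * cifs_reopen_file(inv_file, false) is attempted, and on failure that
 * handle is requeued at the tail and the scan restarts, giving up
 * after MAX_REOPEN_ATT passes.
 */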
1822
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1824{
1825 struct address_space *mapping = page->mapping;
1826 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1827 char *write_data;
1828 int rc = -EFAULT;
1829 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001831 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832
1833 if (!mapping || !mapping->host)
1834 return -EFAULT;
1835
1836 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837
1838 offset += (loff_t)from;
1839 write_data = kmap(page);
1840 write_data += from;
1841
1842 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1843 kunmap(page);
1844 return -EIO;
1845 }
1846
1847 /* racing with truncate? */
1848 if (offset > mapping->host->i_size) {
1849 kunmap(page);
1850 return 0; /* don't care */
1851 }
1852
1853 /* check to make sure that we are not extending the file */
1854 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001855 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
Jeff Layton6508d902010-09-29 19:51:11 -04001857 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001858 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001859 bytes_written = cifs_write(open_file, open_file->pid,
1860 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001861 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001863 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001864 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001865 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001866 else if (bytes_written < 0)
1867 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001868 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05001869 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870 rc = -EIO;
1871 }
1872
1873 kunmap(page);
1874 return rc;
1875}
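/*
 * Worked example (added for illustration): with 4 KiB pages, writing
 * bytes [100, 300) of page index 3 maps to file offset
 * (3 << PAGE_CACHE_SHIFT) + 100 = 12388 and a length of
 * to - from = 200 bytes, which is what cifs_write() is asked to send.
 */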
1876
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001878 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001880 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1881 bool done = false, scanned = false, range_whole = false;
1882 pgoff_t end, index;
1883 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001884 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001885 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001886 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001887
Steve French37c0eb42005-10-05 14:50:29 -07001888 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001889 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001890 * one page at a time via cifs_writepage
1891 */
1892 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1893 return generic_writepages(mapping, wbc);
1894
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001895 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001896 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001897 end = -1;
1898 } else {
1899 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1900 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1901 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001902 range_whole = true;
1903 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001904 }
1905retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001906 while (!done && index <= end) {
1907 unsigned int i, nr_pages, found_pages;
1908 pgoff_t next = 0, tofind;
1909 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001910
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001911 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1912 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001913
Jeff Laytonc2e87642012-03-23 14:40:55 -04001914 wdata = cifs_writedata_alloc((unsigned int)tofind,
1915 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001916 if (!wdata) {
1917 rc = -ENOMEM;
1918 break;
1919 }
1920
1921 /*
1922 * find_get_pages_tag seems to return a max of 256 on each
1923 * iteration, so we must call it several times in order to
1924 * fill the array or the wsize is effectively limited to
1925 * 256 * PAGE_CACHE_SIZE.
1926 */
1927 found_pages = 0;
1928 pages = wdata->pages;
1929 do {
1930 nr_pages = find_get_pages_tag(mapping, &index,
1931 PAGECACHE_TAG_DIRTY,
1932 tofind, pages);
1933 found_pages += nr_pages;
1934 tofind -= nr_pages;
1935 pages += nr_pages;
1936 } while (nr_pages && tofind && index <= end);
1937
1938 if (found_pages == 0) {
1939 kref_put(&wdata->refcount, cifs_writedata_release);
1940 break;
1941 }
1942
1943 nr_pages = 0;
1944 for (i = 0; i < found_pages; i++) {
1945 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001946 /*
1947 * At this point we hold neither mapping->tree_lock nor
1948 * lock on the page itself: the page may be truncated or
1949 * invalidated (changing page->mapping to NULL), or even
1950 * swizzled back from swapper_space to tmpfs file
1951 * mapping
1952 */
1953
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001954 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001955 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001956 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001957 break;
1958
1959 if (unlikely(page->mapping != mapping)) {
1960 unlock_page(page);
1961 break;
1962 }
1963
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001964 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001965 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001966 unlock_page(page);
1967 break;
1968 }
1969
1970 if (next && (page->index != next)) {
1971 /* Not next consecutive page */
1972 unlock_page(page);
1973 break;
1974 }
1975
1976 if (wbc->sync_mode != WB_SYNC_NONE)
1977 wait_on_page_writeback(page);
1978
1979 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001980 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001981 unlock_page(page);
1982 break;
1983 }
Steve French84d2f072005-10-12 15:32:05 -07001984
Linus Torvaldscb876f42006-12-23 16:19:07 -08001985 /*
1986 * This actually clears the dirty bit in the radix tree.
1987 * See cifs_writepage() for more commentary.
1988 */
1989 set_page_writeback(page);
1990
Jeff Layton3a98b862012-11-26 09:48:41 -05001991 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001992 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001993 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001994 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001995 break;
1996 }
1997
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001998 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001999 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002000 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07002001 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002002
2003 /* reset index to refind any pages skipped */
2004 if (nr_pages == 0)
2005 index = wdata->pages[0]->index + 1;
2006
2007 /* put any pages we aren't going to use */
2008 for (i = nr_pages; i < found_pages; i++) {
2009 page_cache_release(wdata->pages[i]);
2010 wdata->pages[i] = NULL;
2011 }
2012
2013 /* nothing to write? */
2014 if (nr_pages == 0) {
2015 kref_put(&wdata->refcount, cifs_writedata_release);
2016 continue;
2017 }
2018
2019 wdata->sync_mode = wbc->sync_mode;
2020 wdata->nr_pages = nr_pages;
2021 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07002022 wdata->pagesz = PAGE_CACHE_SIZE;
2023 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05002024 min(i_size_read(mapping->host) -
2025 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07002026 (loff_t)PAGE_CACHE_SIZE);
2027 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
2028 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002029
2030 do {
2031 if (wdata->cfile != NULL)
2032 cifsFileInfo_put(wdata->cfile);
2033 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
2034 false);
2035 if (!wdata->cfile) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002036 cifs_dbg(VFS, "No writable handles for inode\n");
Steve French23e7dd72005-10-20 13:44:56 -07002037 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002038 break;
Steve French37c0eb42005-10-05 14:50:29 -07002039 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04002040 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002041 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2042 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002043 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07002044
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002045 for (i = 0; i < nr_pages; ++i)
2046 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05002047
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002048 /* send failure -- clean up the mess */
2049 if (rc != 0) {
2050 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002051 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002052 redirty_page_for_writepage(wbc,
2053 wdata->pages[i]);
2054 else
2055 SetPageError(wdata->pages[i]);
2056 end_page_writeback(wdata->pages[i]);
2057 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002058 }
Jeff Layton941b8532011-01-11 07:24:01 -05002059 if (rc != -EAGAIN)
2060 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002061 }
2062 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002063
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002064 wbc->nr_to_write -= nr_pages;
2065 if (wbc->nr_to_write <= 0)
2066 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002067
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002068 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002069 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002070
Steve French37c0eb42005-10-05 14:50:29 -07002071 if (!scanned && !done) {
2072 /*
2073 * We hit the last page and there is more work to be done: wrap
2074 * back to the start of the file
2075 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002076 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002077 index = 0;
2078 goto retry;
2079 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002080
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002081 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002082 mapping->writeback_index = index;
2083
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 return rc;
2085}
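/*
 * Worked example (added for illustration): with 4 KiB pages and a
 * hypothetical wsize of 65536, tofind permits up to 16 dirty pages per
 * cifs_writedata.  If the last page of a full batch overlaps only the
 * first 1000 bytes of i_size, tailsz is 1000 and wdata->bytes is
 * 15 * 4096 + 1000 = 62440.
 */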
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002087static int
2088cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002090 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002091 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002093 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094/* BB add check for wbc flags */
2095 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002096 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002097 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002098
2099 /*
2100 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2101 *
2102 * A writepage() implementation always needs to do either this,
2103 * or re-dirty the page with "redirty_page_for_writepage()" in
2104 * the case of a failure.
2105 *
2106 * Just unlocking the page will cause the radix tree tag-bits
2107 * to fail to update with the state of the page correctly.
2108 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002109 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002110retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002112 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2113 goto retry_write;
2114 else if (rc == -EAGAIN)
2115 redirty_page_for_writepage(wbc, page);
2116 else if (rc != 0)
2117 SetPageError(page);
2118 else
2119 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002120 end_page_writeback(page);
2121 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002122 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 return rc;
2124}
2125
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002126static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2127{
2128 int rc = cifs_writepage_locked(page, wbc);
2129 unlock_page(page);
2130 return rc;
2131}
2132
Nick Piggind9414772008-09-24 11:32:59 -04002133static int cifs_write_end(struct file *file, struct address_space *mapping,
2134 loff_t pos, unsigned len, unsigned copied,
2135 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136{
Nick Piggind9414772008-09-24 11:32:59 -04002137 int rc;
2138 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002139 struct cifsFileInfo *cfile = file->private_data;
2140 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2141 __u32 pid;
2142
2143 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2144 pid = cfile->pid;
2145 else
2146 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
Joe Perchesf96637b2013-05-04 22:12:25 -05002148 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002149 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002150
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002151 if (PageChecked(page)) {
2152 if (copied == len)
2153 SetPageUptodate(page);
2154 ClearPageChecked(page);
2155 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002156 SetPageUptodate(page);
2157
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002159 char *page_data;
2160 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002161 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002162
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002163 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 /* this is probably better than directly calling
2165		   partialpage_write since in this function the file handle is
2166		   known, which we might as well leverage */
2167		/* BB check if anything else is missing from ppw,
2168		   such as updating the last write time */
2169 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002170 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002171 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002173
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002174 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002175 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002176 rc = copied;
2177 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002178 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 }
2180
Nick Piggind9414772008-09-24 11:32:59 -04002181 if (rc > 0) {
2182 spin_lock(&inode->i_lock);
2183 if (pos > inode->i_size)
2184 i_size_write(inode, pos);
2185 spin_unlock(&inode->i_lock);
2186 }
2187
2188 unlock_page(page);
2189 page_cache_release(page);
2190
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 return rc;
2192}
2193
Josef Bacik02c24a82011-07-16 20:44:56 -04002194int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2195 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002197 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002199 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002200 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002201 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002202 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002203 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204
Josef Bacik02c24a82011-07-16 20:44:56 -04002205 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2206 if (rc)
2207 return rc;
2208 mutex_lock(&inode->i_mutex);
2209
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002210 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211
Joe Perchesf96637b2013-05-04 22:12:25 -05002212 cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
2213 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002214
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002215 if (!CIFS_I(inode)->clientCanCacheRead) {
2216 rc = cifs_invalidate_mapping(inode);
2217 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002218 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002219 rc = 0; /* don't care about it in fsync */
2220 }
2221 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002222
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002223 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002224 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2225 server = tcon->ses->server;
2226 if (server->ops->flush)
2227 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2228 else
2229 rc = -ENOSYS;
2230 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002231
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002232 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002233 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002234 return rc;
2235}
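/*
 * Note (added for clarity): cifs_strict_fsync() differs from
 * cifs_fsync() below only in the cifs_invalidate_mapping() step: when
 * the client holds no read oplock it cannot trust its page cache after
 * the flush, so the strict variant drops the cached pages before
 * asking the server to flush the file.
 */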
2236
Josef Bacik02c24a82011-07-16 20:44:56 -04002237int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002238{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002239 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002240 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002241 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002242 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002243 struct cifsFileInfo *smbfile = file->private_data;
2244 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002245 struct inode *inode = file->f_mapping->host;
2246
2247 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2248 if (rc)
2249 return rc;
2250 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002251
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002252 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002253
Joe Perchesf96637b2013-05-04 22:12:25 -05002254 cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
2255 file->f_path.dentry->d_name.name, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002256
2257 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002258 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2259 server = tcon->ses->server;
2260 if (server->ops->flush)
2261 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2262 else
2263 rc = -ENOSYS;
2264 }
Steve Frenchb298f222009-02-21 21:17:43 +00002265
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002266 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002267 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 return rc;
2269}
2270
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271/*
2272 * As the file closes, flush all cached write data for this inode,
2273 * checking for write-behind errors.
2274 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002275int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276{
Al Viro496ad9a2013-01-23 17:07:38 -05002277 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 int rc = 0;
2279
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002280 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002281 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002282
Joe Perchesf96637b2013-05-04 22:12:25 -05002283 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284
2285 return rc;
2286}
2287
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002288static int
2289cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2290{
2291 int rc = 0;
2292 unsigned long i;
2293
2294 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002295 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002296 if (!pages[i]) {
2297 /*
2298 * save number of pages we have already allocated and
2299 * return with ENOMEM error
2300 */
2301 num_pages = i;
2302 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002303 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002304 }
2305 }
2306
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002307 if (rc) {
2308 for (i = 0; i < num_pages; i++)
2309 put_page(pages[i]);
2310 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002311 return rc;
2312}
2313
2314static inline
2315size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2316{
2317 size_t num_pages;
2318 size_t clen;
2319
2320 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002321 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002322
2323 if (cur_len)
2324 *cur_len = clen;
2325
2326 return num_pages;
2327}
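/*
 * Worked example (illustrative only, assuming 4096-byte pages): with a
 * negotiated wsize of 57344 and len of 100000, clen = min(100000, 57344)
 * = 57344 and num_pages = DIV_ROUND_UP(57344, 4096) = 14, so each write
 * request built in the uncached write loop covers at most fourteen pages.
 */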
2328
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002329static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002330cifs_uncached_writev_complete(struct work_struct *work)
2331{
2332 int i;
2333 struct cifs_writedata *wdata = container_of(work,
2334 struct cifs_writedata, work);
2335 struct inode *inode = wdata->cfile->dentry->d_inode;
2336 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2337
2338 spin_lock(&inode->i_lock);
2339 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2340 if (cifsi->server_eof > inode->i_size)
2341 i_size_write(inode, cifsi->server_eof);
2342 spin_unlock(&inode->i_lock);
2343
2344 complete(&wdata->done);
2345
2346 if (wdata->result != -EAGAIN) {
2347 for (i = 0; i < wdata->nr_pages; i++)
2348 put_page(wdata->pages[i]);
2349 }
2350
2351 kref_put(&wdata->refcount, cifs_writedata_release);
2352}
2353
2354/* attempt to send write to server, retry on any -EAGAIN errors */
2355static int
2356cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2357{
2358 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002359 struct TCP_Server_Info *server;
2360
2361 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002362
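	/*
	 * If the handle was invalidated by a session reconnect, reopen it
	 * before resending. Note that "continue" below re-tests the rc from
	 * cifs_reopen_file() itself, so the loop keeps spinning only while
	 * that rc is -EAGAIN; any other reopen error ends the retries and
	 * is returned to the caller.
	 */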
2363 do {
2364 if (wdata->cfile->invalidHandle) {
2365 rc = cifs_reopen_file(wdata->cfile, false);
2366 if (rc != 0)
2367 continue;
2368 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002369 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002370 } while (rc == -EAGAIN);
2371
2372 return rc;
2373}
2374
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002375static ssize_t
2376cifs_iovec_write(struct file *file, const struct iovec *iov,
2377 unsigned long nr_segs, loff_t *poffset)
2378{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002379 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002380 size_t copied, len, cur_len;
2381 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002382 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002383 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002384 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002385 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002386 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002387 struct cifs_writedata *wdata, *tmp;
2388 struct list_head wdata_list;
2389 int rc;
2390 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002391
2392 len = iov_length(iov, nr_segs);
2393 if (!len)
2394 return 0;
2395
2396 rc = generic_write_checks(file, poffset, &len, 0);
2397 if (rc)
2398 return rc;
2399
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002400 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002401 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002402 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002403 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002404
2405 if (!tcon->ses->server->ops->async_writev)
2406 return -ENOSYS;
2407
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002408 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002409
2410 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2411 pid = open_file->pid;
2412 else
2413 pid = current->tgid;
2414
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002415 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002416 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002417 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002418
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002419 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2420 wdata = cifs_writedata_alloc(nr_pages,
2421 cifs_uncached_writev_complete);
2422 if (!wdata) {
2423 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002424 break;
2425 }
2426
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002427 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2428 if (rc) {
2429 kfree(wdata);
2430 break;
2431 }
2432
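		/*
		 * Copy the user data into the pages just allocated. After the
		 * loop, save_len - cur_len is the number of bytes the
		 * iov_iter actually yielded (a user copy can come up short),
		 * and that becomes this wdata's byte count.
		 */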
2433 save_len = cur_len;
2434 for (i = 0; i < nr_pages; i++) {
2435 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2436 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2437 0, copied);
2438 cur_len -= copied;
2439 iov_iter_advance(&it, copied);
2440 }
2441 cur_len = save_len - cur_len;
2442
2443 wdata->sync_mode = WB_SYNC_ALL;
2444 wdata->nr_pages = nr_pages;
2445 wdata->offset = (__u64)offset;
2446 wdata->cfile = cifsFileInfo_get(open_file);
2447 wdata->pid = pid;
2448 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002449 wdata->pagesz = PAGE_SIZE;
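		/*
		 * tailsz is the number of valid bytes in the final page, e.g.
		 * (assuming 4096-byte pages) cur_len == 10000 gives nr_pages
		 * == 3 and tailsz == 10000 - 2 * 4096 == 1808.
		 */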
2450 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002451 rc = cifs_uncached_retry_writev(wdata);
2452 if (rc) {
2453 kref_put(&wdata->refcount, cifs_writedata_release);
2454 break;
2455 }
2456
2457 list_add_tail(&wdata->list, &wdata_list);
2458 offset += cur_len;
2459 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002460 } while (len > 0);
2461
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002462 /*
2463	 * If at least one write was successfully sent, then discard any rc
2464	 * value from the later sends. If the remaining writes succeed, we
2465	 * end up returning the total number of bytes written. If one fails,
2466	 * we pick up the new rc value from it below.
2467 */
2468 if (!list_empty(&wdata_list))
2469 rc = 0;
2470
2471 /*
2472 * Wait for and collect replies for any successful sends in order of
2473 * increasing offset. Once an error is hit or we get a fatal signal
2474 * while waiting, then return without waiting for any more replies.
2475 */
2476restart_loop:
2477 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2478 if (!rc) {
2479 /* FIXME: freezable too? */
2480 rc = wait_for_completion_killable(&wdata->done);
2481 if (rc)
2482 rc = -EINTR;
2483 else if (wdata->result)
2484 rc = wdata->result;
2485 else
2486 total_written += wdata->bytes;
2487
2488 /* resend call if it's a retryable error */
2489 if (rc == -EAGAIN) {
2490 rc = cifs_uncached_retry_writev(wdata);
2491 goto restart_loop;
2492 }
2493 }
2494 list_del_init(&wdata->list);
2495 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002496 }
2497
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002498 if (total_written > 0)
2499 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002500
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002501 cifs_stats_bytes_written(tcon, total_written);
2502 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002503}
2504
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002505ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002506 unsigned long nr_segs, loff_t pos)
2507{
2508 ssize_t written;
2509 struct inode *inode;
2510
Al Viro496ad9a2013-01-23 17:07:38 -05002511 inode = file_inode(iocb->ki_filp);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002512
2513 /*
2514	 * BB - optimize for the case when signing is disabled: we can drop
2515	 * this extra memory-to-memory copy and use the iovec buffers to
2516	 * construct the write request directly.
2517 */
2518
2519 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2520 if (written > 0) {
2521 CIFS_I(inode)->invalid_mapping = true;
2522 iocb->ki_pos = pos;
2523 }
2524
2525 return written;
2526}
2527
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002528static ssize_t
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002529cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2530 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002531{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002532 struct file *file = iocb->ki_filp;
2533 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2534 struct inode *inode = file->f_mapping->host;
2535 struct cifsInodeInfo *cinode = CIFS_I(inode);
2536 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2537 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002538
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002539 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002540
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002541 /*
2542	 * We need to hold the sem to be sure nobody modifies the lock list
2543	 * with a brlock that prevents writing.
2544 */
2545 down_read(&cinode->lock_sem);
2546 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2547 server->vals->exclusive_lock_type, NULL,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002548 CIFS_WRITE_OP)) {
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002549 mutex_lock(&inode->i_mutex);
2550 rc = __generic_file_aio_write(iocb, iov, nr_segs,
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002551 &iocb->ki_pos);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002552 mutex_unlock(&inode->i_mutex);
2553 }
2554
2555 if (rc > 0 || rc == -EIOCBQUEUED) {
2556 ssize_t err;
2557
2558 err = generic_write_sync(file, pos, rc);
2559 if (err < 0 && rc > 0)
2560 rc = err;
2561 }
2562
2563 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002564 return rc;
2565}
2566
2567ssize_t
2568cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2569 unsigned long nr_segs, loff_t pos)
2570{
Al Viro496ad9a2013-01-23 17:07:38 -05002571 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002572 struct cifsInodeInfo *cinode = CIFS_I(inode);
2573 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2574 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2575 iocb->ki_filp->private_data;
2576 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002577 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002578
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002579 if (cinode->clientCanCacheAll) {
2580 if (cap_unix(tcon->ses) &&
2581 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2582 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2583 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2584 return cifs_writev(iocb, iov, nr_segs, pos);
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04002585 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002586 /*
2587	 * For non-oplocked files in strict cache mode we need to write the data
2588	 * to the server exactly from pos to pos+len-1 rather than flush all
2589	 * affected pages, because flushing may cause an error with mandatory
2590	 * locks on those pages but not on the region from pos to pos+len-1.
2591 */
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002592 written = cifs_user_writev(iocb, iov, nr_segs, pos);
2593 if (written > 0 && cinode->clientCanCacheRead) {
2594 /*
2595	 * A Windows 7 server can delay breaking a level2 oplock when a write
2596	 * request comes in - break it on the client to prevent reading
2597	 * stale data.
2598 */
2599 cifs_invalidate_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05002600 cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
2601 inode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002602 cinode->clientCanCacheRead = false;
2603 }
2604 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002605}
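/*
 * Descriptive summary of the dispatch above: with an exclusive (batch)
 * oplock and POSIX byte-range lock semantics, the generic cached write path
 * is used directly; with the oplock but mandatory lock semantics,
 * cifs_writev() first checks the lock list for a conflicting brlock; without
 * the oplock, data goes to the server uncached via cifs_user_writev(), and
 * if the inode was read-cached its pages are invalidated so later reads
 * cannot see stale data.
 */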
2606
Jeff Layton0471ca32012-05-16 07:13:16 -04002607static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002608cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002609{
2610 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002611
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002612 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2613 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002614 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002615 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002616 INIT_LIST_HEAD(&rdata->list);
2617 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002618 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002619 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002620
Jeff Layton0471ca32012-05-16 07:13:16 -04002621 return rdata;
2622}
2623
Jeff Layton6993f742012-05-16 07:13:17 -04002624void
2625cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002626{
Jeff Layton6993f742012-05-16 07:13:17 -04002627 struct cifs_readdata *rdata = container_of(refcount,
2628 struct cifs_readdata, refcount);
2629
2630 if (rdata->cfile)
2631 cifsFileInfo_put(rdata->cfile);
2632
Jeff Layton0471ca32012-05-16 07:13:16 -04002633 kfree(rdata);
2634}
2635
Jeff Layton2a1bb132012-05-16 07:13:17 -04002636static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002637cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002638{
2639 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002640 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002641 unsigned int i;
2642
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002643 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002644 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2645 if (!page) {
2646 rc = -ENOMEM;
2647 break;
2648 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002649 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002650 }
2651
2652 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002653 for (i = 0; i < nr_pages; i++) {
2654 put_page(rdata->pages[i]);
2655 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002656 }
2657 }
2658 return rc;
2659}
2660
2661static void
2662cifs_uncached_readdata_release(struct kref *refcount)
2663{
Jeff Layton1c892542012-05-16 07:13:17 -04002664 struct cifs_readdata *rdata = container_of(refcount,
2665 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002666 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002667
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002668 for (i = 0; i < rdata->nr_pages; i++) {
2669 put_page(rdata->pages[i]);
2670 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002671 }
2672 cifs_readdata_release(refcount);
2673}
2674
2675static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002676cifs_retry_async_readv(struct cifs_readdata *rdata)
2677{
2678 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002679 struct TCP_Server_Info *server;
2680
2681 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002682
2683 do {
2684 if (rdata->cfile->invalidHandle) {
2685 rc = cifs_reopen_file(rdata->cfile, true);
2686 if (rc != 0)
2687 continue;
2688 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002689 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002690 } while (rc == -EAGAIN);
2691
2692 return rc;
2693}
2694
Jeff Layton1c892542012-05-16 07:13:17 -04002695/**
2696 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2697 * @rdata: the readdata response with list of pages holding data
2698 * @iov: vector in which we should copy the data
2699 * @nr_segs: number of segments in vector
2700 * @offset: offset into file of the first iovec
2701 * @copied: used to return the amount of data copied to the iov
2702 *
2703 * This function copies data from a list of pages in a readdata response into
2704 * an array of iovecs. It will first calculate where the data should go
2705 * based on the info in the readdata and then copy the data into that spot.
2706 */
2707static ssize_t
2708cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2709 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2710{
2711 int rc = 0;
2712 struct iov_iter ii;
2713 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002714 ssize_t remaining = rdata->bytes;
2715 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002716 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002717
2718 /* set up iov_iter and advance to the correct offset */
2719 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2720 iov_iter_advance(&ii, pos);
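	/*
	 * rdata->offset - offset is this response's displacement from the
	 * start of the original request, so after the advance the iterator
	 * points at exactly the spot in the caller's iovec where this chunk
	 * of data belongs.
	 */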
2721
2722 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002723 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002724 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002725 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002726
2727 /* copy a whole page or whatever's left */
2728 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2729
2730 /* ...but limit it to whatever space is left in the iov */
2731 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2732
2733 /* go while there's data to be copied and no errors */
2734 if (copy && !rc) {
2735 pdata = kmap(page);
2736 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2737 (int)copy);
2738 kunmap(page);
2739 if (!rc) {
2740 *copied += copy;
2741 remaining -= copy;
2742 iov_iter_advance(&ii, copy);
2743 }
2744 }
Jeff Layton1c892542012-05-16 07:13:17 -04002745 }
2746
2747 return rc;
2748}
2749
2750static void
2751cifs_uncached_readv_complete(struct work_struct *work)
2752{
2753 struct cifs_readdata *rdata = container_of(work,
2754 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002755
2756 complete(&rdata->done);
2757 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2758}
2759
2760static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002761cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2762 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002763{
Jeff Layton8321fec2012-09-19 06:22:32 -07002764 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002765 unsigned int i;
2766 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002767 struct kvec iov;
Jeff Layton1c892542012-05-16 07:13:17 -04002768
Jeff Layton8321fec2012-09-19 06:22:32 -07002769 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002770 for (i = 0; i < nr_pages; i++) {
2771 struct page *page = rdata->pages[i];
2772
Jeff Layton8321fec2012-09-19 06:22:32 -07002773 if (len >= PAGE_SIZE) {
Jeff Layton1c892542012-05-16 07:13:17 -04002774 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002775 iov.iov_base = kmap(page);
2776 iov.iov_len = PAGE_SIZE;
Joe Perchesf96637b2013-05-04 22:12:25 -05002777 cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
2778 i, iov.iov_base, iov.iov_len);
Jeff Layton8321fec2012-09-19 06:22:32 -07002779 len -= PAGE_SIZE;
2780 } else if (len > 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002781 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002782 iov.iov_base = kmap(page);
2783 iov.iov_len = len;
Joe Perchesf96637b2013-05-04 22:12:25 -05002784 cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
2785 i, iov.iov_base, iov.iov_len);
Jeff Layton8321fec2012-09-19 06:22:32 -07002786 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2787 rdata->tailsz = len;
2788 len = 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002789 } else {
2790 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002791 rdata->pages[i] = NULL;
2792 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002793 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002794 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002795 }
Jeff Layton8321fec2012-09-19 06:22:32 -07002796
2797 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2798 kunmap(page);
2799 if (result < 0)
2800 break;
2801
2802 total_read += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002803 }
2804
Jeff Layton8321fec2012-09-19 06:22:32 -07002805 return total_read > 0 ? total_read : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002806}
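/*
 * Worked example (illustrative only, assuming 4096-byte pages): a 6000-byte
 * read into two pages fills the first page completely, copies 1904 bytes
 * into the second, zeroes its remaining 2192 bytes, and leaves rdata->tailsz
 * at 1904 so the caller knows how much of the last page holds real data.
 */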
2807
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002808static ssize_t
2809cifs_iovec_read(struct file *file, const struct iovec *iov,
2810 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811{
Jeff Layton1c892542012-05-16 07:13:17 -04002812 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002813 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002814 ssize_t total_read = 0;
2815 loff_t offset = *poffset;
2816 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002818 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002820 struct cifs_readdata *rdata, *tmp;
2821 struct list_head rdata_list;
2822 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002823
2824 if (!nr_segs)
2825 return 0;
2826
2827 len = iov_length(iov, nr_segs);
2828 if (!len)
2829 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830
Jeff Layton1c892542012-05-16 07:13:17 -04002831 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002832 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002833 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002834 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002836 if (!tcon->ses->server->ops->async_readv)
2837 return -ENOSYS;
2838
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002839 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2840 pid = open_file->pid;
2841 else
2842 pid = current->tgid;
2843
Steve Frenchad7a2922008-02-07 23:25:02 +00002844 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05002845 cifs_dbg(FYI, "attempting read on write only file instance\n");
Steve Frenchad7a2922008-02-07 23:25:02 +00002846
Jeff Layton1c892542012-05-16 07:13:17 -04002847 do {
2848 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2849 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002850
Jeff Layton1c892542012-05-16 07:13:17 -04002851 /* allocate a readdata struct */
2852 rdata = cifs_readdata_alloc(npages,
2853 cifs_uncached_readv_complete);
2854 if (!rdata) {
2855 rc = -ENOMEM;
2856 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002858
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002859 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002860 if (rc)
2861 goto error;
2862
2863 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002864 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002865 rdata->offset = offset;
2866 rdata->bytes = cur_len;
2867 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002868 rdata->pagesz = PAGE_SIZE;
2869 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002870
2871 rc = cifs_retry_async_readv(rdata);
2872error:
2873 if (rc) {
2874 kref_put(&rdata->refcount,
2875 cifs_uncached_readdata_release);
2876 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877 }
Jeff Layton1c892542012-05-16 07:13:17 -04002878
2879 list_add_tail(&rdata->list, &rdata_list);
2880 offset += cur_len;
2881 len -= cur_len;
2882 } while (len > 0);
2883
2884	/* if at least one read request was sent successfully, reset rc */
2885 if (!list_empty(&rdata_list))
2886 rc = 0;
2887
2888 /* the loop below should proceed in the order of increasing offsets */
2889restart_loop:
2890 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2891 if (!rc) {
2892 ssize_t copied;
2893
2894 /* FIXME: freezable sleep too? */
2895 rc = wait_for_completion_killable(&rdata->done);
2896 if (rc)
2897 rc = -EINTR;
2898 else if (rdata->result)
2899 rc = rdata->result;
2900 else {
2901 rc = cifs_readdata_to_iov(rdata, iov,
2902 nr_segs, *poffset,
2903 &copied);
2904 total_read += copied;
2905 }
2906
2907 /* resend call if it's a retryable error */
2908 if (rc == -EAGAIN) {
2909 rc = cifs_retry_async_readv(rdata);
2910 goto restart_loop;
2911 }
2912 }
2913 list_del_init(&rdata->list);
2914 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002915 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002916
Jeff Layton1c892542012-05-16 07:13:17 -04002917 cifs_stats_bytes_read(tcon, total_read);
2918 *poffset += total_read;
2919
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002920 /* mask nodata case */
2921 if (rc == -ENODATA)
2922 rc = 0;
2923
Jeff Layton1c892542012-05-16 07:13:17 -04002924 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925}
2926
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002927ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002928 unsigned long nr_segs, loff_t pos)
2929{
2930 ssize_t read;
2931
2932 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2933 if (read > 0)
2934 iocb->ki_pos = pos;
2935
2936 return read;
2937}
2938
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002939ssize_t
2940cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2941 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002942{
Al Viro496ad9a2013-01-23 17:07:38 -05002943 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002944 struct cifsInodeInfo *cinode = CIFS_I(inode);
2945 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2946 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2947 iocb->ki_filp->private_data;
2948 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2949 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002950
2951 /*
2952	 * In strict cache mode we always need to read from the server if we
2953	 * don't have a level II oplock, because the server can delay the
2954	 * mtime change - so we can't decide whether to invalidate the inode.
2955	 * Page reads can also fail if there are mandatory locks on pages
2956	 * affected by this read but not on the region from pos to
2957	 * pos+len-1.
2958 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002959 if (!cinode->clientCanCacheRead)
2960 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002961
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002962 if (cap_unix(tcon->ses) &&
2963 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2964 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2965 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2966
2967 /*
2968	 * We need to hold the sem to be sure nobody modifies the lock list
2969 * with a brlock that prevents reading.
2970 */
2971 down_read(&cinode->lock_sem);
2972 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2973 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002974 NULL, CIFS_READ_OP))
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002975 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2976 up_read(&cinode->lock_sem);
2977 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002978}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002980static ssize_t
2981cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002982{
2983 int rc = -EACCES;
2984 unsigned int bytes_read = 0;
2985 unsigned int total_read;
2986 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002987 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002989 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002990 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002991 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002992 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002994 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002995 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002996 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002998 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002999 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003001 /* FIXME: set up handlers for larger reads and/or convert to async */
3002 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3003
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303005 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003006 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303007 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003009 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003010 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003011 server = tcon->ses->server;
3012
3013 if (!server->ops->sync_read) {
3014 free_xid(xid);
3015 return -ENOSYS;
3016 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003017
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003018 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3019 pid = open_file->pid;
3020 else
3021 pid = current->tgid;
3022
Linus Torvalds1da177e2005-04-16 15:20:36 -07003023 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003024 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003026 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3027 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003028 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003029 /*
3030		 * For Windows ME and 9x we do not want to request more than the
3031		 * server negotiated, since it will refuse the read otherwise.
3032 */
3033 if ((tcon->ses) && !(tcon->ses->capabilities &
3034 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03003035 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04003036 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07003037 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038 rc = -EAGAIN;
3039 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00003040 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003041 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 if (rc != 0)
3043 break;
3044 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003045 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003046 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003047 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003048 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003049 rc = server->ops->sync_read(xid, open_file, &io_parms,
3050 &bytes_read, &cur_offset,
3051 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052 }
3053 if (rc || (bytes_read == 0)) {
3054 if (total_read) {
3055 break;
3056 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003057 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058 return rc;
3059 }
3060 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003061 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003062 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063 }
3064 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003065 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066 return total_read;
3067}
3068
Jeff Laytonca83ce32011-04-12 09:13:44 -04003069/*
3070 * If the page is mmap'ed into a process' page tables, then we need to make
3071 * sure that it doesn't change while being written back.
3072 */
3073static int
3074cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3075{
3076 struct page *page = vmf->page;
3077
3078 lock_page(page);
3079 return VM_FAULT_LOCKED;
3080}
3081
3082static struct vm_operations_struct cifs_file_vm_ops = {
3083 .fault = filemap_fault,
3084 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003085 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003086};
3087
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003088int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3089{
3090 int rc, xid;
Al Viro496ad9a2013-01-23 17:07:38 -05003091 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003092
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003093 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003094
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003095 if (!CIFS_I(inode)->clientCanCacheRead) {
3096 rc = cifs_invalidate_mapping(inode);
3097		if (rc) {
			free_xid(xid);	/* release the xid before bailing out */
			return rc;
		}
3099 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003100
3101 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003102 if (rc == 0)
3103 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003104 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003105 return rc;
3106}
3107
Linus Torvalds1da177e2005-04-16 15:20:36 -07003108int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3109{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003110 int rc, xid;
3111
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003112 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003113 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003115 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3116 rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003117 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003118 return rc;
3119 }
3120 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003121 if (rc == 0)
3122 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003123 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003124 return rc;
3125}
3126
Jeff Layton0471ca32012-05-16 07:13:16 -04003127static void
3128cifs_readv_complete(struct work_struct *work)
3129{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003130 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003131 struct cifs_readdata *rdata = container_of(work,
3132 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003133
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003134 for (i = 0; i < rdata->nr_pages; i++) {
3135 struct page *page = rdata->pages[i];
3136
Jeff Layton0471ca32012-05-16 07:13:16 -04003137 lru_cache_add_file(page);
3138
3139 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003140 flush_dcache_page(page);
3141 SetPageUptodate(page);
3142 }
3143
3144 unlock_page(page);
3145
3146 if (rdata->result == 0)
3147 cifs_readpage_to_fscache(rdata->mapping->host, page);
3148
3149 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003150 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003151 }
Jeff Layton6993f742012-05-16 07:13:17 -04003152 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003153}
3154
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003155static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003156cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3157 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003158{
Jeff Layton8321fec2012-09-19 06:22:32 -07003159 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003160 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003161 u64 eof;
3162 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003163 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003164 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003165
3166 /* determine the eof that the server (probably) has */
3167 eof = CIFS_I(rdata->mapping->host)->server_eof;
3168 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
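	/*
	 * e.g. (assuming 4096-byte pages) a server eof of 10000 bytes gives
	 * eof_index == (10000 - 1) >> 12 == 2: pages 0-2 may still hold
	 * data, while anything at index 3 or beyond lies past the server's
	 * eof and is zero-filled below.
	 */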
Joe Perchesf96637b2013-05-04 22:12:25 -05003169 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003170
Jeff Layton8321fec2012-09-19 06:22:32 -07003171 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003172 for (i = 0; i < nr_pages; i++) {
3173 struct page *page = rdata->pages[i];
3174
Jeff Layton8321fec2012-09-19 06:22:32 -07003175 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003176 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003177 iov.iov_base = kmap(page);
3178 iov.iov_len = PAGE_CACHE_SIZE;
Joe Perchesf96637b2013-05-04 22:12:25 -05003179 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3180 i, page->index, iov.iov_base, iov.iov_len);
Jeff Layton8321fec2012-09-19 06:22:32 -07003181 len -= PAGE_CACHE_SIZE;
3182 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003183 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003184 iov.iov_base = kmap(page);
3185 iov.iov_len = len;
Joe Perchesf96637b2013-05-04 22:12:25 -05003186 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3187 i, page->index, iov.iov_base, iov.iov_len);
Jeff Layton8321fec2012-09-19 06:22:32 -07003188 memset(iov.iov_base + len,
3189 '\0', PAGE_CACHE_SIZE - len);
3190 rdata->tailsz = len;
3191 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003192 } else if (page->index > eof_index) {
3193 /*
3194 * The VFS will not try to do readahead past the
3195 * i_size, but it's possible that we have outstanding
3196 * writes with gaps in the middle and the i_size hasn't
3197 * caught up yet. Populate those with zeroed out pages
3198 * to prevent the VFS from repeatedly attempting to
3199 * fill them until the writes are flushed.
3200 */
3201 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003202 lru_cache_add_file(page);
3203 flush_dcache_page(page);
3204 SetPageUptodate(page);
3205 unlock_page(page);
3206 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003207 rdata->pages[i] = NULL;
3208 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003209 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003210 } else {
3211 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003212 lru_cache_add_file(page);
3213 unlock_page(page);
3214 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003215 rdata->pages[i] = NULL;
3216 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003217 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003218 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003219
3220 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3221 kunmap(page);
3222 if (result < 0)
3223 break;
3224
3225 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003226 }
3227
Jeff Layton8321fec2012-09-19 06:22:32 -07003228 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003229}
3230
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231static int cifs_readpages(struct file *file, struct address_space *mapping,
3232 struct list_head *page_list, unsigned num_pages)
3233{
Jeff Layton690c5e32011-10-19 15:30:16 -04003234 int rc;
3235 struct list_head tmplist;
3236 struct cifsFileInfo *open_file = file->private_data;
3237 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3238 unsigned int rsize = cifs_sb->rsize;
3239 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003240
Jeff Layton690c5e32011-10-19 15:30:16 -04003241 /*
3242 * Give up immediately if rsize is too small to read an entire page.
3243 * The VFS will fall back to readpage. We should never reach this
3244 * point however since we set ra_pages to 0 when the rsize is smaller
3245 * than a cache page.
3246 */
3247 if (unlikely(rsize < PAGE_CACHE_SIZE))
3248 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003249
Suresh Jayaraman56698232010-07-05 18:13:25 +05303250 /*
3251 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3252 * immediately if the cookie is negative
3253 */
3254 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3255 &num_pages);
3256 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003257 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303258
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003259 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3260 pid = open_file->pid;
3261 else
3262 pid = current->tgid;
3263
Jeff Layton690c5e32011-10-19 15:30:16 -04003264 rc = 0;
3265 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266
Joe Perchesf96637b2013-05-04 22:12:25 -05003267 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3268 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04003269
3270 /*
3271 * Start with the page at end of list and move it to private
3272 * list. Do the same with any following pages until we hit
3273 * the rsize limit, hit an index discontinuity, or run out of
3274 * pages. Issue the async read and then start the loop again
3275 * until the list is empty.
3276 *
3277 * Note that list order is important. The page_list is in
3278 * the order of declining indexes. When we put the pages in
3279 * the rdata->pages, then we want them in increasing order.
3280 */
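	/*
	 * e.g. with an rsize of 16384 and 4096-byte pages (figures assumed
	 * for illustration), at most four contiguous pages are moved onto
	 * tmplist before the rsize check below cuts the batch off and the
	 * async read is issued.
	 */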
3281 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003282 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003283 unsigned int bytes = PAGE_CACHE_SIZE;
3284 unsigned int expected_index;
3285 unsigned int nr_pages = 1;
3286 loff_t offset;
3287 struct page *page, *tpage;
3288 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003289
3290 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291
Jeff Layton690c5e32011-10-19 15:30:16 -04003292 /*
3293 * Lock the page and put it in the cache. Since no one else
3294 * should have access to this page, we're safe to simply set
3295 * PG_locked without checking it first.
3296 */
3297 __set_page_locked(page);
3298 rc = add_to_page_cache_locked(page, mapping,
3299 page->index, GFP_KERNEL);
3300
3301 /* give up if we can't stick it in the cache */
3302 if (rc) {
3303 __clear_page_locked(page);
3304 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306
Jeff Layton690c5e32011-10-19 15:30:16 -04003307 /* move first page to the tmplist */
3308 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3309 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310
Jeff Layton690c5e32011-10-19 15:30:16 -04003311 /* now try and add more pages onto the request */
3312 expected_index = page->index + 1;
3313 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3314 /* discontinuity ? */
3315 if (page->index != expected_index)
3316 break;
3317
3318 /* would this page push the read over the rsize? */
3319 if (bytes + PAGE_CACHE_SIZE > rsize)
3320 break;
3321
3322 __set_page_locked(page);
3323 if (add_to_page_cache_locked(page, mapping,
3324 page->index, GFP_KERNEL)) {
3325 __clear_page_locked(page);
3326 break;
3327 }
3328 list_move_tail(&page->lru, &tmplist);
3329 bytes += PAGE_CACHE_SIZE;
3330 expected_index++;
3331 nr_pages++;
3332 }
3333
Jeff Layton0471ca32012-05-16 07:13:16 -04003334 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003335 if (!rdata) {
3336 /* best to give up if we're out of mem */
3337 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3338 list_del(&page->lru);
3339 lru_cache_add_file(page);
3340 unlock_page(page);
3341 page_cache_release(page);
3342 }
3343 rc = -ENOMEM;
3344 break;
3345 }
3346
Jeff Layton6993f742012-05-16 07:13:17 -04003347 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003348 rdata->mapping = mapping;
3349 rdata->offset = offset;
3350 rdata->bytes = bytes;
3351 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003352 rdata->pagesz = PAGE_CACHE_SIZE;
3353 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003354
3355 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3356 list_del(&page->lru);
3357 rdata->pages[rdata->nr_pages++] = page;
3358 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003359
Jeff Layton2a1bb132012-05-16 07:13:17 -04003360 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003361 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003362 for (i = 0; i < rdata->nr_pages; i++) {
3363 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003364 lru_cache_add_file(page);
3365 unlock_page(page);
3366 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367 }
Jeff Layton6993f742012-05-16 07:13:17 -04003368 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 break;
3370 }
Jeff Layton6993f742012-05-16 07:13:17 -04003371
3372 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 }
3374
Linus Torvalds1da177e2005-04-16 15:20:36 -07003375 return rc;
3376}
3377
3378static int cifs_readpage_worker(struct file *file, struct page *page,
3379 loff_t *poffset)
3380{
3381 char *read_data;
3382 int rc;
3383
Suresh Jayaraman56698232010-07-05 18:13:25 +05303384 /* Is the page cached? */
Al Viro496ad9a2013-01-23 17:07:38 -05003385 rc = cifs_readpage_from_fscache(file_inode(file), page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303386 if (rc == 0)
3387 goto read_complete;
3388
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389 page_cache_get(page);
3390 read_data = kmap(page);
3391	/* for reads over a certain size we could initiate async read-ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003392
Linus Torvalds1da177e2005-04-16 15:20:36 -07003393 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003394
Linus Torvalds1da177e2005-04-16 15:20:36 -07003395 if (rc < 0)
3396 goto io_error;
3397 else
Joe Perchesf96637b2013-05-04 22:12:25 -05003398 cifs_dbg(FYI, "Bytes read %d\n", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003399
Al Viro496ad9a2013-01-23 17:07:38 -05003400 file_inode(file)->i_atime =
3401 current_fs_time(file_inode(file)->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003402
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 if (PAGE_CACHE_SIZE > rc)
3404 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3405
3406 flush_dcache_page(page);
3407 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303408
3409 /* send this page to the cache */
Al Viro496ad9a2013-01-23 17:07:38 -05003410 cifs_readpage_to_fscache(file_inode(file), page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303411
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003413
Linus Torvalds1da177e2005-04-16 15:20:36 -07003414io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003415 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303417
3418read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419 return rc;
3420}
3421
3422static int cifs_readpage(struct file *file, struct page *page)
3423{
3424 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3425 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003426 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003427
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003428 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429
3430 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303431 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003432 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303433 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434 }
3435
Joe Perchesf96637b2013-05-04 22:12:25 -05003436 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003437 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438
3439 rc = cifs_readpage_worker(file, page, &offset);
3440
3441 unlock_page(page);
3442
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003443 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444 return rc;
3445}
3446
Steve Frencha403a0a2007-07-26 15:54:16 +00003447static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3448{
3449 struct cifsFileInfo *open_file;
3450
Jeff Layton44772882010-10-15 15:34:03 -04003451 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003452 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003453 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003454 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003455 return 1;
3456 }
3457 }
Jeff Layton44772882010-10-15 15:34:03 -04003458 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003459 return 0;
3460}
3461
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462/* We do not want to update the file size from the server for inodes
3463   open for write, to avoid races with writepage extending the file.
3464   In the future we could consider allowing a refresh of the inode
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003465   only on increases in the file size,
3466   but this is tricky to do without racing with writebehind
3467   page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003468bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469{
Steve Frencha403a0a2007-07-26 15:54:16 +00003470 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003471 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003472
Steve Frencha403a0a2007-07-26 15:54:16 +00003473 if (is_inode_writable(cifsInode)) {
3474 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003475 struct cifs_sb_info *cifs_sb;
3476
Steve Frenchc32a0b62006-01-12 14:41:28 -08003477 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003478 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003479 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003480 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003481 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003482 }
3483
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003484 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003485 return true;
Steve French7ba52632007-02-08 18:14:13 +00003486
Steve French4b18f2a2008-04-29 00:06:05 +00003487 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003488 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003489 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490}
3491
Nick Piggind9414772008-09-24 11:32:59 -04003492static int cifs_write_begin(struct file *file, struct address_space *mapping,
3493 loff_t pos, unsigned len, unsigned flags,
3494 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495{
Nick Piggind9414772008-09-24 11:32:59 -04003496 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3497 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003498 loff_t page_start = pos & PAGE_MASK;
3499 loff_t i_size;
3500 struct page *page;
3501 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502
Joe Perchesf96637b2013-05-04 22:12:25 -05003503 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003504
Nick Piggin54566b22009-01-04 12:00:53 -08003505 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003506 if (!page) {
3507 rc = -ENOMEM;
3508 goto out;
3509 }
Nick Piggind9414772008-09-24 11:32:59 -04003510
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003511 if (PageUptodate(page))
3512 goto out;
Steve French8a236262007-03-06 00:31:00 +00003513
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003514 /*
3515 * If we write a full page it will be up to date, no need to read from
3516 * the server. If the write is short, we'll end up doing a sync write
3517 * instead.
3518 */
3519 if (len == PAGE_CACHE_SIZE)
3520 goto out;
3521
3522 /*
3523 * optimize away the read when we have an oplock, and we're not
3524 * expecting to use any of the data we'd be reading in. That
3525 * is, when the page lies beyond the EOF, or straddles the EOF
3526 * and the write will cover all of the existing data.
3527 */
3528 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3529 i_size = i_size_read(mapping->host);
3530 if (page_start >= i_size ||
3531 (offset == 0 && (pos + len) >= i_size)) {
3532 zero_user_segments(page, 0, offset,
3533 offset + len,
3534 PAGE_CACHE_SIZE);
3535 /*
3536 * PageChecked means that the parts of the page
3537 * to which we're not writing are considered up
3538 * to date. Once the data is copied to the
3539 * page, it can be set uptodate.
3540 */
3541 SetPageChecked(page);
3542 goto out;
3543 }
3544 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545
Nick Piggind9414772008-09-24 11:32:59 -04003546 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003547 /*
3548 * might as well read a page, it is fast enough. If we get
3549 * an error, we don't need to return it. cifs_write_end will
3550 * do a sync write instead since PG_uptodate isn't set.
3551 */
3552 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003553 } else {
3554		/* we could try using another file handle if there is one -
3555		 but how would we lock it to prevent a close of that handle
3556		 racing with this read? In any case,
Nick Piggind9414772008-09-24 11:32:59 -04003557		 this will be written out by write_end, so it is fine */
Steve French8a236262007-03-06 00:31:00 +00003558 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003559out:
3560 *pagep = page;
3561 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562}
3563
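/*
 * ->releasepage() is asked whether a clean page may be freed by the VM.
 * Returning 0 refuses the release: pages with private data are kept, and
 * anything else is left up to fscache, which refuses the release while it
 * is still using the page.
 */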
static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

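/*
 * Only a full-page invalidation (offset 0, length PAGE_CACHE_SIZE) evicts
 * the page from fscache; a partial invalidation leaves the cached copy
 * alone, since the rest of the page is still valid.
 */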
static void cifs_invalidate_page(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0 && length == PAGE_CACHE_SIZE)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

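/*
 * ->launder_page() is called for a dirty, locked page that is about to be
 * freed or invalidated. Write it back synchronously (WB_SYNC_ALL) so no
 * data is lost, then drop the fscache copy as well.
 */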
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

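/*
 * Work item run when the server breaks an oplock. It downgrades any
 * conflicting lease, flushes (and, on a full break, waits for and
 * invalidates) cached pages, pushes cached byte-range locks back to the
 * server, and finally acknowledges the break.
 */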
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_invalidate_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session, using a now-incorrect file handle, is not a data
	 * integrity issue. Still, do not bother sending an oplock release
	 * if the session to the server is disconnected, since the server
	 * has already released the oplock.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
}

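/*
 * Address space operations for cifs inodes. The second table below is
 * identical except that it omits ->readpages, for servers whose
 * negotiated buffer is too small for it (see the comment there).
 */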
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
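
/*
 * For reference, the caller in fs/cifs/inode.c chooses between the two
 * tables based on the negotiated buffer size; a simplified sketch of
 * that check (not part of this file):
 *
 *	if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
 *			PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
 *		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *	else
 *		inode->i_data.a_ops = &cifs_addr_ops;
 */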