blob: ba7eed2ee6627d24aec664b8ee07c10ef2f89176 [file] [log] [blame]
/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or  FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
/*
 * Open (or create) a file using the POSIX open call of the CIFS Unix
 * extensions and, when possible, populate the matching inode.
 *
 * @full_path:	server-relative path of the file
 * @pinode:	in/out inode pointer; may be NULL if the caller does not need
 *		inode info.  If *pinode is NULL a new inode is allocated from
 *		the returned attributes, otherwise the existing inode is
 *		refreshed.
 * @sb:		superblock of the mount
 * @mode:	create mode (masked with the current umask below)
 * @f_flags:	VFS open flags, translated to SMB_O_* for the wire call
 * @poplock:	out: oplock level granted by the server
 * @pnetfid:	out: network file handle
 * @xid:	transaction id for this operation
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	/* response buffer for the file attributes returned by the server */
	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	/* apply the process umask; the server does not know it */
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	/* tlink reference only needed for the wire call above */
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	/* Type of -1 means the server did not return file attributes */
	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		/* refresh the existing inode from the returned attributes */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
174
/*
 * Open a file the "NT" (non-POSIX) way via the per-dialect open operation,
 * then refresh the inode metadata from the open response.
 *
 * @full_path:	server-relative path of the file
 * @inode:	inode being opened (metadata refreshed on success)
 * @cifs_sb:	superblock info (used for backup-intent credentials)
 * @tcon:	tree connection to issue the open on
 * @f_flags:	VFS open flags, mapped to desired access and disposition
 * @oplock:	in/out oplock level
 * @fid:	out: file handle identifiers filled by the open op
 * @xid:	transaction id for this operation
 *
 * Returns 0 on success, -ENOSYS if the dialect has no open op, or a
 * negative errno.
 */
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* response buffer for the file metadata returned by the open */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	/* refresh inode metadata; Unix extensions need a separate query */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}
253
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400254static bool
255cifs_has_mand_locks(struct cifsInodeInfo *cinode)
256{
257 struct cifs_fid_locks *cur;
258 bool has_locks = false;
259
260 down_read(&cinode->lock_sem);
261 list_for_each_entry(cur, &cinode->llist, llist) {
262 if (!list_empty(&cur->locks)) {
263 has_locks = true;
264 break;
265 }
266 }
267 up_read(&cinode->lock_sem);
268 return has_locks;
269}
270
/*
 * Allocate and initialize the per-open-file private data (cifsFileInfo)
 * for an open that just succeeded, link it onto the tcon and inode open
 * file lists, and attach it to @file->private_data.
 *
 * @fid:	handle identifiers returned by the open; fid->pending_open is
 *		consumed (removed from the pending-opens list) here
 * @file:	VFS file being opened
 * @tlink:	tree connection link; an extra reference is taken
 * @oplock:	oplock level granted by the server
 *
 * Returns the new cifsFileInfo with an initial refcount of 1, or NULL on
 * allocation failure.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	/* per-handle byte-range lock list, registered on the inode */
	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;	/* initial reference, dropped in cifsFileInfo_put */
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	/* pin the superblock while this handle exists */
	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (oplock == server->vals->oplock_read &&
						cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&cifs_file_list_lock);
	/* a lease break may have updated the pending open's oplock meanwhile */
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if readable file instance put first in list*/
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}
340
/*
 * Take a reference on the file private data under cifs_file_list_lock and
 * return the same pointer for caller convenience.  Pair with
 * cifsFileInfo_put().
 */
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}
349
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 *
 * When the last reference is dropped this unlinks the handle from the
 * inode/tcon lists, closes the handle on the server (if still valid),
 * frees all outstanding byte-range lock records, and finally frees the
 * cifsFileInfo itself.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	/* not the last reference: just drop the count and return */
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close  because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	/* make sure no oplock-break worker still references this handle */
	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
430
/*
 * VFS ->open() for cifs.  Tries a POSIX open first when the server
 * advertises the Unix extensions, falling back to the NT-style open on
 * unsupported/transient errors, then builds the per-open private data.
 *
 * Returns 0 on success or a negative errno.
 */
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* try the POSIX open path if the server supports it */
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			/* server claims the capability but rejects the call:
			   disable POSIX opens on this tcon from now on */
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* register the open so a lease break arriving now is not missed */
	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		/* undo the server-side open; we cannot track the handle */
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
549
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 *
 * Picks the POSIX (fcntl) push path when the server supports the Unix
 * FCNTL capability and the mount has not disabled POSIX brlocks;
 * otherwise pushes mandatory locks via the dialect-specific op.
 * Returns 0 on success or a negative errno from the push.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to push them */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_write(&cinode->lock_sem);
	return rc;
}
582
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700583static int
584cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585{
586 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400587 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400588 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +0000590 struct cifs_tcon *tcon;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700591 struct TCP_Server_Info *server;
592 struct cifsInodeInfo *cinode;
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000593 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 char *full_path = NULL;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700595 int desired_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 int disposition = FILE_OPEN;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500597 int create_options = CREATE_NOT_DIR;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400598 struct cifs_open_parms oparms;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700599
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400600 xid = get_xid();
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700601 mutex_lock(&cfile->fh_mutex);
602 if (!cfile->invalidHandle) {
603 mutex_unlock(&cfile->fh_mutex);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530604 rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400605 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530606 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 }
608
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700609 inode = cfile->dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610 cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700611 tcon = tlink_tcon(cfile->tlink);
612 server = tcon->ses->server;
Steve French3a9f4622007-04-04 17:10:24 +0000613
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700614 /*
615 * Can not grab rename sem here because various ops, including those
616 * that already have the rename sem can end up causing writepage to get
617 * called and if the server was down that means we end up here, and we
618 * can never tell if the caller already has the rename_sem.
619 */
620 full_path = build_path_from_dentry(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621 if (full_path == NULL) {
Steve French3a9f4622007-04-04 17:10:24 +0000622 rc = -ENOMEM;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700623 mutex_unlock(&cfile->fh_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400624 free_xid(xid);
Steve French3a9f4622007-04-04 17:10:24 +0000625 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626 }
627
Joe Perchesf96637b2013-05-04 22:12:25 -0500628 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
629 inode, cfile->f_flags, full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630
Pavel Shilovsky10b9b982012-03-20 12:55:09 +0300631 if (tcon->ses->server->oplocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632 oplock = REQ_OPLOCK;
633 else
Steve French4b18f2a2008-04-29 00:06:05 +0000634 oplock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400636 if (tcon->unix_ext && cap_unix(tcon->ses) &&
Steve French7fc8f4e2009-02-23 20:43:11 +0000637 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400638 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Jeff Layton608712f2010-10-15 15:33:56 -0400639 /*
640 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
641 * original open. Must mask them off for a reopen.
642 */
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700643 unsigned int oflags = cfile->f_flags &
Jeff Layton15886172010-10-15 15:33:59 -0400644 ~(O_CREAT | O_EXCL | O_TRUNC);
Jeff Layton608712f2010-10-15 15:33:56 -0400645
Jeff Layton2422f672010-06-16 13:40:16 -0400646 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700647 cifs_sb->mnt_file_mode /* ignored */,
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400648 oflags, &oplock, &cfile->fid.netfid, xid);
Steve French7fc8f4e2009-02-23 20:43:11 +0000649 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500650 cifs_dbg(FYI, "posix reopen succeeded\n");
Steve French7fc8f4e2009-02-23 20:43:11 +0000651 goto reopen_success;
652 }
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700653 /*
654 * fallthrough to retry open the old way on errors, especially
655 * in the reconnect path it is important to retry hard
656 */
Steve French7fc8f4e2009-02-23 20:43:11 +0000657 }
658
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700659 desired_access = cifs_convert_flags(cfile->f_flags);
Steve French7fc8f4e2009-02-23 20:43:11 +0000660
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500661 if (backup_cred(cifs_sb))
662 create_options |= CREATE_OPEN_BACKUP_INTENT;
663
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700664 if (server->ops->get_lease_key)
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400665 server->ops->get_lease_key(inode, &cfile->fid);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700666
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400667 oparms.tcon = tcon;
668 oparms.cifs_sb = cifs_sb;
669 oparms.desired_access = desired_access;
670 oparms.create_options = create_options;
671 oparms.disposition = disposition;
672 oparms.path = full_path;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400673 oparms.fid = &cfile->fid;
674 oparms.reconnect = true;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400675
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700676 /*
677 * Can not refresh inode by passing in file_info buf to be returned by
678 * CIFSSMBOpen and then calling get_inode_info with returned buf since
679 * file might have write behind data that needs to be flushed and server
680 * version of file size can be stale. If we knew for sure that inode was
681 * not dirty locally we could do this.
682 */
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400683 rc = server->ops->open(xid, &oparms, &oplock, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684 if (rc) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700685 mutex_unlock(&cfile->fh_mutex);
Joe Perchesf96637b2013-05-04 22:12:25 -0500686 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
687 cifs_dbg(FYI, "oplock: %d\n", oplock);
Jeff Layton15886172010-10-15 15:33:59 -0400688 goto reopen_error_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689 }
Jeff Layton15886172010-10-15 15:33:59 -0400690
691reopen_success:
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700692 cfile->invalidHandle = false;
693 mutex_unlock(&cfile->fh_mutex);
694 cinode = CIFS_I(inode);
Jeff Layton15886172010-10-15 15:33:59 -0400695
696 if (can_flush) {
697 rc = filemap_write_and_wait(inode->i_mapping);
Jeff Laytoneb4b7562010-10-22 14:52:29 -0400698 mapping_set_error(inode->i_mapping, rc);
Jeff Layton15886172010-10-15 15:33:59 -0400699
Jeff Layton15886172010-10-15 15:33:59 -0400700 if (tcon->unix_ext)
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700701 rc = cifs_get_inode_info_unix(&inode, full_path,
702 inode->i_sb, xid);
Jeff Layton15886172010-10-15 15:33:59 -0400703 else
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700704 rc = cifs_get_inode_info(&inode, full_path, NULL,
705 inode->i_sb, xid, NULL);
706 }
707 /*
708 * Else we are writing out data to server already and could deadlock if
709 * we tried to flush data, and since we do not know if we have data that
710 * would invalidate the current end of file on the server we can not go
711 * to the server to get the new inode info.
712 */
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300713
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400714 server->ops->set_fid(cfile, &cfile->fid, oplock);
715 if (oparms.reconnect)
716 cifs_relock_file(cfile);
Jeff Layton15886172010-10-15 15:33:59 -0400717
718reopen_error_exit:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400720 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700721 return rc;
722}
723
724int cifs_close(struct inode *inode, struct file *file)
725{
Jeff Layton77970692011-04-05 16:23:47 -0700726 if (file->private_data != NULL) {
727 cifsFileInfo_put(file->private_data);
728 file->private_data = NULL;
729 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700730
Steve Frenchcdff08e2010-10-21 22:46:14 +0000731 /* return code from the ->release op is always ignored */
732 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733}
734
/*
 * Release the private search state attached to a directory file on its last
 * close. If an in-progress readdir left an open search handle on the server,
 * close it (best effort), then free any cached network buffer and drop the
 * tlink reference.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	/* Nothing was ever attached (e.g. open failed early) - nothing to do. */
	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		/*
		 * The search handle is still live on the server: mark it
		 * invalid under the list lock, then close it outside the
		 * spinlock (the close may sleep).
		 */
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	/* Free the SMB response buffer cached by the last FindFirst/FindNext. */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
785
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400786static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300787cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000788{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400789 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000790 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400791 if (!lock)
792 return lock;
793 lock->offset = offset;
794 lock->length = length;
795 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400796 lock->pid = current->tgid;
797 INIT_LIST_HEAD(&lock->blist);
798 init_waitqueue_head(&lock->block_q);
799 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400800}
801
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700802void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400803cifs_del_lock_waiters(struct cifsLockInfo *lock)
804{
805 struct cifsLockInfo *li, *tmp;
806 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
807 list_del_init(&li->blist);
808 wake_up(&li->block_q);
809 }
810}
811
/* Operation kinds passed as @rw_check to the lock-conflict helpers below. */
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : CIFS_LOCK_OP - lock op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
/*
 * Scan one fid's lock list for a lock that conflicts with the range
 * (offset, length) of the given @type, on behalf of @cfile performing
 * @rw_check (CIFS_LOCK_OP/CIFS_READ_OP/CIFS_WRITE_OP). On conflict, store
 * the conflicting lock in *conf_lock (if non-NULL) and return true.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* Skip locks whose range does not overlap ours at all. */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/*
		 * Two shared locks don't conflict; neither do locks of the
		 * same type held by the same owner through the same fid.
		 */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
847
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700848bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300849cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700850 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400851 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400852{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300853 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700854 struct cifs_fid_locks *cur;
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300855 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300856
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700857 list_for_each_entry(cur, &cinode->llist, llist) {
858 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700859 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300860 if (rc)
861 break;
862 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300863
864 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400865}
866
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* Read side is enough: we only inspect the lock lists here. */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* Report the conflicting lock's properties back via flock. */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* No local conflict, but the server must be consulted. */
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
904
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400905static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300906cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400907{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300908 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700909 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700910 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700911 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000912}
913
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* No conflict and brlocks are cached - grant it locally. */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blocked list and
		 * sleep until cifs_del_lock_waiters() detaches us (our blist
		 * entry becomes empty again), then retry the whole check.
		 * On a signal, re-take the sem and unhook ourselves.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
960
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300961/*
962 * Check if there is another lock that prevents us to set the lock (posix
963 * style). If such a lock exists, update the flock structure with its
964 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
965 * or leave it the same if we can't. Returns 0 if we don't need to request to
966 * the server or 1 otherwise.
967 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400968static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400969cifs_posix_lock_test(struct file *file, struct file_lock *flock)
970{
971 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -0500972 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400973 unsigned char saved_type = flock->fl_type;
974
Pavel Shilovsky50792762011-10-29 17:17:57 +0400975 if ((flock->fl_flags & FL_POSIX) == 0)
976 return 1;
977
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700978 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400979 posix_test_lock(file, flock);
980
981 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
982 flock->fl_type = saved_type;
983 rc = 1;
984 }
985
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700986 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400987 return rc;
988}
989
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* Brlocks aren't cached locally - caller must go to server. */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * A conflicting lock blocked us: wait (interruptibly) for it
		 * to be released and retry; on a signal, unblock ourselves
		 * and return the error.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}
1022
/*
 * Push all locally cached mandatory-style byte-range locks for @cfile to the
 * server. Locks are batched into LOCKING_ANDX_RANGE arrays (as many as fit
 * in one SMB) and sent in two passes: exclusive locks first, then shared
 * locks. Returns 0 on success or the last non-zero status from cifs_lockv().
 */
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	/* One pass per lock type: exclusive first, then shared. */
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	/* How many lock ranges fit in one request after the SMB header. */
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				/* Buffer full - flush this batch and reuse it. */
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		/* Send the final partial batch of this pass, if any. */
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
1093
/*
 * copied from fs/locks.c with a name change: walks the inode's i_flock list;
 * callers below take inode->i_lock around the traversal.
 */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)
1098
/*
 * Snapshot of one POSIX byte-range lock, queued on a local list so it can be
 * sent to the server outside inode->i_lock (see cifs_push_posix_locks()).
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the locks_to_send list */
	__u64 offset;		/* lock start (from fl_start) */
	__u64 length;		/* lock length (1 + fl_end - fl_start) */
	__u32 pid;		/* lock owner (from fl_pid) */
	__u16 netfid;		/* SMB file handle to push the lock on */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1107
/*
 * Push all cached POSIX byte-range locks on this inode to the server, in
 * three phases: (1) count FL_POSIX locks under inode->i_lock, (2) snapshot
 * them into preallocated lock_to_push entries (allocation must happen
 * outside the spinlock), (3) send each one via CIFSSMBPosixLock(). Returns
 * 0 on success, -ENOMEM on allocation failure, or the last server error.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = cfile->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* Phase 1: count the POSIX locks we will need to push. */
	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	spin_unlock(&inode->i_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* Phase 2: copy each lock into the next preallocated entry. */
	el = locks_to_send.next;
	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&inode->i_lock);

	/* Phase 3: send the snapshots to the server, freeing as we go. */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	/* Allocation failed part-way: release what we preallocated. */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1196
1197static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001198cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001199{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001200 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001201 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001202 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001203 int rc = 0;
1204
1205 /* we are going to update can_cache_brlcks here - need a write access */
1206 down_write(&cinode->lock_sem);
1207 if (!cinode->can_cache_brlcks) {
1208 up_write(&cinode->lock_sem);
1209 return rc;
1210 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001211
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001212 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001213 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1214 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001215 rc = cifs_push_posix_locks(cfile);
1216 else
1217 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001218
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001219 cinode->can_cache_brlcks = false;
1220 up_write(&cinode->lock_sem);
1221 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001222}
1223
/*
 * Decode a VFS file_lock into the protocol-level description used by the
 * lock/unlock paths: *type gets the server's lock-type flags, *lock/*unlock
 * are set to 1 for lock/unlock requests, and *wait_flag is set when the
 * caller asked for a blocking lock (FL_SLEEP).
 */
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	/* Warn about any flag bits we don't know how to handle. */
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	/* Translate the VFS lock type into the server's lock-type bits. */
	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270
/*
 * Service an F_GETLK request: report whether the range described by @flock
 * could be locked with lock type @type.
 *
 * On a POSIX-capable (unix extensions) share the test is done via
 * cifs_posix_lock_test()/CIFSSMBPosixLock(). Otherwise the mandatory-lock
 * path probes the server by actually taking and immediately releasing the
 * lock (there is no "test" verb in the mandatory protocol).
 *
 * On return flock->fl_type is F_UNLCK if the range is free, or set to the
 * conflicting lock type. Returns 0 on a completed test, negative error
 * otherwise.
 */
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	/* fl_end is inclusive, hence the +1 */
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		/* local (cached) conflict check first; 0 means "answered" */
		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		/* ask the server; it fills in @flock with any conflict */
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	/* check our own cached brlock list before touching the wire */
	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	/* probe: try to take the requested lock, then undo it */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	/* a shared lock already failed - nothing weaker to try */
	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	/* exclusive failed; retry as shared to tell F_RDLCK from F_WRLCK */
	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
1338
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001339void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001340cifs_move_llist(struct list_head *source, struct list_head *dest)
1341{
1342 struct list_head *li, *tmp;
1343 list_for_each_safe(li, tmp, source)
1344 list_move(li, dest);
1345}
1346
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001347void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001348cifs_free_llist(struct list_head *llist)
1349{
1350 struct cifsLockInfo *li, *tmp;
1351 list_for_each_entry_safe(li, tmp, llist, llist) {
1352 cifs_del_lock_waiters(li);
1353 list_del(&li->llist);
1354 kfree(li);
1355 }
1356}
1357
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001358int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001359cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1360 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001361{
1362 int rc = 0, stored_rc;
1363 int types[] = {LOCKING_ANDX_LARGE_FILES,
1364 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1365 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001366 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001367 LOCKING_ANDX_RANGE *buf, *cur;
1368 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1369 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1370 struct cifsLockInfo *li, *tmp;
1371 __u64 length = 1 + flock->fl_end - flock->fl_start;
1372 struct list_head tmp_llist;
1373
1374 INIT_LIST_HEAD(&tmp_llist);
1375
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001376 /*
1377 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1378 * and check it for zero before using.
1379 */
1380 max_buf = tcon->ses->server->maxBuf;
1381 if (!max_buf)
1382 return -EINVAL;
1383
1384 max_num = (max_buf - sizeof(struct smb_hdr)) /
1385 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001386 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1387 if (!buf)
1388 return -ENOMEM;
1389
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001390 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001391 for (i = 0; i < 2; i++) {
1392 cur = buf;
1393 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001394 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001395 if (flock->fl_start > li->offset ||
1396 (flock->fl_start + length) <
1397 (li->offset + li->length))
1398 continue;
1399 if (current->tgid != li->pid)
1400 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001401 if (types[i] != li->type)
1402 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001403 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001404 /*
1405 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001406 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001407 */
1408 list_del(&li->llist);
1409 cifs_del_lock_waiters(li);
1410 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001411 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001412 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001413 cur->Pid = cpu_to_le16(li->pid);
1414 cur->LengthLow = cpu_to_le32((u32)li->length);
1415 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1416 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1417 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1418 /*
1419 * We need to save a lock here to let us add it again to
1420 * the file's list if the unlock range request fails on
1421 * the server.
1422 */
1423 list_move(&li->llist, &tmp_llist);
1424 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001425 stored_rc = cifs_lockv(xid, tcon,
1426 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001427 li->type, num, 0, buf);
1428 if (stored_rc) {
1429 /*
1430 * We failed on the unlock range
1431 * request - add all locks from the tmp
1432 * list to the head of the file's list.
1433 */
1434 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001435 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001436 rc = stored_rc;
1437 } else
1438 /*
1439 * The unlock range request succeed -
1440 * free the tmp list.
1441 */
1442 cifs_free_llist(&tmp_llist);
1443 cur = buf;
1444 num = 0;
1445 } else
1446 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001447 }
1448 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001449 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001450 types[i], num, 0, buf);
1451 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001452 cifs_move_llist(&tmp_llist,
1453 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001454 rc = stored_rc;
1455 } else
1456 cifs_free_llist(&tmp_llist);
1457 }
1458 }
1459
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001460 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001461 kfree(buf);
1462 return rc;
1463}
1464
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001465static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001466cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001467 bool wait_flag, bool posix_lck, int lock, int unlock,
1468 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001469{
1470 int rc = 0;
1471 __u64 length = 1 + flock->fl_end - flock->fl_start;
1472 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1473 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001474 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001475 struct inode *inode = cfile->dentry->d_inode;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001476
1477 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001478 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001479
1480 rc = cifs_posix_lock_set(file, flock);
1481 if (!rc || rc < 0)
1482 return rc;
1483
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001484 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001485 posix_lock_type = CIFS_RDLCK;
1486 else
1487 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001488
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001489 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001490 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001491
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001492 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1493 current->tgid, flock->fl_start, length,
1494 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001495 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001496 }
1497
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001498 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001499 struct cifsLockInfo *lock;
1500
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001501 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001502 if (!lock)
1503 return -ENOMEM;
1504
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001505 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001506 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001507 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001508 return rc;
1509 }
1510 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001511 goto out;
1512
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +04001513
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001514 /*
1515 * Windows 7 server can delay breaking lease from read to None
1516 * if we set a byte-range lock on a file - break it explicitly
1517 * before sending the lock to the server to be sure the next
1518 * read won't conflict with non-overlapted locks due to
1519 * pagereading.
1520 */
1521 if (!CIFS_I(inode)->clientCanCacheAll &&
1522 CIFS_I(inode)->clientCanCacheRead) {
1523 cifs_invalidate_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001524 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1525 inode);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001526 CIFS_I(inode)->clientCanCacheRead = false;
1527 }
1528
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001529 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1530 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001531 if (rc) {
1532 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001533 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001534 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001535
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001536 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001537 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001538 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001539
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001540out:
1541 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001542 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001543 return rc;
1544}
1545
1546int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1547{
1548 int rc, xid;
1549 int lock = 0, unlock = 0;
1550 bool wait_flag = false;
1551 bool posix_lck = false;
1552 struct cifs_sb_info *cifs_sb;
1553 struct cifs_tcon *tcon;
1554 struct cifsInodeInfo *cinode;
1555 struct cifsFileInfo *cfile;
1556 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001557 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001558
1559 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001560 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001561
Joe Perchesf96637b2013-05-04 22:12:25 -05001562 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1563 cmd, flock->fl_flags, flock->fl_type,
1564 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001565
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001566 cfile = (struct cifsFileInfo *)file->private_data;
1567 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001568
1569 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1570 tcon->ses->server);
1571
1572 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001573 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001574 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001575
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001576 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001577 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1578 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1579 posix_lck = true;
1580 /*
1581 * BB add code here to normalize offset and length to account for
1582 * negative length which we can not accept over the wire.
1583 */
1584 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001585 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001586 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001587 return rc;
1588 }
1589
1590 if (!lock && !unlock) {
1591 /*
1592 * if no lock or unlock then nothing to do since we do not
1593 * know what it is
1594 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001595 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001596 return -EOPNOTSUPP;
1597 }
1598
1599 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1600 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001601 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 return rc;
1603}
1604
Jeff Layton597b0272012-03-23 14:40:56 -04001605/*
1606 * update the file size (if needed) after a write. Should be called with
1607 * the inode->i_lock held
1608 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001609void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001610cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1611 unsigned int bytes_written)
1612{
1613 loff_t end_of_write = offset + bytes_written;
1614
1615 if (end_of_write > cifsi->server_eof)
1616 cifsi->server_eof = end_of_write;
1617}
1618
/*
 * Synchronously write @write_size bytes from @write_data to the file at
 * *@offset on behalf of @pid, retrying on -EAGAIN (reopening a stale
 * handle as needed) and issuing as many wire writes as the negotiated
 * wsize requires.
 *
 * On success *@offset is advanced past the written data, the cached
 * server EOF and i_size are updated, and the number of bytes written is
 * returned. If nothing was written, the first error code is returned.
 */
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %s\n",
		 write_size, *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	/* outer loop: one wsize-bounded chunk per iteration */
	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		/* inner loop: retry the same chunk while we get -EAGAIN */
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			/* partial success: report what we wrote so far;
			   nothing written: propagate the error */
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			/* i_lock protects server_eof and offset updates */
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		/* extend i_size if the write went past the old end */
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}
1703
/*
 * Find an open file handle on @cifs_inode that is usable for reading.
 * When @fsuid_only is set (honored only on multiuser mounts) the handle
 * must also belong to the current fsuid.
 *
 * Returns the handle with an extra reference taken (caller must
 * cifsFileInfo_put() it), or NULL if none is available.
 */
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}
Steve French630f3f0c2007-10-25 21:17:17 +00001737
Jeff Layton6508d902010-09-29 19:51:11 -04001738struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1739 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001740{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001741 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001742 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001743 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001744 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001745 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001746
Steve French60808232006-04-22 15:53:05 +00001747 /* Having a null inode here (because mapping->host was set to zero by
1748 the VFS or MM) should not happen but we had reports of on oops (due to
1749 it being zero) during stress testcases so we need to check for it */
1750
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001751 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001752 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001753 dump_stack();
1754 return NULL;
1755 }
1756
Jeff Laytond3892292010-11-02 16:22:50 -04001757 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1758
Jeff Layton6508d902010-09-29 19:51:11 -04001759 /* only filter by fsuid on multiuser mounts */
1760 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1761 fsuid_only = false;
1762
Jeff Layton44772882010-10-15 15:34:03 -04001763 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001764refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001765 if (refind > MAX_REOPEN_ATT) {
1766 spin_unlock(&cifs_file_list_lock);
1767 return NULL;
1768 }
Steve French6148a742005-10-05 12:23:19 -07001769 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001770 if (!any_available && open_file->pid != current->tgid)
1771 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001772 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001773 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001774 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001775 if (!open_file->invalidHandle) {
1776 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001777 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001778 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001779 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001780 } else {
1781 if (!inv_file)
1782 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001783 }
Steve French6148a742005-10-05 12:23:19 -07001784 }
1785 }
Jeff Layton2846d382008-09-22 21:33:33 -04001786 /* couldn't find useable FH with same pid, try any available */
1787 if (!any_available) {
1788 any_available = true;
1789 goto refind_writable;
1790 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001791
1792 if (inv_file) {
1793 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001794 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001795 }
1796
Jeff Layton44772882010-10-15 15:34:03 -04001797 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001798
1799 if (inv_file) {
1800 rc = cifs_reopen_file(inv_file, false);
1801 if (!rc)
1802 return inv_file;
1803 else {
1804 spin_lock(&cifs_file_list_lock);
1805 list_move_tail(&inv_file->flist,
1806 &cifs_inode->openFileList);
1807 spin_unlock(&cifs_file_list_lock);
1808 cifsFileInfo_put(inv_file);
1809 spin_lock(&cifs_file_list_lock);
1810 ++refind;
1811 goto refind_writable;
1812 }
1813 }
1814
Steve French6148a742005-10-05 12:23:19 -07001815 return NULL;
1816}
1817
/*
 * Write the byte range [@from, @to) of @page back to the server through
 * any writable open handle on the inode.
 *
 * Returns 0 on success, 0 if the page lies entirely past i_size (racing
 * truncate - nothing to do), or a negative error (-EFAULT with no
 * mapping/handle issues surface as -EIO when no writable handle exists).
 */
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	/* file offset of the first byte we will write */
	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	/* sanity: range must lie within the page */
	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	/* balance the kmap above on every exit path reached from here */
	kunmap(page);
	return rc;
}
1871
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001873 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001875 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1876 bool done = false, scanned = false, range_whole = false;
1877 pgoff_t end, index;
1878 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001879 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001880 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001881 int rc = 0;
Steve French50c2f752007-07-13 00:33:32 +00001882
Steve French37c0eb42005-10-05 14:50:29 -07001883 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001884 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001885 * one page at a time via cifs_writepage
1886 */
1887 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1888 return generic_writepages(mapping, wbc);
1889
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001890 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001891 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001892 end = -1;
1893 } else {
1894 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1895 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1896 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001897 range_whole = true;
1898 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001899 }
1900retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001901 while (!done && index <= end) {
1902 unsigned int i, nr_pages, found_pages;
1903 pgoff_t next = 0, tofind;
1904 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001905
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001906 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1907 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001908
Jeff Laytonc2e87642012-03-23 14:40:55 -04001909 wdata = cifs_writedata_alloc((unsigned int)tofind,
1910 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001911 if (!wdata) {
1912 rc = -ENOMEM;
1913 break;
1914 }
1915
1916 /*
1917 * find_get_pages_tag seems to return a max of 256 on each
1918 * iteration, so we must call it several times in order to
1919 * fill the array or the wsize is effectively limited to
1920 * 256 * PAGE_CACHE_SIZE.
1921 */
1922 found_pages = 0;
1923 pages = wdata->pages;
1924 do {
1925 nr_pages = find_get_pages_tag(mapping, &index,
1926 PAGECACHE_TAG_DIRTY,
1927 tofind, pages);
1928 found_pages += nr_pages;
1929 tofind -= nr_pages;
1930 pages += nr_pages;
1931 } while (nr_pages && tofind && index <= end);
1932
1933 if (found_pages == 0) {
1934 kref_put(&wdata->refcount, cifs_writedata_release);
1935 break;
1936 }
1937
1938 nr_pages = 0;
1939 for (i = 0; i < found_pages; i++) {
1940 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001941 /*
1942 * At this point we hold neither mapping->tree_lock nor
1943 * lock on the page itself: the page may be truncated or
1944 * invalidated (changing page->mapping to NULL), or even
1945 * swizzled back from swapper_space to tmpfs file
1946 * mapping
1947 */
1948
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001949 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001950 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001951 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001952 break;
1953
1954 if (unlikely(page->mapping != mapping)) {
1955 unlock_page(page);
1956 break;
1957 }
1958
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001959 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001960 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001961 unlock_page(page);
1962 break;
1963 }
1964
1965 if (next && (page->index != next)) {
1966 /* Not next consecutive page */
1967 unlock_page(page);
1968 break;
1969 }
1970
1971 if (wbc->sync_mode != WB_SYNC_NONE)
1972 wait_on_page_writeback(page);
1973
1974 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001975 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001976 unlock_page(page);
1977 break;
1978 }
Steve French84d2f072005-10-12 15:32:05 -07001979
Linus Torvaldscb876f42006-12-23 16:19:07 -08001980 /*
1981 * This actually clears the dirty bit in the radix tree.
1982 * See cifs_writepage() for more commentary.
1983 */
1984 set_page_writeback(page);
1985
Jeff Layton3a98b862012-11-26 09:48:41 -05001986 if (page_offset(page) >= i_size_read(mapping->host)) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001987 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001988 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001989 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001990 break;
1991 }
1992
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001993 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001994 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001995 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001996 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001997
1998 /* reset index to refind any pages skipped */
1999 if (nr_pages == 0)
2000 index = wdata->pages[0]->index + 1;
2001
2002 /* put any pages we aren't going to use */
2003 for (i = nr_pages; i < found_pages; i++) {
2004 page_cache_release(wdata->pages[i]);
2005 wdata->pages[i] = NULL;
2006 }
2007
2008 /* nothing to write? */
2009 if (nr_pages == 0) {
2010 kref_put(&wdata->refcount, cifs_writedata_release);
2011 continue;
2012 }
2013
2014 wdata->sync_mode = wbc->sync_mode;
2015 wdata->nr_pages = nr_pages;
2016 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07002017 wdata->pagesz = PAGE_CACHE_SIZE;
2018 wdata->tailsz =
Jeff Layton3a98b862012-11-26 09:48:41 -05002019 min(i_size_read(mapping->host) -
2020 page_offset(wdata->pages[nr_pages - 1]),
Jeff Laytoneddb0792012-09-18 16:20:35 -07002021 (loff_t)PAGE_CACHE_SIZE);
2022 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
2023 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002024
2025 do {
2026 if (wdata->cfile != NULL)
2027 cifsFileInfo_put(wdata->cfile);
2028 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
2029 false);
2030 if (!wdata->cfile) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002031 cifs_dbg(VFS, "No writable handles for inode\n");
Steve French23e7dd72005-10-20 13:44:56 -07002032 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002033 break;
Steve French37c0eb42005-10-05 14:50:29 -07002034 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04002035 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002036 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2037 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002038 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07002039
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002040 for (i = 0; i < nr_pages; ++i)
2041 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05002042
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002043 /* send failure -- clean up the mess */
2044 if (rc != 0) {
2045 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05002046 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002047 redirty_page_for_writepage(wbc,
2048 wdata->pages[i]);
2049 else
2050 SetPageError(wdata->pages[i]);
2051 end_page_writeback(wdata->pages[i]);
2052 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07002053 }
Jeff Layton941b8532011-01-11 07:24:01 -05002054 if (rc != -EAGAIN)
2055 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002056 }
2057 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05002058
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002059 wbc->nr_to_write -= nr_pages;
2060 if (wbc->nr_to_write <= 0)
2061 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00002062
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002063 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07002064 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002065
Steve French37c0eb42005-10-05 14:50:29 -07002066 if (!scanned && !done) {
2067 /*
2068 * We hit the last page and there is more work to be done: wrap
2069 * back to the start of the file
2070 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002071 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07002072 index = 0;
2073 goto retry;
2074 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04002075
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07002076 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07002077 mapping->writeback_index = index;
2078
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 return rc;
2080}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002082static int
2083cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002085 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002086 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002088 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089/* BB add check for wbc flags */
2090 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00002091 if (!PageUptodate(page))
Joe Perchesf96637b2013-05-04 22:12:25 -05002092 cifs_dbg(FYI, "ppw - page not up to date\n");
Linus Torvaldscb876f42006-12-23 16:19:07 -08002093
2094 /*
2095 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2096 *
2097 * A writepage() implementation always needs to do either this,
2098 * or re-dirty the page with "redirty_page_for_writepage()" in
2099 * the case of a failure.
2100 *
2101 * Just unlocking the page will cause the radix tree tag-bits
2102 * to fail to update with the state of the page correctly.
2103 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002104 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002105retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002107 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
2108 goto retry_write;
2109 else if (rc == -EAGAIN)
2110 redirty_page_for_writepage(wbc, page);
2111 else if (rc != 0)
2112 SetPageError(page);
2113 else
2114 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08002115 end_page_writeback(page);
2116 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002117 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 return rc;
2119}
2120
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04002121static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2122{
2123 int rc = cifs_writepage_locked(page, wbc);
2124 unlock_page(page);
2125 return rc;
2126}
2127
Nick Piggind9414772008-09-24 11:32:59 -04002128static int cifs_write_end(struct file *file, struct address_space *mapping,
2129 loff_t pos, unsigned len, unsigned copied,
2130 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131{
Nick Piggind9414772008-09-24 11:32:59 -04002132 int rc;
2133 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002134 struct cifsFileInfo *cfile = file->private_data;
2135 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2136 __u32 pid;
2137
2138 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2139 pid = cfile->pid;
2140 else
2141 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
Joe Perchesf96637b2013-05-04 22:12:25 -05002143 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00002144 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002145
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002146 if (PageChecked(page)) {
2147 if (copied == len)
2148 SetPageUptodate(page);
2149 ClearPageChecked(page);
2150 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002151 SetPageUptodate(page);
2152
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002154 char *page_data;
2155 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002156 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002157
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002158 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 /* this is probably better than directly calling
2160 partialpage_write since in this function the file handle is
2161 known which we might as well leverage */
2162 /* BB check if anything else missing out of ppw
2163 such as updating last write time */
2164 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002165 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002166 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002168
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002169 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002170 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002171 rc = copied;
2172 pos += copied;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002173 set_page_dirty(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 }
2175
Nick Piggind9414772008-09-24 11:32:59 -04002176 if (rc > 0) {
2177 spin_lock(&inode->i_lock);
2178 if (pos > inode->i_size)
2179 i_size_write(inode, pos);
2180 spin_unlock(&inode->i_lock);
2181 }
2182
2183 unlock_page(page);
2184 page_cache_release(page);
2185
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 return rc;
2187}
2188
Josef Bacik02c24a82011-07-16 20:44:56 -04002189int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2190 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002192 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002194 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002195 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002196 struct cifsFileInfo *smbfile = file->private_data;
Al Viro496ad9a2013-01-23 17:07:38 -05002197 struct inode *inode = file_inode(file);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002198 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
Josef Bacik02c24a82011-07-16 20:44:56 -04002200 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2201 if (rc)
2202 return rc;
2203 mutex_lock(&inode->i_mutex);
2204
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002205 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
Joe Perchesf96637b2013-05-04 22:12:25 -05002207 cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
2208 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002209
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002210 if (!CIFS_I(inode)->clientCanCacheRead) {
2211 rc = cifs_invalidate_mapping(inode);
2212 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05002213 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002214 rc = 0; /* don't care about it in fsync */
2215 }
2216 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002217
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002218 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002219 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2220 server = tcon->ses->server;
2221 if (server->ops->flush)
2222 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2223 else
2224 rc = -ENOSYS;
2225 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002226
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002227 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002228 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002229 return rc;
2230}
2231
Josef Bacik02c24a82011-07-16 20:44:56 -04002232int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002233{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002234 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002235 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002236 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002237 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002238 struct cifsFileInfo *smbfile = file->private_data;
2239 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002240 struct inode *inode = file->f_mapping->host;
2241
2242 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2243 if (rc)
2244 return rc;
2245 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002246
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002247 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002248
Joe Perchesf96637b2013-05-04 22:12:25 -05002249 cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
2250 file->f_path.dentry->d_name.name, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002251
2252 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002253 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2254 server = tcon->ses->server;
2255 if (server->ops->flush)
2256 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2257 else
2258 rc = -ENOSYS;
2259 }
Steve Frenchb298f222009-02-21 21:17:43 +00002260
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002261 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002262 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 return rc;
2264}
2265
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266/*
2267 * As file closes, flush all cached write data for this inode checking
2268 * for write behind errors.
2269 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002270int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271{
Al Viro496ad9a2013-01-23 17:07:38 -05002272 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 int rc = 0;
2274
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002275 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002276 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002277
Joe Perchesf96637b2013-05-04 22:12:25 -05002278 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
2280 return rc;
2281}
2282
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002283static int
2284cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2285{
2286 int rc = 0;
2287 unsigned long i;
2288
2289 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002290 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002291 if (!pages[i]) {
2292 /*
2293 * save number of pages we have already allocated and
2294 * return with ENOMEM error
2295 */
2296 num_pages = i;
2297 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002298 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002299 }
2300 }
2301
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002302 if (rc) {
2303 for (i = 0; i < num_pages; i++)
2304 put_page(pages[i]);
2305 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002306 return rc;
2307}
2308
2309static inline
2310size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2311{
2312 size_t num_pages;
2313 size_t clen;
2314
2315 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002316 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002317
2318 if (cur_len)
2319 *cur_len = clen;
2320
2321 return num_pages;
2322}
2323
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002324static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002325cifs_uncached_writev_complete(struct work_struct *work)
2326{
2327 int i;
2328 struct cifs_writedata *wdata = container_of(work,
2329 struct cifs_writedata, work);
2330 struct inode *inode = wdata->cfile->dentry->d_inode;
2331 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2332
2333 spin_lock(&inode->i_lock);
2334 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2335 if (cifsi->server_eof > inode->i_size)
2336 i_size_write(inode, cifsi->server_eof);
2337 spin_unlock(&inode->i_lock);
2338
2339 complete(&wdata->done);
2340
2341 if (wdata->result != -EAGAIN) {
2342 for (i = 0; i < wdata->nr_pages; i++)
2343 put_page(wdata->pages[i]);
2344 }
2345
2346 kref_put(&wdata->refcount, cifs_writedata_release);
2347}
2348
/*
 * Attempt to send an uncached write to the server, retrying on any
 * -EAGAIN result.  If the file handle was invalidated (e.g. after a
 * reconnect) it is reopened first; a reopen failure other than -EAGAIN
 * ends the loop and is returned to the caller.
 */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue; /* loops again only if rc == -EAGAIN */
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}
2369
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002370static ssize_t
2371cifs_iovec_write(struct file *file, const struct iovec *iov,
2372 unsigned long nr_segs, loff_t *poffset)
2373{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002374 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002375 size_t copied, len, cur_len;
2376 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002377 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002378 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002379 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002380 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002381 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002382 struct cifs_writedata *wdata, *tmp;
2383 struct list_head wdata_list;
2384 int rc;
2385 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002386
2387 len = iov_length(iov, nr_segs);
2388 if (!len)
2389 return 0;
2390
2391 rc = generic_write_checks(file, poffset, &len, 0);
2392 if (rc)
2393 return rc;
2394
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002395 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002396 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002397 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002398 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002399
2400 if (!tcon->ses->server->ops->async_writev)
2401 return -ENOSYS;
2402
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002403 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002404
2405 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2406 pid = open_file->pid;
2407 else
2408 pid = current->tgid;
2409
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002410 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002411 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002412 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002413
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002414 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2415 wdata = cifs_writedata_alloc(nr_pages,
2416 cifs_uncached_writev_complete);
2417 if (!wdata) {
2418 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002419 break;
2420 }
2421
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002422 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2423 if (rc) {
2424 kfree(wdata);
2425 break;
2426 }
2427
2428 save_len = cur_len;
2429 for (i = 0; i < nr_pages; i++) {
2430 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2431 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2432 0, copied);
2433 cur_len -= copied;
2434 iov_iter_advance(&it, copied);
2435 }
2436 cur_len = save_len - cur_len;
2437
2438 wdata->sync_mode = WB_SYNC_ALL;
2439 wdata->nr_pages = nr_pages;
2440 wdata->offset = (__u64)offset;
2441 wdata->cfile = cifsFileInfo_get(open_file);
2442 wdata->pid = pid;
2443 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002444 wdata->pagesz = PAGE_SIZE;
2445 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002446 rc = cifs_uncached_retry_writev(wdata);
2447 if (rc) {
2448 kref_put(&wdata->refcount, cifs_writedata_release);
2449 break;
2450 }
2451
2452 list_add_tail(&wdata->list, &wdata_list);
2453 offset += cur_len;
2454 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002455 } while (len > 0);
2456
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002457 /*
2458 * If at least one write was successfully sent, then discard any rc
2459 * value from the later writes. If the other write succeeds, then
2460 * we'll end up returning whatever was written. If it fails, then
2461 * we'll get a new rc value from that.
2462 */
2463 if (!list_empty(&wdata_list))
2464 rc = 0;
2465
2466 /*
2467 * Wait for and collect replies for any successful sends in order of
2468 * increasing offset. Once an error is hit or we get a fatal signal
2469 * while waiting, then return without waiting for any more replies.
2470 */
2471restart_loop:
2472 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2473 if (!rc) {
2474 /* FIXME: freezable too? */
2475 rc = wait_for_completion_killable(&wdata->done);
2476 if (rc)
2477 rc = -EINTR;
2478 else if (wdata->result)
2479 rc = wdata->result;
2480 else
2481 total_written += wdata->bytes;
2482
2483 /* resend call if it's a retryable error */
2484 if (rc == -EAGAIN) {
2485 rc = cifs_uncached_retry_writev(wdata);
2486 goto restart_loop;
2487 }
2488 }
2489 list_del_init(&wdata->list);
2490 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002491 }
2492
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002493 if (total_written > 0)
2494 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002495
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002496 cifs_stats_bytes_written(tcon, total_written);
2497 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002498}
2499
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002500ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002501 unsigned long nr_segs, loff_t pos)
2502{
2503 ssize_t written;
2504 struct inode *inode;
2505
Al Viro496ad9a2013-01-23 17:07:38 -05002506 inode = file_inode(iocb->ki_filp);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002507
2508 /*
2509 * BB - optimize the way when signing is disabled. We can drop this
2510 * extra memory-to-memory copying and use iovec buffers for constructing
2511 * write request.
2512 */
2513
2514 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2515 if (written > 0) {
2516 CIFS_I(inode)->invalid_mapping = true;
2517 iocb->ki_pos = pos;
2518 }
2519
2520 return written;
2521}
2522
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002523static ssize_t
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002524cifs_writev(struct kiocb *iocb, const struct iovec *iov,
2525 unsigned long nr_segs, loff_t pos)
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002526{
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002527 struct file *file = iocb->ki_filp;
2528 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2529 struct inode *inode = file->f_mapping->host;
2530 struct cifsInodeInfo *cinode = CIFS_I(inode);
2531 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2532 ssize_t rc = -EACCES;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002533
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002534 BUG_ON(iocb->ki_pos != pos);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002535
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002536 /*
2537 * We need to hold the sem to be sure nobody modifies lock list
2538 * with a brlock that prevents writing.
2539 */
2540 down_read(&cinode->lock_sem);
2541 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2542 server->vals->exclusive_lock_type, NULL,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002543 CIFS_WRITE_OP)) {
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002544 mutex_lock(&inode->i_mutex);
2545 rc = __generic_file_aio_write(iocb, iov, nr_segs,
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002546 &iocb->ki_pos);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002547 mutex_unlock(&inode->i_mutex);
2548 }
2549
2550 if (rc > 0 || rc == -EIOCBQUEUED) {
2551 ssize_t err;
2552
2553 err = generic_write_sync(file, pos, rc);
2554 if (err < 0 && rc > 0)
2555 rc = err;
2556 }
2557
2558 up_read(&cinode->lock_sem);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002559 return rc;
2560}
2561
/*
 * aio write entry point for strict cache mounts: when we hold a write
 * oplock (clientCanCacheAll) use the page cache -- via the generic path
 * when POSIX brlock semantics apply, otherwise via cifs_writev() which
 * checks mandatory locks.  Without the oplock, fall back to an uncached
 * write straight to the server, and afterwards break a cached read
 * oplock so stale data is not read back.
 */
ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	if (cinode->clientCanCacheAll) {
		if (cap_unix(tcon->ses) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
			return generic_file_aio_write(iocb, iov, nr_segs, pos);
		return cifs_writev(iocb, iov, nr_segs, pos);
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because flushing may cause an error with
	 * mandatory locks on these pages but not on the region from pos to
	 * pos+len-1.
	 */
	written = cifs_user_writev(iocb, iov, nr_segs, pos);
	if (written > 0 && cinode->clientCanCacheRead) {
		/*
		 * A Windows 7 server can delay breaking a level2 oplock when
		 * a write request comes in - break it on the client to
		 * prevent reading stale data.
		 */
		cifs_invalidate_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}
	return written;
}
2601
Jeff Layton0471ca32012-05-16 07:13:16 -04002602static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002603cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002604{
2605 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002606
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002607 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2608 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002609 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002610 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002611 INIT_LIST_HEAD(&rdata->list);
2612 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002613 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002614 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002615
Jeff Layton0471ca32012-05-16 07:13:16 -04002616 return rdata;
2617}
2618
Jeff Layton6993f742012-05-16 07:13:17 -04002619void
2620cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002621{
Jeff Layton6993f742012-05-16 07:13:17 -04002622 struct cifs_readdata *rdata = container_of(refcount,
2623 struct cifs_readdata, refcount);
2624
2625 if (rdata->cfile)
2626 cifsFileInfo_put(rdata->cfile);
2627
Jeff Layton0471ca32012-05-16 07:13:16 -04002628 kfree(rdata);
2629}
2630
Jeff Layton2a1bb132012-05-16 07:13:17 -04002631static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002632cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002633{
2634 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002635 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002636 unsigned int i;
2637
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002638 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002639 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2640 if (!page) {
2641 rc = -ENOMEM;
2642 break;
2643 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002644 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002645 }
2646
2647 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002648 for (i = 0; i < nr_pages; i++) {
2649 put_page(rdata->pages[i]);
2650 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002651 }
2652 }
2653 return rc;
2654}
2655
2656static void
2657cifs_uncached_readdata_release(struct kref *refcount)
2658{
Jeff Layton1c892542012-05-16 07:13:17 -04002659 struct cifs_readdata *rdata = container_of(refcount,
2660 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002661 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002662
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002663 for (i = 0; i < rdata->nr_pages; i++) {
2664 put_page(rdata->pages[i]);
2665 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002666 }
2667 cifs_readdata_release(refcount);
2668}
2669
2670static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002671cifs_retry_async_readv(struct cifs_readdata *rdata)
2672{
2673 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002674 struct TCP_Server_Info *server;
2675
2676 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002677
2678 do {
2679 if (rdata->cfile->invalidHandle) {
2680 rc = cifs_reopen_file(rdata->cfile, true);
2681 if (rc != 0)
2682 continue;
2683 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002684 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002685 } while (rc == -EAGAIN);
2686
2687 return rc;
2688}
2689
Jeff Layton1c892542012-05-16 07:13:17 -04002690/**
2691 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2692 * @rdata: the readdata response with list of pages holding data
2693 * @iov: vector in which we should copy the data
2694 * @nr_segs: number of segments in vector
2695 * @offset: offset into file of the first iovec
2696 * @copied: used to return the amount of data copied to the iov
2697 *
2698 * This function copies data from a list of pages in a readdata response into
2699 * an array of iovecs. It will first calculate where the data should go
2700 * based on the info in the readdata and then copy the data into that spot.
2701 */
2702static ssize_t
2703cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2704 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2705{
2706 int rc = 0;
2707 struct iov_iter ii;
2708 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002709 ssize_t remaining = rdata->bytes;
2710 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002711 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002712
2713 /* set up iov_iter and advance to the correct offset */
2714 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2715 iov_iter_advance(&ii, pos);
2716
2717 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002718 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002719 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002720 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002721
2722 /* copy a whole page or whatever's left */
2723 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2724
2725 /* ...but limit it to whatever space is left in the iov */
2726 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2727
2728 /* go while there's data to be copied and no errors */
2729 if (copy && !rc) {
2730 pdata = kmap(page);
2731 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2732 (int)copy);
2733 kunmap(page);
2734 if (!rc) {
2735 *copied += copy;
2736 remaining -= copy;
2737 iov_iter_advance(&ii, copy);
2738 }
2739 }
Jeff Layton1c892542012-05-16 07:13:17 -04002740 }
2741
2742 return rc;
2743}
2744
2745static void
2746cifs_uncached_readv_complete(struct work_struct *work)
2747{
2748 struct cifs_readdata *rdata = container_of(work,
2749 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002750
2751 complete(&rdata->done);
2752 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2753}
2754
/*
 * Receive up to @len bytes of read response data from the socket into
 * rdata's pages, one page per cifs_readv_from_socket() call.
 *
 * Pages beyond the data actually returned are released and their slots
 * cleared (nr_pages shrinks accordingly); a short final page is
 * zero-padded and its length recorded in rdata->tailsz.
 *
 * Returns the total bytes read, or the last negative socket error if
 * nothing was read.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		/* page is still kmapped here; unmap regardless of outcome */
		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
2802
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002803static ssize_t
2804cifs_iovec_read(struct file *file, const struct iovec *iov,
2805 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002806{
Jeff Layton1c892542012-05-16 07:13:17 -04002807 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002808 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002809 ssize_t total_read = 0;
2810 loff_t offset = *poffset;
2811 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002813 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002815 struct cifs_readdata *rdata, *tmp;
2816 struct list_head rdata_list;
2817 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002818
2819 if (!nr_segs)
2820 return 0;
2821
2822 len = iov_length(iov, nr_segs);
2823 if (!len)
2824 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825
Jeff Layton1c892542012-05-16 07:13:17 -04002826 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002827 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002828 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002829 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002831 if (!tcon->ses->server->ops->async_readv)
2832 return -ENOSYS;
2833
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002834 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2835 pid = open_file->pid;
2836 else
2837 pid = current->tgid;
2838
Steve Frenchad7a2922008-02-07 23:25:02 +00002839 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05002840 cifs_dbg(FYI, "attempting read on write only file instance\n");
Steve Frenchad7a2922008-02-07 23:25:02 +00002841
Jeff Layton1c892542012-05-16 07:13:17 -04002842 do {
2843 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2844 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002845
Jeff Layton1c892542012-05-16 07:13:17 -04002846 /* allocate a readdata struct */
2847 rdata = cifs_readdata_alloc(npages,
2848 cifs_uncached_readv_complete);
2849 if (!rdata) {
2850 rc = -ENOMEM;
2851 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002853
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002854 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002855 if (rc)
2856 goto error;
2857
2858 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002859 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002860 rdata->offset = offset;
2861 rdata->bytes = cur_len;
2862 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002863 rdata->pagesz = PAGE_SIZE;
2864 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002865
2866 rc = cifs_retry_async_readv(rdata);
2867error:
2868 if (rc) {
2869 kref_put(&rdata->refcount,
2870 cifs_uncached_readdata_release);
2871 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 }
Jeff Layton1c892542012-05-16 07:13:17 -04002873
2874 list_add_tail(&rdata->list, &rdata_list);
2875 offset += cur_len;
2876 len -= cur_len;
2877 } while (len > 0);
2878
2879 /* if at least one read request send succeeded, then reset rc */
2880 if (!list_empty(&rdata_list))
2881 rc = 0;
2882
2883 /* the loop below should proceed in the order of increasing offsets */
2884restart_loop:
2885 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2886 if (!rc) {
2887 ssize_t copied;
2888
2889 /* FIXME: freezable sleep too? */
2890 rc = wait_for_completion_killable(&rdata->done);
2891 if (rc)
2892 rc = -EINTR;
2893 else if (rdata->result)
2894 rc = rdata->result;
2895 else {
2896 rc = cifs_readdata_to_iov(rdata, iov,
2897 nr_segs, *poffset,
2898 &copied);
2899 total_read += copied;
2900 }
2901
2902 /* resend call if it's a retryable error */
2903 if (rc == -EAGAIN) {
2904 rc = cifs_retry_async_readv(rdata);
2905 goto restart_loop;
2906 }
2907 }
2908 list_del_init(&rdata->list);
2909 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002911
Jeff Layton1c892542012-05-16 07:13:17 -04002912 cifs_stats_bytes_read(tcon, total_read);
2913 *poffset += total_read;
2914
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002915 /* mask nodata case */
2916 if (rc == -ENODATA)
2917 rc = 0;
2918
Jeff Layton1c892542012-05-16 07:13:17 -04002919 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920}
2921
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002922ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002923 unsigned long nr_segs, loff_t pos)
2924{
2925 ssize_t read;
2926
2927 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2928 if (read > 0)
2929 iocb->ki_pos = pos;
2930
2931 return read;
2932}
2933
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002934ssize_t
2935cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2936 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002937{
Al Viro496ad9a2013-01-23 17:07:38 -05002938 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002939 struct cifsInodeInfo *cinode = CIFS_I(inode);
2940 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2941 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2942 iocb->ki_filp->private_data;
2943 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2944 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002945
2946 /*
2947 * In strict cache mode we need to read from the server all the time
2948 * if we don't have level II oplock because the server can delay mtime
2949 * change - so we can't make a decision about inode invalidating.
2950 * And we can also fail with pagereading if there are mandatory locks
2951 * on pages affected by this read but not on the region from pos to
2952 * pos+len-1.
2953 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002954 if (!cinode->clientCanCacheRead)
2955 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002956
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002957 if (cap_unix(tcon->ses) &&
2958 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2959 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2960 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2961
2962 /*
2963 * We need to hold the sem to be sure nobody modifies lock list
2964 * with a brlock that prevents reading.
2965 */
2966 down_read(&cinode->lock_sem);
2967 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2968 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002969 NULL, CIFS_READ_OP))
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002970 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2971 up_read(&cinode->lock_sem);
2972 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002973}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002975static ssize_t
2976cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002977{
2978 int rc = -EACCES;
2979 unsigned int bytes_read = 0;
2980 unsigned int total_read;
2981 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002982 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002984 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002985 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002986 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002987 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002989 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002990 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002991 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002993 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002994 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002995
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002996 /* FIXME: set up handlers for larger reads and/or convert to async */
2997 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2998
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303000 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003001 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303002 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003004 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003005 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003006 server = tcon->ses->server;
3007
3008 if (!server->ops->sync_read) {
3009 free_xid(xid);
3010 return -ENOSYS;
3011 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003013 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3014 pid = open_file->pid;
3015 else
3016 pid = current->tgid;
3017
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003019 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003021 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3022 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003023 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003024 /*
3025 * For windows me and 9x we do not want to request more than it
3026 * negotiated since it will refuse the read then.
3027 */
3028 if ((tcon->ses) && !(tcon->ses->capabilities &
3029 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03003030 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04003031 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07003032 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003033 rc = -EAGAIN;
3034 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00003035 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003036 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003037 if (rc != 0)
3038 break;
3039 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003040 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003041 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003042 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003043 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003044 rc = server->ops->sync_read(xid, open_file, &io_parms,
3045 &bytes_read, &cur_offset,
3046 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 }
3048 if (rc || (bytes_read == 0)) {
3049 if (total_read) {
3050 break;
3051 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003052 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 return rc;
3054 }
3055 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003056 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003057 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058 }
3059 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003060 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003061 return total_read;
3062}
3063
Jeff Laytonca83ce32011-04-12 09:13:44 -04003064/*
3065 * If the page is mmap'ed into a process' page tables, then we need to make
3066 * sure that it doesn't change while being written back.
3067 */
3068static int
3069cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3070{
3071 struct page *page = vmf->page;
3072
3073 lock_page(page);
3074 return VM_FAULT_LOCKED;
3075}
3076
3077static struct vm_operations_struct cifs_file_vm_ops = {
3078 .fault = filemap_fault,
3079 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003080 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003081};
3082
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003083int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3084{
3085 int rc, xid;
Al Viro496ad9a2013-01-23 17:07:38 -05003086 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003087
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003088 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003089
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003090 if (!CIFS_I(inode)->clientCanCacheRead) {
3091 rc = cifs_invalidate_mapping(inode);
3092 if (rc)
3093 return rc;
3094 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003095
3096 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003097 if (rc == 0)
3098 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003099 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003100 return rc;
3101}
3102
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3104{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003105 int rc, xid;
3106
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003107 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003108 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003110 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3111 rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003112 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003113 return rc;
3114 }
3115 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003116 if (rc == 0)
3117 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003118 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119 return rc;
3120}
3121
Jeff Layton0471ca32012-05-16 07:13:16 -04003122static void
3123cifs_readv_complete(struct work_struct *work)
3124{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003125 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003126 struct cifs_readdata *rdata = container_of(work,
3127 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003128
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003129 for (i = 0; i < rdata->nr_pages; i++) {
3130 struct page *page = rdata->pages[i];
3131
Jeff Layton0471ca32012-05-16 07:13:16 -04003132 lru_cache_add_file(page);
3133
3134 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003135 flush_dcache_page(page);
3136 SetPageUptodate(page);
3137 }
3138
3139 unlock_page(page);
3140
3141 if (rdata->result == 0)
3142 cifs_readpage_to_fscache(rdata->mapping->host, page);
3143
3144 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003145 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003146 }
Jeff Layton6993f742012-05-16 07:13:17 -04003147 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003148}
3149
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003150static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003151cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3152 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003153{
Jeff Layton8321fec2012-09-19 06:22:32 -07003154 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003155 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003156 u64 eof;
3157 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003158 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003159 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003160
3161 /* determine the eof that the server (probably) has */
3162 eof = CIFS_I(rdata->mapping->host)->server_eof;
3163 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05003164 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003165
Jeff Layton8321fec2012-09-19 06:22:32 -07003166 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003167 for (i = 0; i < nr_pages; i++) {
3168 struct page *page = rdata->pages[i];
3169
Jeff Layton8321fec2012-09-19 06:22:32 -07003170 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003171 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003172 iov.iov_base = kmap(page);
3173 iov.iov_len = PAGE_CACHE_SIZE;
Joe Perchesf96637b2013-05-04 22:12:25 -05003174 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3175 i, page->index, iov.iov_base, iov.iov_len);
Jeff Layton8321fec2012-09-19 06:22:32 -07003176 len -= PAGE_CACHE_SIZE;
3177 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003178 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003179 iov.iov_base = kmap(page);
3180 iov.iov_len = len;
Joe Perchesf96637b2013-05-04 22:12:25 -05003181 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3182 i, page->index, iov.iov_base, iov.iov_len);
Jeff Layton8321fec2012-09-19 06:22:32 -07003183 memset(iov.iov_base + len,
3184 '\0', PAGE_CACHE_SIZE - len);
3185 rdata->tailsz = len;
3186 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003187 } else if (page->index > eof_index) {
3188 /*
3189 * The VFS will not try to do readahead past the
3190 * i_size, but it's possible that we have outstanding
3191 * writes with gaps in the middle and the i_size hasn't
3192 * caught up yet. Populate those with zeroed out pages
3193 * to prevent the VFS from repeatedly attempting to
3194 * fill them until the writes are flushed.
3195 */
3196 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003197 lru_cache_add_file(page);
3198 flush_dcache_page(page);
3199 SetPageUptodate(page);
3200 unlock_page(page);
3201 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003202 rdata->pages[i] = NULL;
3203 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003204 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003205 } else {
3206 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003207 lru_cache_add_file(page);
3208 unlock_page(page);
3209 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003210 rdata->pages[i] = NULL;
3211 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003212 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003213 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003214
3215 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3216 kunmap(page);
3217 if (result < 0)
3218 break;
3219
3220 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003221 }
3222
Jeff Layton8321fec2012-09-19 06:22:32 -07003223 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003224}
3225
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226static int cifs_readpages(struct file *file, struct address_space *mapping,
3227 struct list_head *page_list, unsigned num_pages)
3228{
Jeff Layton690c5e32011-10-19 15:30:16 -04003229 int rc;
3230 struct list_head tmplist;
3231 struct cifsFileInfo *open_file = file->private_data;
3232 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3233 unsigned int rsize = cifs_sb->rsize;
3234 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235
Jeff Layton690c5e32011-10-19 15:30:16 -04003236 /*
3237 * Give up immediately if rsize is too small to read an entire page.
3238 * The VFS will fall back to readpage. We should never reach this
3239 * point however since we set ra_pages to 0 when the rsize is smaller
3240 * than a cache page.
3241 */
3242 if (unlikely(rsize < PAGE_CACHE_SIZE))
3243 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003244
Suresh Jayaraman56698232010-07-05 18:13:25 +05303245 /*
3246 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3247 * immediately if the cookie is negative
3248 */
3249 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3250 &num_pages);
3251 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003252 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303253
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003254 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3255 pid = open_file->pid;
3256 else
3257 pid = current->tgid;
3258
Jeff Layton690c5e32011-10-19 15:30:16 -04003259 rc = 0;
3260 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261
Joe Perchesf96637b2013-05-04 22:12:25 -05003262 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3263 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04003264
3265 /*
3266 * Start with the page at end of list and move it to private
3267 * list. Do the same with any following pages until we hit
3268 * the rsize limit, hit an index discontinuity, or run out of
3269 * pages. Issue the async read and then start the loop again
3270 * until the list is empty.
3271 *
3272 * Note that list order is important. The page_list is in
3273 * the order of declining indexes. When we put the pages in
3274 * the rdata->pages, then we want them in increasing order.
3275 */
3276 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003277 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003278 unsigned int bytes = PAGE_CACHE_SIZE;
3279 unsigned int expected_index;
3280 unsigned int nr_pages = 1;
3281 loff_t offset;
3282 struct page *page, *tpage;
3283 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284
3285 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286
Jeff Layton690c5e32011-10-19 15:30:16 -04003287 /*
3288 * Lock the page and put it in the cache. Since no one else
3289 * should have access to this page, we're safe to simply set
3290 * PG_locked without checking it first.
3291 */
3292 __set_page_locked(page);
3293 rc = add_to_page_cache_locked(page, mapping,
3294 page->index, GFP_KERNEL);
3295
3296 /* give up if we can't stick it in the cache */
3297 if (rc) {
3298 __clear_page_locked(page);
3299 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301
Jeff Layton690c5e32011-10-19 15:30:16 -04003302 /* move first page to the tmplist */
3303 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3304 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003305
Jeff Layton690c5e32011-10-19 15:30:16 -04003306 /* now try and add more pages onto the request */
3307 expected_index = page->index + 1;
3308 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3309 /* discontinuity ? */
3310 if (page->index != expected_index)
3311 break;
3312
3313 /* would this page push the read over the rsize? */
3314 if (bytes + PAGE_CACHE_SIZE > rsize)
3315 break;
3316
3317 __set_page_locked(page);
3318 if (add_to_page_cache_locked(page, mapping,
3319 page->index, GFP_KERNEL)) {
3320 __clear_page_locked(page);
3321 break;
3322 }
3323 list_move_tail(&page->lru, &tmplist);
3324 bytes += PAGE_CACHE_SIZE;
3325 expected_index++;
3326 nr_pages++;
3327 }
3328
Jeff Layton0471ca32012-05-16 07:13:16 -04003329 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003330 if (!rdata) {
3331 /* best to give up if we're out of mem */
3332 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3333 list_del(&page->lru);
3334 lru_cache_add_file(page);
3335 unlock_page(page);
3336 page_cache_release(page);
3337 }
3338 rc = -ENOMEM;
3339 break;
3340 }
3341
Jeff Layton6993f742012-05-16 07:13:17 -04003342 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003343 rdata->mapping = mapping;
3344 rdata->offset = offset;
3345 rdata->bytes = bytes;
3346 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003347 rdata->pagesz = PAGE_CACHE_SIZE;
3348 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003349
3350 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3351 list_del(&page->lru);
3352 rdata->pages[rdata->nr_pages++] = page;
3353 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003354
Jeff Layton2a1bb132012-05-16 07:13:17 -04003355 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003356 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003357 for (i = 0; i < rdata->nr_pages; i++) {
3358 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003359 lru_cache_add_file(page);
3360 unlock_page(page);
3361 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003362 }
Jeff Layton6993f742012-05-16 07:13:17 -04003363 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 break;
3365 }
Jeff Layton6993f742012-05-16 07:13:17 -04003366
3367 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 }
3369
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 return rc;
3371}
3372
/*
 * Read one page of file data at *poffset into @page.
 *
 * Tries fscache first; on a cache miss performs a synchronous
 * cifs_read() of PAGE_CACHE_SIZE bytes, zero-fills any tail of the
 * page beyond the bytes actually read, marks the page uptodate and
 * pushes it back into fscache.
 *
 * Returns 0 on success or the negative rc from cifs_read(). The page
 * is left locked either way; the caller (e.g. cifs_readpage) is
 * responsible for unlocking it.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	/* extra reference balanced by page_cache_release() below */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_fs_time(file_inode(file)->i_sb);

	/* zero the remainder of the page after a short read */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3416
3417static int cifs_readpage(struct file *file, struct page *page)
3418{
3419 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3420 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003421 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003423 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424
3425 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303426 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003427 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303428 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429 }
3430
Joe Perchesf96637b2013-05-04 22:12:25 -05003431 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003432 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003433
3434 rc = cifs_readpage_worker(file, page, &offset);
3435
3436 unlock_page(page);
3437
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003438 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 return rc;
3440}
3441
Steve Frencha403a0a2007-07-26 15:54:16 +00003442static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3443{
3444 struct cifsFileInfo *open_file;
3445
Jeff Layton44772882010-10-15 15:34:03 -04003446 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003447 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003448 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003449 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003450 return 1;
3451 }
3452 }
Jeff Layton44772882010-10-15 15:34:03 -04003453 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003454 return 0;
3455}
3456
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457/* We do not want to update the file size from server for inodes
3458 open for write - to avoid races with writepage extending
3459 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003460 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461 but this is tricky to do without racing with writebehind
3462 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003463bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003464{
Steve Frencha403a0a2007-07-26 15:54:16 +00003465 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003466 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003467
Steve Frencha403a0a2007-07-26 15:54:16 +00003468 if (is_inode_writable(cifsInode)) {
3469 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003470 struct cifs_sb_info *cifs_sb;
3471
Steve Frenchc32a0b62006-01-12 14:41:28 -08003472 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003473 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003474 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003475 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003476 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003477 }
3478
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003479 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003480 return true;
Steve French7ba52632007-02-08 18:14:13 +00003481
Steve French4b18f2a2008-04-29 00:06:05 +00003482 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003483 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003484 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485}
3486
/*
 * ->write_begin() for cifs: find (or create) and lock the page cache
 * page covering [pos, pos+len) and decide whether its current contents
 * must be read in from the server before the copy.
 *
 * The read is avoided when the page is already uptodate, when the
 * write spans a whole page, or when we hold a read oplock and the
 * write lies at/beyond EOF - in that last case the parts of the page
 * not being written are zeroed and PG_checked is set so the write_end
 * path can treat them as valid.
 *
 * Returns 0 with *pagep set to the locked page, or -ENOMEM when no
 * page could be obtained (*pagep is then NULL).
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);	/* write start within the page */
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3558
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303559static int cifs_release_page(struct page *page, gfp_t gfp)
3560{
3561 if (PagePrivate(page))
3562 return 0;
3563
3564 return cifs_fscache_release_page(page, gfp);
3565}
3566
Lukas Czernerd47992f2013-05-21 23:17:23 -04003567static void cifs_invalidate_page(struct page *page, unsigned int offset,
3568 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303569{
3570 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3571
Lukas Czernerd47992f2013-05-21 23:17:23 -04003572 if (offset == 0 && length == PAGE_CACHE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303573 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3574}
3575
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003576static int cifs_launder_page(struct page *page)
3577{
3578 int rc = 0;
3579 loff_t range_start = page_offset(page);
3580 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3581 struct writeback_control wbc = {
3582 .sync_mode = WB_SYNC_ALL,
3583 .nr_to_write = 0,
3584 .range_start = range_start,
3585 .range_end = range_end,
3586 };
3587
Joe Perchesf96637b2013-05-04 22:12:25 -05003588 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003589
3590 if (clear_page_dirty_for_io(page))
3591 rc = cifs_writepage_locked(page, &wbc);
3592
3593 cifs_fscache_invalidate_page(page, page->mapping->host);
3594 return rc;
3595}
3596
/*
 * Work handler run when the server breaks an oplock held on this file.
 *
 * Downgrades the client-side caching flags, flushes dirty pages for
 * the inode (and, once read caching is lost, waits for writeback and
 * invalidates the mapping), pushes cached byte-range locks to the
 * server, and finally acknowledges the break unless it was cancelled.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* give up read caching when mandatory locks are held on the inode */
	if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* break local leases to match the new oplock level */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* read caching lost: wait for writeback to finish,
			   then drop the now-untrusted page cache */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_invalidate_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
}
3643
/*
 * Default address_space operations for cifs, including ->readpages.
 * See cifs_addr_ops_smallbuf below for servers whose response buffer
 * cannot hold a full page of data.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003656
/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 *
 * This table is identical to cifs_addr_ops above except that the
 * ->readpages entry is omitted.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};