blob: 1dc9dea2ae70d0506f3112bd13627a2fa0da6028 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * fs/cifs/file.c
3 *
4 * vfs operations that deal with files
Steve Frenchfb8c4b12007-07-10 01:16:18 +00005 *
Steve Frenchf19159d2010-04-21 04:12:10 +00006 * Copyright (C) International Business Machines Corp., 2002,2010
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Author(s): Steve French (sfrench@us.ibm.com)
Jeremy Allison7ee1af72006-08-02 21:56:33 +00008 * Jeremy Allison (jra@samba.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
14 *
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/fs.h>
Steve French37c0eb42005-10-05 14:50:29 -070025#include <linux/backing-dev.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/stat.h>
27#include <linux/fcntl.h>
28#include <linux/pagemap.h>
29#include <linux/pagevec.h>
Steve French37c0eb42005-10-05 14:50:29 -070030#include <linux/writeback.h>
Andrew Morton6f88cc22006-12-10 02:19:44 -080031#include <linux/task_io_accounting_ops.h>
Steve French23e7dd72005-10-20 13:44:56 -070032#include <linux/delay.h>
Jeff Layton3bc303c2009-09-21 06:47:50 -040033#include <linux/mount.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090034#include <linux/slab.h>
Jeff Layton690c5e32011-10-19 15:30:16 -040035#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036#include <asm/div64.h>
37#include "cifsfs.h"
38#include "cifspdu.h"
39#include "cifsglob.h"
40#include "cifsproto.h"
41#include "cifs_unicode.h"
42#include "cifs_debug.h"
43#include "cifs_fs_sb.h"
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +053044#include "fscache.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Steve French07b92d02013-02-18 10:34:26 -060046
Linus Torvalds1da177e2005-04-16 15:20:36 -070047static inline int cifs_convert_flags(unsigned int flags)
48{
49 if ((flags & O_ACCMODE) == O_RDONLY)
50 return GENERIC_READ;
51 else if ((flags & O_ACCMODE) == O_WRONLY)
52 return GENERIC_WRITE;
53 else if ((flags & O_ACCMODE) == O_RDWR) {
54 /* GENERIC_ALL is too much permission to request
55 can cause unnecessary access denied on create */
56 /* return GENERIC_ALL; */
57 return (GENERIC_READ | GENERIC_WRITE);
58 }
59
Jeff Laytone10f7b52008-05-14 10:21:33 -070060 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
61 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
62 FILE_READ_DATA);
Steve French7fc8f4e2009-02-23 20:43:11 +000063}
Jeff Laytone10f7b52008-05-14 10:21:33 -070064
Jeff Layton608712f2010-10-15 15:33:56 -040065static u32 cifs_posix_convert_flags(unsigned int flags)
Steve French7fc8f4e2009-02-23 20:43:11 +000066{
Jeff Layton608712f2010-10-15 15:33:56 -040067 u32 posix_flags = 0;
Jeff Laytone10f7b52008-05-14 10:21:33 -070068
Steve French7fc8f4e2009-02-23 20:43:11 +000069 if ((flags & O_ACCMODE) == O_RDONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040070 posix_flags = SMB_O_RDONLY;
Steve French7fc8f4e2009-02-23 20:43:11 +000071 else if ((flags & O_ACCMODE) == O_WRONLY)
Jeff Layton608712f2010-10-15 15:33:56 -040072 posix_flags = SMB_O_WRONLY;
73 else if ((flags & O_ACCMODE) == O_RDWR)
74 posix_flags = SMB_O_RDWR;
75
Steve French07b92d02013-02-18 10:34:26 -060076 if (flags & O_CREAT) {
Jeff Layton608712f2010-10-15 15:33:56 -040077 posix_flags |= SMB_O_CREAT;
Steve French07b92d02013-02-18 10:34:26 -060078 if (flags & O_EXCL)
79 posix_flags |= SMB_O_EXCL;
80 } else if (flags & O_EXCL)
Joe Perchesf96637b2013-05-04 22:12:25 -050081 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
82 current->comm, current->tgid);
Steve French07b92d02013-02-18 10:34:26 -060083
Jeff Layton608712f2010-10-15 15:33:56 -040084 if (flags & O_TRUNC)
85 posix_flags |= SMB_O_TRUNC;
86 /* be safe and imply O_SYNC for O_DSYNC */
Christoph Hellwig6b2f3d12009-10-27 11:05:28 +010087 if (flags & O_DSYNC)
Jeff Layton608712f2010-10-15 15:33:56 -040088 posix_flags |= SMB_O_SYNC;
Steve French7fc8f4e2009-02-23 20:43:11 +000089 if (flags & O_DIRECTORY)
Jeff Layton608712f2010-10-15 15:33:56 -040090 posix_flags |= SMB_O_DIRECTORY;
Steve French7fc8f4e2009-02-23 20:43:11 +000091 if (flags & O_NOFOLLOW)
Jeff Layton608712f2010-10-15 15:33:56 -040092 posix_flags |= SMB_O_NOFOLLOW;
Steve French7fc8f4e2009-02-23 20:43:11 +000093 if (flags & O_DIRECT)
Jeff Layton608712f2010-10-15 15:33:56 -040094 posix_flags |= SMB_O_DIRECT;
Steve French7fc8f4e2009-02-23 20:43:11 +000095
96 return posix_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097}
98
99static inline int cifs_get_disposition(unsigned int flags)
100{
101 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
102 return FILE_CREATE;
103 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
104 return FILE_OVERWRITE_IF;
105 else if ((flags & O_CREAT) == O_CREAT)
106 return FILE_OPEN_IF;
Steve French55aa2e02006-05-30 18:09:31 +0000107 else if ((flags & O_TRUNC) == O_TRUNC)
108 return FILE_OVERWRITE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109 else
110 return FILE_OPEN;
111}
112
Jeff Layton608712f2010-10-15 15:33:56 -0400113int cifs_posix_open(char *full_path, struct inode **pinode,
114 struct super_block *sb, int mode, unsigned int f_flags,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400115 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
Jeff Layton608712f2010-10-15 15:33:56 -0400116{
117 int rc;
118 FILE_UNIX_BASIC_INFO *presp_data;
119 __u32 posix_flags = 0;
120 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
121 struct cifs_fattr fattr;
122 struct tcon_link *tlink;
Steve French96daf2b2011-05-27 04:34:02 +0000123 struct cifs_tcon *tcon;
Jeff Layton608712f2010-10-15 15:33:56 -0400124
Joe Perchesf96637b2013-05-04 22:12:25 -0500125 cifs_dbg(FYI, "posix open %s\n", full_path);
Jeff Layton608712f2010-10-15 15:33:56 -0400126
127 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
128 if (presp_data == NULL)
129 return -ENOMEM;
130
131 tlink = cifs_sb_tlink(cifs_sb);
132 if (IS_ERR(tlink)) {
133 rc = PTR_ERR(tlink);
134 goto posix_open_ret;
135 }
136
137 tcon = tlink_tcon(tlink);
138 mode &= ~current_umask();
139
140 posix_flags = cifs_posix_convert_flags(f_flags);
141 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
142 poplock, full_path, cifs_sb->local_nls,
143 cifs_sb->mnt_cifs_flags &
144 CIFS_MOUNT_MAP_SPECIAL_CHR);
145 cifs_put_tlink(tlink);
146
147 if (rc)
148 goto posix_open_ret;
149
150 if (presp_data->Type == cpu_to_le32(-1))
151 goto posix_open_ret; /* open ok, caller does qpathinfo */
152
153 if (!pinode)
154 goto posix_open_ret; /* caller does not need info */
155
156 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
157
158 /* get new inode and set it up */
159 if (*pinode == NULL) {
160 cifs_fill_uniqueid(sb, &fattr);
161 *pinode = cifs_iget(sb, &fattr);
162 if (!*pinode) {
163 rc = -ENOMEM;
164 goto posix_open_ret;
165 }
166 } else {
167 cifs_fattr_to_inode(*pinode, &fattr);
168 }
169
170posix_open_ret:
171 kfree(presp_data);
172 return rc;
173}
174
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300175static int
176cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700177 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
178 struct cifs_fid *fid, unsigned int xid)
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300179{
180 int rc;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700181 int desired_access;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300182 int disposition;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500183 int create_options = CREATE_NOT_DIR;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300184 FILE_ALL_INFO *buf;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700185 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400186 struct cifs_open_parms oparms;
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300187
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700188 if (!server->ops->open)
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700189 return -ENOSYS;
190
191 desired_access = cifs_convert_flags(f_flags);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300192
193/*********************************************************************
194 * open flag mapping table:
195 *
196 * POSIX Flag CIFS Disposition
197 * ---------- ----------------
198 * O_CREAT FILE_OPEN_IF
199 * O_CREAT | O_EXCL FILE_CREATE
200 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
201 * O_TRUNC FILE_OVERWRITE
202 * none of the above FILE_OPEN
203 *
204 * Note that there is not a direct match between disposition
205 * FILE_SUPERSEDE (ie create whether or not file exists although
206 * O_CREAT | O_TRUNC is similar but truncates the existing
207 * file rather than creating a new file as FILE_SUPERSEDE does
208 * (which uses the attributes / metadata passed in on open call)
209 *?
210 *? O_SYNC is a reasonable match to CIFS writethrough flag
211 *? and the read write flags match reasonably. O_LARGEFILE
212 *? is irrelevant because largefile support is always used
213 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
214 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
215 *********************************************************************/
216
217 disposition = cifs_get_disposition(f_flags);
218
219 /* BB pass O_SYNC flag through on file attributes .. BB */
220
221 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
222 if (!buf)
223 return -ENOMEM;
224
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500225 if (backup_cred(cifs_sb))
226 create_options |= CREATE_OPEN_BACKUP_INTENT;
227
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400228 oparms.tcon = tcon;
229 oparms.cifs_sb = cifs_sb;
230 oparms.desired_access = desired_access;
231 oparms.create_options = create_options;
232 oparms.disposition = disposition;
233 oparms.path = full_path;
234 oparms.fid = fid;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400235 oparms.reconnect = false;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400236
237 rc = server->ops->open(xid, &oparms, oplock, buf);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300238
239 if (rc)
240 goto out;
241
242 if (tcon->unix_ext)
243 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
244 xid);
245 else
246 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700247 xid, &fid->netfid);
Pavel Shilovskyeeb910a2010-11-25 15:12:39 +0300248
249out:
250 kfree(buf);
251 return rc;
252}
253
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400254static bool
255cifs_has_mand_locks(struct cifsInodeInfo *cinode)
256{
257 struct cifs_fid_locks *cur;
258 bool has_locks = false;
259
260 down_read(&cinode->lock_sem);
261 list_for_each_entry(cur, &cinode->llist, llist) {
262 if (!list_empty(&cur->locks)) {
263 has_locks = true;
264 break;
265 }
266 }
267 up_read(&cinode->lock_sem);
268 return has_locks;
269}
270
Jeff Layton15ecb432010-10-15 15:34:02 -0400271struct cifsFileInfo *
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700272cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
Jeff Layton15ecb432010-10-15 15:34:02 -0400273 struct tcon_link *tlink, __u32 oplock)
274{
275 struct dentry *dentry = file->f_path.dentry;
276 struct inode *inode = dentry->d_inode;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700277 struct cifsInodeInfo *cinode = CIFS_I(inode);
278 struct cifsFileInfo *cfile;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700279 struct cifs_fid_locks *fdlocks;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700280 struct cifs_tcon *tcon = tlink_tcon(tlink);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400281 struct TCP_Server_Info *server = tcon->ses->server;
Jeff Layton15ecb432010-10-15 15:34:02 -0400282
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700283 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
284 if (cfile == NULL)
285 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400286
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700287 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
288 if (!fdlocks) {
289 kfree(cfile);
290 return NULL;
291 }
292
293 INIT_LIST_HEAD(&fdlocks->locks);
294 fdlocks->cfile = cfile;
295 cfile->llist = fdlocks;
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700296 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700297 list_add(&fdlocks->llist, &cinode->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700298 up_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700299
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700300 cfile->count = 1;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700301 cfile->pid = current->tgid;
302 cfile->uid = current_fsuid();
303 cfile->dentry = dget(dentry);
304 cfile->f_flags = file->f_flags;
305 cfile->invalidHandle = false;
306 cfile->tlink = cifs_get_tlink(tlink);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700307 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700308 mutex_init(&cfile->fh_mutex);
Jeff Layton15ecb432010-10-15 15:34:02 -0400309
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100310 cifs_sb_active(inode->i_sb);
311
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400312 /*
313 * If the server returned a read oplock and we have mandatory brlocks,
314 * set oplock level to None.
315 */
316 if (oplock == server->vals->oplock_read &&
317 cifs_has_mand_locks(cinode)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500318 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400319 oplock = 0;
320 }
321
Jeff Layton44772882010-10-15 15:34:03 -0400322 spin_lock(&cifs_file_list_lock);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400323 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700324 oplock = fid->pending_open->oplock;
325 list_del(&fid->pending_open->olist);
326
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +0400327 server->ops->set_fid(cfile, fid, oplock);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700328
329 list_add(&cfile->tlist, &tcon->openFileList);
Jeff Layton15ecb432010-10-15 15:34:02 -0400330 /* if readable file instance put first in list*/
331 if (file->f_mode & FMODE_READ)
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700332 list_add(&cfile->flist, &cinode->openFileList);
Jeff Layton15ecb432010-10-15 15:34:02 -0400333 else
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700334 list_add_tail(&cfile->flist, &cinode->openFileList);
Jeff Layton44772882010-10-15 15:34:03 -0400335 spin_unlock(&cifs_file_list_lock);
Jeff Layton15ecb432010-10-15 15:34:02 -0400336
Pavel Shilovsky4b4de762012-09-18 16:20:26 -0700337 file->private_data = cfile;
338 return cfile;
Jeff Layton15ecb432010-10-15 15:34:02 -0400339}
340
Jeff Layton764a1b12012-07-25 14:59:54 -0400341struct cifsFileInfo *
342cifsFileInfo_get(struct cifsFileInfo *cifs_file)
343{
344 spin_lock(&cifs_file_list_lock);
345 cifsFileInfo_get_locked(cifs_file);
346 spin_unlock(&cifs_file_list_lock);
347 return cifs_file;
348}
349
Steve Frenchcdff08e2010-10-21 22:46:14 +0000350/*
351 * Release a reference on the file private data. This may involve closing
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400352 * the filehandle out on the server. Must be called without holding
353 * cifs_file_list_lock.
Steve Frenchcdff08e2010-10-21 22:46:14 +0000354 */
Jeff Laytonb33879a2010-10-15 15:34:04 -0400355void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
356{
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300357 struct inode *inode = cifs_file->dentry->d_inode;
Steve French96daf2b2011-05-27 04:34:02 +0000358 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700359 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300360 struct cifsInodeInfo *cifsi = CIFS_I(inode);
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100361 struct super_block *sb = inode->i_sb;
362 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000363 struct cifsLockInfo *li, *tmp;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700364 struct cifs_fid fid;
365 struct cifs_pending_open open;
Steve Frenchcdff08e2010-10-21 22:46:14 +0000366
367 spin_lock(&cifs_file_list_lock);
Jeff Layton5f6dbc92010-10-15 15:34:06 -0400368 if (--cifs_file->count > 0) {
Steve Frenchcdff08e2010-10-21 22:46:14 +0000369 spin_unlock(&cifs_file_list_lock);
370 return;
Jeff Laytonb33879a2010-10-15 15:34:04 -0400371 }
Steve Frenchcdff08e2010-10-21 22:46:14 +0000372
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700373 if (server->ops->get_lease_key)
374 server->ops->get_lease_key(inode, &fid);
375
376 /* store open in pending opens to make sure we don't miss lease break */
377 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
378
Steve Frenchcdff08e2010-10-21 22:46:14 +0000379 /* remove it from the lists */
380 list_del(&cifs_file->flist);
381 list_del(&cifs_file->tlist);
382
383 if (list_empty(&cifsi->openFileList)) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500384 cifs_dbg(FYI, "closing last open instance for inode %p\n",
385 cifs_file->dentry->d_inode);
Pavel Shilovsky25364132012-09-18 16:20:27 -0700386 /*
387 * In strict cache mode we need invalidate mapping on the last
388 * close because it may cause a error when we open this file
389 * again and get at least level II oplock.
390 */
Pavel Shilovsky4f8ba8a2010-11-21 22:36:12 +0300391 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
392 CIFS_I(inode)->invalid_mapping = true;
Pavel Shilovskyc6723622010-11-03 10:58:57 +0300393 cifs_set_oplock_level(cifsi, 0);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000394 }
395 spin_unlock(&cifs_file_list_lock);
396
Jeff Laytonad635942011-07-26 12:20:17 -0400397 cancel_work_sync(&cifs_file->oplock_break);
398
Steve Frenchcdff08e2010-10-21 22:46:14 +0000399 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700400 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400401 unsigned int xid;
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700402
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400403 xid = get_xid();
Pavel Shilovsky0ff78a22012-09-18 16:20:26 -0700404 if (server->ops->close)
Pavel Shilovsky760ad0c2012-09-25 11:00:07 +0400405 server->ops->close(xid, tcon, &cifs_file->fid);
406 _free_xid(xid);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000407 }
408
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700409 cifs_del_pending_open(&open);
410
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700411 /*
412 * Delete any outstanding lock records. We'll lose them when the file
Steve Frenchcdff08e2010-10-21 22:46:14 +0000413 * is closed anyway.
414 */
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700415 down_write(&cifsi->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700416 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
Steve Frenchcdff08e2010-10-21 22:46:14 +0000417 list_del(&li->llist);
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400418 cifs_del_lock_waiters(li);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000419 kfree(li);
420 }
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700421 list_del(&cifs_file->llist->llist);
422 kfree(cifs_file->llist);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700423 up_write(&cifsi->lock_sem);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000424
425 cifs_put_tlink(cifs_file->tlink);
426 dput(cifs_file->dentry);
Mateusz Guzik24261fc2013-03-08 16:30:03 +0100427 cifs_sb_deactive(sb);
Steve Frenchcdff08e2010-10-21 22:46:14 +0000428 kfree(cifs_file);
Jeff Laytonb33879a2010-10-15 15:34:04 -0400429}
430
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431int cifs_open(struct inode *inode, struct file *file)
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700432
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433{
434 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400435 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400436 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700437 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700438 struct TCP_Server_Info *server;
Steve French96daf2b2011-05-27 04:34:02 +0000439 struct cifs_tcon *tcon;
Jeff Layton7ffec372010-09-29 19:51:11 -0400440 struct tcon_link *tlink;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700441 struct cifsFileInfo *cfile = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700442 char *full_path = NULL;
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300443 bool posix_open_ok = false;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700444 struct cifs_fid fid;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700445 struct cifs_pending_open open;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400447 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700448
449 cifs_sb = CIFS_SB(inode->i_sb);
Jeff Layton7ffec372010-09-29 19:51:11 -0400450 tlink = cifs_sb_tlink(cifs_sb);
451 if (IS_ERR(tlink)) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400452 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400453 return PTR_ERR(tlink);
454 }
455 tcon = tlink_tcon(tlink);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700456 server = tcon->ses->server;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -0800458 full_path = build_path_from_dentry(file->f_path.dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700459 if (full_path == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530460 rc = -ENOMEM;
Jeff Layton232341b2010-08-05 13:58:38 -0400461 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462 }
463
Joe Perchesf96637b2013-05-04 22:12:25 -0500464 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +0000465 inode, file->f_flags, full_path);
Steve French276a74a2009-03-03 18:00:34 +0000466
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700467 if (server->oplocks)
Steve French276a74a2009-03-03 18:00:34 +0000468 oplock = REQ_OPLOCK;
469 else
470 oplock = 0;
471
Steve French64cc2c62009-03-04 19:54:08 +0000472 if (!tcon->broken_posix_open && tcon->unix_ext &&
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400473 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
474 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Steve French276a74a2009-03-03 18:00:34 +0000475 /* can not refresh inode info since size could be stale */
Jeff Layton2422f672010-06-16 13:40:16 -0400476 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
Steve Frenchfa588e02010-04-22 19:21:55 +0000477 cifs_sb->mnt_file_mode /* ignored */,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700478 file->f_flags, &oplock, &fid.netfid, xid);
Steve French276a74a2009-03-03 18:00:34 +0000479 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500480 cifs_dbg(FYI, "posix open succeeded\n");
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300481 posix_open_ok = true;
Steve French64cc2c62009-03-04 19:54:08 +0000482 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
483 if (tcon->ses->serverNOS)
Joe Perchesf96637b2013-05-04 22:12:25 -0500484 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
485 tcon->ses->serverName,
486 tcon->ses->serverNOS);
Steve French64cc2c62009-03-04 19:54:08 +0000487 tcon->broken_posix_open = true;
Steve French276a74a2009-03-03 18:00:34 +0000488 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
489 (rc != -EOPNOTSUPP)) /* path not found or net err */
490 goto out;
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700491 /*
492 * Else fallthrough to retry open the old way on network i/o
493 * or DFS errors.
494 */
Steve French276a74a2009-03-03 18:00:34 +0000495 }
496
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700497 if (server->ops->get_lease_key)
498 server->ops->get_lease_key(inode, &fid);
499
500 cifs_add_pending_open(&fid, tlink, &open);
501
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300502 if (!posix_open_ok) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700503 if (server->ops->get_lease_key)
504 server->ops->get_lease_key(inode, &fid);
505
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300506 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700507 file->f_flags, &oplock, &fid, xid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700508 if (rc) {
509 cifs_del_pending_open(&open);
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300510 goto out;
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700511 }
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300512 }
Jeff Layton47c78b72010-06-16 13:40:17 -0400513
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700514 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
515 if (cfile == NULL) {
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700516 if (server->ops->close)
517 server->ops->close(xid, tcon, &fid);
Pavel Shilovsky233839b2012-09-19 06:22:45 -0700518 cifs_del_pending_open(&open);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519 rc = -ENOMEM;
520 goto out;
521 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522
Suresh Jayaraman9451a9a2010-07-05 18:12:45 +0530523 cifs_fscache_set_inode_cookie(inode, file);
524
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300525 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700526 /*
527 * Time to set mode which we can not set earlier due to
528 * problems creating new read-only files.
529 */
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300530 struct cifs_unix_set_info_args args = {
531 .mode = inode->i_mode,
Eric W. Biederman49418b22013-02-06 00:57:56 -0800532 .uid = INVALID_UID, /* no change */
533 .gid = INVALID_GID, /* no change */
Pavel Shilovsky7e12edd2010-11-25 17:20:20 +0300534 .ctime = NO_CHANGE_64,
535 .atime = NO_CHANGE_64,
536 .mtime = NO_CHANGE_64,
537 .device = 0,
538 };
Pavel Shilovskyfb1214e2012-09-18 16:20:26 -0700539 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
540 cfile->pid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541 }
542
543out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400545 free_xid(xid);
Jeff Layton7ffec372010-09-29 19:51:11 -0400546 cifs_put_tlink(tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547 return rc;
548}
549
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400550static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
551
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700552/*
553 * Try to reacquire byte range locks that were released when session
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400554 * to server was lost.
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700555 */
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400556static int
557cifs_relock_file(struct cifsFileInfo *cfile)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558{
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400559 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
560 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
561 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562 int rc = 0;
563
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400564 /* we are going to update can_cache_brlcks here - need a write access */
565 down_write(&cinode->lock_sem);
566 if (cinode->can_cache_brlcks) {
567 /* can cache locks - no need to push them */
568 up_write(&cinode->lock_sem);
569 return rc;
570 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571
Pavel Shilovskyf152fd52012-11-22 17:10:57 +0400572 if (cap_unix(tcon->ses) &&
573 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
574 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
575 rc = cifs_push_posix_locks(cfile);
576 else
577 rc = tcon->ses->server->ops->push_mand_locks(cfile);
578
579 up_write(&cinode->lock_sem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 return rc;
581}
582
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700583static int
584cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585{
586 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400587 unsigned int xid;
Jeff Layton590a3fe2009-09-12 11:54:28 -0400588 __u32 oplock;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 struct cifs_sb_info *cifs_sb;
Steve French96daf2b2011-05-27 04:34:02 +0000590 struct cifs_tcon *tcon;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700591 struct TCP_Server_Info *server;
592 struct cifsInodeInfo *cinode;
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000593 struct inode *inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 char *full_path = NULL;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700595 int desired_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596 int disposition = FILE_OPEN;
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500597 int create_options = CREATE_NOT_DIR;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400598 struct cifs_open_parms oparms;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700599
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400600 xid = get_xid();
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700601 mutex_lock(&cfile->fh_mutex);
602 if (!cfile->invalidHandle) {
603 mutex_unlock(&cfile->fh_mutex);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530604 rc = 0;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400605 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +0530606 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 }
608
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700609 inode = cfile->dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610 cifs_sb = CIFS_SB(inode->i_sb);
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700611 tcon = tlink_tcon(cfile->tlink);
612 server = tcon->ses->server;
Steve French3a9f4622007-04-04 17:10:24 +0000613
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700614 /*
615 * Can not grab rename sem here because various ops, including those
616 * that already have the rename sem can end up causing writepage to get
617 * called and if the server was down that means we end up here, and we
618 * can never tell if the caller already has the rename_sem.
619 */
620 full_path = build_path_from_dentry(cfile->dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621 if (full_path == NULL) {
Steve French3a9f4622007-04-04 17:10:24 +0000622 rc = -ENOMEM;
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700623 mutex_unlock(&cfile->fh_mutex);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400624 free_xid(xid);
Steve French3a9f4622007-04-04 17:10:24 +0000625 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626 }
627
Joe Perchesf96637b2013-05-04 22:12:25 -0500628 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
629 inode, cfile->f_flags, full_path);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630
Pavel Shilovsky10b9b982012-03-20 12:55:09 +0300631 if (tcon->ses->server->oplocks)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632 oplock = REQ_OPLOCK;
633 else
Steve French4b18f2a2008-04-29 00:06:05 +0000634 oplock = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400636 if (tcon->unix_ext && cap_unix(tcon->ses) &&
Steve French7fc8f4e2009-02-23 20:43:11 +0000637 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
Pavel Shilovsky29e20f92012-07-13 13:58:14 +0400638 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
Jeff Layton608712f2010-10-15 15:33:56 -0400639 /*
640 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
641 * original open. Must mask them off for a reopen.
642 */
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700643 unsigned int oflags = cfile->f_flags &
Jeff Layton15886172010-10-15 15:33:59 -0400644 ~(O_CREAT | O_EXCL | O_TRUNC);
Jeff Layton608712f2010-10-15 15:33:56 -0400645
Jeff Layton2422f672010-06-16 13:40:16 -0400646 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700647 cifs_sb->mnt_file_mode /* ignored */,
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400648 oflags, &oplock, &cfile->fid.netfid, xid);
Steve French7fc8f4e2009-02-23 20:43:11 +0000649 if (rc == 0) {
Joe Perchesf96637b2013-05-04 22:12:25 -0500650 cifs_dbg(FYI, "posix reopen succeeded\n");
Steve French7fc8f4e2009-02-23 20:43:11 +0000651 goto reopen_success;
652 }
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700653 /*
654 * fallthrough to retry open the old way on errors, especially
655 * in the reconnect path it is important to retry hard
656 */
Steve French7fc8f4e2009-02-23 20:43:11 +0000657 }
658
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700659 desired_access = cifs_convert_flags(cfile->f_flags);
Steve French7fc8f4e2009-02-23 20:43:11 +0000660
Shirish Pargaonkar3d3ea8e2011-09-26 09:56:44 -0500661 if (backup_cred(cifs_sb))
662 create_options |= CREATE_OPEN_BACKUP_INTENT;
663
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700664 if (server->ops->get_lease_key)
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400665 server->ops->get_lease_key(inode, &cfile->fid);
Pavel Shilovskyb8c32db2012-09-19 06:22:44 -0700666
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400667 oparms.tcon = tcon;
668 oparms.cifs_sb = cifs_sb;
669 oparms.desired_access = desired_access;
670 oparms.create_options = create_options;
671 oparms.disposition = disposition;
672 oparms.path = full_path;
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400673 oparms.fid = &cfile->fid;
674 oparms.reconnect = true;
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400675
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700676 /*
677 * Can not refresh inode by passing in file_info buf to be returned by
678 * CIFSSMBOpen and then calling get_inode_info with returned buf since
679 * file might have write behind data that needs to be flushed and server
680 * version of file size can be stale. If we knew for sure that inode was
681 * not dirty locally we could do this.
682 */
Pavel Shilovsky226730b2013-07-05 12:00:30 +0400683 rc = server->ops->open(xid, &oparms, &oplock, NULL);
Pavel Shilovskyb33fcf12013-07-11 10:58:30 +0400684 if (rc == -ENOENT && oparms.reconnect == false) {
685 /* durable handle timeout is expired - open the file again */
686 rc = server->ops->open(xid, &oparms, &oplock, NULL);
687 /* indicate that we need to relock the file */
688 oparms.reconnect = true;
689 }
690
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 if (rc) {
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700692 mutex_unlock(&cfile->fh_mutex);
Joe Perchesf96637b2013-05-04 22:12:25 -0500693 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
694 cifs_dbg(FYI, "oplock: %d\n", oplock);
Jeff Layton15886172010-10-15 15:33:59 -0400695 goto reopen_error_exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696 }
Jeff Layton15886172010-10-15 15:33:59 -0400697
698reopen_success:
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700699 cfile->invalidHandle = false;
700 mutex_unlock(&cfile->fh_mutex);
701 cinode = CIFS_I(inode);
Jeff Layton15886172010-10-15 15:33:59 -0400702
703 if (can_flush) {
704 rc = filemap_write_and_wait(inode->i_mapping);
Jeff Laytoneb4b7562010-10-22 14:52:29 -0400705 mapping_set_error(inode->i_mapping, rc);
Jeff Layton15886172010-10-15 15:33:59 -0400706
Jeff Layton15886172010-10-15 15:33:59 -0400707 if (tcon->unix_ext)
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700708 rc = cifs_get_inode_info_unix(&inode, full_path,
709 inode->i_sb, xid);
Jeff Layton15886172010-10-15 15:33:59 -0400710 else
Pavel Shilovsky2ae78ba2012-09-18 16:20:27 -0700711 rc = cifs_get_inode_info(&inode, full_path, NULL,
712 inode->i_sb, xid, NULL);
713 }
714 /*
715 * Else we are writing out data to server already and could deadlock if
716 * we tried to flush data, and since we do not know if we have data that
717 * would invalidate the current end of file on the server we can not go
718 * to the server to get the new inode info.
719 */
Pavel Shilovskye66673e2010-11-02 12:00:42 +0300720
Pavel Shilovsky9cbc0b72013-07-09 18:40:58 +0400721 server->ops->set_fid(cfile, &cfile->fid, oplock);
722 if (oparms.reconnect)
723 cifs_relock_file(cfile);
Jeff Layton15886172010-10-15 15:33:59 -0400724
725reopen_error_exit:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726 kfree(full_path);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +0400727 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728 return rc;
729}
730
731int cifs_close(struct inode *inode, struct file *file)
732{
Jeff Layton77970692011-04-05 16:23:47 -0700733 if (file->private_data != NULL) {
734 cifsFileInfo_put(file->private_data);
735 file->private_data = NULL;
736 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737
Steve Frenchcdff08e2010-10-21 22:46:14 +0000738 /* return code from the ->release op is always ignored */
739 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700740}
741
/*
 * ->release for directories opened via readdir. Closes the server-side
 * search handle if one is still outstanding, frees the cached network
 * buffer from the search state, and releases the private data.
 * Always returns the (ignored) rc from the close attempt, forced to 0.
 */
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	/* nothing was ever opened on this struct file */
	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	/*
	 * cifs_file_list_lock guards invalidHandle/search state; mark the
	 * handle invalid before issuing the close so nobody reuses it.
	 */
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	/* release the SMB response buffer cached by the search */
	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
792
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400793static struct cifsLockInfo *
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300794cifs_lock_init(__u64 offset, __u64 length, __u8 type)
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000795{
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400796 struct cifsLockInfo *lock =
Steve Frenchfb8c4b12007-07-10 01:16:18 +0000797 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400798 if (!lock)
799 return lock;
800 lock->offset = offset;
801 lock->length = length;
802 lock->type = type;
Pavel Shilovskya88b4702011-10-29 17:17:59 +0400803 lock->pid = current->tgid;
804 INIT_LIST_HEAD(&lock->blist);
805 init_waitqueue_head(&lock->block_q);
806 return lock;
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400807}
808
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -0700809void
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400810cifs_del_lock_waiters(struct cifsLockInfo *lock)
811{
812 struct cifsLockInfo *li, *tmp;
813 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
814 list_del_init(&li->blist);
815 wake_up(&li->block_q);
816 }
817}
818
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400819#define CIFS_LOCK_OP 0
820#define CIFS_READ_OP 1
821#define CIFS_WRITE_OP 2
822
/* @rw_check : CIFS_LOCK_OP - no op, CIFS_READ_OP - read, CIFS_WRITE_OP - write */
/*
 * Scan one open-file's cached lock list (@fdlocks) for a lock that
 * conflicts with the [offset, offset+length) range of @type requested
 * through @cfile. @rw_check tells what operation is being validated
 * (CIFS_LOCK_OP / CIFS_READ_OP / CIFS_WRITE_OP) and relaxes the check
 * for I/O issued by the same owner through the same fid. On conflict,
 * stores the blocking lock in *@conf_lock (if non-NULL) and returns
 * true; returns false when the whole list is compatible.
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		/* ranges that do not overlap can never conflict */
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		/* two shared locks by the same owner/fid or of equal type coexist */
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
854
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700855bool
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300856cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700857 __u8 type, struct cifsLockInfo **conf_lock,
Pavel Shilovsky081c0412012-11-27 18:38:53 +0400858 int rw_check)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400859{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300860 bool rc = false;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700861 struct cifs_fid_locks *cur;
Pavel Shilovsky55157df2012-02-28 14:04:17 +0300862 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300863
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700864 list_for_each_entry(cur, &cinode->llist, llist) {
865 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
Pavel Shilovsky579f9052012-09-19 06:22:44 -0700866 cfile, conf_lock, rw_check);
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300867 if (rc)
868 break;
869 }
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300870
871 return rc;
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400872}
873
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	/* read access is enough - we only inspect the cached lock lists */
	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		/* report the conflicting lock's range/owner back to the caller */
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		/* local cache is not authoritative - ask the server */
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
911
Pavel Shilovsky161ebf92011-10-29 17:17:58 +0400912static void
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300913cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400914{
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +0300915 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700916 down_write(&cinode->lock_sem);
Pavel Shilovskyf45d3412012-09-19 06:22:43 -0700917 list_add_tail(&lock->llist, &cfile->llist->locks);
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700918 up_write(&cinode->lock_sem);
Jeremy Allison7ee1af72006-08-02 21:56:33 +0000919}
920
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		/* no conflict and brlocks are cached - record it locally only */
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		/*
		 * Queue ourselves on the conflicting lock's blocked list and
		 * sleep until cifs_del_lock_waiters() detaches us (both list
		 * pointers point back at ourselves), then retry from scratch.
		 */
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		/* interrupted - unhook ourselves from the waiter list */
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
967
Pavel Shilovsky9a5101c2011-11-07 16:11:24 +0300968/*
969 * Check if there is another lock that prevents us to set the lock (posix
970 * style). If such a lock exists, update the flock structure with its
971 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
972 * or leave it the same if we can't. Returns 0 if we don't need to request to
973 * the server or 1 otherwise.
974 */
Pavel Shilovsky85160e02011-10-22 15:33:29 +0400975static int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400976cifs_posix_lock_test(struct file *file, struct file_lock *flock)
977{
978 int rc = 0;
Al Viro496ad9a2013-01-23 17:07:38 -0500979 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400980 unsigned char saved_type = flock->fl_type;
981
Pavel Shilovsky50792762011-10-29 17:17:57 +0400982 if ((flock->fl_flags & FL_POSIX) == 0)
983 return 1;
984
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700985 down_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400986 posix_test_lock(file, flock);
987
988 if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
989 flock->fl_type = saved_type;
990 rc = 1;
991 }
992
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -0700993 up_read(&cinode->lock_sem);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +0400994 return rc;
995}
996
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		/* can't cache - the caller must send the lock to the server */
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		/*
		 * Blocking lock: wait (interruptibly) until the conflicting
		 * lock goes away, then retry; on interruption remove
		 * ourselves from the blocked-lock list.
		 */
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}
1029
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001030int
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001031cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001032{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001033 unsigned int xid;
1034 int rc = 0, stored_rc;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001035 struct cifsLockInfo *li, *tmp;
1036 struct cifs_tcon *tcon;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001037 unsigned int num, max_num, max_buf;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001038 LOCKING_ANDX_RANGE *buf, *cur;
1039 int types[] = {LOCKING_ANDX_LARGE_FILES,
1040 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1041 int i;
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001042
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001043 xid = get_xid();
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001044 tcon = tlink_tcon(cfile->tlink);
1045
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001046 /*
1047 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1048 * and check it for zero before using.
1049 */
1050 max_buf = tcon->ses->server->maxBuf;
1051 if (!max_buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001052 free_xid(xid);
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001053 return -EINVAL;
1054 }
1055
1056 max_num = (max_buf - sizeof(struct smb_hdr)) /
1057 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001058 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1059 if (!buf) {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001060 free_xid(xid);
Pavel Shilovskye2f28862012-08-29 21:13:38 +04001061 return -ENOMEM;
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001062 }
1063
1064 for (i = 0; i < 2; i++) {
1065 cur = buf;
1066 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001067 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001068 if (li->type != types[i])
1069 continue;
1070 cur->Pid = cpu_to_le16(li->pid);
1071 cur->LengthLow = cpu_to_le32((u32)li->length);
1072 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1073 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1074 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1075 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001076 stored_rc = cifs_lockv(xid, tcon,
1077 cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001078 (__u8)li->type, 0, num,
1079 buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001080 if (stored_rc)
1081 rc = stored_rc;
1082 cur = buf;
1083 num = 0;
1084 } else
1085 cur++;
1086 }
1087
1088 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001089 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001090 (__u8)types[i], 0, num, buf);
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001091 if (stored_rc)
1092 rc = stored_rc;
1093 }
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001094 }
1095
Pavel Shilovsky32b9aaf2011-10-22 15:33:32 +04001096 kfree(buf);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001097 free_xid(xid);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001098 return rc;
1099}
1100
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001101/* copied from fs/locks.c with a name change */
1102#define cifs_for_each_lock(inode, lockp) \
1103 for (lockp = &inode->i_flock; *lockp != NULL; \
1104 lockp = &(*lockp)->fl_next)
1105
/*
 * Snapshot of one posix brlock, preallocated so the lock list can be
 * copied while holding inode->i_lock (no allocation under the spinlock)
 * and then pushed to the server afterwards.
 */
struct lock_to_push {
	struct list_head llist;	/* entry in the local locks_to_send list */
	__u64 offset;		/* start of the locked range */
	__u64 length;		/* length of the locked range */
	__u32 pid;		/* lock owner pid reported to the server */
	__u16 netfid;		/* file handle the lock is taken on */
	__u8 type;		/* CIFS_RDLCK or CIFS_WRLCK */
};
1114
/*
 * Push all cached posix byte-range locks of @cfile's inode to the server.
 * Works in three phases: count the FL_POSIX locks under i_lock,
 * preallocate that many lock_to_push entries (no allocation is allowed
 * under the spinlock), then copy the lock list under i_lock again and
 * send each entry with CIFSSMBPosixLock. Returns 0 or the last error.
 */
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = cfile->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* phase 1: count posix locks on the inode */
	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	spin_unlock(&inode->i_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	/* phase 2: snapshot each posix lock into a preallocated entry */
	el = locks_to_send.next;
	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&inode->i_lock);

	/* phase 3: send each snapshot to the server and free it */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	/* allocation failed part-way - release what we already queued */
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
1203
1204static int
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001205cifs_push_locks(struct cifsFileInfo *cfile)
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001206{
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001207 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001208 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001209 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky9ec3c882012-11-22 17:00:10 +04001210 int rc = 0;
1211
1212 /* we are going to update can_cache_brlcks here - need a write access */
1213 down_write(&cinode->lock_sem);
1214 if (!cinode->can_cache_brlcks) {
1215 up_write(&cinode->lock_sem);
1216 return rc;
1217 }
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001218
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001219 if (cap_unix(tcon->ses) &&
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001220 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1221 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001222 rc = cifs_push_posix_locks(cfile);
1223 else
1224 rc = tcon->ses->server->ops->push_mand_locks(cfile);
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001225
Pavel Shilovskyb8db9282012-11-22 17:07:16 +04001226 cinode->can_cache_brlcks = false;
1227 up_write(&cinode->lock_sem);
1228 return rc;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001229}
1230
/*
 * Decode a VFS file_lock into the pieces the lock paths need: the
 * server-specific lock *type bits, whether this is a *lock or *unlock
 * request, and whether the caller may block (*wait_flag). Unsupported
 * flag combinations are only logged, not rejected.
 */
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	/* warn about any flag outside the set we know how to handle */
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	/* start from the server's large-file lock type, then OR in specifics */
	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001278static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001279cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001280 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001281{
1282 int rc = 0;
1283 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001284 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1285 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001286 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001287 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001289 if (posix_lck) {
1290 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001291
1292 rc = cifs_posix_lock_test(file, flock);
1293 if (!rc)
1294 return rc;
1295
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001296 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001297 posix_lock_type = CIFS_RDLCK;
1298 else
1299 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001300 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001301 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001302 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 return rc;
1304 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001305
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001306 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001307 if (!rc)
1308 return rc;
1309
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001310 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001311 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1312 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001313 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001314 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1315 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001316 flock->fl_type = F_UNLCK;
1317 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001318 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1319 rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001320 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001321 }
1322
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001323 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001324 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001325 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001326 }
1327
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001328 type &= ~server->vals->exclusive_lock_type;
1329
1330 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1331 type | server->vals->shared_lock_type,
1332 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001333 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001334 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1335 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001336 flock->fl_type = F_RDLCK;
1337 if (rc != 0)
Joe Perchesf96637b2013-05-04 22:12:25 -05001338 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
1339 rc);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001340 } else
1341 flock->fl_type = F_WRLCK;
1342
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001343 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001344}
1345
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001346void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001347cifs_move_llist(struct list_head *source, struct list_head *dest)
1348{
1349 struct list_head *li, *tmp;
1350 list_for_each_safe(li, tmp, source)
1351 list_move(li, dest);
1352}
1353
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001354void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001355cifs_free_llist(struct list_head *llist)
1356{
1357 struct cifsLockInfo *li, *tmp;
1358 list_for_each_entry_safe(li, tmp, llist, llist) {
1359 cifs_del_lock_waiters(li);
1360 list_del(&li->llist);
1361 kfree(li);
1362 }
1363}
1364
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001365int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001366cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1367 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001368{
1369 int rc = 0, stored_rc;
1370 int types[] = {LOCKING_ANDX_LARGE_FILES,
1371 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1372 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001373 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001374 LOCKING_ANDX_RANGE *buf, *cur;
1375 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1376 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1377 struct cifsLockInfo *li, *tmp;
1378 __u64 length = 1 + flock->fl_end - flock->fl_start;
1379 struct list_head tmp_llist;
1380
1381 INIT_LIST_HEAD(&tmp_llist);
1382
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001383 /*
1384 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1385 * and check it for zero before using.
1386 */
1387 max_buf = tcon->ses->server->maxBuf;
1388 if (!max_buf)
1389 return -EINVAL;
1390
1391 max_num = (max_buf - sizeof(struct smb_hdr)) /
1392 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001393 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1394 if (!buf)
1395 return -ENOMEM;
1396
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001397 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001398 for (i = 0; i < 2; i++) {
1399 cur = buf;
1400 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001401 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001402 if (flock->fl_start > li->offset ||
1403 (flock->fl_start + length) <
1404 (li->offset + li->length))
1405 continue;
1406 if (current->tgid != li->pid)
1407 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001408 if (types[i] != li->type)
1409 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001410 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001411 /*
1412 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001413 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001414 */
1415 list_del(&li->llist);
1416 cifs_del_lock_waiters(li);
1417 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001418 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001419 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001420 cur->Pid = cpu_to_le16(li->pid);
1421 cur->LengthLow = cpu_to_le32((u32)li->length);
1422 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1423 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1424 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1425 /*
1426 * We need to save a lock here to let us add it again to
1427 * the file's list if the unlock range request fails on
1428 * the server.
1429 */
1430 list_move(&li->llist, &tmp_llist);
1431 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001432 stored_rc = cifs_lockv(xid, tcon,
1433 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001434 li->type, num, 0, buf);
1435 if (stored_rc) {
1436 /*
1437 * We failed on the unlock range
1438 * request - add all locks from the tmp
1439 * list to the head of the file's list.
1440 */
1441 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001442 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001443 rc = stored_rc;
1444 } else
1445 /*
1446 * The unlock range request succeed -
1447 * free the tmp list.
1448 */
1449 cifs_free_llist(&tmp_llist);
1450 cur = buf;
1451 num = 0;
1452 } else
1453 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001454 }
1455 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001456 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001457 types[i], num, 0, buf);
1458 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001459 cifs_move_llist(&tmp_llist,
1460 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001461 rc = stored_rc;
1462 } else
1463 cifs_free_llist(&tmp_llist);
1464 }
1465 }
1466
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001467 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001468 kfree(buf);
1469 return rc;
1470}
1471
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001472static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001473cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001474 bool wait_flag, bool posix_lck, int lock, int unlock,
1475 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001476{
1477 int rc = 0;
1478 __u64 length = 1 + flock->fl_end - flock->fl_start;
1479 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1480 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001481 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001482 struct inode *inode = cfile->dentry->d_inode;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001483
1484 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001485 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001486
1487 rc = cifs_posix_lock_set(file, flock);
1488 if (!rc || rc < 0)
1489 return rc;
1490
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001491 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001492 posix_lock_type = CIFS_RDLCK;
1493 else
1494 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001495
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001496 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001497 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001498
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001499 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1500 current->tgid, flock->fl_start, length,
1501 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001502 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001503 }
1504
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001505 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001506 struct cifsLockInfo *lock;
1507
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001508 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001509 if (!lock)
1510 return -ENOMEM;
1511
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001512 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001513 if (rc < 0) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001514 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001515 return rc;
1516 }
1517 if (!rc)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001518 goto out;
1519
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001520 /*
1521 * Windows 7 server can delay breaking lease from read to None
1522 * if we set a byte-range lock on a file - break it explicitly
1523 * before sending the lock to the server to be sure the next
1524 * read won't conflict with non-overlapted locks due to
1525 * pagereading.
1526 */
1527 if (!CIFS_I(inode)->clientCanCacheAll &&
1528 CIFS_I(inode)->clientCanCacheRead) {
1529 cifs_invalidate_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05001530 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
1531 inode);
Pavel Shilovsky63b7d3a2012-12-24 14:41:19 +04001532 CIFS_I(inode)->clientCanCacheRead = false;
1533 }
1534
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001535 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1536 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001537 if (rc) {
1538 kfree(lock);
Pavel Shilovsky21cb2d92012-11-22 18:56:39 +04001539 return rc;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001540 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001541
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001542 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001543 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001544 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001545
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001546out:
1547 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001548 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001549 return rc;
1550}
1551
1552int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1553{
1554 int rc, xid;
1555 int lock = 0, unlock = 0;
1556 bool wait_flag = false;
1557 bool posix_lck = false;
1558 struct cifs_sb_info *cifs_sb;
1559 struct cifs_tcon *tcon;
1560 struct cifsInodeInfo *cinode;
1561 struct cifsFileInfo *cfile;
1562 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001563 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001564
1565 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001566 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001567
Joe Perchesf96637b2013-05-04 22:12:25 -05001568 cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
1569 cmd, flock->fl_flags, flock->fl_type,
1570 flock->fl_start, flock->fl_end);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001571
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001572 cfile = (struct cifsFileInfo *)file->private_data;
1573 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001574
1575 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1576 tcon->ses->server);
1577
1578 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001579 netfid = cfile->fid.netfid;
Al Viro496ad9a2013-01-23 17:07:38 -05001580 cinode = CIFS_I(file_inode(file));
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001581
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001582 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001583 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1584 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1585 posix_lck = true;
1586 /*
1587 * BB add code here to normalize offset and length to account for
1588 * negative length which we can not accept over the wire.
1589 */
1590 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001591 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001592 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001593 return rc;
1594 }
1595
1596 if (!lock && !unlock) {
1597 /*
1598 * if no lock or unlock then nothing to do since we do not
1599 * know what it is
1600 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001601 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001602 return -EOPNOTSUPP;
1603 }
1604
1605 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1606 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001607 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 return rc;
1609}
1610
Jeff Layton597b0272012-03-23 14:40:56 -04001611/*
1612 * update the file size (if needed) after a write. Should be called with
1613 * the inode->i_lock held
1614 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001615void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001616cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1617 unsigned int bytes_written)
1618{
1619 loff_t end_of_write = offset + bytes_written;
1620
1621 if (end_of_write > cifsi->server_eof)
1622 cifsi->server_eof = end_of_write;
1623}
1624
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001625static ssize_t
1626cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1627 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628{
1629 int rc = 0;
1630 unsigned int bytes_written = 0;
1631 unsigned int total_written;
1632 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001633 struct cifs_tcon *tcon;
1634 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001635 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001636 struct dentry *dentry = open_file->dentry;
1637 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001638 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639
Jeff Layton7da4b492010-10-15 15:34:00 -04001640 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641
Joe Perchesf96637b2013-05-04 22:12:25 -05001642 cifs_dbg(FYI, "write %zd bytes to offset %lld of %s\n",
1643 write_size, *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001645 tcon = tlink_tcon(open_file->tlink);
1646 server = tcon->ses->server;
1647
1648 if (!server->ops->sync_write)
1649 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001650
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001651 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 for (total_written = 0; write_size > total_written;
1654 total_written += bytes_written) {
1655 rc = -EAGAIN;
1656 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001657 struct kvec iov[2];
1658 unsigned int len;
1659
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 /* we could deadlock if we called
1662 filemap_fdatawait from here so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001663 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 server now */
Jeff Layton15886172010-10-15 15:33:59 -04001665 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 if (rc != 0)
1667 break;
1668 }
Steve French3e844692005-10-03 13:37:24 -07001669
Jeff Laytonca83ce32011-04-12 09:13:44 -04001670 len = min((size_t)cifs_sb->wsize,
1671 write_size - total_written);
1672 /* iov[0] is reserved for smb header */
1673 iov[1].iov_base = (char *)write_data + total_written;
1674 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001675 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001676 io_parms.tcon = tcon;
1677 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001678 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001679 rc = server->ops->sync_write(xid, open_file, &io_parms,
1680 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 }
1682 if (rc || (bytes_written == 0)) {
1683 if (total_written)
1684 break;
1685 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001686 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 return rc;
1688 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001689 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001690 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001691 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001692 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001693 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001694 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 }
1696
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001697 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698
Jeff Layton7da4b492010-10-15 15:34:00 -04001699 if (total_written > 0) {
1700 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001701 if (*offset > dentry->d_inode->i_size)
1702 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001703 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001705 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001706 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 return total_written;
1708}
1709
Jeff Layton6508d902010-09-29 19:51:11 -04001710struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1711 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001712{
1713 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001714 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1715
1716 /* only filter by fsuid on multiuser mounts */
1717 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1718 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001719
Jeff Layton44772882010-10-15 15:34:03 -04001720 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001721 /* we could simply get the first_list_entry since write-only entries
1722 are always at the end of the list but since the first entry might
1723 have a close pending, we go through the whole list */
1724 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001725 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001726 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001727 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001728 if (!open_file->invalidHandle) {
1729 /* found a good file */
1730 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001731 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001732 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001733 return open_file;
1734 } /* else might as well continue, and look for
1735 another, or simply have the caller reopen it
1736 again rather than trying to fix this handle */
1737 } else /* write only file */
1738 break; /* write only files are last so must be done */
1739 }
Jeff Layton44772882010-10-15 15:34:03 -04001740 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001741 return NULL;
1742}
Steve French630f3f0c2007-10-25 21:17:17 +00001743
Jeff Layton6508d902010-09-29 19:51:11 -04001744struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1745 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001746{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001747 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001748 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001749 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001750 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001751 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001752
Steve French60808232006-04-22 15:53:05 +00001753 /* Having a null inode here (because mapping->host was set to zero by
1754 the VFS or MM) should not happen but we had reports of on oops (due to
1755 it being zero) during stress testcases so we need to check for it */
1756
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001757 if (cifs_inode == NULL) {
Joe Perchesf96637b2013-05-04 22:12:25 -05001758 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
Steve French60808232006-04-22 15:53:05 +00001759 dump_stack();
1760 return NULL;
1761 }
1762
Jeff Laytond3892292010-11-02 16:22:50 -04001763 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1764
Jeff Layton6508d902010-09-29 19:51:11 -04001765 /* only filter by fsuid on multiuser mounts */
1766 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1767 fsuid_only = false;
1768
Jeff Layton44772882010-10-15 15:34:03 -04001769 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001770refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001771 if (refind > MAX_REOPEN_ATT) {
1772 spin_unlock(&cifs_file_list_lock);
1773 return NULL;
1774 }
Steve French6148a742005-10-05 12:23:19 -07001775 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001776 if (!any_available && open_file->pid != current->tgid)
1777 continue;
Eric W. Biedermanfef59fd2013-02-06 02:23:02 -08001778 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
Jeff Layton6508d902010-09-29 19:51:11 -04001779 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001780 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001781 if (!open_file->invalidHandle) {
1782 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001783 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001784 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001785 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001786 } else {
1787 if (!inv_file)
1788 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001789 }
Steve French6148a742005-10-05 12:23:19 -07001790 }
1791 }
Jeff Layton2846d382008-09-22 21:33:33 -04001792 /* couldn't find useable FH with same pid, try any available */
1793 if (!any_available) {
1794 any_available = true;
1795 goto refind_writable;
1796 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001797
1798 if (inv_file) {
1799 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001800 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001801 }
1802
Jeff Layton44772882010-10-15 15:34:03 -04001803 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001804
1805 if (inv_file) {
1806 rc = cifs_reopen_file(inv_file, false);
1807 if (!rc)
1808 return inv_file;
1809 else {
1810 spin_lock(&cifs_file_list_lock);
1811 list_move_tail(&inv_file->flist,
1812 &cifs_inode->openFileList);
1813 spin_unlock(&cifs_file_list_lock);
1814 cifsFileInfo_put(inv_file);
1815 spin_lock(&cifs_file_list_lock);
1816 ++refind;
1817 goto refind_writable;
1818 }
1819 }
1820
Steve French6148a742005-10-05 12:23:19 -07001821 return NULL;
1822}
1823
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1825{
1826 struct address_space *mapping = page->mapping;
1827 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1828 char *write_data;
1829 int rc = -EFAULT;
1830 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001832 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833
1834 if (!mapping || !mapping->host)
1835 return -EFAULT;
1836
1837 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838
1839 offset += (loff_t)from;
1840 write_data = kmap(page);
1841 write_data += from;
1842
1843 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1844 kunmap(page);
1845 return -EIO;
1846 }
1847
1848 /* racing with truncate? */
1849 if (offset > mapping->host->i_size) {
1850 kunmap(page);
1851 return 0; /* don't care */
1852 }
1853
1854 /* check to make sure that we are not extending the file */
1855 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001856 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001857
Jeff Layton6508d902010-09-29 19:51:11 -04001858 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001859 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001860 bytes_written = cifs_write(open_file, open_file->pid,
1861 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001862 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001864 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001865 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001866 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001867 else if (bytes_written < 0)
1868 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001869 } else {
Joe Perchesf96637b2013-05-04 22:12:25 -05001870 cifs_dbg(FYI, "No writeable filehandles for inode\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 rc = -EIO;
1872 }
1873
1874 kunmap(page);
1875 return rc;
1876}
1877
/*
 * ->writepages handler: gather runs of contiguous dirty pages and push each
 * run to the server with a single async write (wdata).  Falls back to the
 * generic one-page-at-a-time path when wsize is smaller than a page.
 *
 * Returns 0 on success or the last send error (-ENOMEM/-EBADF/server rc).
 */
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;	/* i.e. (pgoff_t)-1: scan to end of file */
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		/* at most one wsize worth of pages per wdata */
		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
				end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
							PAGECACHE_TAG_DIRTY,
							tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			/* no dirty pages left in the range */
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		/* lock a contiguous, writable prefix of the found pages */
		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
					!clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= i_size_read(mapping->host)) {
				/* page lies entirely beyond EOF; skip it */
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		/* last page may extend past EOF; trim the tail length */
		wdata->tailsz =
			min(i_size_read(mapping->host) -
			    page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
					wdata->tailsz;

		/* grab a writable handle and send; retry on -EAGAIN for sync */
		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cifs_dbg(VFS, "No writable handles for inode\n");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
/*
 * Write one locked page back to the server via cifs_partialpagewrite().
 * The caller holds the page lock; this ends writeback but does NOT unlock
 * the page -- see cifs_writepage() for the unlocking wrapper.
 */
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	/* data-integrity sync must not drop the page on -EAGAIN: retry */
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}
2126
/* ->writepage: write the locked page, then release the page lock. */
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int retval;

	retval = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return retval;
}
2133
/*
 * ->write_end handler: finish a buffered write begun by write_begin.
 * If the page is fully valid, just dirty it and let writeback push it;
 * otherwise write the copied range to the server synchronously.  Updates
 * i_size when the write extends the file, then drops the page lock and the
 * reference taken by write_begin.
 *
 * Returns the number of bytes accepted, or a negative error.
 */
static int cifs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	/* rwpidforward mount option: tag the write with the opener's pid */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		/*
		 * write_begin marked the page Checked instead of reading it;
		 * it is only fully valid if the whole requested range landed.
		 */
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		/* partial page, not readable from cache: push it now */
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		/* extend i_size if this write went past current EOF */
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}
2194
/*
 * fsync for files opened with strict cache semantics: flush dirty pages in
 * [start, end], invalidate the page cache when we hold no read oplock (it
 * may be stale), then ask the server to flush the handle unless the
 * nostrictsync mount flag is set.
 */
int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
		 file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		/* no read oplock -- cached data may be stale, drop it */
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;	/* protocol lacks a flush op */
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}
2237
Josef Bacik02c24a82011-07-16 20:44:56 -04002238int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002239{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002240 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002241 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002242 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002243 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002244 struct cifsFileInfo *smbfile = file->private_data;
2245 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002246 struct inode *inode = file->f_mapping->host;
2247
2248 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2249 if (rc)
2250 return rc;
2251 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002252
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002253 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002254
Joe Perchesf96637b2013-05-04 22:12:25 -05002255 cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
2256 file->f_path.dentry->d_name.name, datasync);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002257
2258 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002259 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2260 server = tcon->ses->server;
2261 if (server->ops->flush)
2262 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2263 else
2264 rc = -ENOSYS;
2265 }
Steve Frenchb298f222009-02-21 21:17:43 +00002266
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002267 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002268 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 return rc;
2270}
2271
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272/*
2273 * As file closes, flush all cached write data for this inode checking
2274 * for write behind errors.
2275 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002276int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277{
Al Viro496ad9a2013-01-23 17:07:38 -05002278 struct inode *inode = file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 int rc = 0;
2280
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002281 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002282 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002283
Joe Perchesf96637b2013-05-04 22:12:25 -05002284 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285
2286 return rc;
2287}
2288
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002289static int
2290cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2291{
2292 int rc = 0;
2293 unsigned long i;
2294
2295 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002296 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002297 if (!pages[i]) {
2298 /*
2299 * save number of pages we have already allocated and
2300 * return with ENOMEM error
2301 */
2302 num_pages = i;
2303 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002304 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002305 }
2306 }
2307
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002308 if (rc) {
2309 for (i = 0; i < num_pages; i++)
2310 put_page(pages[i]);
2311 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002312 return rc;
2313}
2314
2315static inline
2316size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2317{
2318 size_t num_pages;
2319 size_t clen;
2320
2321 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002322 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002323
2324 if (cur_len)
2325 *cur_len = clen;
2326
2327 return num_pages;
2328}
2329
/*
 * Workqueue completion for an uncached async write: update the cached
 * server EOF (and i_size if the write extended the file), wake the issuer
 * waiting on wdata->done, release page references, and drop our wdata ref.
 */
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	/* i_lock serializes server_eof / i_size updates */
	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	/*
	 * On -EAGAIN the issuer will resend this wdata, so the pages must
	 * keep their references; otherwise we are done with them.
	 */
	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}
2354
2355/* attempt to send write to server, retry on any -EAGAIN errors */
2356static int
2357cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2358{
2359 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002360 struct TCP_Server_Info *server;
2361
2362 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002363
2364 do {
2365 if (wdata->cfile->invalidHandle) {
2366 rc = cifs_reopen_file(wdata->cfile, false);
2367 if (rc != 0)
2368 continue;
2369 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002370 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002371 } while (rc == -EAGAIN);
2372
2373 return rc;
2374}
2375
/*
 * Uncached (cache-bypassing) vectored write.  The iovec data is copied into
 * wsize-sized chunks of freshly allocated pages, each chunk is sent to the
 * server as an async write, and then the replies are collected in offset
 * order.  *poffset is advanced by the number of bytes actually written.
 *
 * Returns the total bytes written, or a negative error when nothing was.
 */
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	/* rwpidforward mount option: tag writes with the opener's pid */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	/* send phase: one wdata per wsize-sized chunk */
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		/* copy userspace data into the pages; short copies shrink
		   cur_len to what was actually transferred */
		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other write succeeds, then
	 * we'll end up returning whatever was written. If it fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}
2505
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002506ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002507 unsigned long nr_segs, loff_t pos)
2508{
2509 ssize_t written;
2510 struct inode *inode;
2511
Al Viro496ad9a2013-01-23 17:07:38 -05002512 inode = file_inode(iocb->ki_filp);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002513
2514 /*
2515 * BB - optimize the way when signing is disabled. We can drop this
2516 * extra memory-to-memory copying and use iovec buffers for constructing
2517 * write request.
2518 */
2519
2520 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2521 if (written > 0) {
2522 CIFS_I(inode)->invalid_mapping = true;
2523 iocb->ki_pos = pos;
2524 }
2525
2526 return written;
2527}
2528
/*
 * Cached write for oplocked files under mandatory byte-range locking:
 * proceed through the page cache only if no conflicting brlock covers the
 * target range.  Returns bytes written, -EACCES on a lock conflict, or a
 * sync error.
 */
static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		/* honour O_SYNC/O_DSYNC; a sync failure overrides success */
		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	/* lock_sem held across the write so the lock list stays stable */
	up_read(&cinode->lock_sem);
	return rc;
}
2567
2568ssize_t
2569cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2570 unsigned long nr_segs, loff_t pos)
2571{
Al Viro496ad9a2013-01-23 17:07:38 -05002572 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002573 struct cifsInodeInfo *cinode = CIFS_I(inode);
2574 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2575 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2576 iocb->ki_filp->private_data;
2577 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002578 ssize_t written;
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002579
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002580 if (cinode->clientCanCacheAll) {
2581 if (cap_unix(tcon->ses) &&
2582 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2583 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2584 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2585 return cifs_writev(iocb, iov, nr_segs, pos);
Pavel Shilovskyc299dd02012-12-06 22:07:52 +04002586 }
Pavel Shilovskyca8aa292012-12-21 15:05:47 +04002587 /*
2588 * For non-oplocked files in strict cache mode we need to write the data
2589 * to the server exactly from the pos to pos+len-1 rather than flush all
2590 * affected pages because it may cause a error with mandatory locks on
2591 * these pages but not on the region from pos to ppos+len-1.
2592 */
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002593 written = cifs_user_writev(iocb, iov, nr_segs, pos);
2594 if (written > 0 && cinode->clientCanCacheRead) {
2595 /*
2596 * Windows 7 server can delay breaking level2 oplock if a write
2597 * request comes - break it on the client to prevent reading
2598 * an old data.
2599 */
2600 cifs_invalidate_mapping(inode);
Joe Perchesf96637b2013-05-04 22:12:25 -05002601 cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
2602 inode);
Pavel Shilovsky88cf75a2012-12-21 15:07:52 +04002603 cinode->clientCanCacheRead = false;
2604 }
2605 return written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002606}
2607
Jeff Layton0471ca32012-05-16 07:13:16 -04002608static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002609cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002610{
2611 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002612
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002613 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2614 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002615 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002616 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002617 INIT_LIST_HEAD(&rdata->list);
2618 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002619 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002620 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002621
Jeff Layton0471ca32012-05-16 07:13:16 -04002622 return rdata;
2623}
2624
Jeff Layton6993f742012-05-16 07:13:17 -04002625void
2626cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002627{
Jeff Layton6993f742012-05-16 07:13:17 -04002628 struct cifs_readdata *rdata = container_of(refcount,
2629 struct cifs_readdata, refcount);
2630
2631 if (rdata->cfile)
2632 cifsFileInfo_put(rdata->cfile);
2633
Jeff Layton0471ca32012-05-16 07:13:16 -04002634 kfree(rdata);
2635}
2636
Jeff Layton2a1bb132012-05-16 07:13:17 -04002637static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002638cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002639{
2640 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002641 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002642 unsigned int i;
2643
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002644 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002645 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2646 if (!page) {
2647 rc = -ENOMEM;
2648 break;
2649 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002650 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002651 }
2652
2653 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002654 for (i = 0; i < nr_pages; i++) {
2655 put_page(rdata->pages[i]);
2656 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002657 }
2658 }
2659 return rc;
2660}
2661
2662static void
2663cifs_uncached_readdata_release(struct kref *refcount)
2664{
Jeff Layton1c892542012-05-16 07:13:17 -04002665 struct cifs_readdata *rdata = container_of(refcount,
2666 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002667 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002668
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002669 for (i = 0; i < rdata->nr_pages; i++) {
2670 put_page(rdata->pages[i]);
2671 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002672 }
2673 cifs_readdata_release(refcount);
2674}
2675
/*
 * Send an async read request, retrying as long as the send (or a needed
 * handle reopen) reports -EAGAIN. If the cached handle was invalidated by
 * a reconnect, the file is reopened before (re)issuing the read. Any
 * non-EAGAIN error from either step is returned to the caller; 0 means
 * the request was handed to the transport.
 */
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			/* loop condition exits unless the reopen said -EAGAIN */
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}
2695
Jeff Layton1c892542012-05-16 07:13:17 -04002696/**
2697 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2698 * @rdata: the readdata response with list of pages holding data
2699 * @iov: vector in which we should copy the data
2700 * @nr_segs: number of segments in vector
2701 * @offset: offset into file of the first iovec
2702 * @copied: used to return the amount of data copied to the iov
2703 *
2704 * This function copies data from a list of pages in a readdata response into
2705 * an array of iovecs. It will first calculate where the data should go
2706 * based on the info in the readdata and then copy the data into that spot.
2707 */
2708static ssize_t
2709cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2710 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2711{
2712 int rc = 0;
2713 struct iov_iter ii;
2714 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002715 ssize_t remaining = rdata->bytes;
2716 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002717 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002718
2719 /* set up iov_iter and advance to the correct offset */
2720 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2721 iov_iter_advance(&ii, pos);
2722
2723 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002724 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002725 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002726 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002727
2728 /* copy a whole page or whatever's left */
2729 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2730
2731 /* ...but limit it to whatever space is left in the iov */
2732 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2733
2734 /* go while there's data to be copied and no errors */
2735 if (copy && !rc) {
2736 pdata = kmap(page);
2737 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2738 (int)copy);
2739 kunmap(page);
2740 if (!rc) {
2741 *copied += copy;
2742 remaining -= copy;
2743 iov_iter_advance(&ii, copy);
2744 }
2745 }
Jeff Layton1c892542012-05-16 07:13:17 -04002746 }
2747
2748 return rc;
2749}
2750
2751static void
2752cifs_uncached_readv_complete(struct work_struct *work)
2753{
2754 struct cifs_readdata *rdata = container_of(work,
2755 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002756
2757 complete(&rdata->done);
2758 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2759}
2760
/*
 * Receive the data payload of an uncached read response from the socket
 * into rdata->pages. @len is the number of data bytes in the response: a
 * short final page is zero-filled past @len and its length recorded in
 * rdata->tailsz, while pages beyond the received data are released and
 * removed from the readdata. Returns the total bytes read from the
 * socket, or the negative error from cifs_readv_from_socket() if nothing
 * was read.
 */
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			      struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			/* skip the kunmap below: this page was never mapped */
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}
2808
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002809static ssize_t
2810cifs_iovec_read(struct file *file, const struct iovec *iov,
2811 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812{
Jeff Layton1c892542012-05-16 07:13:17 -04002813 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002814 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002815 ssize_t total_read = 0;
2816 loff_t offset = *poffset;
2817 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002819 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002821 struct cifs_readdata *rdata, *tmp;
2822 struct list_head rdata_list;
2823 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002824
2825 if (!nr_segs)
2826 return 0;
2827
2828 len = iov_length(iov, nr_segs);
2829 if (!len)
2830 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831
Jeff Layton1c892542012-05-16 07:13:17 -04002832 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002833 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002834 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002835 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002837 if (!tcon->ses->server->ops->async_readv)
2838 return -ENOSYS;
2839
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002840 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2841 pid = open_file->pid;
2842 else
2843 pid = current->tgid;
2844
Steve Frenchad7a2922008-02-07 23:25:02 +00002845 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05002846 cifs_dbg(FYI, "attempting read on write only file instance\n");
Steve Frenchad7a2922008-02-07 23:25:02 +00002847
Jeff Layton1c892542012-05-16 07:13:17 -04002848 do {
2849 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2850 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002851
Jeff Layton1c892542012-05-16 07:13:17 -04002852 /* allocate a readdata struct */
2853 rdata = cifs_readdata_alloc(npages,
2854 cifs_uncached_readv_complete);
2855 if (!rdata) {
2856 rc = -ENOMEM;
2857 goto error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002859
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002860 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002861 if (rc)
2862 goto error;
2863
2864 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002865 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002866 rdata->offset = offset;
2867 rdata->bytes = cur_len;
2868 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002869 rdata->pagesz = PAGE_SIZE;
2870 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002871
2872 rc = cifs_retry_async_readv(rdata);
2873error:
2874 if (rc) {
2875 kref_put(&rdata->refcount,
2876 cifs_uncached_readdata_release);
2877 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 }
Jeff Layton1c892542012-05-16 07:13:17 -04002879
2880 list_add_tail(&rdata->list, &rdata_list);
2881 offset += cur_len;
2882 len -= cur_len;
2883 } while (len > 0);
2884
2885 /* if at least one read request send succeeded, then reset rc */
2886 if (!list_empty(&rdata_list))
2887 rc = 0;
2888
2889 /* the loop below should proceed in the order of increasing offsets */
2890restart_loop:
2891 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2892 if (!rc) {
2893 ssize_t copied;
2894
2895 /* FIXME: freezable sleep too? */
2896 rc = wait_for_completion_killable(&rdata->done);
2897 if (rc)
2898 rc = -EINTR;
2899 else if (rdata->result)
2900 rc = rdata->result;
2901 else {
2902 rc = cifs_readdata_to_iov(rdata, iov,
2903 nr_segs, *poffset,
2904 &copied);
2905 total_read += copied;
2906 }
2907
2908 /* resend call if it's a retryable error */
2909 if (rc == -EAGAIN) {
2910 rc = cifs_retry_async_readv(rdata);
2911 goto restart_loop;
2912 }
2913 }
2914 list_del_init(&rdata->list);
2915 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002916 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002917
Jeff Layton1c892542012-05-16 07:13:17 -04002918 cifs_stats_bytes_read(tcon, total_read);
2919 *poffset += total_read;
2920
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002921 /* mask nodata case */
2922 if (rc == -ENODATA)
2923 rc = 0;
2924
Jeff Layton1c892542012-05-16 07:13:17 -04002925 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926}
2927
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002928ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002929 unsigned long nr_segs, loff_t pos)
2930{
2931 ssize_t read;
2932
2933 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2934 if (read > 0)
2935 iocb->ki_pos = pos;
2936
2937 return read;
2938}
2939
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002940ssize_t
2941cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2942 unsigned long nr_segs, loff_t pos)
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002943{
Al Viro496ad9a2013-01-23 17:07:38 -05002944 struct inode *inode = file_inode(iocb->ki_filp);
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002945 struct cifsInodeInfo *cinode = CIFS_I(inode);
2946 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2947 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2948 iocb->ki_filp->private_data;
2949 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2950 int rc = -EACCES;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002951
2952 /*
2953 * In strict cache mode we need to read from the server all the time
2954 * if we don't have level II oplock because the server can delay mtime
2955 * change - so we can't make a decision about inode invalidating.
2956 * And we can also fail with pagereading if there are mandatory locks
2957 * on pages affected by this read but not on the region from pos to
2958 * pos+len-1.
2959 */
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002960 if (!cinode->clientCanCacheRead)
2961 return cifs_user_readv(iocb, iov, nr_segs, pos);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002962
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002963 if (cap_unix(tcon->ses) &&
2964 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2965 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2966 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2967
2968 /*
2969 * We need to hold the sem to be sure nobody modifies lock list
2970 * with a brlock that prevents reading.
2971 */
2972 down_read(&cinode->lock_sem);
2973 if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
2974 tcon->ses->server->vals->shared_lock_type,
Pavel Shilovsky081c0412012-11-27 18:38:53 +04002975 NULL, CIFS_READ_OP))
Pavel Shilovsky579f9052012-09-19 06:22:44 -07002976 rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
2977 up_read(&cinode->lock_sem);
2978 return rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002979}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002980
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002981static ssize_t
2982cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002983{
2984 int rc = -EACCES;
2985 unsigned int bytes_read = 0;
2986 unsigned int total_read;
2987 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002988 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002990 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002991 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002992 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002993 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002995 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002996 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002997 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002999 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08003000 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003001
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003002 /* FIXME: set up handlers for larger reads and/or convert to async */
3003 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3004
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303006 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003007 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303008 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003009 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07003010 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003011 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003012 server = tcon->ses->server;
3013
3014 if (!server->ops->sync_read) {
3015 free_xid(xid);
3016 return -ENOSYS;
3017 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003018
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003019 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3020 pid = open_file->pid;
3021 else
3022 pid = current->tgid;
3023
Linus Torvalds1da177e2005-04-16 15:20:36 -07003024 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesf96637b2013-05-04 22:12:25 -05003025 cifs_dbg(FYI, "attempting read on write only file instance\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003027 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3028 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04003029 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003030 /*
3031 * For windows me and 9x we do not want to request more than it
3032 * negotiated since it will refuse the read then.
3033 */
3034 if ((tcon->ses) && !(tcon->ses->capabilities &
3035 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03003036 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04003037 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07003038 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003039 rc = -EAGAIN;
3040 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00003041 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04003042 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003043 if (rc != 0)
3044 break;
3045 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003046 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003047 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003048 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003049 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003050 rc = server->ops->sync_read(xid, open_file, &io_parms,
3051 &bytes_read, &cur_offset,
3052 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053 }
3054 if (rc || (bytes_read == 0)) {
3055 if (total_read) {
3056 break;
3057 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003058 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059 return rc;
3060 }
3061 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04003062 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07003063 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064 }
3065 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003066 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003067 return total_read;
3068}
3069
Jeff Laytonca83ce32011-04-12 09:13:44 -04003070/*
3071 * If the page is mmap'ed into a process' page tables, then we need to make
3072 * sure that it doesn't change while being written back.
3073 */
3074static int
3075cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3076{
3077 struct page *page = vmf->page;
3078
3079 lock_page(page);
3080 return VM_FAULT_LOCKED;
3081}
3082
3083static struct vm_operations_struct cifs_file_vm_ops = {
3084 .fault = filemap_fault,
3085 .page_mkwrite = cifs_page_mkwrite,
Konstantin Khlebnikov0b173bc2012-10-08 16:28:46 -07003086 .remap_pages = generic_file_remap_pages,
Jeff Laytonca83ce32011-04-12 09:13:44 -04003087};
3088
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003089int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3090{
3091 int rc, xid;
Al Viro496ad9a2013-01-23 17:07:38 -05003092 struct inode *inode = file_inode(file);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003093
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003094 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003095
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04003096 if (!CIFS_I(inode)->clientCanCacheRead) {
3097 rc = cifs_invalidate_mapping(inode);
3098 if (rc)
3099 return rc;
3100 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003101
3102 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003103 if (rc == 0)
3104 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003105 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03003106 return rc;
3107}
3108
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3110{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003111 int rc, xid;
3112
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003113 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05003114 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003115 if (rc) {
Joe Perchesf96637b2013-05-04 22:12:25 -05003116 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3117 rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003118 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003119 return rc;
3120 }
3121 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04003122 if (rc == 0)
3123 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003124 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003125 return rc;
3126}
3127
Jeff Layton0471ca32012-05-16 07:13:16 -04003128static void
3129cifs_readv_complete(struct work_struct *work)
3130{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003131 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04003132 struct cifs_readdata *rdata = container_of(work,
3133 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04003134
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003135 for (i = 0; i < rdata->nr_pages; i++) {
3136 struct page *page = rdata->pages[i];
3137
Jeff Layton0471ca32012-05-16 07:13:16 -04003138 lru_cache_add_file(page);
3139
3140 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04003141 flush_dcache_page(page);
3142 SetPageUptodate(page);
3143 }
3144
3145 unlock_page(page);
3146
3147 if (rdata->result == 0)
3148 cifs_readpage_to_fscache(rdata->mapping->host, page);
3149
3150 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003151 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04003152 }
Jeff Layton6993f742012-05-16 07:13:17 -04003153 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04003154}
3155
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003156static int
Jeff Layton8321fec2012-09-19 06:22:32 -07003157cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3158 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003159{
Jeff Layton8321fec2012-09-19 06:22:32 -07003160 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003161 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003162 u64 eof;
3163 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003164 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07003165 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003166
3167 /* determine the eof that the server (probably) has */
3168 eof = CIFS_I(rdata->mapping->host)->server_eof;
3169 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
Joe Perchesf96637b2013-05-04 22:12:25 -05003170 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003171
Jeff Layton8321fec2012-09-19 06:22:32 -07003172 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003173 for (i = 0; i < nr_pages; i++) {
3174 struct page *page = rdata->pages[i];
3175
Jeff Layton8321fec2012-09-19 06:22:32 -07003176 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003177 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07003178 iov.iov_base = kmap(page);
3179 iov.iov_len = PAGE_CACHE_SIZE;
Joe Perchesf96637b2013-05-04 22:12:25 -05003180 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3181 i, page->index, iov.iov_base, iov.iov_len);
Jeff Layton8321fec2012-09-19 06:22:32 -07003182 len -= PAGE_CACHE_SIZE;
3183 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003184 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07003185 iov.iov_base = kmap(page);
3186 iov.iov_len = len;
Joe Perchesf96637b2013-05-04 22:12:25 -05003187 cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
3188 i, page->index, iov.iov_base, iov.iov_len);
Jeff Layton8321fec2012-09-19 06:22:32 -07003189 memset(iov.iov_base + len,
3190 '\0', PAGE_CACHE_SIZE - len);
3191 rdata->tailsz = len;
3192 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003193 } else if (page->index > eof_index) {
3194 /*
3195 * The VFS will not try to do readahead past the
3196 * i_size, but it's possible that we have outstanding
3197 * writes with gaps in the middle and the i_size hasn't
3198 * caught up yet. Populate those with zeroed out pages
3199 * to prevent the VFS from repeatedly attempting to
3200 * fill them until the writes are flushed.
3201 */
3202 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003203 lru_cache_add_file(page);
3204 flush_dcache_page(page);
3205 SetPageUptodate(page);
3206 unlock_page(page);
3207 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003208 rdata->pages[i] = NULL;
3209 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003210 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003211 } else {
3212 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003213 lru_cache_add_file(page);
3214 unlock_page(page);
3215 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003216 rdata->pages[i] = NULL;
3217 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003218 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003219 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003220
3221 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3222 kunmap(page);
3223 if (result < 0)
3224 break;
3225
3226 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003227 }
3228
Jeff Layton8321fec2012-09-19 06:22:32 -07003229 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003230}
3231
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232static int cifs_readpages(struct file *file, struct address_space *mapping,
3233 struct list_head *page_list, unsigned num_pages)
3234{
Jeff Layton690c5e32011-10-19 15:30:16 -04003235 int rc;
3236 struct list_head tmplist;
3237 struct cifsFileInfo *open_file = file->private_data;
3238 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3239 unsigned int rsize = cifs_sb->rsize;
3240 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241
Jeff Layton690c5e32011-10-19 15:30:16 -04003242 /*
3243 * Give up immediately if rsize is too small to read an entire page.
3244 * The VFS will fall back to readpage. We should never reach this
3245 * point however since we set ra_pages to 0 when the rsize is smaller
3246 * than a cache page.
3247 */
3248 if (unlikely(rsize < PAGE_CACHE_SIZE))
3249 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003250
Suresh Jayaraman566982362010-07-05 18:13:25 +05303251 /*
3252 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3253 * immediately if the cookie is negative
3254 */
3255 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3256 &num_pages);
3257 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003258 return rc;
Suresh Jayaraman566982362010-07-05 18:13:25 +05303259
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003260 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3261 pid = open_file->pid;
3262 else
3263 pid = current->tgid;
3264
Jeff Layton690c5e32011-10-19 15:30:16 -04003265 rc = 0;
3266 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267
Joe Perchesf96637b2013-05-04 22:12:25 -05003268 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3269 __func__, file, mapping, num_pages);
Jeff Layton690c5e32011-10-19 15:30:16 -04003270
3271 /*
3272 * Start with the page at end of list and move it to private
3273 * list. Do the same with any following pages until we hit
3274 * the rsize limit, hit an index discontinuity, or run out of
3275 * pages. Issue the async read and then start the loop again
3276 * until the list is empty.
3277 *
3278 * Note that list order is important. The page_list is in
3279 * the order of declining indexes. When we put the pages in
3280 * the rdata->pages, then we want them in increasing order.
3281 */
3282 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003283 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003284 unsigned int bytes = PAGE_CACHE_SIZE;
3285 unsigned int expected_index;
3286 unsigned int nr_pages = 1;
3287 loff_t offset;
3288 struct page *page, *tpage;
3289 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290
3291 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292
Jeff Layton690c5e32011-10-19 15:30:16 -04003293 /*
3294 * Lock the page and put it in the cache. Since no one else
3295 * should have access to this page, we're safe to simply set
3296 * PG_locked without checking it first.
3297 */
3298 __set_page_locked(page);
3299 rc = add_to_page_cache_locked(page, mapping,
3300 page->index, GFP_KERNEL);
3301
3302 /* give up if we can't stick it in the cache */
3303 if (rc) {
3304 __clear_page_locked(page);
3305 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307
Jeff Layton690c5e32011-10-19 15:30:16 -04003308 /* move first page to the tmplist */
3309 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3310 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003311
Jeff Layton690c5e32011-10-19 15:30:16 -04003312 /* now try and add more pages onto the request */
3313 expected_index = page->index + 1;
3314 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3315 /* discontinuity ? */
3316 if (page->index != expected_index)
3317 break;
3318
3319 /* would this page push the read over the rsize? */
3320 if (bytes + PAGE_CACHE_SIZE > rsize)
3321 break;
3322
3323 __set_page_locked(page);
3324 if (add_to_page_cache_locked(page, mapping,
3325 page->index, GFP_KERNEL)) {
3326 __clear_page_locked(page);
3327 break;
3328 }
3329 list_move_tail(&page->lru, &tmplist);
3330 bytes += PAGE_CACHE_SIZE;
3331 expected_index++;
3332 nr_pages++;
3333 }
3334
Jeff Layton0471ca32012-05-16 07:13:16 -04003335 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003336 if (!rdata) {
3337 /* best to give up if we're out of mem */
3338 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3339 list_del(&page->lru);
3340 lru_cache_add_file(page);
3341 unlock_page(page);
3342 page_cache_release(page);
3343 }
3344 rc = -ENOMEM;
3345 break;
3346 }
3347
Jeff Layton6993f742012-05-16 07:13:17 -04003348 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003349 rdata->mapping = mapping;
3350 rdata->offset = offset;
3351 rdata->bytes = bytes;
3352 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003353 rdata->pagesz = PAGE_CACHE_SIZE;
3354 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003355
3356 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3357 list_del(&page->lru);
3358 rdata->pages[rdata->nr_pages++] = page;
3359 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003360
Jeff Layton2a1bb132012-05-16 07:13:17 -04003361 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003362 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003363 for (i = 0; i < rdata->nr_pages; i++) {
3364 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003365 lru_cache_add_file(page);
3366 unlock_page(page);
3367 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 }
Jeff Layton6993f742012-05-16 07:13:17 -04003369 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 break;
3371 }
Jeff Layton6993f742012-05-16 07:13:17 -04003372
3373 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 }
3375
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 return rc;
3377}
3378
/*
 * Fill a single page cache page with data at *poffset.
 *
 * Tries the fscache copy first; on a cache miss falls back to a
 * synchronous cifs_read() into the kmapped page, zero-fills the tail,
 * and marks the page uptodate.  On success the page is also pushed to
 * fscache and *poffset has been advanced by cifs_read().
 *
 * Returns 0 on success or a negative error from the read.  The caller
 * keeps its own reference and is responsible for unlocking the page.
 */
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? A 0 return means fscache filled it for us. */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	/* extra reference held across the kmap/read; dropped at io_error */
	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_fs_time(file_inode(file)->i_sb);

	/* short read: zero the remainder so no stale data is exposed */
	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}
3422
3423static int cifs_readpage(struct file *file, struct page *page)
3424{
3425 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3426 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003427 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003428
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003429 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003430
3431 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303432 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003433 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303434 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435 }
3436
Joe Perchesf96637b2013-05-04 22:12:25 -05003437 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003438 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439
3440 rc = cifs_readpage_worker(file, page, &offset);
3441
3442 unlock_page(page);
3443
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003444 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 return rc;
3446}
3447
Steve Frencha403a0a2007-07-26 15:54:16 +00003448static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3449{
3450 struct cifsFileInfo *open_file;
3451
Jeff Layton44772882010-10-15 15:34:03 -04003452 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003453 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003454 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003455 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003456 return 1;
3457 }
3458 }
Jeff Layton44772882010-10-15 15:34:03 -04003459 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003460 return 0;
3461}
3462
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463/* We do not want to update the file size from server for inodes
3464 open for write - to avoid races with writepage extending
3465 the file - in the future we could consider allowing
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003466 refreshing the inode only on increases in the file size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467 but this is tricky to do without racing with writebehind
3468 page caching in the current Linux kernel design */
Steve French4b18f2a2008-04-29 00:06:05 +00003469bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470{
Steve Frencha403a0a2007-07-26 15:54:16 +00003471 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003472 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003473
Steve Frencha403a0a2007-07-26 15:54:16 +00003474 if (is_inode_writable(cifsInode)) {
3475 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003476 struct cifs_sb_info *cifs_sb;
3477
Steve Frenchc32a0b62006-01-12 14:41:28 -08003478 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003479 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003480 /* since no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003481 we can change size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003482 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003483 }
3484
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003485 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003486 return true;
Steve French7ba526312007-02-08 18:14:13 +00003487
Steve French4b18f2a2008-04-29 00:06:05 +00003488 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003489 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003490 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003491}
3492
/*
 * ->write_begin handler: grab (and lock) the page cache page that will
 * receive the write and decide whether it must first be read from the
 * server.
 *
 * The read is skipped when the page is already uptodate, when the write
 * covers the whole page, or - with a read oplock held - when the page
 * lies at/over EOF so the untouched parts can simply be zeroed
 * (PageChecked marks that state for cifs_write_end).
 *
 * Returns 0 with *pagep set to the locked page, or -ENOMEM if no page
 * could be obtained (in which case *pagep is set to NULL).
 */
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	/* already uptodate: nothing to read from the server */
	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			/* zero everything outside [offset, offset+len) */
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read? In any case
		   this will be written out by write_end so is fine */
	}
out:
	*pagep = page;
	return rc;
}
3564
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303565static int cifs_release_page(struct page *page, gfp_t gfp)
3566{
3567 if (PagePrivate(page))
3568 return 0;
3569
3570 return cifs_fscache_release_page(page, gfp);
3571}
3572
Lukas Czernerd47992f2013-05-21 23:17:23 -04003573static void cifs_invalidate_page(struct page *page, unsigned int offset,
3574 unsigned int length)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303575{
3576 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3577
Lukas Czernerd47992f2013-05-21 23:17:23 -04003578 if (offset == 0 && length == PAGE_CACHE_SIZE)
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303579 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3580}
3581
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003582static int cifs_launder_page(struct page *page)
3583{
3584 int rc = 0;
3585 loff_t range_start = page_offset(page);
3586 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3587 struct writeback_control wbc = {
3588 .sync_mode = WB_SYNC_ALL,
3589 .nr_to_write = 0,
3590 .range_start = range_start,
3591 .range_end = range_end,
3592 };
3593
Joe Perchesf96637b2013-05-04 22:12:25 -05003594 cifs_dbg(FYI, "Launder page: %p\n", page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003595
3596 if (clear_page_dirty_for_io(page))
3597 rc = cifs_writepage_locked(page, &wbc);
3598
3599 cifs_fscache_invalidate_page(page, page->mapping->host);
3600 return rc;
3601}
3602
/*
 * Work handler run when the server breaks our oplock on a file.
 *
 * Drops cached-read state when mandatory locks are held, breaks any
 * local lease, flushes (and for a full break, waits on and invalidates)
 * the page cache, re-pushes byte-range locks to the server, and finally
 * acknowledges the break - unless the break was cancelled, e.g. because
 * the session has since been reconnected.
 */
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* mandatory byte-range locks are incompatible with cached reads */
	if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		/* break any local lease to match the new caching state */
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (cinode->clientCanCacheRead == 0) {
			/* full break: wait for writeback and drop the
			   now-untrusted page cache */
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_invalidate_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * releasing stale oplock after recent reconnect of smb session using
	 * a now incorrect file handle is not a data integrity issue but do
	 * not bother sending an oplock release if session to server still is
	 * disconnected since oplock already released by the server
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
}
3649
/*
 * Default address-space operations for cifs inodes, including
 * multi-page readahead via cifs_readpages.
 */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003662
3663/*
3664 * cifs_readpages requires the server to support a buffer large enough to
3665 * contain the header plus one complete page of data. Otherwise, we need
3666 * to leave cifs_readpages out of the address space operations.
3667 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003668const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003669 .readpage = cifs_readpage,
3670 .writepage = cifs_writepage,
3671 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003672 .write_begin = cifs_write_begin,
3673 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003674 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303675 .releasepage = cifs_release_page,
3676 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003677 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003678};