/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

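/*
 * Map the O_ACCMODE portion of the POSIX open flags to the NT
 * desired-access bits requested in an SMB open.
 */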
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/*
		 * GENERIC_ALL is too much permission to request; it can
		 * cause unnecessary access-denied errors on create.
		 */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

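/*
 * Map POSIX open flags to the SMB_O_* flags used by the CIFS POSIX
 * extensions. O_EXCL without O_CREAT is logged and ignored.
 */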
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

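/*
 * Map the O_CREAT/O_EXCL/O_TRUNC combination in the open flags to the
 * matching CIFS create disposition (see the mapping table in
 * cifs_nt_open() below).
 */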
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

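/*
 * Open a file using the CIFS POSIX extensions. On success the server
 * returns FILE_UNIX_BASIC_INFO for the file, which is used to
 * instantiate a new inode or update an existing one via *pinode.
 */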
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

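/*
 * Open a file the traditional (non-POSIX-extensions) way: convert the
 * POSIX open flags to an NT desired access and create disposition,
 * issue the open through the server's ops->open hook, and refresh the
 * inode from the returned metadata.
 */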
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists).
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing
 *	file rather than creating a new one as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open
 *	call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag,
 *	and the read/write flags match reasonably. O_LARGEFILE is
 *	irrelevant because largefile support is always used by this
 *	client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *	O_NOFOLLOW and O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, fid, oplock, buf,
			       cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

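/*
 * Return true if any open handle on this inode currently holds cached
 * mandatory byte-range locks, i.e. at least one per-fid lock list is
 * non-empty.
 */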
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}

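/*
 * Allocate and initialize the cifsFileInfo for a freshly opened handle:
 * link it into the per-inode and per-tcon open-file lists, resolve the
 * final oplock level against any pending open, and attach the result
 * to file->private_data.
 */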
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (oplock == server->vals->oplock_read &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	spin_lock(&cifs_file_list_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	/* if a readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

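/*
 * Take an extra reference on the file private data. Paired with
 * cifsFileInfo_put() below.
 */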
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because cached data may cause an error when
		 * we open this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}

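/*
 * ->open() for regular files: try a POSIX-extensions open when the
 * server advertises support for it, fall back to cifs_nt_open()
 * otherwise, then wire the handle up via cifs_new_fileinfo().
 */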
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte-range locks that were released when the session
 * to the server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to push them */
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_write(&cinode->lock_sem);
	return rc;
}

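/*
 * Reopen a file whose handle has been invalidated, typically after a
 * reconnect. If @can_flush is set, dirty pages are written back and the
 * inode info refreshed before the handle is reused. In either case,
 * byte-range locks are reacquired via cifs_relock_file().
 */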
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Cannot grab the rename sem here, because various ops, including
	 * those that already hold it, can end up causing writepage to get
	 * called, and if the server was down that means we end up here, and
	 * we can never tell if the caller already holds the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * especially in the reconnect path it is important to
		 * retry hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/*
	 * Cannot refresh the inode by passing in a file_info buf to be
	 * returned by CIFSSMBOpen and then calling get_inode_info with the
	 * returned buf, since the file might have write-behind data that
	 * needs to be flushed and the server's version of the file size can
	 * be stale. If we knew for sure that the inode was not dirty locally
	 * we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are already writing out data to the server and could
	 * deadlock if we tried to flush data. Since we do not know whether
	 * we have data that would invalidate the current end of file on the
	 * server, we cannot go to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

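/*
 * ->release() for directories: close any in-progress FIND on the server
 * if the search did not run to completion, then free the search buffers
 * and the private data.
 */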
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

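/*
 * Allocate and initialize a cifsLockInfo describing a byte-range lock
 * owned by the current thread group. The caller frees it with kfree()
 * once it is no longer queued.
 */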
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/*
 * @rw_check : CIFS_LOCK_OP - lock operation, CIFS_READ_OP - read,
 *	       CIFS_WRITE_OP - write
 */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}

bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we
 * can cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to send a request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to send a request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}

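/*
 * Push all cached mandatory byte-range locks for this handle out to the
 * server, batching as many LOCKING_ANDX_RANGE entries per request as
 * the negotiated buffer size allows. Exclusive locks are sent in the
 * first pass, shared locks in the second.
 */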
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}

/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

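/*
 * Push cached POSIX byte-range locks for this inode out to the server.
 * The lock_to_push entries are preallocated with GFP_KERNEL because
 * inode->i_lock must be held while walking the flock list, and no
 * sleeping allocation can be done under that spinlock.
 */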
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = cfile->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	spin_unlock(&inode->i_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem, which
	 * protects locking operations on this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&inode->i_lock);
	cifs_for_each_lock(inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&inode->i_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}

static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}

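/*
 * Test whether a lock could be granted (F_GETLK). On the mandatory
 * path this probes the server: take the lock, and if that succeeds,
 * immediately unlock it and report the range as free (F_UNLCK).
 */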
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}

void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}

int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf)
		return -EINVAL;

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
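	/*
	 * Illustrative arithmetic (assumed, not exact wire sizes): if the
	 * server negotiated max_buf = 4356 and a LOCKING_ANDX_RANGE were
	 * 20 bytes with a 32-byte smb_hdr, then max_num = (4356 - 32) / 20
	 * = 216 ranges coalesced into one SMB_COM_LOCKING_ANDX request.
	 */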
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}

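/*
 * Apply or remove a lock (F_SETLK/F_SETLKW). The POSIX path defers to
 * the VFS and the server's POSIX lock call; the mandatory path caches
 * the lock locally when possible and otherwise sends it to the server.
 */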
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = cfile->dentry->d_inode;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      current->tgid, flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * page reading.
		 */
		if (!CIFS_I(inode)->clientCanCacheAll &&
		    CIFS_I(inode)->clientCanCacheRead) {
			cifs_invalidate_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->clientCanCacheRead = false;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, flock);
	return rc;
}

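/*
 * Entry point for the VFS ->lock() operation: classify the request,
 * detect whether POSIX (Unix extension) semantics apply, then hand
 * off to cifs_getlk() or cifs_setlk().
 */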
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length, which we cannot accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}

/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}

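/*
 * Synchronous write helper: loop until write_size bytes have gone out,
 * capping each SMB at the mount's wsize, reopening invalid handles and
 * retrying on -EAGAIN, then advance the file position and EOF markers.
 */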
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %s\n",
		 write_size, *offset, dentry->d_name.name);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/*
				 * We could deadlock if we called
				 * filemap_fdatawait from here, so tell
				 * reopen_file not to flush data to the
				 * server now.
				 */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min((size_t)cifs_sb->wsize,
				  write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, open_file, &io_parms,
						     &bytes_written, iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&dentry->d_inode->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&dentry->d_inode->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&dentry->d_inode->i_lock);
		if (*offset > dentry->d_inode->i_size)
			i_size_write(dentry->d_inode, *offset);
		spin_unlock(&dentry->d_inode->i_lock);
	}
	mark_inode_dirty_sync(dentry->d_inode);
	free_xid(xid);
	return total_written;
}

struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
	/*
	 * We could simply get the first_list_entry since write-only entries
	 * are always at the end of the list, but since the first entry might
	 * have a close pending, we go through the whole list.
	 */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_file_list_lock);
	return NULL;
}

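/*
 * Pick an open handle suitable for writing. Prefer a handle opened by
 * the calling process; fall back to any writable handle, and retry
 * (up to MAX_REOPEN_ATT times) after attempting to reopen a handle
 * that has gone invalid.
 */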
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen, but we had reports of an oops
	 * (due to it being zero) during stress testcases, so we need to
	 * check for it.
	 */
	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_file_list_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_file_list_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get_locked(open_file);
				spin_unlock(&cifs_file_list_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find a usable FH with the same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get_locked(inv_file);
	}

	spin_unlock(&cifs_file_list_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&cifs_file_list_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&cifs_file_list_lock);
			cifsFileInfo_put(inv_file);
			spin_lock(&cifs_file_list_lock);
			++refind;
			goto refind_writable;
		}
	}

	return NULL;
}

static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}

static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	struct TCP_Server_Info *server;
	struct page *page;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages;
		pgoff_t next = 0, tofind;
		struct page **pages;

		tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
			     end - index) + 1;

		wdata = cifs_writedata_alloc((unsigned int)tofind,
					     cifs_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		/*
		 * find_get_pages_tag seems to return a max of 256 on each
		 * iteration, so we must call it several times in order to
		 * fill the array or the wsize is effectively limited to
		 * 256 * PAGE_CACHE_SIZE.
		 */
		found_pages = 0;
		pages = wdata->pages;
		do {
			nr_pages = find_get_pages_tag(mapping, &index,
						      PAGECACHE_TAG_DIRTY,
						      tofind, pages);
			found_pages += nr_pages;
			tofind -= nr_pages;
			pages += nr_pages;
		} while (nr_pages && tofind && index <= end);

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		nr_pages = 0;
		for (i = 0; i < found_pages; i++) {
			page = wdata->pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */

			if (nr_pages == 0)
				lock_page(page);
			else if (!trylock_page(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = true;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= i_size_read(mapping->host)) {
				done = true;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			wdata->pages[i] = page;
			next = page->index + 1;
			++nr_pages;
		}

		/* reset index to refind any pages skipped */
		if (nr_pages == 0)
			index = wdata->pages[0]->index + 1;

		/* put any pages we aren't going to use */
		for (i = nr_pages; i < found_pages; i++) {
			page_cache_release(wdata->pages[i]);
			wdata->pages[i] = NULL;
		}

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			continue;
		}

		wdata->sync_mode = wbc->sync_mode;
		wdata->nr_pages = nr_pages;
		wdata->offset = page_offset(wdata->pages[0]);
		wdata->pagesz = PAGE_CACHE_SIZE;
		wdata->tailsz =
			min(i_size_read(mapping->host) -
			    page_offset(wdata->pages[nr_pages - 1]),
			    (loff_t)PAGE_CACHE_SIZE);
		wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
			       wdata->tailsz;
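		/*
		 * Worked example (assuming 4 KiB pages): with nr_pages = 3
		 * and an i_size that ends 100 bytes into the last page,
		 * tailsz = 100 and bytes = 2 * 4096 + 100 = 8292, so the
		 * write request covers only data below EOF.
		 */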

		do {
			if (wdata->cfile != NULL)
				cifsFileInfo_put(wdata->cfile);
			wdata->cfile = find_writable_file(CIFS_I(mapping->host),
							  false);
			if (!wdata->cfile) {
				cifs_dbg(VFS, "No writable handles for inode\n");
				rc = -EBADF;
				break;
			}
			wdata->pid = wdata->cfile->pid;
			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
			rc = server->ops->async_writev(wdata);
		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

		for (i = 0; i < nr_pages; ++i)
			unlock_page(wdata->pages[i]);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				page_cache_release(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}

static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	page_cache_release(page);
	free_xid(xid);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}

static int cifs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	int rc;
	struct inode *inode = mapping->host;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	__u32 pid;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = cfile->pid;
	else
		pid = current->tgid;

	cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
		 page, pos, copied);

	if (PageChecked(page)) {
		if (copied == len)
			SetPageUptodate(page);
		ClearPageChecked(page);
	} else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
		SetPageUptodate(page);

	if (!PageUptodate(page)) {
		char *page_data;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		unsigned int xid;

		xid = get_xid();
		/*
		 * This is probably better than directly calling
		 * cifs_partialpagewrite, since here the file handle is
		 * known, which we might as well leverage.
		 */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
		/* if (rc < 0) should we set writebehind rc? */
		kunmap(page);

		free_xid(xid);
	} else {
		rc = copied;
		pos += copied;
		set_page_dirty(page);
	}

	if (rc > 0) {
		spin_lock(&inode->i_lock);
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&inode->i_lock);
	}

	unlock_page(page);
	page_cache_release(page);

	return rc;
}

int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct inode *inode = file_inode(file);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
		 file->f_path.dentry->d_name.name, datasync);

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
			rc = 0; /* don't care about it in fsync */
		}
	}

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	unsigned int xid;
	int rc = 0;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsFileInfo *smbfile = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	struct inode *inode = file->f_mapping->host;

	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return rc;
	mutex_lock(&inode->i_mutex);

	xid = get_xid();

	cifs_dbg(FYI, "Sync file - name: %s datasync: 0x%x\n",
		 file->f_path.dentry->d_name.name, datasync);

	tcon = tlink_tcon(smbfile->tlink);
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
		server = tcon->ses->server;
		if (server->ops->flush)
			rc = server->ops->flush(xid, tcon, &smbfile->fid);
		else
			rc = -ENOSYS;
	}

	free_xid(xid);
	mutex_unlock(&inode->i_mutex);
	return rc;
}

/*
 * As the file closes, flush all cached write data for this inode,
 * checking for write-behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	int rc = 0;

	if (file->f_mode & FMODE_WRITE)
		rc = filemap_write_and_wait(inode->i_mapping);

	cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);

	return rc;
}

static int
cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
{
	int rc = 0;
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!pages[i]) {
			/*
			 * save number of pages we have already allocated and
			 * return with ENOMEM error
			 */
			num_pages = i;
			rc = -ENOMEM;
			break;
		}
	}

	if (rc) {
		for (i = 0; i < num_pages; i++)
			put_page(pages[i]);
	}
	return rc;
}

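/*
 * Return the number of pages needed to hold the smaller of @len and @wsize,
 * optionally passing that clamped length back through @cur_len.
 */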
static inline
size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
{
	size_t num_pages;
	size_t clen;

	clen = min_t(const size_t, len, wsize);
	num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);

	if (cur_len)
		*cur_len = clen;

	return num_pages;
}

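/*
 * Completion work for an uncached write: push the server end-of-file marker
 * and the in-core inode size forward, wake any waiter, then drop the page
 * and writedata references.
 */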
static void
cifs_uncached_writev_complete(struct work_struct *work)
{
	int i;
	struct cifs_writedata *wdata = container_of(work,
					struct cifs_writedata, work);
	struct inode *inode = wdata->cfile->dentry->d_inode;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);

	spin_lock(&inode->i_lock);
	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
	if (cifsi->server_eof > inode->i_size)
		i_size_write(inode, cifsi->server_eof);
	spin_unlock(&inode->i_lock);

	complete(&wdata->done);

	if (wdata->result != -EAGAIN) {
		for (i = 0; i < wdata->nr_pages; i++)
			put_page(wdata->pages[i]);
	}

	kref_put(&wdata->refcount, cifs_writedata_release);
}

/* attempt to send write to server, retry on any -EAGAIN errors */
static int
cifs_uncached_retry_writev(struct cifs_writedata *wdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(wdata->cfile->tlink)->ses->server;

	do {
		if (wdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(wdata->cfile, false);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_writev(wdata);
	} while (rc == -EAGAIN);

	return rc;
}

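/*
 * Uncached write: copy the user iovec into wsize-sized chunks of freshly
 * allocated pages, send each chunk to the server as an async write, then
 * collect the replies in order of increasing offset.
 */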
static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	unsigned long nr_pages, i;
	size_t copied, len, cur_len;
	ssize_t total_written = 0;
	loff_t offset;
	struct iov_iter it;
	struct cifsFileInfo *open_file;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb;
	struct cifs_writedata *wdata, *tmp;
	struct list_head wdata_list;
	int rc;
	pid_t pid;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	rc = generic_write_checks(file, poffset, &len, 0);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&wdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_writev)
		return -ENOSYS;

	offset = *poffset;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	iov_iter_init(&it, iov, nr_segs, len, 0);
	do {
		size_t save_len;

		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
		wdata = cifs_writedata_alloc(nr_pages,
					     cifs_uncached_writev_complete);
		if (!wdata) {
			rc = -ENOMEM;
			break;
		}

		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
		if (rc) {
			kfree(wdata);
			break;
		}

		save_len = cur_len;
		for (i = 0; i < nr_pages; i++) {
			copied = min_t(const size_t, cur_len, PAGE_SIZE);
			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
							 0, copied);
			cur_len -= copied;
			iov_iter_advance(&it, copied);
		}
		cur_len = save_len - cur_len;

		wdata->sync_mode = WB_SYNC_ALL;
		wdata->nr_pages = nr_pages;
		wdata->offset = (__u64)offset;
		wdata->cfile = cifsFileInfo_get(open_file);
		wdata->pid = pid;
		wdata->bytes = cur_len;
		wdata->pagesz = PAGE_SIZE;
		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
		rc = cifs_uncached_retry_writev(wdata);
		if (rc) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			break;
		}

		list_add_tail(&wdata->list, &wdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/*
	 * If at least one write was successfully sent, then discard any rc
	 * value from the later writes. If the other writes succeed, then
	 * we'll end up returning whatever was written. If one fails, then
	 * we'll get a new rc value from that.
	 */
	if (!list_empty(&wdata_list))
		rc = 0;

	/*
	 * Wait for and collect replies for any successful sends in order of
	 * increasing offset. Once an error is hit or we get a fatal signal
	 * while waiting, then return without waiting for any more replies.
	 */
restart_loop:
	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
		if (!rc) {
			/* FIXME: freezable too? */
			rc = wait_for_completion_killable(&wdata->done);
			if (rc)
				rc = -EINTR;
			else if (wdata->result)
				rc = wdata->result;
			else
				total_written += wdata->bytes;

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_uncached_retry_writev(wdata);
				goto restart_loop;
			}
		}
		list_del_init(&wdata->list);
		kref_put(&wdata->refcount, cifs_writedata_release);
	}

	if (total_written > 0)
		*poffset += total_written;

	cifs_stats_bytes_written(tcon, total_written);
	return total_written ? total_written : (ssize_t)rc;
}

ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
			 unsigned long nr_segs, loff_t pos)
{
	ssize_t written;
	struct inode *inode;

	inode = file_inode(iocb->ki_filp);

	/*
	 * BB - optimize for the case when signing is disabled. We can drop
	 * this extra memory-to-memory copying and use iovec buffers for
	 * constructing the write request.
	 */

	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
	if (written > 0) {
		CIFS_I(inode)->invalid_mapping = true;
		iocb->ki_pos = pos;
	}

	return written;
}

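/*
 * Write through the page cache, but only after verifying that no mandatory
 * byte-range lock on the target range forbids it.
 */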
static ssize_t
cifs_writev(struct kiocb *iocb, const struct iovec *iov,
	    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct inode *inode = file->f_mapping->host;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	ssize_t rc = -EACCES;

	BUG_ON(iocb->ki_pos != pos);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents writing.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     server->vals->exclusive_lock_type, NULL,
				     CIFS_WRITE_OP)) {
		mutex_lock(&inode->i_mutex);
		rc = __generic_file_aio_write(iocb, iov, nr_segs,
					      &iocb->ki_pos);
		mutex_unlock(&inode->i_mutex);
	}

	if (rc > 0 || rc == -EIOCBQUEUED) {
		ssize_t err;

		err = generic_write_sync(file, pos, rc);
		if (err < 0 && rc > 0)
			rc = err;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

ssize_t
cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	ssize_t written;

	if (cinode->clientCanCacheAll) {
		if (cap_unix(tcon->ses) &&
		    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
		    && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
			return generic_file_aio_write(iocb, iov, nr_segs, pos);
		return cifs_writev(iocb, iov, nr_segs, pos);
	}
	/*
	 * For non-oplocked files in strict cache mode we need to write the
	 * data to the server exactly from pos to pos+len-1 rather than flush
	 * all affected pages, because flushing may cause an error with
	 * mandatory locks on those pages but not on the region from pos to
	 * pos+len-1.
	 */
	written = cifs_user_writev(iocb, iov, nr_segs, pos);
	if (written > 0 && cinode->clientCanCacheRead) {
		/*
		 * A Windows 7 server can delay breaking a level2 oplock when
		 * a write request comes - break it on the client to prevent
		 * reading stale data.
		 */
		cifs_invalidate_mapping(inode);
		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}
	return written;
}

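/*
 * Allocate a readdata structure with room for @nr_pages page pointers and
 * arrange for @complete to run when the read finishes.
 */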
static struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
{
	struct cifs_readdata *rdata;

	rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
			GFP_KERNEL);
	if (rdata != NULL) {
		kref_init(&rdata->refcount);
		INIT_LIST_HEAD(&rdata->list);
		init_completion(&rdata->done);
		INIT_WORK(&rdata->work, complete);
	}

	return rdata;
}

void
cifs_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);

	if (rdata->cfile)
		cifsFileInfo_put(rdata->cfile);

	kfree(rdata);
}

static int
cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
{
	int rc = 0;
	struct page *page;
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
		if (!page) {
			rc = -ENOMEM;
			break;
		}
		rdata->pages[i] = page;
	}

	if (rc) {
		/*
		 * Only release the pages that were actually allocated; the
		 * rest of the array is still NULL from the kzalloc of rdata.
		 */
		for (i = 0; i < nr_pages && rdata->pages[i]; i++) {
			put_page(rdata->pages[i]);
			rdata->pages[i] = NULL;
		}
	}
	return rc;
}

static void
cifs_uncached_readdata_release(struct kref *refcount)
{
	struct cifs_readdata *rdata = container_of(refcount,
					struct cifs_readdata, refcount);
	unsigned int i;

	for (i = 0; i < rdata->nr_pages; i++) {
		put_page(rdata->pages[i]);
		rdata->pages[i] = NULL;
	}
	cifs_readdata_release(refcount);
}

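/* attempt to send an async read to the server, retry on any -EAGAIN errors */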
static int
cifs_retry_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	struct TCP_Server_Info *server;

	server = tlink_tcon(rdata->cfile->tlink)->ses->server;

	do {
		if (rdata->cfile->invalidHandle) {
			rc = cifs_reopen_file(rdata->cfile, true);
			if (rc != 0)
				continue;
		}
		rc = server->ops->async_readv(rdata);
	} while (rc == -EAGAIN);

	return rc;
}

/**
 * cifs_readdata_to_iov - copy data from pages in response to an iovec
 * @rdata:	the readdata response with list of pages holding data
 * @iov:	vector in which we should copy the data
 * @nr_segs:	number of segments in vector
 * @offset:	offset into file of the first iovec
 * @copied:	used to return the amount of data copied to the iov
 *
 * This function copies data from a list of pages in a readdata response into
 * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
 */
static ssize_t
cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
			unsigned long nr_segs, loff_t offset, ssize_t *copied)
{
	int rc = 0;
	struct iov_iter ii;
	size_t pos = rdata->offset - offset;
	ssize_t remaining = rdata->bytes;
	unsigned char *pdata;
	unsigned int i;

	/* set up iov_iter and advance to the correct offset */
	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
	iov_iter_advance(&ii, pos);

	*copied = 0;
	for (i = 0; i < rdata->nr_pages; i++) {
		ssize_t copy;
		struct page *page = rdata->pages[i];

		/* copy a whole page or whatever's left */
		copy = min_t(ssize_t, remaining, PAGE_SIZE);

		/* ...but limit it to whatever space is left in the iov */
		copy = min_t(ssize_t, copy, iov_iter_count(&ii));

		/* go while there's data to be copied and no errors */
		if (copy && !rc) {
			pdata = kmap(page);
			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
						(int)copy);
			kunmap(page);
			if (!rc) {
				*copied += copy;
				remaining -= copy;
				iov_iter_advance(&ii, copy);
			}
		}
	}

	return rc;
}

static void
cifs_uncached_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
					struct cifs_readdata, work);

	complete(&rdata->done);
	kref_put(&rdata->refcount, cifs_uncached_readdata_release);
}

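/*
 * Receive the read response from the socket directly into the pages of an
 * uncached read, zero-filling the tail of a short final page and releasing
 * any pages beyond the length actually returned by the server.
 */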
static int
cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
			struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	rdata->tailsz = PAGE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_SIZE;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			len -= PAGE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n",
				 i, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else {
			/* no need to hold page hostage */
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			put_page(page);
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

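/*
 * Uncached read: break the request into rsize-sized async reads, then wait
 * for the replies in order of increasing offset and copy the returned data
 * into the user's iovec.
 */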
static ssize_t
cifs_iovec_read(struct file *file, const struct iovec *iov,
		 unsigned long nr_segs, loff_t *poffset)
{
	ssize_t rc;
	size_t len, cur_len;
	ssize_t total_read = 0;
	loff_t offset = *poffset;
	unsigned int npages;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *open_file;
	struct cifs_readdata *rdata, *tmp;
	struct list_head rdata_list;
	pid_t pid;

	if (!nr_segs)
		return 0;

	len = iov_length(iov, nr_segs);
	if (!len)
		return 0;

	INIT_LIST_HEAD(&rdata_list);
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);

	if (!tcon->ses->server->ops->async_readv)
		return -ENOSYS;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	do {
		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
		npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);

		/* allocate a readdata struct */
		rdata = cifs_readdata_alloc(npages,
					    cifs_uncached_readv_complete);
		if (!rdata) {
			rc = -ENOMEM;
			goto error;
		}

		rc = cifs_read_allocate_pages(rdata, npages);
		if (rc)
			goto error;

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->nr_pages = npages;
		rdata->offset = offset;
		rdata->bytes = cur_len;
		rdata->pid = pid;
		rdata->pagesz = PAGE_SIZE;
		rdata->read_into_pages = cifs_uncached_read_into_pages;

		rc = cifs_retry_async_readv(rdata);
error:
		if (rc) {
			kref_put(&rdata->refcount,
				 cifs_uncached_readdata_release);
			break;
		}

		list_add_tail(&rdata->list, &rdata_list);
		offset += cur_len;
		len -= cur_len;
	} while (len > 0);

	/* if at least one read request was successfully sent, then reset rc */
	if (!list_empty(&rdata_list))
		rc = 0;

	/* the loop below should proceed in the order of increasing offsets */
restart_loop:
	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
		if (!rc) {
			ssize_t copied;

			/* FIXME: freezable sleep too? */
			rc = wait_for_completion_killable(&rdata->done);
			if (rc)
				rc = -EINTR;
			else if (rdata->result)
				rc = rdata->result;
			else {
				rc = cifs_readdata_to_iov(rdata, iov,
							nr_segs, *poffset,
							&copied);
				total_read += copied;
			}

			/* resend call if it's a retryable error */
			if (rc == -EAGAIN) {
				rc = cifs_retry_async_readv(rdata);
				goto restart_loop;
			}
		}
		list_del_init(&rdata->list);
		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
	}

	cifs_stats_bytes_read(tcon, total_read);
	*poffset += total_read;

	/* mask nodata case */
	if (rc == -ENODATA)
		rc = 0;

	return total_read ? total_read : rc;
}

ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
			unsigned long nr_segs, loff_t pos)
{
	ssize_t read;

	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
	if (read > 0)
		iocb->ki_pos = pos;

	return read;
}

ssize_t
cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
		  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)
					iocb->ki_filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = -EACCES;

	/*
	 * In strict cache mode we need to read from the server all the time
	 * if we don't have a level II oplock, because the server can delay
	 * the mtime change, so we can't make a decision about invalidating
	 * the inode. Reading pages can also fail if there are mandatory
	 * locks on pages affected by this read but not on the region from
	 * pos to pos+len-1.
	 */
	if (!cinode->clientCanCacheRead)
		return cifs_user_readv(iocb, iov, nr_segs, pos);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return generic_file_aio_read(iocb, iov, nr_segs, pos);

	/*
	 * We need to hold the sem to be sure nobody modifies lock list
	 * with a brlock that prevents reading.
	 */
	down_read(&cinode->lock_sem);
	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
				     tcon->ses->server->vals->shared_lock_type,
				     NULL, CIFS_READ_OP))
		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
	up_read(&cinode->lock_sem);
	return rc;
}

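/*
 * Synchronous read into a kernel buffer, issued in rsize-sized chunks and
 * retried across a handle reopen whenever the server returns -EAGAIN.
 */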
static ssize_t
cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	unsigned int rsize;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	char *cur_offset;
	struct cifsFileInfo *open_file;
	struct cifs_io_parms io_parms;
	int buf_type = CIFS_NO_BUFFER;
	__u32 pid;

	xid = get_xid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	/* FIXME: set up handlers for larger reads and/or convert to async */
	rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}
	open_file = file->private_data;
	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_read) {
		free_xid(xid);
		return -ENOSYS;
	}

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cifs_dbg(FYI, "attempting read on write only file instance\n");

	for (total_read = 0, cur_offset = read_data; read_size > total_read;
	     total_read += bytes_read, cur_offset += bytes_read) {
		current_read_size = min_t(uint, read_size - total_read, rsize);
		/*
		 * For Windows ME and 9x we do not want to request more than
		 * was negotiated, since the server will refuse the read then.
		 */
		if ((tcon->ses) && !(tcon->ses->capabilities &
			tcon->ses->server->vals->cap_large_files)) {
			current_read_size = min_t(uint, current_read_size,
						  CIFSMaxBufSize);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (open_file->invalidHandle) {
				rc = cifs_reopen_file(open_file, true);
				if (rc != 0)
					break;
			}
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = current_read_size;
			rc = server->ops->sync_read(xid, open_file, &io_parms,
						    &bytes_read, &cur_offset,
						    &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				free_xid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(tcon, total_read);
			*offset += bytes_read;
		}
	}
	free_xid(xid);
	return total_read;
}

/*
 * If the page is mmap'ed into a process' page tables, then we need to make
 * sure that it doesn't change while being written back.
 */
static int
cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	return VM_FAULT_LOCKED;
}

static struct vm_operations_struct cifs_file_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = cifs_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;
	struct inode *inode = file_inode(file);

	xid = get_xid();

	if (!CIFS_I(inode)->clientCanCacheRead) {
		rc = cifs_invalidate_mapping(inode);
		if (rc) {
			/* don't leak the xid on the early error return */
			free_xid(xid);
			return rc;
		}
	}

	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc, xid;

	xid = get_xid();
	rc = cifs_revalidate_file(file);
	if (rc) {
		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
			 rc);
		free_xid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	if (rc == 0)
		vma->vm_ops = &cifs_file_vm_ops;
	free_xid(xid);
	return rc;
}

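/*
 * Completion work for a readpages request: mark the pages uptodate on
 * success, hand them to the LRU and to fscache, and drop our references.
 */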
static void
cifs_readv_complete(struct work_struct *work)
{
	unsigned int i;
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);

	for (i = 0; i < rdata->nr_pages; i++) {
		struct page *page = rdata->pages[i];

		lru_cache_add_file(page);

		if (rdata->result == 0) {
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
		rdata->pages[i] = NULL;
	}
	kref_put(&rdata->refcount, cifs_readdata_release);
}

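/*
 * Receive the readpages response from the socket into the pagecache pages,
 * zero-filling the tail of a short final page and zeroing pages that lie
 * past the end of file as the server (probably) knows it.
 */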
static int
cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
			struct cifs_readdata *rdata, unsigned int len)
{
	int total_read = 0, result = 0;
	unsigned int i;
	u64 eof;
	pgoff_t eof_index;
	unsigned int nr_pages = rdata->nr_pages;
	struct kvec iov;

	/* determine the eof that the server (probably) has */
	eof = CIFS_I(rdata->mapping->host)->server_eof;
	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);

	rdata->tailsz = PAGE_CACHE_SIZE;
	for (i = 0; i < nr_pages; i++) {
		struct page *page = rdata->pages[i];

		if (len >= PAGE_CACHE_SIZE) {
			/* enough data to fill the page */
			iov.iov_base = kmap(page);
			iov.iov_len = PAGE_CACHE_SIZE;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			len -= PAGE_CACHE_SIZE;
		} else if (len > 0) {
			/* enough for partial page, fill and zero the rest */
			iov.iov_base = kmap(page);
			iov.iov_len = len;
			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
				 i, page->index, iov.iov_base, iov.iov_len);
			memset(iov.iov_base + len,
			       '\0', PAGE_CACHE_SIZE - len);
			rdata->tailsz = len;
			len = 0;
		} else if (page->index > eof_index) {
			/*
			 * The VFS will not try to do readahead past the
			 * i_size, but it's possible that we have outstanding
			 * writes with gaps in the middle and the i_size hasn't
			 * caught up yet. Populate those with zeroed out pages
			 * to prevent the VFS from repeatedly attempting to
			 * fill them until the writes are flushed.
			 */
			zero_user(page, 0, PAGE_CACHE_SIZE);
			lru_cache_add_file(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		} else {
			/* no need to hold page hostage */
			lru_cache_add_file(page);
			unlock_page(page);
			page_cache_release(page);
			rdata->pages[i] = NULL;
			rdata->nr_pages--;
			continue;
		}

		result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
		kunmap(page);
		if (result < 0)
			break;

		total_read += result;
	}

	return total_read > 0 ? total_read : result;
}

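/*
 * Readahead entry point: batch contiguous pages from the readahead list,
 * up to the rsize limit, and issue one async read per batch.
 */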
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc;
	struct list_head tmplist;
	struct cifsFileInfo *open_file = file->private_data;
	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	unsigned int rsize = cifs_sb->rsize;
	pid_t pid;

	/*
	 * Give up immediately if rsize is too small to read an entire page.
	 * The VFS will fall back to readpage. We should never reach this
	 * point however since we set ra_pages to 0 when the rsize is smaller
	 * than a cache page.
	 */
	if (unlikely(rsize < PAGE_CACHE_SIZE))
		return 0;

	/*
	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
	 * immediately if the cookie is negative
	 */
	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
					 &num_pages);
	if (rc == 0)
		return rc;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	rc = 0;
	INIT_LIST_HEAD(&tmplist);

	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
		 __func__, file, mapping, num_pages);

	/*
	 * Start with the page at end of list and move it to private
	 * list. Do the same with any following pages until we hit
	 * the rsize limit, hit an index discontinuity, or run out of
	 * pages. Issue the async read and then start the loop again
	 * until the list is empty.
	 *
	 * Note that list order is important. The page_list is in
	 * the order of declining indexes. When we put the pages in
	 * the rdata->pages, then we want them in increasing order.
	 */
	while (!list_empty(page_list)) {
		unsigned int i;
		unsigned int bytes = PAGE_CACHE_SIZE;
		unsigned int expected_index;
		unsigned int nr_pages = 1;
		loff_t offset;
		struct page *page, *tpage;
		struct cifs_readdata *rdata;

		page = list_entry(page_list->prev, struct page, lru);

		/*
		 * Lock the page and put it in the cache. Since no one else
		 * should have access to this page, we're safe to simply set
		 * PG_locked without checking it first.
		 */
		__set_page_locked(page);
		rc = add_to_page_cache_locked(page, mapping,
					      page->index, GFP_KERNEL);

		/* give up if we can't stick it in the cache */
		if (rc) {
			__clear_page_locked(page);
			break;
		}

		/* move first page to the tmplist */
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
		list_move_tail(&page->lru, &tmplist);

		/* now try and add more pages onto the request */
		expected_index = page->index + 1;
		list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
			/* discontinuity ? */
			if (page->index != expected_index)
				break;

			/* would this page push the read over the rsize? */
			if (bytes + PAGE_CACHE_SIZE > rsize)
				break;

			__set_page_locked(page);
			if (add_to_page_cache_locked(page, mapping,
						page->index, GFP_KERNEL)) {
				__clear_page_locked(page);
				break;
			}
			list_move_tail(&page->lru, &tmplist);
			bytes += PAGE_CACHE_SIZE;
			expected_index++;
			nr_pages++;
		}

		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
		if (!rdata) {
			/* best to give up if we're out of mem */
			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
				list_del(&page->lru);
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			rc = -ENOMEM;
			break;
		}

		rdata->cfile = cifsFileInfo_get(open_file);
		rdata->mapping = mapping;
		rdata->offset = offset;
		rdata->bytes = bytes;
		rdata->pid = pid;
		rdata->pagesz = PAGE_CACHE_SIZE;
		rdata->read_into_pages = cifs_readpages_read_into_pages;

		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
			list_del(&page->lru);
			rdata->pages[rdata->nr_pages++] = page;
		}

		rc = cifs_retry_async_readv(rdata);
		if (rc != 0) {
			for (i = 0; i < rdata->nr_pages; i++) {
				page = rdata->pages[i];
				lru_cache_add_file(page);
				unlock_page(page);
				page_cache_release(page);
			}
			kref_put(&rdata->refcount, cifs_readdata_release);
			break;
		}

		kref_put(&rdata->refcount, cifs_readdata_release);
	}

	return rc;
}

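/*
 * Fill a single pagecache page, trying fscache first and falling back to a
 * synchronous read from the server.
 */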
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	/* Is the page cached? */
	rc = cifs_readpage_from_fscache(file_inode(file), page);
	if (rc == 0)
		goto read_complete;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size we could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cifs_dbg(FYI, "Bytes read %d\n", rc);

	file_inode(file)->i_atime =
		current_fs_time(file_inode(file)->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);

	/* send this page to the cache */
	cifs_readpage_to_fscache(file_inode(file), page);

	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);

read_complete:
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	unsigned int xid;

	xid = get_xid();

	if (file->private_data == NULL) {
		rc = -EBADF;
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset);

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	free_xid(xid);
	return rc;
}

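/* return 1 if any open instance of the inode was opened for writing */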
static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	spin_lock(&cifs_file_list_lock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			spin_unlock(&cifs_file_list_lock);
			return 1;
		}
	}
	spin_unlock(&cifs_file_list_lock);
	return 0;
}

/*
 * We do not want to update the file size from the server for inodes open for
 * write, to avoid races with writepage extending the file. In the future we
 * could consider allowing refreshing the inode only on increases in the file
 * size, but this is tricky to do without racing with writebehind page caching
 * in the current Linux kernel design.
 */
bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return true;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/*
			 * Since there is no page cache to corrupt on
			 * directio, we can change the size safely.
			 */
			return true;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return true;

		return false;
	} else
		return true;
}

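/*
 * Prepare a pagecache page for a buffered write, reading in existing data
 * from the server only when the write cannot cover the whole page.
 */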
static int cifs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
	loff_t page_start = pos & PAGE_MASK;
	loff_t i_size;
	struct page *page;
	int rc = 0;

	cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		rc = -ENOMEM;
		goto out;
	}

	if (PageUptodate(page))
		goto out;

	/*
	 * If we write a full page it will be up to date, no need to read from
	 * the server. If the write is short, we'll end up doing a sync write
	 * instead.
	 */
	if (len == PAGE_CACHE_SIZE)
		goto out;

	/*
	 * optimize away the read when we have an oplock, and we're not
	 * expecting to use any of the data we'd be reading in. That
	 * is, when the page lies beyond the EOF, or straddles the EOF
	 * and the write will cover all of the existing data.
	 */
	if (CIFS_I(mapping->host)->clientCanCacheRead) {
		i_size = i_size_read(mapping->host);
		if (page_start >= i_size ||
		    (offset == 0 && (pos + len) >= i_size)) {
			zero_user_segments(page, 0, offset,
					   offset + len,
					   PAGE_CACHE_SIZE);
			/*
			 * PageChecked means that the parts of the page
			 * to which we're not writing are considered up
			 * to date. Once the data is copied to the
			 * page, it can be set uptodate.
			 */
			SetPageChecked(page);
			goto out;
		}
	}

	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * might as well read a page, it is fast enough. If we get
		 * an error, we don't need to return it. cifs_write_end will
		 * do a sync write instead since PG_uptodate isn't set.
		 */
		cifs_readpage_worker(file, page, &page_start);
	} else {
		/*
		 * We could try using another file handle if there is one -
		 * but how would we lock it to prevent a close of that handle
		 * racing with this read? In any case this will be written
		 * out by write_end so is fine.
		 */
	}
out:
	*pagep = page;
	return rc;
}

static int cifs_release_page(struct page *page, gfp_t gfp)
{
	if (PagePrivate(page))
		return 0;

	return cifs_fscache_release_page(page, gfp);
}

static void cifs_invalidate_page(struct page *page, unsigned long offset)
{
	struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);

	if (offset == 0)
		cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
}

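/* write a dirty page back before it is freed, then drop it from fscache */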
static int cifs_launder_page(struct page *page)
{
	int rc = 0;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};

	cifs_dbg(FYI, "Launder page: %p\n", page);

	if (clear_page_dirty_for_io(page))
		rc = cifs_writepage_locked(page, &wbc);

	cifs_fscache_invalidate_page(page, page->mapping->host);
	return rc;
}

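/*
 * Work handler invoked when the server breaks an oplock: downgrade the
 * cached oplock state, flush (and if needed invalidate) cached data, push
 * any cached byte-range locks to the server, and acknowledge the break.
 */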
void cifs_oplock_break(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);
	struct inode *inode = cfile->dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	if (!cinode->clientCanCacheAll && cinode->clientCanCacheRead &&
	    cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
			 inode);
		cinode->clientCanCacheRead = false;
	}

	if (inode && S_ISREG(inode->i_mode)) {
		if (cinode->clientCanCacheRead)
			break_lease(inode, O_RDONLY);
		else
			break_lease(inode, O_WRONLY);
		rc = filemap_fdatawrite(inode->i_mapping);
		if (!cinode->clientCanCacheRead) {
			rc = filemap_fdatawait(inode->i_mapping);
			mapping_set_error(inode->i_mapping, rc);
			cifs_invalidate_mapping(inode);
		}
		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
	}

	rc = cifs_push_locks(cfile);
	if (rc)
		cifs_dbg(VFS, "Push locks rc = %d\n", rc);

	/*
	 * Releasing a stale oplock after a recent reconnect of the SMB
	 * session (using a now-incorrect file handle) is not a data
	 * integrity issue, but we do not bother sending an oplock release
	 * if the session to the server is still disconnected, since the
	 * server has already released the oplock in that case.
	 */
	if (!cfile->oplock_break_cancelled) {
		rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
							     cinode);
		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	}
}
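
/*
 * Illustrative sketch (not code from this file): the handler above is
 * scheduled from the demultiplex/misc code when an oplock break arrives
 * from the server, roughly along the lines of:
 *
 *	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
 *	...
 *	queue_work(cifsiod_wq, &cfile->oplock_break);
 *
 * The exact call sites and surrounding bookkeeping live outside this file.
 */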

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data. Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.write_begin = cifs_write_begin,
	.write_end = cifs_write_end,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.releasepage = cifs_release_page,
	.invalidatepage = cifs_invalidate_page,
	.launder_page = cifs_launder_page,
};
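
/*
 * Illustrative sketch (not code from this file): which of the two aops
 * tables an inode gets is decided at inode-setup time based on the
 * negotiated server buffer size, roughly:
 *
 *	if (server->maxBuf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
 *		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *	else
 *		inode->i_data.a_ops = &cifs_addr_ops;
 *
 * The field and constant names are those used elsewhere in cifs at this
 * time; treat this as a sketch of the selection logic, not a verbatim
 * quote of the inode-setup code.
 */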