/*
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"

static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}

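/*
 * An illustrative sketch, compiled out and not part of the original file:
 * what the mapping above produces for the three O_ACCMODE values. The
 * helper name is hypothetical.
 */
#if 0
static void check_access_mapping(void)
{
	/* a read-only open asks the server only for read access */
	WARN_ON(cifs_convert_flags(O_RDONLY) != GENERIC_READ);
	/* a write-only open asks only for write access */
	WARN_ON(cifs_convert_flags(O_WRONLY) != GENERIC_WRITE);
	/* a read-write open asks for both, deliberately not GENERIC_ALL */
	WARN_ON(cifs_convert_flags(O_RDWR) !=
		(GENERIC_READ | GENERIC_WRITE));
}
#endif
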
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT)
		posix_flags |= SMB_O_CREAT;
	if (flags & O_EXCL)
		posix_flags |= SMB_O_EXCL;
	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}

static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

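/*
 * An illustrative sketch, compiled out: the dispositions chosen for a
 * few common userspace open() patterns. The helper name is hypothetical.
 */
#if 0
static void check_disposition_mapping(void)
{
	/* creat(2), i.e. O_CREAT | O_TRUNC: truncate or create */
	WARN_ON(cifs_get_disposition(O_CREAT | O_TRUNC) != FILE_OVERWRITE_IF);
	/* O_CREAT | O_EXCL must fail if the file already exists */
	WARN_ON(cifs_get_disposition(O_CREAT | O_EXCL) != FILE_CREATE);
	/* no create/truncate flags: open the existing file only */
	WARN_ON(cifs_get_disposition(0) != FILE_OPEN);
}
#endif
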
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cFYI(1, "posix open %s", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}

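/*
 * An illustrative sketch, compiled out and not part of the original file:
 * the tlink reference pattern used by cifs_posix_open() above. Code that
 * needs a tcon for a superblock takes a reference with cifs_sb_tlink()
 * and must drop it with cifs_put_tlink() on every exit path. The helper
 * name is hypothetical.
 */
#if 0
static int tlink_usage_sketch(struct cifs_sb_info *cifs_sb)
{
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	tlink = cifs_sb_tlink(cifs_sb);	/* takes a reference */
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);	/* borrow the tcon */
	/* ... issue SMB calls against tcon ... */
	cifs_put_tlink(tlink);		/* drop the reference */
	return 0;
}
#endif
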
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;

	if (!tcon->ses->server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct POSIX match for the disposition
 *	FILE_SUPERSEDE (i.e. create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar but truncates the existing file
 *	rather than creating a new file as FILE_SUPERSEDE does (which
 *	uses the attributes / metadata passed in on the open call).
 *
 *	O_SYNC is a reasonable match to the CIFS writethrough flag
 *	and the read write flags match reasonably.  O_LARGEFILE
 *	is irrelevant because largefile support is always used
 *	by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation.
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = tcon->ses->server->ops->open(xid, tcon, full_path, disposition,
					  desired_access, create_options, fid,
					  oplock, buf, cifs_sb);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, &fid->netfid);

out:
	kfree(buf);
	return rc;
}

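/*
 * An illustrative sketch, compiled out: the ->open call above goes
 * through the per-dialect smb_version_operations table, which is what
 * lets this one VFS path drive both SMB1 and SMB2 servers. A dialect
 * wires the table up roughly like this; the example_* function names
 * are hypothetical.
 */
#if 0
static struct smb_version_operations example_operations = {
	.open = example_open,		/* build and send the dialect's open */
	.close = example_close,		/* close a file handle */
	.set_fid = example_set_fid,	/* stash the returned fid in a cfile */
	/* ... */
};
#endif
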
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;
	down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	tlink_tcon(tlink)->ses->server->ops->set_fid(cfile, fid, oplock);

	spin_lock(&cifs_file_list_lock);
	list_add(&cfile->tlist, &(tlink_tcon(tlink)->openFileList));
	/* if a readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cifs_file_list_lock);

	file->private_data = cfile;
	return cfile;
}

struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file_list_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file_list_lock);
	return cifs_file;
}

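/*
 * An illustrative sketch, compiled out: how cifsFileInfo_get() and
 * cifsFileInfo_put() pair up. The open itself holds the initial
 * reference (cfile->count = 1 in cifs_new_fileinfo()), and the final
 * put is what actually closes the handle on the server. The helper
 * name is hypothetical.
 */
#if 0
static void refcount_sketch(struct cifsFileInfo *cfile)
{
	cifsFileInfo_get(cfile);	/* pin the handle, e.g. for async work */
	/* ... cfile->fid stays valid; it cannot be closed under us ... */
	cifsFileInfo_put(cfile);	/* the last put closes the server handle */
}
#endif
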
/*
 * Release a reference on the file private data. This may involve closing
 * the filehandle out on the server. Must be called without holding
 * cifs_file_list_lock.
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = cifs_file->dentry->d_inode;
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsLockInfo *li, *tmp;

	spin_lock(&cifs_file_list_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file_list_lock);
		return;
	}

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cFYI(1, "closing last open instance for inode %p",
		     cifs_file->dentry->d_inode);
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			CIFS_I(inode)->invalid_mapping = true;
		cifs_set_oplock_level(cifsi, 0);
	}
	spin_unlock(&cifs_file_list_lock);

	cancel_work_sync(&cifs_file->oplock_break);

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = -ENOSYS;

		xid = get_xid();
		if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		free_xid(xid);
	}

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	kfree(cifs_file);
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
	     inode, file->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix open succeeded");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cERROR(1, "server %s of type %s returned"
					   " unexpected error on SMB posix open"
					   ", disabling posix open support."
					   " Check if server update available.",
					   tcon->ses->serverName,
					   tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (!posix_open_ok) {
		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc)
			goto out;
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (tcon->ses->server->ops->close)
			tcon->ses->server->ops->close(xid, tcon, &fid);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= NO_CHANGE_64,
			.gid	= NO_CHANGE_64,
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

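/*
 * An illustrative sketch, compiled out: the xid pattern cifs_open() and
 * the other entry points in this file follow. An xid brackets one VFS
 * operation for request tracking and debug logging, and it must be
 * freed on every return path. The helper name is hypothetical.
 */
#if 0
static int xid_pattern_sketch(void)
{
	unsigned int xid = get_xid();
	int rc = 0;

	/* ... issue one or more SMB calls under this xid ... */
	free_xid(xid);
	return rc;
}
#endif
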
/*
 * Try to reacquire byte-range locks that were released when the session
 * to the server was lost.
 */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}

static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_fid fid;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = cfile->dentry->d_inode;
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab the rename sem here because various ops, including
	 * those that already have the rename sem, can end up causing
	 * writepage to get called, and if the server was down that means we
	 * end up here and we can never tell if the caller already has the
	 * rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cFYI(1, "inode = 0x%p file flags 0x%x for %s", inode, cfile->f_flags,
	     full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
	     le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cFYI(1, "posix reopen succeeded");
			goto reopen_success;
		}
		/*
		 * Fall through to retry the open the old way on errors;
		 * especially in the reconnect path it is important to retry
		 * hard.
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/*
	 * Can not refresh the inode by passing in a file_info buf returned by
	 * CIFSSMBOpen and then calling get_inode_info with that buf, since
	 * the file might have write-behind data that needs to be flushed and
	 * the server's version of the file size can be stale. If we knew for
	 * sure that the inode was not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, tcon, full_path, disposition,
			       desired_access, create_options, &fid, &oplock,
			       NULL, cifs_sb);
	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cFYI(1, "cifs_reopen returned 0x%x", rc);
		cFYI(1, "oplock: %d", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to the server already and could
	 * deadlock if we tried to flush data, and since we do not know if we
	 * have data that would invalidate the current end of file on the
	 * server, we can not go to the server to get the new inode info.
	 */

	server->ops->set_fid(cfile, &fid, oplock);
	cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}

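/*
 * An illustrative sketch, compiled out: how the read/write paths in this
 * file are expected to use the reopen helper. On a handle invalidated by
 * a reconnect they reopen and retry the operation. The helper name is
 * hypothetical.
 */
#if 0
static int retry_on_stale_handle_sketch(struct cifsFileInfo *open_file)
{
	int rc = -EAGAIN;

	while (rc == -EAGAIN) {
		if (open_file->invalidHandle) {
			/* the handle went stale across a reconnect */
			rc = cifs_reopen_file(open_file, true);
			if (rc != 0)
				break;
		}
		/* ... issue the SMB here; a dropped session yields -EAGAIN ... */
		rc = 0;
	}
	return rc;
}
#endif
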
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cFYI(1, "Closedir inode = 0x%p", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cFYI(1, "Freeing private data in close dir");
	spin_lock(&cifs_file_list_lock);
	if (!cfile->srch_inf.endOfSearch && !cfile->invalidHandle) {
		cfile->invalidHandle = true;
		spin_unlock(&cifs_file_list_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cFYI(1, "Closing uncompleted readdir with rc %d", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cifs_file_list_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cFYI(1, "closedir free smb buf in srch struct");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}

static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}

void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}

static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		*conf_lock = li;
		return true;
	}
	return false;
}

static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock);
		if (rc)
			break;
	}

	return rc;
}

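/*
 * An illustrative sketch, compiled out: the interval test used by the
 * conflict scan above, written as a boolean helper. Ranges [a, a+alen)
 * and [b, b+blen) conflict exactly when neither one ends at or before
 * the start of the other. The helper name is hypothetical.
 */
#if 0
static bool ranges_overlap(__u64 a, __u64 alen, __u64 b, __u64 blen)
{
	return !(a + alen <= b || b + blen <= a);
}
#endif
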
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * ask the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}

static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}

/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}

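/*
 * An illustrative sketch, compiled out: the wakeup test used in the
 * wait_event_interruptible() call above. A blocked lock sleeps on its own
 * blist node; cifs_del_lock_waiters() removes it with list_del_init(),
 * which points both prev and next back at the node itself, so the
 * condition is equivalent to list_empty() on that node. The helper name
 * is hypothetical.
 */
#if 0
static bool lock_was_unblocked(struct cifsLockInfo *lock)
{
	return list_empty(&lock->blist);
}
#endif
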
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * ask the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}

/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if an error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		locks_delete_block(flock);
	}
	return rc;
}

int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it for zero before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (!max_buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -EINVAL;
	}

	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	kfree(buf);
	free_xid(xid);
	return rc;
}

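/*
 * An illustrative sketch, compiled out: the batching arithmetic above.
 * In SMB1's LOCKING_ANDX request every lock range costs one fixed-size
 * LOCKING_ANDX_RANGE slot, so the number of ranges per request is
 * bounded by what fits in the negotiated buffer after the SMB header;
 * the push loop flushes a full buffer and keeps filling. The helper
 * name is hypothetical.
 */
#if 0
static unsigned int ranges_per_request(unsigned int max_buf)
{
	return (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
}
#endif
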
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
	for (lockp = &inode->i_flock; *lockp != NULL; \
	     lockp = &(*lockp)->fl_next)

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};

static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock, **before;
	unsigned int count = 0, i = 0;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	/* we are going to update can_cache_brlcks here - need a write access */
	down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		free_xid(xid);
		return rc;
	}

	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		if ((*before)->fl_flags & FL_POSIX)
			count++;
	}
	unlock_flocks();

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem, which
	 * protects the locking operations of this inode.
	 */
	for (; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	lock_flocks();
	cifs_for_each_lock(cfile->dentry->d_inode, before) {
		flock = *before;
		if ((flock->fl_flags & FL_POSIX) == 0)
			continue;
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cERROR(1, "Can't push all brlocks!");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = flock->fl_pid;
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	unlock_flocks();

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);

	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}

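/*
 * An illustrative sketch, compiled out: the two-pass shape of the
 * function above. lock_flocks() takes a spinlock, so GFP_KERNEL
 * allocations must happen with it dropped; holding lock_sem for write
 * guarantees the count cannot change in between. The helper name is
 * hypothetical.
 */
#if 0
static int preallocate_then_fill_sketch(struct cifsFileInfo *cfile,
					struct list_head *locks_to_send)
{
	unsigned int count = 0, i;
	struct lock_to_push *lck;

	/* pass 1: count under the spinlock, no sleeping allowed */
	lock_flocks();
	/* ... walk i_flock and count the FL_POSIX entries ... */
	unlock_flocks();

	/* pass 2: sleepable allocations with the spinlock dropped */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck)
			return -ENOMEM;
		list_add_tail(&lck->llist, locks_to_send);
	}

	/* pass 3: re-take the spinlock and copy lock data into the nodes */
	return 0;
}
#endif
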
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		return cifs_push_posix_locks(cfile);

	return tcon->ses->server->ops->push_mand_locks(cfile);
}

Pavel Shilovsky03776f42010-08-17 11:26:00 +04001117static void
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001118cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001119 bool *wait_flag, struct TCP_Server_Info *server)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001120{
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001121 if (flock->fl_flags & FL_POSIX)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001122 cFYI(1, "Posix");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001123 if (flock->fl_flags & FL_FLOCK)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001124 cFYI(1, "Flock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001125 if (flock->fl_flags & FL_SLEEP) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001126 cFYI(1, "Blocking lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001127 *wait_flag = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 }
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001129 if (flock->fl_flags & FL_ACCESS)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001130 cFYI(1, "Process suspended by mandatory locking - "
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001131 "not implemented yet");
1132 if (flock->fl_flags & FL_LEASE)
Joe Perchesb6b38f72010-04-21 03:50:45 +00001133 cFYI(1, "Lease on file - not implemented yet");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001134 if (flock->fl_flags &
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001136 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001138 *type = server->vals->large_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001139 if (flock->fl_type == F_WRLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001140 cFYI(1, "F_WRLCK ");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001141 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001142 *lock = 1;
1143 } else if (flock->fl_type == F_UNLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001144 cFYI(1, "F_UNLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001145 *type |= server->vals->unlock_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001146 *unlock = 1;
1147 /* Check if unlock includes more than one lock range */
1148 } else if (flock->fl_type == F_RDLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001149 cFYI(1, "F_RDLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001150 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001151 *lock = 1;
1152 } else if (flock->fl_type == F_EXLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001153 cFYI(1, "F_EXLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001154 *type |= server->vals->exclusive_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001155 *lock = 1;
1156 } else if (flock->fl_type == F_SHLCK) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001157 cFYI(1, "F_SHLCK");
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001158 *type |= server->vals->shared_lock_type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001159 *lock = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 } else
Joe Perchesb6b38f72010-04-21 03:50:45 +00001161 cFYI(1, "Unknown type of lock");
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001162}
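
/*
 * Illustrative sketch (not part of the original file): decoding a typical
 * blocking write-lock request with the helper above. The concrete bit
 * values come from server->vals, so they are assumptions here:
 *
 *	struct file_lock fl = { .fl_flags = FL_POSIX | FL_SLEEP,
 *				.fl_type  = F_WRLCK };
 *	__u32 type;
 *	int lock = 0, unlock = 0;
 *	bool wait = false;
 *
 *	cifs_read_flock(&fl, &type, &lock, &unlock, &wait, server);
 *	// type == large_lock_type | exclusive_lock_type
 *	// lock == 1, unlock == 0, wait == true
 */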
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001164static int
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001165cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001166 bool wait_flag, bool posix_lck, unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001167{
1168 int rc = 0;
1169 __u64 length = 1 + flock->fl_end - flock->fl_start;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001170 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1171 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001172 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001173 __u16 netfid = cfile->fid.netfid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001175 if (posix_lck) {
1176 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001177
1178 rc = cifs_posix_lock_test(file, flock);
1179 if (!rc)
1180 return rc;
1181
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001182 if (type & server->vals->shared_lock_type)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001183 posix_lock_type = CIFS_RDLCK;
1184 else
1185 posix_lock_type = CIFS_WRLCK;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001186 rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
Jeff Laytonc5fd3632012-07-23 13:28:37 -04001187 flock->fl_start, length, flock,
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001188 posix_lock_type, wait_flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189 return rc;
1190 }
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001191
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001192 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001193 if (!rc)
1194 return rc;
1195
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001196 /* BB we could chain these into one lock request BB */
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001197 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
1198 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001199 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001200 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1201 type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001202 flock->fl_type = F_UNLCK;
1203 if (rc != 0)
1204 cERROR(1, "Error unlocking previously locked "
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001205 "range %d during test of lock", rc);
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001206 return 0;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001207 }
1208
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001209 if (type & server->vals->shared_lock_type) {
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001210 flock->fl_type = F_WRLCK;
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001211 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001212 }
1213
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001214 type &= ~server->vals->exclusive_lock_type;
1215
1216 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1217 type | server->vals->shared_lock_type,
1218 1, 0, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001219 if (rc == 0) {
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001220 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1221 type | server->vals->shared_lock_type, 0, 1, false);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001222 flock->fl_type = F_RDLCK;
1223 if (rc != 0)
1224 cERROR(1, "Error unlocking previously locked "
1225 "range %d during test of lock", rc);
1226 } else
1227 flock->fl_type = F_WRLCK;
1228
Pavel Shilovskya88b4702011-10-29 17:17:59 +04001229 return 0;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001230}
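
/*
 * Userspace view (illustrative sketch): cifs_getlk() above services
 * fcntl(F_GETLK). For mandatory-lock servers it probes by taking and
 * immediately releasing the lock; a conflict-free range is reported back
 * as l_type == F_UNLCK, matching standard POSIX semantics:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 4096 };
 *	if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type == F_UNLCK)
 *		;	// no conflicting lock on the first 4k
 */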
1231
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001232void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001233cifs_move_llist(struct list_head *source, struct list_head *dest)
1234{
1235 struct list_head *li, *tmp;
1236 list_for_each_safe(li, tmp, source)
1237 list_move(li, dest);
1238}
1239
Pavel Shilovskyf7ba7fe2012-09-19 06:22:43 -07001240void
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001241cifs_free_llist(struct list_head *llist)
1242{
1243 struct cifsLockInfo *li, *tmp;
1244 list_for_each_entry_safe(li, tmp, llist, llist) {
1245 cifs_del_lock_waiters(li);
1246 list_del(&li->llist);
1247 kfree(li);
1248 }
1249}
1250
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001251int
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001252cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
1253 unsigned int xid)
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001254{
1255 int rc = 0, stored_rc;
1256 int types[] = {LOCKING_ANDX_LARGE_FILES,
1257 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1258 unsigned int i;
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001259 unsigned int max_num, num, max_buf;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001260 LOCKING_ANDX_RANGE *buf, *cur;
1261 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1262 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1263 struct cifsLockInfo *li, *tmp;
1264 __u64 length = 1 + flock->fl_end - flock->fl_start;
1265 struct list_head tmp_llist;
1266
1267 INIT_LIST_HEAD(&tmp_llist);
1268
Pavel Shilovsky0013fb42012-05-31 13:03:26 +04001269 /*
1270 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1271 * and check it for zero before using.
1272 */
1273 max_buf = tcon->ses->server->maxBuf;
1274 if (!max_buf)
1275 return -EINVAL;
1276
1277 max_num = (max_buf - sizeof(struct smb_hdr)) /
1278 sizeof(LOCKING_ANDX_RANGE);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001279 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1280 if (!buf)
1281 return -ENOMEM;
1282
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001283 down_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001284 for (i = 0; i < 2; i++) {
1285 cur = buf;
1286 num = 0;
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001287 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001288 if (flock->fl_start > li->offset ||
1289 (flock->fl_start + length) <
1290 (li->offset + li->length))
1291 continue;
1292 if (current->tgid != li->pid)
1293 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001294 if (types[i] != li->type)
1295 continue;
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001296 if (cinode->can_cache_brlcks) {
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001297 /*
1298 * We can cache brlock requests - simply remove
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001299 * a lock from the file's list.
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001300 */
1301 list_del(&li->llist);
1302 cifs_del_lock_waiters(li);
1303 kfree(li);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001304 continue;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001305 }
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001306 cur->Pid = cpu_to_le16(li->pid);
1307 cur->LengthLow = cpu_to_le32((u32)li->length);
1308 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1309 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1310 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1311 /*
1312 * We need to save a lock here to let us add it again to
1313 * the file's list if the unlock range request fails on
1314 * the server.
1315 */
1316 list_move(&li->llist, &tmp_llist);
1317 if (++num == max_num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001318 stored_rc = cifs_lockv(xid, tcon,
1319 cfile->fid.netfid,
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001320 li->type, num, 0, buf);
1321 if (stored_rc) {
1322 /*
1323 * We failed on the unlock range
1324 * request - add all locks from the tmp
1325 * list to the head of the file's list.
1326 */
1327 cifs_move_llist(&tmp_llist,
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001328 &cfile->llist->locks);
Pavel Shilovskyea319d52012-05-31 13:59:36 +04001329 rc = stored_rc;
1330 } else
1331 /*
1332 * The unlock range request succeed -
1333 * free the tmp list.
1334 */
1335 cifs_free_llist(&tmp_llist);
1336 cur = buf;
1337 num = 0;
1338 } else
1339 cur++;
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001340 }
1341 if (num) {
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001342 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001343 types[i], num, 0, buf);
1344 if (stored_rc) {
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001345 cifs_move_llist(&tmp_llist,
1346 &cfile->llist->locks);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001347 rc = stored_rc;
1348 } else
1349 cifs_free_llist(&tmp_llist);
1350 }
1351 }
1352
Pavel Shilovsky1b4b55a2012-09-19 06:22:44 -07001353 up_write(&cinode->lock_sem);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001354 kfree(buf);
1355 return rc;
1356}
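
/*
 * Worked example (illustrative; the struct sizes are assumptions, see
 * cifspdu.h for the real ones): with max_buf = 16384, a 37-byte smb_hdr
 * and 20-byte LOCKING_ANDX_RANGE entries,
 *
 *	max_num = (16384 - 37) / 20 = 817
 *
 * so cifs_unlock_range() can coalesce up to 817 cached lock ranges into a
 * single LOCKING_ANDX request before flushing the buffer and starting a
 * new batch.
 */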
1357
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001358static int
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001359cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001360 bool wait_flag, bool posix_lck, int lock, int unlock,
1361 unsigned int xid)
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001362{
1363 int rc = 0;
1364 __u64 length = 1 + flock->fl_end - flock->fl_start;
1365 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1366 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001367 struct TCP_Server_Info *server = tcon->ses->server;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001368
1369 if (posix_lck) {
Steve French08547b02006-02-28 22:39:25 +00001370 int posix_lock_type;
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001371
1372 rc = cifs_posix_lock_set(file, flock);
1373 if (!rc || rc < 0)
1374 return rc;
1375
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001376 if (type & server->vals->shared_lock_type)
Steve French08547b02006-02-28 22:39:25 +00001377 posix_lock_type = CIFS_RDLCK;
1378 else
1379 posix_lock_type = CIFS_WRLCK;
Steve French50c2f752007-07-13 00:33:32 +00001380
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001381 if (unlock == 1)
Steve Frenchbeb84dc2006-03-03 23:36:34 +00001382 posix_lock_type = CIFS_UNLCK;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001383
Pavel Shilovskyf45d3412012-09-19 06:22:43 -07001384 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
1385 current->tgid, flock->fl_start, length,
1386 NULL, posix_lock_type, wait_flag);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001387 goto out;
Jeremy Allison7ee1af72006-08-02 21:56:33 +00001388 }
1389
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001390 if (lock) {
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001391 struct cifsLockInfo *lock;
1392
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001393 lock = cifs_lock_init(flock->fl_start, length, type);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001394 if (!lock)
1395 return -ENOMEM;
1396
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001397 rc = cifs_lock_add_if(cfile, lock, wait_flag);
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001398 if (rc < 0)
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001399 kfree(lock);
1400 if (rc <= 0)
Pavel Shilovsky85160e02011-10-22 15:33:29 +04001401 goto out;
1402
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001403 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
1404 type, 1, 0, wait_flag);
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001405 if (rc) {
1406 kfree(lock);
1407 goto out;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001408 }
Pavel Shilovsky161ebf92011-10-29 17:17:58 +04001409
Pavel Shilovskyfbd35ac2012-02-24 15:41:06 +03001410 cifs_lock_add(cfile, lock);
Pavel Shilovsky9ee305b2011-10-22 15:33:31 +04001411 } else if (unlock)
Pavel Shilovskyd39a4f72012-09-19 06:22:43 -07001412 rc = server->ops->mand_unlock_range(cfile, flock, xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001413
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001414out:
1415 if (flock->fl_flags & FL_POSIX)
Steve French9ebb3892012-04-01 13:52:54 -05001416 posix_lock_file_wait(file, flock);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001417 return rc;
1418}
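
/*
 * Userspace view (illustrative sketch): cifs_setlk() is reached from
 * fcntl(F_SETLK) and fcntl(F_SETLKW); the latter sets FL_SLEEP and thus
 * wait_flag above:
 *
 *	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET,
 *			    .l_start = 0, .l_len = 0 };	// whole file
 *	fcntl(fd, F_SETLKW, &fl);	// blocks until the lock is granted
 *	fl.l_type = F_UNLCK;
 *	fcntl(fd, F_SETLK, &fl);	// releases via the unlock path
 */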
1419
1420int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1421{
1422 int rc, xid;
1423 int lock = 0, unlock = 0;
1424 bool wait_flag = false;
1425 bool posix_lck = false;
1426 struct cifs_sb_info *cifs_sb;
1427 struct cifs_tcon *tcon;
1428 struct cifsInodeInfo *cinode;
1429 struct cifsFileInfo *cfile;
1430 __u16 netfid;
Pavel Shilovsky04a6aa82012-02-28 14:16:55 +03001431 __u32 type;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001432
1433 rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001434 xid = get_xid();
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001435
1436 cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
1437 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1438 flock->fl_start, flock->fl_end);
1439
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001440 cfile = (struct cifsFileInfo *)file->private_data;
1441 tcon = tlink_tcon(cfile->tlink);
Pavel Shilovsky106dc532012-02-28 14:23:34 +03001442
1443 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1444 tcon->ses->server);
1445
1446 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky4b4de762012-09-18 16:20:26 -07001447 netfid = cfile->fid.netfid;
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001448 cinode = CIFS_I(file->f_path.dentry->d_inode);
1449
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04001450 if (cap_unix(tcon->ses) &&
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001451 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1452 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1453 posix_lck = true;
1454 /*
1455 * BB add code here to normalize offset and length to account for
1456 * negative length which we can not accept over the wire.
1457 */
1458 if (IS_GETLK(cmd)) {
Pavel Shilovsky4f6bcec2011-10-22 15:33:30 +04001459 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001460 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001461 return rc;
1462 }
1463
1464 if (!lock && !unlock) {
1465 /*
1466 * if no lock or unlock then nothing to do since we do not
1467 * know what it is
1468 */
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001469 free_xid(xid);
Pavel Shilovsky03776f42010-08-17 11:26:00 +04001470 return -EOPNOTSUPP;
1471 }
1472
1473 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
1474 xid);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001475 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 return rc;
1477}
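
/*
 * Dispatch summary for cifs_lock() (sketch, restating the code above):
 *
 *	IS_GETLK(cmd)   -> cifs_getlk()   test only, flock updated in place
 *	lock || unlock  -> cifs_setlk()   set or clear the lock
 *	neither         -> -EOPNOTSUPP
 *
 * posix_lck is chosen only when the server advertises CIFS_UNIX_FCNTL_CAP
 * and the mount did not set CIFS_MOUNT_NOPOSIXBRL.
 */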
1478
Jeff Layton597b0272012-03-23 14:40:56 -04001479/*
1480 * update the file size (if needed) after a write. Should be called with
1481 * the inode->i_lock held
1482 */
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05001483void
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001484cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
1485 unsigned int bytes_written)
1486{
1487 loff_t end_of_write = offset + bytes_written;
1488
1489 if (end_of_write > cifsi->server_eof)
1490 cifsi->server_eof = end_of_write;
1491}
1492
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001493static ssize_t
1494cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
1495 size_t write_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496{
1497 int rc = 0;
1498 unsigned int bytes_written = 0;
1499 unsigned int total_written;
1500 struct cifs_sb_info *cifs_sb;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001501 struct cifs_tcon *tcon;
1502 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001503 unsigned int xid;
Jeff Layton7da4b492010-10-15 15:34:00 -04001504 struct dentry *dentry = open_file->dentry;
1505 struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001506 struct cifs_io_parms io_parms;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507
Jeff Layton7da4b492010-10-15 15:34:00 -04001508 cifs_sb = CIFS_SB(dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509
Joe Perchesb6b38f72010-04-21 03:50:45 +00001510 cFYI(1, "write %zd bytes to offset %lld of %s", write_size,
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001511 *offset, dentry->d_name.name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001513 tcon = tlink_tcon(open_file->tlink);
1514 server = tcon->ses->server;
1515
1516 if (!server->ops->sync_write)
1517 return -ENOSYS;
Steve French50c2f752007-07-13 00:33:32 +00001518
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001519 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 for (total_written = 0; write_size > total_written;
1522 total_written += bytes_written) {
1523 rc = -EAGAIN;
1524 while (rc == -EAGAIN) {
Jeff Laytonca83ce32011-04-12 09:13:44 -04001525 struct kvec iov[2];
1526 unsigned int len;
1527
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 if (open_file->invalidHandle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 /* we could deadlock if we called
 1530 filemap_fdatawait from here, so tell
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001531 reopen_file not to flush data to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 the server now */
Jeff Layton15886172010-10-15 15:33:59 -04001533 rc = cifs_reopen_file(open_file, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 if (rc != 0)
1535 break;
1536 }
Steve French3e844692005-10-03 13:37:24 -07001537
Jeff Laytonca83ce32011-04-12 09:13:44 -04001538 len = min((size_t)cifs_sb->wsize,
1539 write_size - total_written);
1540 /* iov[0] is reserved for smb header */
1541 iov[1].iov_base = (char *)write_data + total_written;
1542 iov[1].iov_len = len;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001543 io_parms.pid = pid;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001544 io_parms.tcon = tcon;
1545 io_parms.offset = *offset;
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001546 io_parms.length = len;
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001547 rc = server->ops->sync_write(xid, open_file, &io_parms,
1548 &bytes_written, iov, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 }
1550 if (rc || (bytes_written == 0)) {
1551 if (total_written)
1552 break;
1553 else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001554 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 return rc;
1556 }
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001557 } else {
Jeff Layton597b0272012-03-23 14:40:56 -04001558 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001559 cifs_update_eof(cifsi, *offset, bytes_written);
Jeff Layton597b0272012-03-23 14:40:56 -04001560 spin_unlock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001561 *offset += bytes_written;
Jeff Laytonfbec9ab2009-04-03 13:44:00 -04001562 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 }
1564
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001565 cifs_stats_bytes_written(tcon, total_written);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
Jeff Layton7da4b492010-10-15 15:34:00 -04001567 if (total_written > 0) {
1568 spin_lock(&dentry->d_inode->i_lock);
Pavel Shilovskyba9ad7252012-09-18 16:20:30 -07001569 if (*offset > dentry->d_inode->i_size)
1570 i_size_write(dentry->d_inode, *offset);
Jeff Layton7da4b492010-10-15 15:34:00 -04001571 spin_unlock(&dentry->d_inode->i_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 }
Jeff Layton7da4b492010-10-15 15:34:00 -04001573 mark_inode_dirty_sync(dentry->d_inode);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001574 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 return total_written;
1576}
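
/*
 * Worked example (illustrative, assuming wsize = 57344, i.e. 14 pages):
 * a 100000-byte cifs_write() goes out as two sync_write calls,
 *
 *	iteration 1: len = min(57344, 100000 - 0)     = 57344
 *	iteration 2: len = min(57344, 100000 - 57344) = 42656
 *
 * with iov[0] reserved for the SMB header and iov[1] pointing into the
 * caller's buffer at the running total_written offset.
 */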
1577
Jeff Layton6508d902010-09-29 19:51:11 -04001578struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1579 bool fsuid_only)
Steve French630f3f0c2007-10-25 21:17:17 +00001580{
1581 struct cifsFileInfo *open_file = NULL;
Jeff Layton6508d902010-09-29 19:51:11 -04001582 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1583
1584 /* only filter by fsuid on multiuser mounts */
1585 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1586 fsuid_only = false;
Steve French630f3f0c2007-10-25 21:17:17 +00001587
Jeff Layton44772882010-10-15 15:34:03 -04001588 spin_lock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001589 /* we could simply return the first list entry, since write-only
 1590 entries are always at the end of the list; but the first entry
 1591 might have a close pending, so we go through the whole list */
1592 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001593 if (fsuid_only && open_file->uid != current_fsuid())
1594 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001595 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
Steve French630f3f0c2007-10-25 21:17:17 +00001596 if (!open_file->invalidHandle) {
1597 /* found a good file */
1598 /* lock it so it will not be closed on us */
Jeff Layton764a1b12012-07-25 14:59:54 -04001599 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001600 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001601 return open_file;
1602 } /* else might as well continue, and look for
1603 another, or simply have the caller reopen it
1604 again rather than trying to fix this handle */
1605 } else /* write only file */
1606 break; /* write only files are last so must be done */
1607 }
Jeff Layton44772882010-10-15 15:34:03 -04001608 spin_unlock(&cifs_file_list_lock);
Steve French630f3f0c2007-10-25 21:17:17 +00001609 return NULL;
1610}
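
/*
 * Typical usage (sketch, not in the original file): the handle is returned
 * with an extra reference taken under cifs_file_list_lock, so every
 * successful lookup must be paired with cifsFileInfo_put():
 *
 *	struct cifsFileInfo *open_file;
 *
 *	open_file = find_readable_file(CIFS_I(inode), false);
 *	if (open_file) {
 *		// ... issue reads through open_file ...
 *		cifsFileInfo_put(open_file);
 *	}
 */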
Steve French630f3f0c2007-10-25 21:17:17 +00001611
Jeff Layton6508d902010-09-29 19:51:11 -04001612struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1613 bool fsuid_only)
Steve French6148a742005-10-05 12:23:19 -07001614{
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001615 struct cifsFileInfo *open_file, *inv_file = NULL;
Jeff Laytond3892292010-11-02 16:22:50 -04001616 struct cifs_sb_info *cifs_sb;
Jeff Layton2846d382008-09-22 21:33:33 -04001617 bool any_available = false;
Steve Frenchdd99cd82005-10-05 19:32:49 -07001618 int rc;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001619 unsigned int refind = 0;
Steve French6148a742005-10-05 12:23:19 -07001620
Steve French60808232006-04-22 15:53:05 +00001621 /* Having a null inode here (because mapping->host was set to zero by
 1622 the VFS or MM) should not happen, but we had reports of an oops (due to
 1623 it being zero) during stress test cases, so we need to check for it */
1624
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001625 if (cifs_inode == NULL) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001626 cERROR(1, "Null inode passed to find_writable_file");
Steve French60808232006-04-22 15:53:05 +00001627 dump_stack();
1628 return NULL;
1629 }
1630
Jeff Laytond3892292010-11-02 16:22:50 -04001631 cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
1632
Jeff Layton6508d902010-09-29 19:51:11 -04001633 /* only filter by fsuid on multiuser mounts */
1634 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
1635 fsuid_only = false;
1636
Jeff Layton44772882010-10-15 15:34:03 -04001637 spin_lock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001638refind_writable:
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001639 if (refind > MAX_REOPEN_ATT) {
1640 spin_unlock(&cifs_file_list_lock);
1641 return NULL;
1642 }
Steve French6148a742005-10-05 12:23:19 -07001643 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton6508d902010-09-29 19:51:11 -04001644 if (!any_available && open_file->pid != current->tgid)
1645 continue;
1646 if (fsuid_only && open_file->uid != current_fsuid())
1647 continue;
Jeff Layton2e396b82010-10-15 15:34:01 -04001648 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Steve French9b22b0b2007-10-02 01:11:08 +00001649 if (!open_file->invalidHandle) {
1650 /* found a good writable file */
Jeff Layton764a1b12012-07-25 14:59:54 -04001651 cifsFileInfo_get_locked(open_file);
Jeff Layton44772882010-10-15 15:34:03 -04001652 spin_unlock(&cifs_file_list_lock);
Steve French9b22b0b2007-10-02 01:11:08 +00001653 return open_file;
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001654 } else {
1655 if (!inv_file)
1656 inv_file = open_file;
Steve French9b22b0b2007-10-02 01:11:08 +00001657 }
Steve French6148a742005-10-05 12:23:19 -07001658 }
1659 }
Jeff Layton2846d382008-09-22 21:33:33 -04001660 /* couldn't find a usable FH with the same pid, try any available */
1661 if (!any_available) {
1662 any_available = true;
1663 goto refind_writable;
1664 }
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001665
1666 if (inv_file) {
1667 any_available = false;
Jeff Layton764a1b12012-07-25 14:59:54 -04001668 cifsFileInfo_get_locked(inv_file);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001669 }
1670
Jeff Layton44772882010-10-15 15:34:03 -04001671 spin_unlock(&cifs_file_list_lock);
Shirish Pargaonkar2c0c2a02012-05-21 09:20:12 -05001672
1673 if (inv_file) {
1674 rc = cifs_reopen_file(inv_file, false);
1675 if (!rc)
1676 return inv_file;
1677 else {
1678 spin_lock(&cifs_file_list_lock);
1679 list_move_tail(&inv_file->flist,
1680 &cifs_inode->openFileList);
1681 spin_unlock(&cifs_file_list_lock);
1682 cifsFileInfo_put(inv_file);
1683 spin_lock(&cifs_file_list_lock);
1684 ++refind;
1685 goto refind_writable;
1686 }
1687 }
1688
Steve French6148a742005-10-05 12:23:19 -07001689 return NULL;
1690}
1691
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1693{
1694 struct address_space *mapping = page->mapping;
1695 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1696 char *write_data;
1697 int rc = -EFAULT;
1698 int bytes_written = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699 struct inode *inode;
Steve French6148a742005-10-05 12:23:19 -07001700 struct cifsFileInfo *open_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
1702 if (!mapping || !mapping->host)
1703 return -EFAULT;
1704
1705 inode = page->mapping->host;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706
1707 offset += (loff_t)from;
1708 write_data = kmap(page);
1709 write_data += from;
1710
1711 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1712 kunmap(page);
1713 return -EIO;
1714 }
1715
1716 /* racing with truncate? */
1717 if (offset > mapping->host->i_size) {
1718 kunmap(page);
1719 return 0; /* don't care */
1720 }
1721
1722 /* check to make sure that we are not extending the file */
1723 if (mapping->host->i_size - offset < (loff_t)to)
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001724 to = (unsigned)(mapping->host->i_size - offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725
Jeff Layton6508d902010-09-29 19:51:11 -04001726 open_file = find_writable_file(CIFS_I(mapping->host), false);
Steve French6148a742005-10-05 12:23:19 -07001727 if (open_file) {
Pavel Shilovskyfa2989f2011-05-26 10:01:59 +04001728 bytes_written = cifs_write(open_file, open_file->pid,
1729 write_data, to - from, &offset);
Dave Kleikamp6ab409b2009-08-31 11:07:12 -04001730 cifsFileInfo_put(open_file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 /* Does mm or vfs already set times? */
Steve French6148a742005-10-05 12:23:19 -07001732 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001733 if ((bytes_written > 0) && (offset))
Steve French6148a742005-10-05 12:23:19 -07001734 rc = 0;
Steve Frenchbb5a9a02007-12-31 04:21:29 +00001735 else if (bytes_written < 0)
1736 rc = bytes_written;
Steve French6148a742005-10-05 12:23:19 -07001737 } else {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001738 cFYI(1, "No writeable filehandles for inode");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739 rc = -EIO;
1740 }
1741
1742 kunmap(page);
1743 return rc;
1744}
1745
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746static int cifs_writepages(struct address_space *mapping,
Steve French37c0eb42005-10-05 14:50:29 -07001747 struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748{
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001749 struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
1750 bool done = false, scanned = false, range_whole = false;
1751 pgoff_t end, index;
1752 struct cifs_writedata *wdata;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001753 struct TCP_Server_Info *server;
Steve French37c0eb42005-10-05 14:50:29 -07001754 struct page *page;
Steve French37c0eb42005-10-05 14:50:29 -07001755 int rc = 0;
Jeff Laytoneddb0792012-09-18 16:20:35 -07001756 loff_t isize = i_size_read(mapping->host);
Steve French50c2f752007-07-13 00:33:32 +00001757
Steve French37c0eb42005-10-05 14:50:29 -07001758 /*
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001759 * If wsize is smaller than the page cache size, default to writing
Steve French37c0eb42005-10-05 14:50:29 -07001760 * one page at a time via cifs_writepage
1761 */
1762 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1763 return generic_writepages(mapping, wbc);
1764
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001765 if (wbc->range_cyclic) {
Steve French37c0eb42005-10-05 14:50:29 -07001766 index = mapping->writeback_index; /* Start from prev offset */
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001767 end = -1;
1768 } else {
1769 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1770 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1771 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001772 range_whole = true;
1773 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001774 }
1775retry:
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001776 while (!done && index <= end) {
1777 unsigned int i, nr_pages, found_pages;
1778 pgoff_t next = 0, tofind;
1779 struct page **pages;
Steve French37c0eb42005-10-05 14:50:29 -07001780
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001781 tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
1782 end - index) + 1;
Steve French37c0eb42005-10-05 14:50:29 -07001783
Jeff Laytonc2e87642012-03-23 14:40:55 -04001784 wdata = cifs_writedata_alloc((unsigned int)tofind,
1785 cifs_writev_complete);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001786 if (!wdata) {
1787 rc = -ENOMEM;
1788 break;
1789 }
1790
1791 /*
1792 * find_get_pages_tag seems to return a max of 256 on each
1793 * iteration, so we must call it several times in order to
1794 * fill the array or the wsize is effectively limited to
1795 * 256 * PAGE_CACHE_SIZE.
1796 */
1797 found_pages = 0;
1798 pages = wdata->pages;
1799 do {
1800 nr_pages = find_get_pages_tag(mapping, &index,
1801 PAGECACHE_TAG_DIRTY,
1802 tofind, pages);
1803 found_pages += nr_pages;
1804 tofind -= nr_pages;
1805 pages += nr_pages;
1806 } while (nr_pages && tofind && index <= end);
1807
1808 if (found_pages == 0) {
1809 kref_put(&wdata->refcount, cifs_writedata_release);
1810 break;
1811 }
1812
1813 nr_pages = 0;
1814 for (i = 0; i < found_pages; i++) {
1815 page = wdata->pages[i];
Steve French37c0eb42005-10-05 14:50:29 -07001816 /*
1817 * At this point we hold neither mapping->tree_lock nor
1818 * lock on the page itself: the page may be truncated or
1819 * invalidated (changing page->mapping to NULL), or even
1820 * swizzled back from swapper_space to tmpfs file
1821 * mapping
1822 */
1823
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001824 if (nr_pages == 0)
Steve French37c0eb42005-10-05 14:50:29 -07001825 lock_page(page);
Nick Piggin529ae9a2008-08-02 12:01:03 +02001826 else if (!trylock_page(page))
Steve French37c0eb42005-10-05 14:50:29 -07001827 break;
1828
1829 if (unlikely(page->mapping != mapping)) {
1830 unlock_page(page);
1831 break;
1832 }
1833
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001834 if (!wbc->range_cyclic && page->index > end) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001835 done = true;
Steve French37c0eb42005-10-05 14:50:29 -07001836 unlock_page(page);
1837 break;
1838 }
1839
1840 if (next && (page->index != next)) {
1841 /* Not next consecutive page */
1842 unlock_page(page);
1843 break;
1844 }
1845
1846 if (wbc->sync_mode != WB_SYNC_NONE)
1847 wait_on_page_writeback(page);
1848
1849 if (PageWriteback(page) ||
Linus Torvaldscb876f42006-12-23 16:19:07 -08001850 !clear_page_dirty_for_io(page)) {
Steve French37c0eb42005-10-05 14:50:29 -07001851 unlock_page(page);
1852 break;
1853 }
Steve French84d2f072005-10-12 15:32:05 -07001854
Linus Torvaldscb876f42006-12-23 16:19:07 -08001855 /*
1856 * This actually clears the dirty bit in the radix tree.
1857 * See cifs_writepage() for more commentary.
1858 */
1859 set_page_writeback(page);
1860
Jeff Laytoneddb0792012-09-18 16:20:35 -07001861 if (page_offset(page) >= isize) {
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001862 done = true;
Steve French84d2f072005-10-12 15:32:05 -07001863 unlock_page(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001864 end_page_writeback(page);
Steve French84d2f072005-10-12 15:32:05 -07001865 break;
1866 }
1867
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001868 wdata->pages[i] = page;
Steve French37c0eb42005-10-05 14:50:29 -07001869 next = page->index + 1;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001870 ++nr_pages;
Steve French37c0eb42005-10-05 14:50:29 -07001871 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001872
1873 /* reset index to refind any pages skipped */
1874 if (nr_pages == 0)
1875 index = wdata->pages[0]->index + 1;
1876
1877 /* put any pages we aren't going to use */
1878 for (i = nr_pages; i < found_pages; i++) {
1879 page_cache_release(wdata->pages[i]);
1880 wdata->pages[i] = NULL;
1881 }
1882
1883 /* nothing to write? */
1884 if (nr_pages == 0) {
1885 kref_put(&wdata->refcount, cifs_writedata_release);
1886 continue;
1887 }
1888
1889 wdata->sync_mode = wbc->sync_mode;
1890 wdata->nr_pages = nr_pages;
1891 wdata->offset = page_offset(wdata->pages[0]);
Jeff Laytoneddb0792012-09-18 16:20:35 -07001892 wdata->pagesz = PAGE_CACHE_SIZE;
1893 wdata->tailsz =
1894 min(isize - page_offset(wdata->pages[nr_pages - 1]),
1895 (loff_t)PAGE_CACHE_SIZE);
1896 wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) +
1897 wdata->tailsz;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001898
1899 do {
1900 if (wdata->cfile != NULL)
1901 cifsFileInfo_put(wdata->cfile);
1902 wdata->cfile = find_writable_file(CIFS_I(mapping->host),
1903 false);
1904 if (!wdata->cfile) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00001905 cERROR(1, "No writable handles for inode");
Steve French23e7dd72005-10-20 13:44:56 -07001906 rc = -EBADF;
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001907 break;
Steve French37c0eb42005-10-05 14:50:29 -07001908 }
Jeff Laytonfe5f5d22012-03-23 14:40:55 -04001909 wdata->pid = wdata->cfile->pid;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07001910 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
1911 rc = server->ops->async_writev(wdata);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001912 } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
Jeff Laytonf3983c22010-09-22 16:17:40 -07001913
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001914 for (i = 0; i < nr_pages; ++i)
1915 unlock_page(wdata->pages[i]);
Jeff Layton941b8532011-01-11 07:24:01 -05001916
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001917 /* send failure -- clean up the mess */
1918 if (rc != 0) {
1919 for (i = 0; i < nr_pages; ++i) {
Jeff Layton941b8532011-01-11 07:24:01 -05001920 if (rc == -EAGAIN)
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001921 redirty_page_for_writepage(wbc,
1922 wdata->pages[i]);
1923 else
1924 SetPageError(wdata->pages[i]);
1925 end_page_writeback(wdata->pages[i]);
1926 page_cache_release(wdata->pages[i]);
Steve French37c0eb42005-10-05 14:50:29 -07001927 }
Jeff Layton941b8532011-01-11 07:24:01 -05001928 if (rc != -EAGAIN)
1929 mapping_set_error(mapping, rc);
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001930 }
1931 kref_put(&wdata->refcount, cifs_writedata_release);
Jeff Layton941b8532011-01-11 07:24:01 -05001932
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001933 wbc->nr_to_write -= nr_pages;
1934 if (wbc->nr_to_write <= 0)
1935 done = true;
Dave Kleikampb066a482008-11-18 03:49:05 +00001936
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001937 index = next;
Steve French37c0eb42005-10-05 14:50:29 -07001938 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001939
Steve French37c0eb42005-10-05 14:50:29 -07001940 if (!scanned && !done) {
1941 /*
1942 * We hit the last page and there is more work to be done: wrap
1943 * back to the start of the file
1944 */
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001945 scanned = true;
Steve French37c0eb42005-10-05 14:50:29 -07001946 index = 0;
1947 goto retry;
1948 }
Jeff Laytonc3d17b62011-05-19 16:22:57 -04001949
OGAWA Hirofumi111ebb62006-06-23 02:03:26 -07001950 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
Steve French37c0eb42005-10-05 14:50:29 -07001951 mapping->writeback_index = index;
1952
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 return rc;
1954}
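
/*
 * Worked example (illustrative, assuming wsize = 57344 and 4k pages): each
 * pass of the loop above looks for
 *
 *	tofind = min((57344 / 4096) - 1, end - index) + 1 = up to 14
 *
 * dirty pages, so one wdata covers at most a full wsize worth of
 * contiguous dirty pages; a gap in the page run ends the batch early.
 */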
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001956static int
1957cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958{
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001959 int rc;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001960 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001962 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963/* BB add check for wbc flags */
1964 page_cache_get(page);
Steve Frenchad7a2922008-02-07 23:25:02 +00001965 if (!PageUptodate(page))
Joe Perchesb6b38f72010-04-21 03:50:45 +00001966 cFYI(1, "ppw - page not up to date");
Linus Torvaldscb876f42006-12-23 16:19:07 -08001967
1968 /*
1969 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1970 *
1971 * A writepage() implementation always needs to do either this,
1972 * or re-dirty the page with "redirty_page_for_writepage()" in
1973 * the case of a failure.
1974 *
1975 * Just unlocking the page will cause the radix tree tag-bits
1976 * to fail to update with the state of the page correctly.
1977 */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00001978 set_page_writeback(page);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001979retry_write:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001981 if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
1982 goto retry_write;
1983 else if (rc == -EAGAIN)
1984 redirty_page_for_writepage(wbc, page);
1985 else if (rc != 0)
1986 SetPageError(page);
1987 else
1988 SetPageUptodate(page);
Linus Torvaldscb876f42006-12-23 16:19:07 -08001989 end_page_writeback(page);
1990 page_cache_release(page);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04001991 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 return rc;
1993}
1994
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04001995static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1996{
1997 int rc = cifs_writepage_locked(page, wbc);
1998 unlock_page(page);
1999 return rc;
2000}
2001
Nick Piggind9414772008-09-24 11:32:59 -04002002static int cifs_write_end(struct file *file, struct address_space *mapping,
2003 loff_t pos, unsigned len, unsigned copied,
2004 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005{
Nick Piggind9414772008-09-24 11:32:59 -04002006 int rc;
2007 struct inode *inode = mapping->host;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002008 struct cifsFileInfo *cfile = file->private_data;
2009 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2010 __u32 pid;
2011
2012 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2013 pid = cfile->pid;
2014 else
2015 pid = current->tgid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016
Joe Perchesb6b38f72010-04-21 03:50:45 +00002017 cFYI(1, "write_end for page %p from pos %lld with %d bytes",
2018 page, pos, copied);
Steve Frenchad7a2922008-02-07 23:25:02 +00002019
Jeff Laytona98ee8c2008-11-26 19:32:33 +00002020 if (PageChecked(page)) {
2021 if (copied == len)
2022 SetPageUptodate(page);
2023 ClearPageChecked(page);
2024 } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
Nick Piggind9414772008-09-24 11:32:59 -04002025 SetPageUptodate(page);
2026
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 if (!PageUptodate(page)) {
Nick Piggind9414772008-09-24 11:32:59 -04002028 char *page_data;
2029 unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002030 unsigned int xid;
Nick Piggind9414772008-09-24 11:32:59 -04002031
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002032 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 /* this is probably better than directly calling
 2034 partialpage_write, since here the file handle is known
 2035 and we might as well leverage it */
 2036 /* BB check if anything else is missing out of ppw,
 2037 such as updating the last write time */
2038 page_data = kmap(page);
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002039 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
Nick Piggind9414772008-09-24 11:32:59 -04002040 /* if (rc < 0) should we set writebehind rc? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 kunmap(page);
Nick Piggind9414772008-09-24 11:32:59 -04002042
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002043 free_xid(xid);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002044 } else {
Nick Piggind9414772008-09-24 11:32:59 -04002045 rc = copied;
2046 pos += copied;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 set_page_dirty(page);
2048 }
2049
Nick Piggind9414772008-09-24 11:32:59 -04002050 if (rc > 0) {
2051 spin_lock(&inode->i_lock);
2052 if (pos > inode->i_size)
2053 i_size_write(inode, pos);
2054 spin_unlock(&inode->i_lock);
2055 }
2056
2057 unlock_page(page);
2058 page_cache_release(page);
2059
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 return rc;
2061}
2062
Josef Bacik02c24a82011-07-16 20:44:56 -04002063int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2064 int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002066 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002068 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002069 struct TCP_Server_Info *server;
Joe Perchesc21dfb62010-07-12 13:50:14 -07002070 struct cifsFileInfo *smbfile = file->private_data;
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002071 struct inode *inode = file->f_path.dentry->d_inode;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002072 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
Josef Bacik02c24a82011-07-16 20:44:56 -04002074 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2075 if (rc)
2076 return rc;
2077 mutex_lock(&inode->i_mutex);
2078
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002079 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
Joe Perchesb6b38f72010-04-21 03:50:45 +00002081 cFYI(1, "Sync file - name: %s datasync: 0x%x",
Christoph Hellwig7ea80852010-05-26 17:53:25 +02002082 file->f_path.dentry->d_name.name, datasync);
Steve French50c2f752007-07-13 00:33:32 +00002083
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002084 if (!CIFS_I(inode)->clientCanCacheRead) {
2085 rc = cifs_invalidate_mapping(inode);
2086 if (rc) {
2087 cFYI(1, "rc: %d during invalidate phase", rc);
2088 rc = 0; /* don't care about it in fsync */
2089 }
2090 }
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002091
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002092 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002093 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2094 server = tcon->ses->server;
2095 if (server->ops->flush)
2096 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2097 else
2098 rc = -ENOSYS;
2099 }
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002100
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002101 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002102 mutex_unlock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002103 return rc;
2104}
2105
Josef Bacik02c24a82011-07-16 20:44:56 -04002106int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002107{
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002108 unsigned int xid;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002109 int rc = 0;
Steve French96daf2b2011-05-27 04:34:02 +00002110 struct cifs_tcon *tcon;
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002111 struct TCP_Server_Info *server;
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002112 struct cifsFileInfo *smbfile = file->private_data;
2113 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Josef Bacik02c24a82011-07-16 20:44:56 -04002114 struct inode *inode = file->f_mapping->host;
2115
2116 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2117 if (rc)
2118 return rc;
2119 mutex_lock(&inode->i_mutex);
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002120
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002121 xid = get_xid();
Pavel Shilovsky8be7e6b2010-12-12 13:11:13 +03002122
2123 cFYI(1, "Sync file - name: %s datasync: 0x%x",
2124 file->f_path.dentry->d_name.name, datasync);
2125
2126 tcon = tlink_tcon(smbfile->tlink);
Pavel Shilovsky1d8c4c02012-09-18 16:20:27 -07002127 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2128 server = tcon->ses->server;
2129 if (server->ops->flush)
2130 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2131 else
2132 rc = -ENOSYS;
2133 }
Steve Frenchb298f222009-02-21 21:17:43 +00002134
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002135 free_xid(xid);
Josef Bacik02c24a82011-07-16 20:44:56 -04002136 mutex_unlock(&inode->i_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 return rc;
2138}
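
/*
 * Note (sketch): cifs_fsync() above is the non-strict variant; it is
 * identical to cifs_strict_fsync() except that it skips the
 * cifs_invalidate_mapping() step, since non-strict mounts trust the local
 * page cache even without a read oplock.
 */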
2139
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140/*
2141 * As file closes, flush all cached write data for this inode checking
2142 * for write behind errors.
2143 */
Miklos Szeredi75e1fcc2006-06-23 02:05:12 -07002144int cifs_flush(struct file *file, fl_owner_t id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145{
Steve Frenchfb8c4b12007-07-10 01:16:18 +00002146 struct inode *inode = file->f_path.dentry->d_inode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 int rc = 0;
2148
Jeff Laytoneb4b7562010-10-22 14:52:29 -04002149 if (file->f_mode & FMODE_WRITE)
Jeff Laytond3f13222010-10-15 15:34:07 -04002150 rc = filemap_write_and_wait(inode->i_mapping);
Steve French50c2f752007-07-13 00:33:32 +00002151
Joe Perchesb6b38f72010-04-21 03:50:45 +00002152 cFYI(1, "Flush inode %p file %p rc %d", inode, file, rc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153
2154 return rc;
2155}
2156
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002157static int
2158cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2159{
2160 int rc = 0;
2161 unsigned long i;
2162
2163 for (i = 0; i < num_pages; i++) {
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002164 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002165 if (!pages[i]) {
2166 /*
2167 * save number of pages we have already allocated and
2168 * return with ENOMEM error
2169 */
2170 num_pages = i;
2171 rc = -ENOMEM;
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002172 break;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002173 }
2174 }
2175
Jeff Laytone94f7ba2012-03-23 14:40:56 -04002176 if (rc) {
2177 for (i = 0; i < num_pages; i++)
2178 put_page(pages[i]);
2179 }
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002180 return rc;
2181}
2182
2183static inline
2184size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2185{
2186 size_t num_pages;
2187 size_t clen;
2188
2189 clen = min_t(const size_t, len, wsize);
Jeff Laytona7103b92012-03-23 14:40:56 -04002190 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002191
2192 if (cur_len)
2193 *cur_len = clen;
2194
2195 return num_pages;
2196}
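
/*
 * Worked example (illustrative, assuming 4k pages): for wsize = 16384 and
 * a 70000-byte request,
 *
 *	clen      = min(70000, 16384)         = 16384
 *	num_pages = DIV_ROUND_UP(16384, 4096) = 4
 *
 * so cifs_iovec_write() below carves the iovec into wsize-sized,
 * whole-page chunks.
 */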
2197
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002198static void
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002199cifs_uncached_writev_complete(struct work_struct *work)
2200{
2201 int i;
2202 struct cifs_writedata *wdata = container_of(work,
2203 struct cifs_writedata, work);
2204 struct inode *inode = wdata->cfile->dentry->d_inode;
2205 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2206
2207 spin_lock(&inode->i_lock);
2208 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2209 if (cifsi->server_eof > inode->i_size)
2210 i_size_write(inode, cifsi->server_eof);
2211 spin_unlock(&inode->i_lock);
2212
2213 complete(&wdata->done);
2214
2215 if (wdata->result != -EAGAIN) {
2216 for (i = 0; i < wdata->nr_pages; i++)
2217 put_page(wdata->pages[i]);
2218 }
2219
2220 kref_put(&wdata->refcount, cifs_writedata_release);
2221}
2222
2223/* attempt to send write to server, retry on any -EAGAIN errors */
2224static int
2225cifs_uncached_retry_writev(struct cifs_writedata *wdata)
2226{
2227 int rc;
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002228 struct TCP_Server_Info *server;
2229
2230 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002231
2232 do {
2233 if (wdata->cfile->invalidHandle) {
2234 rc = cifs_reopen_file(wdata->cfile, false);
2235 if (rc != 0)
2236 continue;
2237 }
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002238 rc = server->ops->async_writev(wdata);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002239 } while (rc == -EAGAIN);
2240
2241 return rc;
2242}
2243
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002244static ssize_t
2245cifs_iovec_write(struct file *file, const struct iovec *iov,
2246 unsigned long nr_segs, loff_t *poffset)
2247{
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002248 unsigned long nr_pages, i;
Pavel Shilovsky76429c12011-01-31 16:03:08 +03002249 size_t copied, len, cur_len;
2250 ssize_t total_written = 0;
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002251 loff_t offset;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002252 struct iov_iter it;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002253 struct cifsFileInfo *open_file;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002254 struct cifs_tcon *tcon;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002255 struct cifs_sb_info *cifs_sb;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002256 struct cifs_writedata *wdata, *tmp;
2257 struct list_head wdata_list;
2258 int rc;
2259 pid_t pid;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002260
2261 len = iov_length(iov, nr_segs);
2262 if (!len)
2263 return 0;
2264
2265 rc = generic_write_checks(file, poffset, &len, 0);
2266 if (rc)
2267 return rc;
2268
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002269 INIT_LIST_HEAD(&wdata_list);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002270 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002271 open_file = file->private_data;
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002272 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyc9de5c82012-09-18 16:20:29 -07002273
2274 if (!tcon->ses->server->ops->async_writev)
2275 return -ENOSYS;
2276
Jeff Layton3af9d8f2012-04-13 17:16:59 -04002277 offset = *poffset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002278
2279 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2280 pid = open_file->pid;
2281 else
2282 pid = current->tgid;
2283
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002284 iov_iter_init(&it, iov, nr_segs, len, 0);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002285 do {
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002286 size_t save_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002287
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002288 nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
2289 wdata = cifs_writedata_alloc(nr_pages,
2290 cifs_uncached_writev_complete);
2291 if (!wdata) {
2292 rc = -ENOMEM;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002293 break;
2294 }
2295
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002296 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2297 if (rc) {
2298 kfree(wdata);
2299 break;
2300 }
2301
2302 save_len = cur_len;
2303 for (i = 0; i < nr_pages; i++) {
2304 copied = min_t(const size_t, cur_len, PAGE_SIZE);
2305 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2306 0, copied);
2307 cur_len -= copied;
2308 iov_iter_advance(&it, copied);
2309 }
2310 cur_len = save_len - cur_len;
2311
2312 wdata->sync_mode = WB_SYNC_ALL;
2313 wdata->nr_pages = nr_pages;
2314 wdata->offset = (__u64)offset;
2315 wdata->cfile = cifsFileInfo_get(open_file);
2316 wdata->pid = pid;
2317 wdata->bytes = cur_len;
Jeff Laytoneddb0792012-09-18 16:20:35 -07002318 wdata->pagesz = PAGE_SIZE;
2319 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002320 rc = cifs_uncached_retry_writev(wdata);
2321 if (rc) {
2322 kref_put(&wdata->refcount, cifs_writedata_release);
2323 break;
2324 }
2325
2326 list_add_tail(&wdata->list, &wdata_list);
2327 offset += cur_len;
2328 len -= cur_len;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002329 } while (len > 0);
2330
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002331 /*
2332 * If at least one write was successfully sent, then discard any rc
 2333 * value from the later writes. If the other writes succeed, then
 2334 * we'll end up returning whatever was written. If one fails, then
 2335 * we'll get a new rc value from that.
2336 */
2337 if (!list_empty(&wdata_list))
2338 rc = 0;
2339
2340 /*
2341 * Wait for and collect replies for any successful sends in order of
2342 * increasing offset. Once an error is hit or we get a fatal signal
2343 * while waiting, then return without waiting for any more replies.
2344 */
2345restart_loop:
2346 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2347 if (!rc) {
2348 /* FIXME: freezable too? */
2349 rc = wait_for_completion_killable(&wdata->done);
2350 if (rc)
2351 rc = -EINTR;
2352 else if (wdata->result)
2353 rc = wdata->result;
2354 else
2355 total_written += wdata->bytes;
2356
2357 /* resend call if it's a retryable error */
2358 if (rc == -EAGAIN) {
2359 rc = cifs_uncached_retry_writev(wdata);
2360 goto restart_loop;
2361 }
2362 }
2363 list_del_init(&wdata->list);
2364 kref_put(&wdata->refcount, cifs_writedata_release);
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002365 }
2366
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002367 if (total_written > 0)
2368 *poffset += total_written;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002369
Jeff Laytonda82f7e2012-03-23 14:40:56 -04002370 cifs_stats_bytes_written(tcon, total_written);
2371 return total_written ? total_written : (ssize_t)rc;
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002372}
2373
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002374ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovsky72432ff2011-01-24 14:16:35 -05002375 unsigned long nr_segs, loff_t pos)
2376{
2377 ssize_t written;
2378 struct inode *inode;
2379
2380 inode = iocb->ki_filp->f_path.dentry->d_inode;
2381
2382 /*
 2383 * BB - optimize for the case when signing is disabled. We can drop this
 2384 * extra memory-to-memory copying and use iovec buffers to construct the
 2385 * write request.
2386 */
2387
2388 written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
2389 if (written > 0) {
2390 CIFS_I(inode)->invalid_mapping = true;
2391 iocb->ki_pos = pos;
2392 }
2393
2394 return written;
2395}
2396
2397ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2398 unsigned long nr_segs, loff_t pos)
2399{
2400 struct inode *inode;
2401
2402 inode = iocb->ki_filp->f_path.dentry->d_inode;
2403
2404 if (CIFS_I(inode)->clientCanCacheAll)
2405 return generic_file_aio_write(iocb, iov, nr_segs, pos);
2406
2407 /*
2408	 * In strict cache mode we need to write the data to the server exactly
2409	 * from pos to pos+len-1 rather than flush all affected pages, because
2410	 * flushing may cause an error with mandatory locks on those pages but
2411	 * not on the region from pos to pos+len-1. (See the sketch below.)
2412 */
2413
2414 return cifs_user_writev(iocb, iov, nr_segs, pos);
2415}
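/*
 * Illustrative userspace sketch (not part of fs/cifs), as noted in the
 * strict-cache comment above: on a share mounted with -o directio, writev()
 * on a CIFS file is serviced by the uncached cifs_user_writev() path, while
 * with -o strictcache cifs_strict_writev() picks between the page cache and
 * that same uncached path. The mount point and file name are hypothetical.
 */
#if 0	/* example only, never compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct iovec iov[2] = {
		{ .iov_base = "hello ", .iov_len = 6 },
		{ .iov_base = "world\n", .iov_len = 6 },
	};
	int fd = open("/mnt/cifs/demo.txt", O_WRONLY | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (writev(fd, iov, 2) < 0)	/* lands in cifs_user_writev() */
		perror("writev");
	close(fd);
	return 0;
}
#endif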
2416
Jeff Layton0471ca32012-05-16 07:13:16 -04002417static struct cifs_readdata *
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002418cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
Jeff Layton0471ca32012-05-16 07:13:16 -04002419{
2420 struct cifs_readdata *rdata;
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002421
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002422 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2423 GFP_KERNEL);
Jeff Layton0471ca32012-05-16 07:13:16 -04002424 if (rdata != NULL) {
Jeff Layton6993f742012-05-16 07:13:17 -04002425 kref_init(&rdata->refcount);
Jeff Layton1c892542012-05-16 07:13:17 -04002426 INIT_LIST_HEAD(&rdata->list);
2427 init_completion(&rdata->done);
Jeff Layton0471ca32012-05-16 07:13:16 -04002428 INIT_WORK(&rdata->work, complete);
Jeff Layton0471ca32012-05-16 07:13:16 -04002429 }
Jeff Laytonf4e49cd2012-09-18 16:20:36 -07002430
Jeff Layton0471ca32012-05-16 07:13:16 -04002431 return rdata;
2432}
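/*
 * Illustrative userspace analogue (not kernel code) of the refcounting
 * pattern used for cifs_readdata above: the allocator starts the count at
 * one (kref_init), any context that keeps a pointer takes a reference
 * (kref_get), and whoever drops the last reference frees the object
 * (kref_put). All demo_* names are hypothetical.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdatomic.h>
#include <stdlib.h>

struct demo_rdata {
	atomic_int refcount;
	/* ... pages, offsets, completion state ... */
};

static struct demo_rdata *demo_rdata_alloc(void)
{
	struct demo_rdata *r = calloc(1, sizeof(*r));

	if (r)
		atomic_init(&r->refcount, 1);	/* like kref_init() */
	return r;
}

static void demo_rdata_get(struct demo_rdata *r)
{
	atomic_fetch_add(&r->refcount, 1);	/* like kref_get() */
}

static void demo_rdata_put(struct demo_rdata *r)
{
	/* like kref_put(): the last reference out frees the object */
	if (atomic_fetch_sub(&r->refcount, 1) == 1)
		free(r);
}

int main(void)
{
	struct demo_rdata *r = demo_rdata_alloc();

	if (!r)
		return 1;
	demo_rdata_get(r);	/* e.g. reference held by async completion */
	demo_rdata_put(r);	/* completion side drops its reference */
	demo_rdata_put(r);	/* dispatcher drops the last one; freed */
	return 0;
}
#endif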
2433
Jeff Layton6993f742012-05-16 07:13:17 -04002434void
2435cifs_readdata_release(struct kref *refcount)
Jeff Layton0471ca32012-05-16 07:13:16 -04002436{
Jeff Layton6993f742012-05-16 07:13:17 -04002437 struct cifs_readdata *rdata = container_of(refcount,
2438 struct cifs_readdata, refcount);
2439
2440 if (rdata->cfile)
2441 cifsFileInfo_put(rdata->cfile);
2442
Jeff Layton0471ca32012-05-16 07:13:16 -04002443 kfree(rdata);
2444}
2445
Jeff Layton2a1bb132012-05-16 07:13:17 -04002446static int
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002447cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
Jeff Layton1c892542012-05-16 07:13:17 -04002448{
2449 int rc = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002450 struct page *page;
Jeff Layton1c892542012-05-16 07:13:17 -04002451 unsigned int i;
2452
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002453 for (i = 0; i < nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002454 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2455 if (!page) {
2456 rc = -ENOMEM;
2457 break;
2458 }
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002459 rdata->pages[i] = page;
Jeff Layton1c892542012-05-16 07:13:17 -04002460 }
2461
2462 if (rc) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002463		/* only put the pages that were actually allocated */
2464		while (i > 0) {
2465			put_page(rdata->pages[--i]);
2466			rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002466 }
2467 }
2468 return rc;
2469}
2470
2471static void
2472cifs_uncached_readdata_release(struct kref *refcount)
2473{
Jeff Layton1c892542012-05-16 07:13:17 -04002474 struct cifs_readdata *rdata = container_of(refcount,
2475 struct cifs_readdata, refcount);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002476 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002477
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002478 for (i = 0; i < rdata->nr_pages; i++) {
2479 put_page(rdata->pages[i]);
2480 rdata->pages[i] = NULL;
Jeff Layton1c892542012-05-16 07:13:17 -04002481 }
2482 cifs_readdata_release(refcount);
2483}
2484
2485static int
Jeff Layton2a1bb132012-05-16 07:13:17 -04002486cifs_retry_async_readv(struct cifs_readdata *rdata)
2487{
2488 int rc;
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002489 struct TCP_Server_Info *server;
2490
2491 server = tlink_tcon(rdata->cfile->tlink)->ses->server;
Jeff Layton2a1bb132012-05-16 07:13:17 -04002492
2493 do {
2494 if (rdata->cfile->invalidHandle) {
2495 rc = cifs_reopen_file(rdata->cfile, true);
2496 if (rc != 0)
2497 continue;
2498 }
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002499 rc = server->ops->async_readv(rdata);
Jeff Layton2a1bb132012-05-16 07:13:17 -04002500 } while (rc == -EAGAIN);
2501
2502 return rc;
2503}
2504
Jeff Layton1c892542012-05-16 07:13:17 -04002505/**
2506 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2507 * @rdata: the readdata response with list of pages holding data
2508 * @iov: vector in which we should copy the data
2509 * @nr_segs: number of segments in vector
2510 * @offset:	file offset corresponding to the start of the iovec array
2511 * @copied: used to return the amount of data copied to the iov
2512 *
2513 * This function copies data from a list of pages in a readdata response into
2514 * an array of iovecs. It will first calculate where the data should go
2515 * based on the info in the readdata and then copy the data into that spot. A worked example of this arithmetic follows the function.
2516 */
2517static ssize_t
2518cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2519 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2520{
2521 int rc = 0;
2522 struct iov_iter ii;
2523 size_t pos = rdata->offset - offset;
Jeff Layton1c892542012-05-16 07:13:17 -04002524 ssize_t remaining = rdata->bytes;
2525 unsigned char *pdata;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002526 unsigned int i;
Jeff Layton1c892542012-05-16 07:13:17 -04002527
2528 /* set up iov_iter and advance to the correct offset */
2529 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2530 iov_iter_advance(&ii, pos);
2531
2532 *copied = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002533 for (i = 0; i < rdata->nr_pages; i++) {
Jeff Layton1c892542012-05-16 07:13:17 -04002534 ssize_t copy;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002535 struct page *page = rdata->pages[i];
Jeff Layton1c892542012-05-16 07:13:17 -04002536
2537 /* copy a whole page or whatever's left */
2538 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2539
2540 /* ...but limit it to whatever space is left in the iov */
2541 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2542
2543 /* go while there's data to be copied and no errors */
2544 if (copy && !rc) {
2545 pdata = kmap(page);
2546 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2547 (int)copy);
2548 kunmap(page);
2549 if (!rc) {
2550 *copied += copy;
2551 remaining -= copy;
2552 iov_iter_advance(&ii, copy);
2553 }
2554 }
Jeff Layton1c892542012-05-16 07:13:17 -04002555 }
2556
2557 return rc;
2558}
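/*
 * Worked example (illustrative numbers only) of the arithmetic above: for
 * a request that started at file offset 4096, a response rdata covering
 * file offset 12288 must land 8192 bytes into the caller's iovec, and each
 * per-page copy is clamped to both the bytes remaining in the response and
 * the space left in the iovec.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdio.h>

int main(void)
{
	long req_offset = 4096;		/* offset passed in by the caller */
	long rdata_offset = 12288;	/* rdata->offset of this response */
	long pos = rdata_offset - req_offset;

	long remaining = 6000;		/* rdata->bytes */
	long iov_space = 5000;		/* iov_iter_count() after advancing */
	long copy = remaining < 4096 ? remaining : 4096;	/* page clamp */

	if (copy > iov_space)		/* iovec clamp */
		copy = iov_space;

	/* prints pos=8192 copy=4096 */
	printf("pos=%ld copy=%ld\n", pos, copy);
	return 0;
}
#endif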
2559
2560static void
2561cifs_uncached_readv_complete(struct work_struct *work)
2562{
2563 struct cifs_readdata *rdata = container_of(work,
2564 struct cifs_readdata, work);
Jeff Layton1c892542012-05-16 07:13:17 -04002565
2566 complete(&rdata->done);
2567 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2568}
2569
2570static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002571cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2572 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton1c892542012-05-16 07:13:17 -04002573{
Jeff Layton8321fec2012-09-19 06:22:32 -07002574 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002575 unsigned int i;
2576 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002577 struct kvec iov;
Jeff Layton1c892542012-05-16 07:13:17 -04002578
Jeff Layton8321fec2012-09-19 06:22:32 -07002579 rdata->tailsz = PAGE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002580 for (i = 0; i < nr_pages; i++) {
2581 struct page *page = rdata->pages[i];
2582
Jeff Layton8321fec2012-09-19 06:22:32 -07002583 if (len >= PAGE_SIZE) {
Jeff Layton1c892542012-05-16 07:13:17 -04002584 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002585 iov.iov_base = kmap(page);
2586 iov.iov_len = PAGE_SIZE;
2587 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2588 i, iov.iov_base, iov.iov_len);
2589 len -= PAGE_SIZE;
2590 } else if (len > 0) {
Jeff Layton1c892542012-05-16 07:13:17 -04002591 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002592 iov.iov_base = kmap(page);
2593 iov.iov_len = len;
2594 cFYI(1, "%u: iov_base=%p iov_len=%zu",
2595 i, iov.iov_base, iov.iov_len);
2596 memset(iov.iov_base + len, '\0', PAGE_SIZE - len);
2597 rdata->tailsz = len;
2598 len = 0;
Jeff Layton1c892542012-05-16 07:13:17 -04002599 } else {
2600 /* no need to hold page hostage */
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002601 rdata->pages[i] = NULL;
2602 rdata->nr_pages--;
Jeff Layton1c892542012-05-16 07:13:17 -04002603 put_page(page);
Jeff Layton8321fec2012-09-19 06:22:32 -07002604 continue;
Jeff Layton1c892542012-05-16 07:13:17 -04002605 }
Jeff Layton8321fec2012-09-19 06:22:32 -07002606
2607 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
2608 kunmap(page);
2609 if (result < 0)
2610 break;
2611
2612 total_read += result;
Jeff Layton1c892542012-05-16 07:13:17 -04002613 }
2614
Jeff Layton8321fec2012-09-19 06:22:32 -07002615 return total_read > 0 ? total_read : result;
Jeff Layton1c892542012-05-16 07:13:17 -04002616}
2617
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002618static ssize_t
2619cifs_iovec_read(struct file *file, const struct iovec *iov,
2620 unsigned long nr_segs, loff_t *poffset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621{
Jeff Layton1c892542012-05-16 07:13:17 -04002622 ssize_t rc;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002623 size_t len, cur_len;
Jeff Layton1c892542012-05-16 07:13:17 -04002624 ssize_t total_read = 0;
2625 loff_t offset = *poffset;
2626 unsigned int npages;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 struct cifs_sb_info *cifs_sb;
Jeff Layton1c892542012-05-16 07:13:17 -04002628 struct cifs_tcon *tcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 struct cifsFileInfo *open_file;
Jeff Layton1c892542012-05-16 07:13:17 -04002630 struct cifs_readdata *rdata, *tmp;
2631 struct list_head rdata_list;
2632 pid_t pid;
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002633
2634 if (!nr_segs)
2635 return 0;
2636
2637 len = iov_length(iov, nr_segs);
2638 if (!len)
2639 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640
Jeff Layton1c892542012-05-16 07:13:17 -04002641 INIT_LIST_HEAD(&rdata_list);
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002642 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Joe Perchesc21dfb62010-07-12 13:50:14 -07002643 open_file = file->private_data;
Jeff Layton1c892542012-05-16 07:13:17 -04002644 tcon = tlink_tcon(open_file->tlink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645
Pavel Shilovskyfc9c5962012-09-18 16:20:28 -07002646 if (!tcon->ses->server->ops->async_readv)
2647 return -ENOSYS;
2648
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002649 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2650 pid = open_file->pid;
2651 else
2652 pid = current->tgid;
2653
Steve Frenchad7a2922008-02-07 23:25:02 +00002654 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002655 cFYI(1, "attempting read on write only file instance");
Steve Frenchad7a2922008-02-07 23:25:02 +00002656
Jeff Layton1c892542012-05-16 07:13:17 -04002657 do {
2658 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2659 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002660
Jeff Layton1c892542012-05-16 07:13:17 -04002661 /* allocate a readdata struct */
2662 rdata = cifs_readdata_alloc(npages,
2663 cifs_uncached_readv_complete);
2664 if (!rdata) {
2665 rc = -ENOMEM;
2666			break; /* rdata is NULL; kref_put() at the error label would oops */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002668
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002669 rc = cifs_read_allocate_pages(rdata, npages);
Jeff Layton1c892542012-05-16 07:13:17 -04002670 if (rc)
2671 goto error;
2672
2673 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002674 rdata->nr_pages = npages;
Jeff Layton1c892542012-05-16 07:13:17 -04002675 rdata->offset = offset;
2676 rdata->bytes = cur_len;
2677 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07002678 rdata->pagesz = PAGE_SIZE;
2679 rdata->read_into_pages = cifs_uncached_read_into_pages;
Jeff Layton1c892542012-05-16 07:13:17 -04002680
2681 rc = cifs_retry_async_readv(rdata);
2682error:
2683 if (rc) {
2684 kref_put(&rdata->refcount,
2685 cifs_uncached_readdata_release);
2686 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 }
Jeff Layton1c892542012-05-16 07:13:17 -04002688
2689 list_add_tail(&rdata->list, &rdata_list);
2690 offset += cur_len;
2691 len -= cur_len;
2692 } while (len > 0);
2693
2694 /* if at least one read request send succeeded, then reset rc */
2695 if (!list_empty(&rdata_list))
2696 rc = 0;
2697
2698 /* the loop below should proceed in the order of increasing offsets */
2699restart_loop:
2700 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2701 if (!rc) {
2702 ssize_t copied;
2703
2704 /* FIXME: freezable sleep too? */
2705 rc = wait_for_completion_killable(&rdata->done);
2706 if (rc)
2707 rc = -EINTR;
2708 else if (rdata->result)
2709 rc = rdata->result;
2710 else {
2711 rc = cifs_readdata_to_iov(rdata, iov,
2712 nr_segs, *poffset,
2713 &copied);
2714 total_read += copied;
2715 }
2716
2717 /* resend call if it's a retryable error */
2718 if (rc == -EAGAIN) {
2719 rc = cifs_retry_async_readv(rdata);
2720 goto restart_loop;
2721 }
2722 }
2723 list_del_init(&rdata->list);
2724 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 }
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002726
Jeff Layton1c892542012-05-16 07:13:17 -04002727 cifs_stats_bytes_read(tcon, total_read);
2728 *poffset += total_read;
2729
Pavel Shilovsky09a47072012-09-18 16:20:29 -07002730 /* mask nodata case */
2731 if (rc == -ENODATA)
2732 rc = 0;
2733
Jeff Layton1c892542012-05-16 07:13:17 -04002734 return total_read ? total_read : rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735}
2736
Pavel Shilovsky0b81c1c2011-03-10 10:11:05 +03002737ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
Pavel Shilovskya70307e2010-12-14 11:50:41 +03002738 unsigned long nr_segs, loff_t pos)
2739{
2740 ssize_t read;
2741
2742 read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
2743 if (read > 0)
2744 iocb->ki_pos = pos;
2745
2746 return read;
2747}
2748
2749ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
2750 unsigned long nr_segs, loff_t pos)
2751{
2752 struct inode *inode;
2753
2754 inode = iocb->ki_filp->f_path.dentry->d_inode;
2755
2756 if (CIFS_I(inode)->clientCanCacheRead)
2757 return generic_file_aio_read(iocb, iov, nr_segs, pos);
2758
2759 /*
2760 * In strict cache mode we need to read from the server every time if
2761 * we don't have a level II oplock, because the server can delay the
2762 * mtime change, so we can't decide when to invalidate the inode. We
2763 * can also fail when reading pages if there are mandatory locks on
2764 * pages affected by this read but not on the region from pos to
2765 * pos+len-1.
2766 */
2767
2768 return cifs_user_readv(iocb, iov, nr_segs, pos);
2769}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002771static ssize_t
2772cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773{
2774 int rc = -EACCES;
2775 unsigned int bytes_read = 0;
2776 unsigned int total_read;
2777 unsigned int current_read_size;
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002778 unsigned int rsize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 struct cifs_sb_info *cifs_sb;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002780 struct cifs_tcon *tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002781 struct TCP_Server_Info *server;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002782 unsigned int xid;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002783 char *cur_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 struct cifsFileInfo *open_file;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002785 struct cifs_io_parms io_parms;
Steve Frenchec637e32005-12-12 20:53:18 -08002786 int buf_type = CIFS_NO_BUFFER;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002787 __u32 pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002789 xid = get_xid();
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08002790 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002792 /* FIXME: set up handlers for larger reads and/or convert to async */
2793 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2794
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302796 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002797 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05302798 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 }
Joe Perchesc21dfb62010-07-12 13:50:14 -07002800 open_file = file->private_data;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002801 tcon = tlink_tcon(open_file->tlink);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002802 server = tcon->ses->server;
2803
2804 if (!server->ops->sync_read) {
2805 free_xid(xid);
2806 return -ENOSYS;
2807 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002809 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2810 pid = open_file->pid;
2811 else
2812 pid = current->tgid;
2813
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
Joe Perchesb6b38f72010-04-21 03:50:45 +00002815 cFYI(1, "attempting read on write only file instance");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002817 for (total_read = 0, cur_offset = read_data; read_size > total_read;
2818 total_read += bytes_read, cur_offset += bytes_read) {
Jeff Layton5eba8ab2011-10-19 15:30:26 -04002819 current_read_size = min_t(uint, read_size - total_read, rsize);
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002820 /*
2821		 * For Windows ME and 9x we do not want to request more than the
2822		 * server negotiated, since it will refuse the read then.
2823 */
2824 if ((tcon->ses) && !(tcon->ses->capabilities &
2825 tcon->ses->server->vals->cap_large_files)) {
Dan Carpenter7748dd62011-10-18 12:41:35 +03002826 current_read_size = min_t(uint, current_read_size,
Jeff Laytonc974bef2011-10-11 06:41:32 -04002827 CIFSMaxBufSize);
Steve Frenchf9f5c8172005-09-15 23:06:38 -07002828 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 rc = -EAGAIN;
2830 while (rc == -EAGAIN) {
Steve Frenchcdff08e2010-10-21 22:46:14 +00002831 if (open_file->invalidHandle) {
Jeff Layton15886172010-10-15 15:33:59 -04002832 rc = cifs_reopen_file(open_file, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 if (rc != 0)
2834 break;
2835 }
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002836 io_parms.pid = pid;
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002837 io_parms.tcon = tcon;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002838 io_parms.offset = *offset;
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00002839 io_parms.length = current_read_size;
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002840 rc = server->ops->sync_read(xid, open_file, &io_parms,
2841 &bytes_read, &cur_offset,
2842 &buf_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 }
2844 if (rc || (bytes_read == 0)) {
2845 if (total_read) {
2846 break;
2847 } else {
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002848 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849 return rc;
2850 }
2851 } else {
Pavel Shilovsky29e20f92012-07-13 13:58:14 +04002852 cifs_stats_bytes_read(tcon, total_read);
Pavel Shilovskyf9c6e232012-09-18 16:20:29 -07002853 *offset += bytes_read;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 }
2855 }
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002856 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 return total_read;
2858}
2859
Jeff Laytonca83ce32011-04-12 09:13:44 -04002860/*
2861 * If the page is mmap'ed into a process' page tables, then we need to make
2862 * sure that it doesn't change while being written back.
2863 */
2864static int
2865cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2866{
2867 struct page *page = vmf->page;
2868
2869 lock_page(page);
2870 return VM_FAULT_LOCKED;
2871}
2872
2873static struct vm_operations_struct cifs_file_vm_ops = {
2874 .fault = filemap_fault,
2875 .page_mkwrite = cifs_page_mkwrite,
2876};
2877
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002878int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2879{
2880 int rc, xid;
2881 struct inode *inode = file->f_path.dentry->d_inode;
2882
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002883 xid = get_xid();
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002884
Pavel Shilovsky6feb9892011-04-07 18:18:11 +04002885 if (!CIFS_I(inode)->clientCanCacheRead) {
2886 rc = cifs_invalidate_mapping(inode);
2887		if (rc) {
2888			free_xid(xid); /* don't leak the xid on this error path */
2889			return rc;
2890		}
2889 }
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002890
2891 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002892 if (rc == 0)
2893 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002894 free_xid(xid);
Pavel Shilovsky7a6a19b2010-12-14 11:29:51 +03002895 return rc;
2896}
2897
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2899{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900 int rc, xid;
2901
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002902 xid = get_xid();
Jeff Laytonabab0952010-02-12 07:44:18 -05002903 rc = cifs_revalidate_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 if (rc) {
Joe Perchesb6b38f72010-04-21 03:50:45 +00002905 cFYI(1, "Validation prior to mmap failed, error=%d", rc);
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002906 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907 return rc;
2908 }
2909 rc = generic_file_mmap(file, vma);
Jeff Laytonca83ce32011-04-12 09:13:44 -04002910 if (rc == 0)
2911 vma->vm_ops = &cifs_file_vm_ops;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04002912 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913 return rc;
2914}
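/*
 * Illustrative userspace sketch (not part of fs/cifs): a shared writable
 * mapping of a file on a CIFS mount. The first store into the mapping
 * faults through cifs_page_mkwrite() above, which locks the page before it
 * is dirtied; writeback happens later via the writepage paths. The path is
 * hypothetical, and the file is assumed to already hold at least one page
 * of data.
 */
#if 0	/* example only, never compiled into the kernel */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/cifs/demo.bin", O_RDWR);
	char *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	p[0] = 'x';		/* write fault -> cifs_page_mkwrite() */
	munmap(p, 4096);
	close(fd);
	return 0;
}
#endif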
2915
Jeff Layton0471ca32012-05-16 07:13:16 -04002916static void
2917cifs_readv_complete(struct work_struct *work)
2918{
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002919 unsigned int i;
Jeff Layton0471ca32012-05-16 07:13:16 -04002920 struct cifs_readdata *rdata = container_of(work,
2921 struct cifs_readdata, work);
Jeff Layton0471ca32012-05-16 07:13:16 -04002922
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002923 for (i = 0; i < rdata->nr_pages; i++) {
2924 struct page *page = rdata->pages[i];
2925
Jeff Layton0471ca32012-05-16 07:13:16 -04002926 lru_cache_add_file(page);
2927
2928 if (rdata->result == 0) {
Jeff Layton0471ca32012-05-16 07:13:16 -04002929 flush_dcache_page(page);
2930 SetPageUptodate(page);
2931 }
2932
2933 unlock_page(page);
2934
2935 if (rdata->result == 0)
2936 cifs_readpage_to_fscache(rdata->mapping->host, page);
2937
2938 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002939 rdata->pages[i] = NULL;
Jeff Layton0471ca32012-05-16 07:13:16 -04002940 }
Jeff Layton6993f742012-05-16 07:13:17 -04002941 kref_put(&rdata->refcount, cifs_readdata_release);
Jeff Layton0471ca32012-05-16 07:13:16 -04002942}
2943
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002944static int
Jeff Layton8321fec2012-09-19 06:22:32 -07002945cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
2946 struct cifs_readdata *rdata, unsigned int len)
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002947{
Jeff Layton8321fec2012-09-19 06:22:32 -07002948 int total_read = 0, result = 0;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002949 unsigned int i;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002950 u64 eof;
2951 pgoff_t eof_index;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002952 unsigned int nr_pages = rdata->nr_pages;
Jeff Layton8321fec2012-09-19 06:22:32 -07002953 struct kvec iov;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002954
2955 /* determine the eof that the server (probably) has */
2956 eof = CIFS_I(rdata->mapping->host)->server_eof;
2957 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
2958 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
2959
Jeff Layton8321fec2012-09-19 06:22:32 -07002960 rdata->tailsz = PAGE_CACHE_SIZE;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002961 for (i = 0; i < nr_pages; i++) {
2962 struct page *page = rdata->pages[i];
2963
Jeff Layton8321fec2012-09-19 06:22:32 -07002964 if (len >= PAGE_CACHE_SIZE) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002965 /* enough data to fill the page */
Jeff Layton8321fec2012-09-19 06:22:32 -07002966 iov.iov_base = kmap(page);
2967 iov.iov_len = PAGE_CACHE_SIZE;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002968 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07002969 i, page->index, iov.iov_base, iov.iov_len);
2970 len -= PAGE_CACHE_SIZE;
2971 } else if (len > 0) {
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002972 /* enough for partial page, fill and zero the rest */
Jeff Layton8321fec2012-09-19 06:22:32 -07002973 iov.iov_base = kmap(page);
2974 iov.iov_len = len;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002975 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
Jeff Layton8321fec2012-09-19 06:22:32 -07002976 i, page->index, iov.iov_base, iov.iov_len);
2977 memset(iov.iov_base + len,
2978 '\0', PAGE_CACHE_SIZE - len);
2979 rdata->tailsz = len;
2980 len = 0;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002981 } else if (page->index > eof_index) {
2982 /*
2983 * The VFS will not try to do readahead past the
2984 * i_size, but it's possible that we have outstanding
2985 * writes with gaps in the middle and the i_size hasn't
2986 * caught up yet. Populate those with zeroed out pages
2987 * to prevent the VFS from repeatedly attempting to
2988 * fill them until the writes are flushed.
2989 */
2990 zero_user(page, 0, PAGE_CACHE_SIZE);
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002991 lru_cache_add_file(page);
2992 flush_dcache_page(page);
2993 SetPageUptodate(page);
2994 unlock_page(page);
2995 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07002996 rdata->pages[i] = NULL;
2997 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07002998 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04002999 } else {
3000 /* no need to hold page hostage */
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003001 lru_cache_add_file(page);
3002 unlock_page(page);
3003 page_cache_release(page);
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003004 rdata->pages[i] = NULL;
3005 rdata->nr_pages--;
Jeff Layton8321fec2012-09-19 06:22:32 -07003006 continue;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003007 }
Jeff Layton8321fec2012-09-19 06:22:32 -07003008
3009 result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len);
3010 kunmap(page);
3011 if (result < 0)
3012 break;
3013
3014 total_read += result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003015 }
3016
Jeff Layton8321fec2012-09-19 06:22:32 -07003017 return total_read > 0 ? total_read : result;
Jeff Layton8d5ce4d2012-05-16 07:13:16 -04003018}
3019
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020static int cifs_readpages(struct file *file, struct address_space *mapping,
3021 struct list_head *page_list, unsigned num_pages)
3022{
Jeff Layton690c5e32011-10-19 15:30:16 -04003023 int rc;
3024 struct list_head tmplist;
3025 struct cifsFileInfo *open_file = file->private_data;
3026 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
3027 unsigned int rsize = cifs_sb->rsize;
3028 pid_t pid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029
Jeff Layton690c5e32011-10-19 15:30:16 -04003030 /*
3031 * Give up immediately if rsize is too small to read an entire page.
3032 * The VFS will fall back to readpage. We should never reach this
3033 * point however since we set ra_pages to 0 when the rsize is smaller
3034 * than a cache page.
3035 */
3036 if (unlikely(rsize < PAGE_CACHE_SIZE))
3037 return 0;
Steve Frenchbfa0d752005-08-31 21:50:37 -07003038
Suresh Jayaraman56698232010-07-05 18:13:25 +05303039 /*
3040 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3041	 * immediately if the cookie is negative.
3042 */
3043 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3044 &num_pages);
3045 if (rc == 0)
Jeff Layton690c5e32011-10-19 15:30:16 -04003046 return rc;
Suresh Jayaraman56698232010-07-05 18:13:25 +05303047
Pavel Shilovskyd4ffff12011-05-26 06:02:00 +00003048 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3049 pid = open_file->pid;
3050 else
3051 pid = current->tgid;
3052
Jeff Layton690c5e32011-10-19 15:30:16 -04003053 rc = 0;
3054 INIT_LIST_HEAD(&tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055
Jeff Layton690c5e32011-10-19 15:30:16 -04003056 cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
3057 mapping, num_pages);
3058
3059 /*
3060 * Start with the page at end of list and move it to private
3061 * list. Do the same with any following pages until we hit
3062 * the rsize limit, hit an index discontinuity, or run out of
3063 * pages. Issue the async read and then start the loop again
3064 * until the list is empty.
3065 *
3066 * Note that list order is important. The page_list is in
3067 * the order of declining indexes. When we put the pages in
3068	 * the rdata->pages, then we want them in increasing order. (A
	 * standalone sketch of this batching follows the function.)
3069 */
3070 while (!list_empty(page_list)) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003071 unsigned int i;
Jeff Layton690c5e32011-10-19 15:30:16 -04003072 unsigned int bytes = PAGE_CACHE_SIZE;
3073 unsigned int expected_index;
3074 unsigned int nr_pages = 1;
3075 loff_t offset;
3076 struct page *page, *tpage;
3077 struct cifs_readdata *rdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078
3079 page = list_entry(page_list->prev, struct page, lru);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080
Jeff Layton690c5e32011-10-19 15:30:16 -04003081 /*
3082 * Lock the page and put it in the cache. Since no one else
3083 * should have access to this page, we're safe to simply set
3084 * PG_locked without checking it first.
3085 */
3086 __set_page_locked(page);
3087 rc = add_to_page_cache_locked(page, mapping,
3088 page->index, GFP_KERNEL);
3089
3090 /* give up if we can't stick it in the cache */
3091 if (rc) {
3092 __clear_page_locked(page);
3093 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003095
Jeff Layton690c5e32011-10-19 15:30:16 -04003096 /* move first page to the tmplist */
3097 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3098 list_move_tail(&page->lru, &tmplist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099
Jeff Layton690c5e32011-10-19 15:30:16 -04003100 /* now try and add more pages onto the request */
3101 expected_index = page->index + 1;
3102 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3103 /* discontinuity ? */
3104 if (page->index != expected_index)
3105 break;
3106
3107 /* would this page push the read over the rsize? */
3108 if (bytes + PAGE_CACHE_SIZE > rsize)
3109 break;
3110
3111 __set_page_locked(page);
3112 if (add_to_page_cache_locked(page, mapping,
3113 page->index, GFP_KERNEL)) {
3114 __clear_page_locked(page);
3115 break;
3116 }
3117 list_move_tail(&page->lru, &tmplist);
3118 bytes += PAGE_CACHE_SIZE;
3119 expected_index++;
3120 nr_pages++;
3121 }
3122
Jeff Layton0471ca32012-05-16 07:13:16 -04003123 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
Jeff Layton690c5e32011-10-19 15:30:16 -04003124 if (!rdata) {
3125 /* best to give up if we're out of mem */
3126 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3127 list_del(&page->lru);
3128 lru_cache_add_file(page);
3129 unlock_page(page);
3130 page_cache_release(page);
3131 }
3132 rc = -ENOMEM;
3133 break;
3134 }
3135
Jeff Layton6993f742012-05-16 07:13:17 -04003136 rdata->cfile = cifsFileInfo_get(open_file);
Jeff Layton690c5e32011-10-19 15:30:16 -04003137 rdata->mapping = mapping;
3138 rdata->offset = offset;
3139 rdata->bytes = bytes;
3140 rdata->pid = pid;
Jeff Layton8321fec2012-09-19 06:22:32 -07003141 rdata->pagesz = PAGE_CACHE_SIZE;
3142 rdata->read_into_pages = cifs_readpages_read_into_pages;
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003143
3144 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3145 list_del(&page->lru);
3146 rdata->pages[rdata->nr_pages++] = page;
3147 }
Jeff Layton690c5e32011-10-19 15:30:16 -04003148
Jeff Layton2a1bb132012-05-16 07:13:17 -04003149 rc = cifs_retry_async_readv(rdata);
Jeff Layton690c5e32011-10-19 15:30:16 -04003150 if (rc != 0) {
Jeff Laytonc5fab6f2012-09-19 06:22:30 -07003151 for (i = 0; i < rdata->nr_pages; i++) {
3152 page = rdata->pages[i];
Jeff Layton690c5e32011-10-19 15:30:16 -04003153 lru_cache_add_file(page);
3154 unlock_page(page);
3155 page_cache_release(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 }
Jeff Layton6993f742012-05-16 07:13:17 -04003157 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158 break;
3159 }
Jeff Layton6993f742012-05-16 07:13:17 -04003160
3161 kref_put(&rdata->refcount, cifs_readdata_release);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003162 }
3163
Linus Torvalds1da177e2005-04-16 15:20:36 -07003164 return rc;
3165}
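/*
 * Standalone sketch (illustrative only) of the batching rule described in
 * the comment inside cifs_readpages() above: consume the declining page
 * list from its tail, extending each read while the indexes stay
 * contiguous and the total stays within the rsize budget.
 */
#if 0	/* example only, never compiled into the kernel */
#include <stdio.h>

#define EX_PAGE_SIZE 4096u

int main(void)
{
	/* page_list order is declining; we consume from the tail */
	unsigned long indexes[] = { 9, 7, 6, 5 };
	unsigned int n = 4, rsize = 2 * EX_PAGE_SIZE;
	unsigned int i = n, bytes, nr_pages;
	unsigned long first;

	while (i > 0) {
		first = indexes[--i];	/* first page of this batch */
		bytes = EX_PAGE_SIZE;
		nr_pages = 1;
		while (i > 0 && indexes[i - 1] == first + nr_pages &&
		       bytes + EX_PAGE_SIZE <= rsize) {
			i--;
			bytes += EX_PAGE_SIZE;
			nr_pages++;
		}
		/* prints batches [5,6], [7], [9] with a 2-page rsize */
		printf("read %u page(s) at index %lu\n", nr_pages, first);
	}
	return 0;
}
#endif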
3166
3167static int cifs_readpage_worker(struct file *file, struct page *page,
3168 loff_t *poffset)
3169{
3170 char *read_data;
3171 int rc;
3172
Suresh Jayaraman56698232010-07-05 18:13:25 +05303173 /* Is the page cached? */
3174 rc = cifs_readpage_from_fscache(file->f_path.dentry->d_inode, page);
3175 if (rc == 0)
3176 goto read_complete;
3177
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178 page_cache_get(page);
3179 read_data = kmap(page);
3180	/* for reads over a certain size we could initiate async read-ahead */
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003181
Linus Torvalds1da177e2005-04-16 15:20:36 -07003182 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003183
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 if (rc < 0)
3185 goto io_error;
3186 else
Joe Perchesb6b38f72010-04-21 03:50:45 +00003187 cFYI(1, "Bytes read %d", rc);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003188
Josef "Jeff" Sipeke6a00292006-12-08 02:36:48 -08003189 file->f_path.dentry->d_inode->i_atime =
3190 current_fs_time(file->f_path.dentry->d_inode->i_sb);
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003191
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192 if (PAGE_CACHE_SIZE > rc)
3193 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
3194
3195 flush_dcache_page(page);
3196 SetPageUptodate(page);
Suresh Jayaraman9dc06552010-07-05 18:13:11 +05303197
3198 /* send this page to the cache */
3199 cifs_readpage_to_fscache(file->f_path.dentry->d_inode, page);
3200
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201 rc = 0;
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003202
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203io_error:
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003204 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205 page_cache_release(page);
Suresh Jayaraman56698232010-07-05 18:13:25 +05303206
3207read_complete:
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208 return rc;
3209}
3210
3211static int cifs_readpage(struct file *file, struct page *page)
3212{
3213 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
3214 int rc = -EACCES;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003215 unsigned int xid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003216
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003217 xid = get_xid();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218
3219 if (file->private_data == NULL) {
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303220 rc = -EBADF;
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003221 free_xid(xid);
Suresh Jayaraman0f3bc092009-06-25 18:12:34 +05303222 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003223 }
3224
Jeff Laytonac3aa2f2012-07-23 13:14:28 -04003225 cFYI(1, "readpage %p at offset %d 0x%x",
Joe Perchesb6b38f72010-04-21 03:50:45 +00003226 page, (int)offset, (int)offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003227
3228 rc = cifs_readpage_worker(file, page, &offset);
3229
3230 unlock_page(page);
3231
Pavel Shilovsky6d5786a2012-06-20 11:21:16 +04003232 free_xid(xid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233 return rc;
3234}
3235
Steve Frencha403a0a2007-07-26 15:54:16 +00003236static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3237{
3238 struct cifsFileInfo *open_file;
3239
Jeff Layton44772882010-10-15 15:34:03 -04003240 spin_lock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003241 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
Jeff Layton2e396b82010-10-15 15:34:01 -04003242 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
Jeff Layton44772882010-10-15 15:34:03 -04003243 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003244 return 1;
3245 }
3246 }
Jeff Layton44772882010-10-15 15:34:03 -04003247 spin_unlock(&cifs_file_list_lock);
Steve Frencha403a0a2007-07-26 15:54:16 +00003248 return 0;
3249}
3250
Linus Torvalds1da177e2005-04-16 15:20:36 -07003251/* We do not want to update the file size from the server for inodes
3252   open for write, to avoid races with writepage extending the file.
3253   In the future we could consider refreshing the inode only on
3254   increases in the file size, but that is tricky to do without racing
3255   with writebehind page caching in the current Linux kernel design. */
Steve French4b18f2a2008-04-29 00:06:05 +00003257bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258{
Steve Frencha403a0a2007-07-26 15:54:16 +00003259 if (!cifsInode)
Steve French4b18f2a2008-04-29 00:06:05 +00003260 return true;
Steve French23e7dd72005-10-20 13:44:56 -07003261
Steve Frencha403a0a2007-07-26 15:54:16 +00003262 if (is_inode_writable(cifsInode)) {
3263 /* This inode is open for write at least once */
Steve Frenchc32a0b62006-01-12 14:41:28 -08003264 struct cifs_sb_info *cifs_sb;
3265
Steve Frenchc32a0b62006-01-12 14:41:28 -08003266 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
Steve Frenchad7a2922008-02-07 23:25:02 +00003267 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003268			/* since there is no page cache to corrupt on directio
Steve Frenchc32a0b62006-01-12 14:41:28 -08003269			   we can change the size safely */
Steve French4b18f2a2008-04-29 00:06:05 +00003270 return true;
Steve Frenchc32a0b62006-01-12 14:41:28 -08003271 }
3272
Steve Frenchfb8c4b12007-07-10 01:16:18 +00003273 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
Steve French4b18f2a2008-04-29 00:06:05 +00003274 return true;
Steve French7ba52632007-02-08 18:14:13 +00003275
Steve French4b18f2a2008-04-29 00:06:05 +00003276 return false;
Steve French23e7dd72005-10-20 13:44:56 -07003277 } else
Steve French4b18f2a2008-04-29 00:06:05 +00003278 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003279}
3280
Nick Piggind9414772008-09-24 11:32:59 -04003281static int cifs_write_begin(struct file *file, struct address_space *mapping,
3282 loff_t pos, unsigned len, unsigned flags,
3283 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284{
Nick Piggind9414772008-09-24 11:32:59 -04003285 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
3286 loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003287 loff_t page_start = pos & PAGE_MASK;
3288 loff_t i_size;
3289 struct page *page;
3290 int rc = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291
Joe Perchesb6b38f72010-04-21 03:50:45 +00003292 cFYI(1, "write_begin from %lld len %d", (long long)pos, len);
Nick Piggind9414772008-09-24 11:32:59 -04003293
Nick Piggin54566b22009-01-04 12:00:53 -08003294 page = grab_cache_page_write_begin(mapping, index, flags);
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003295 if (!page) {
3296 rc = -ENOMEM;
3297 goto out;
3298 }
Nick Piggind9414772008-09-24 11:32:59 -04003299
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003300 if (PageUptodate(page))
3301 goto out;
Steve French8a236262007-03-06 00:31:00 +00003302
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003303 /*
3304 * If we write a full page it will be up to date, no need to read from
3305 * the server. If the write is short, we'll end up doing a sync write
3306 * instead.
3307 */
3308 if (len == PAGE_CACHE_SIZE)
3309 goto out;
3310
3311 /*
3312 * optimize away the read when we have an oplock, and we're not
3313 * expecting to use any of the data we'd be reading in. That
3314 * is, when the page lies beyond the EOF, or straddles the EOF
3315 * and the write will cover all of the existing data.
3316 */
3317 if (CIFS_I(mapping->host)->clientCanCacheRead) {
3318 i_size = i_size_read(mapping->host);
3319 if (page_start >= i_size ||
3320 (offset == 0 && (pos + len) >= i_size)) {
3321 zero_user_segments(page, 0, offset,
3322 offset + len,
3323 PAGE_CACHE_SIZE);
3324 /*
3325 * PageChecked means that the parts of the page
3326 * to which we're not writing are considered up
3327 * to date. Once the data is copied to the
3328 * page, it can be set uptodate.
3329 */
3330 SetPageChecked(page);
3331 goto out;
3332 }
3333 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334
Nick Piggind9414772008-09-24 11:32:59 -04003335 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003336 /*
3337 * might as well read a page, it is fast enough. If we get
3338 * an error, we don't need to return it. cifs_write_end will
3339 * do a sync write instead since PG_uptodate isn't set.
3340 */
3341 cifs_readpage_worker(file, page, &page_start);
Steve French8a236262007-03-06 00:31:00 +00003342 } else {
3343		/* we could try using another file handle if there is one, but
3344		   how would we lock it to prevent a close of that handle
3345		   racing with this read? In any case
Nick Piggind9414772008-09-24 11:32:59 -04003346		   this will be written out by write_end so this is fine */
Steve French8a236262007-03-06 00:31:00 +00003347 }
Jeff Laytona98ee8c2008-11-26 19:32:33 +00003348out:
3349 *pagep = page;
3350 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351}
3352
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303353static int cifs_release_page(struct page *page, gfp_t gfp)
3354{
3355 if (PagePrivate(page))
3356 return 0;
3357
3358 return cifs_fscache_release_page(page, gfp);
3359}
3360
3361static void cifs_invalidate_page(struct page *page, unsigned long offset)
3362{
3363 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3364
3365 if (offset == 0)
3366 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3367}
3368
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003369static int cifs_launder_page(struct page *page)
3370{
3371 int rc = 0;
3372 loff_t range_start = page_offset(page);
3373 loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
3374 struct writeback_control wbc = {
3375 .sync_mode = WB_SYNC_ALL,
3376 .nr_to_write = 0,
3377 .range_start = range_start,
3378 .range_end = range_end,
3379 };
3380
3381 cFYI(1, "Launder page: %p", page);
3382
3383 if (clear_page_dirty_for_io(page))
3384 rc = cifs_writepage_locked(page, &wbc);
3385
3386 cifs_fscache_invalidate_page(page, page->mapping->host);
3387 return rc;
3388}
3389
Tejun Heo9b646972010-07-20 22:09:02 +02003390void cifs_oplock_break(struct work_struct *work)
Jeff Layton3bc303c2009-09-21 06:47:50 -04003391{
3392 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3393 oplock_break);
Jeff Laytona5e18bc2010-10-11 15:07:18 -04003394 struct inode *inode = cfile->dentry->d_inode;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003395 struct cifsInodeInfo *cinode = CIFS_I(inode);
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003396 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003397 int rc = 0;
Jeff Layton3bc303c2009-09-21 06:47:50 -04003398
3399 if (inode && S_ISREG(inode->i_mode)) {
Steve Frenchd54ff732010-04-27 04:38:15 +00003400 if (cinode->clientCanCacheRead)
Al Viro8737c932009-12-24 06:47:55 -05003401 break_lease(inode, O_RDONLY);
Steve Frenchd54ff732010-04-27 04:38:15 +00003402 else
Al Viro8737c932009-12-24 06:47:55 -05003403 break_lease(inode, O_WRONLY);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003404 rc = filemap_fdatawrite(inode->i_mapping);
3405 if (cinode->clientCanCacheRead == 0) {
Jeff Laytoneb4b7562010-10-22 14:52:29 -04003406 rc = filemap_fdatawait(inode->i_mapping);
3407 mapping_set_error(inode->i_mapping, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003408 invalidate_remote_inode(inode);
3409 }
Joe Perchesb6b38f72010-04-21 03:50:45 +00003410 cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003411 }
3412
Pavel Shilovsky85160e02011-10-22 15:33:29 +04003413 rc = cifs_push_locks(cfile);
3414 if (rc)
3415 cERROR(1, "Push locks rc = %d", rc);
3416
Jeff Layton3bc303c2009-09-21 06:47:50 -04003417 /*
3418	 * Releasing a stale oplock after a recent reconnect of the SMB session,
3419	 * using a now-incorrect file handle, is not a data integrity issue, but
3420	 * do not bother sending an oplock release if the session to the server
3421	 * is still disconnected, since the oplock was already released by the
3422	 * server.
3422 */
Steve Frenchcdff08e2010-10-21 22:46:14 +00003423 if (!cfile->oplock_break_cancelled) {
Pavel Shilovsky95a3f2f2012-09-18 16:20:33 -07003424 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3425 cinode);
Joe Perchesb6b38f72010-04-21 03:50:45 +00003426 cFYI(1, "Oplock release rc = %d", rc);
Jeff Layton3bc303c2009-09-21 06:47:50 -04003427 }
Jeff Layton3bc303c2009-09-21 06:47:50 -04003428}
3429
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003430const struct address_space_operations cifs_addr_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003431 .readpage = cifs_readpage,
3432 .readpages = cifs_readpages,
3433 .writepage = cifs_writepage,
Steve French37c0eb42005-10-05 14:50:29 -07003434 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003435 .write_begin = cifs_write_begin,
3436 .write_end = cifs_write_end,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303438 .releasepage = cifs_release_page,
3439 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003440 .launder_page = cifs_launder_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003441};
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003442
3443/*
3444 * cifs_readpages requires the server to support a buffer large enough to
3445 * contain the header plus one complete page of data. Otherwise, we need
3446 * to leave cifs_readpages out of the address space operations.
3447 */
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07003448const struct address_space_operations cifs_addr_ops_smallbuf = {
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003449 .readpage = cifs_readpage,
3450 .writepage = cifs_writepage,
3451 .writepages = cifs_writepages,
Nick Piggind9414772008-09-24 11:32:59 -04003452 .write_begin = cifs_write_begin,
3453 .write_end = cifs_write_end,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003454 .set_page_dirty = __set_page_dirty_nobuffers,
Suresh Jayaraman85f2d6b2010-07-05 18:13:00 +05303455 .releasepage = cifs_release_page,
3456 .invalidatepage = cifs_invalidate_page,
Pavel Shilovsky9ad15062011-04-08 05:29:10 +04003457 .launder_page = cifs_launder_page,
Dave Kleikamp273d81d2006-06-01 19:41:23 +00003458};